// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 Novell Inc. * Copyright (C) 2016 Red Hat, Inc. */ #include <linux/fs.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/cred.h> #include <linux/xattr.h> #include <linux/exportfs.h> #include <linux/file.h> #include <linux/fileattr.h> #include <linux/uuid.h> #include <linux/namei.h> #include <linux/ratelimit.h> #include "overlayfs.h" int ovl_want_write(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); return mnt_want_write(ovl_upper_mnt(ofs)); } void ovl_drop_write(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); mnt_drop_write(ovl_upper_mnt(ofs)); } struct dentry *ovl_workdir(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); return ofs->workdir; } const struct cred *ovl_override_creds(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); return override_creds(ofs->creator_cred); } /* * Check if underlying fs supports file handles and try to determine encoding * type, in order to deduce maximum inode number used by fs. * * Return 0 if file handles are not supported. * Return 1 (FILEID_INO32_GEN) if fs uses the default 32bit inode encoding. * Return -1 if fs uses a non default encoding with unknown inode size. */ int ovl_can_decode_fh(struct super_block *sb) { if (!capable(CAP_DAC_READ_SEARCH)) return 0; if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry) return 0; return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN; } struct dentry *ovl_indexdir(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); return ofs->indexdir; } /* Index all files on copy up. For now only enabled for NFS export */ bool ovl_index_all(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); return ofs->config.nfs_export && ofs->config.index; } /* Verify lower origin on lookup. 
For now only enabled for NFS export */ bool ovl_verify_lower(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); return ofs->config.nfs_export && ofs->config.index; } struct ovl_path *ovl_stack_alloc(unsigned int n) { return kcalloc(n, sizeof(struct ovl_path), GFP_KERNEL); } void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n) { unsigned int i; memcpy(dst, src, sizeof(struct ovl_path) * n); for (i = 0; i < n; i++) dget(src[i].dentry); } void ovl_stack_put(struct ovl_path *stack, unsigned int n) { unsigned int i; for (i = 0; stack && i < n; i++) dput(stack[i].dentry); } void ovl_stack_free(struct ovl_path *stack, unsigned int n) { ovl_stack_put(stack, n); kfree(stack); } struct ovl_entry *ovl_alloc_entry(unsigned int numlower) { size_t size = offsetof(struct ovl_entry, __lowerstack[numlower]); struct ovl_entry *oe = kzalloc(size, GFP_KERNEL); if (oe) oe->__numlower = numlower; return oe; } void ovl_free_entry(struct ovl_entry *oe) { ovl_stack_put(ovl_lowerstack(oe), ovl_numlower(oe)); kfree(oe); } #define OVL_D_REVALIDATE (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE) bool ovl_dentry_remote(struct dentry *dentry) { return dentry->d_flags & OVL_D_REVALIDATE; } void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry) { if (!ovl_dentry_remote(realdentry)) return; spin_lock(&dentry->d_lock); dentry->d_flags |= realdentry->d_flags & OVL_D_REVALIDATE; spin_unlock(&dentry->d_lock); } void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry, struct ovl_entry *oe) { return ovl_dentry_init_flags(dentry, upperdentry, oe, OVL_D_REVALIDATE); } void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry, struct ovl_entry *oe, unsigned int mask) { struct ovl_path *lowerstack = ovl_lowerstack(oe); unsigned int i, flags = 0; if (upperdentry) flags |= upperdentry->d_flags; for (i = 0; i < ovl_numlower(oe) && lowerstack[i].dentry; i++) flags |= lowerstack[i].dentry->d_flags; spin_lock(&dentry->d_lock); dentry->d_flags &= ~mask; dentry->d_flags |= flags & mask; spin_unlock(&dentry->d_lock); } bool ovl_dentry_weird(struct dentry *dentry) { return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT | DCACHE_MANAGE_TRANSIT | DCACHE_OP_HASH | DCACHE_OP_COMPARE); } enum ovl_path_type ovl_path_type(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); enum ovl_path_type type = 0; if (ovl_dentry_upper(dentry)) { type = __OVL_PATH_UPPER; /* * Non-dir dentry can hold lower dentry of its copy up origin. 
*/ if (ovl_numlower(oe)) { if (ovl_test_flag(OVL_CONST_INO, d_inode(dentry))) type |= __OVL_PATH_ORIGIN; if (d_is_dir(dentry) || !ovl_has_upperdata(d_inode(dentry))) type |= __OVL_PATH_MERGE; } } else { if (ovl_numlower(oe) > 1) type |= __OVL_PATH_MERGE; } return type; } void ovl_path_upper(struct dentry *dentry, struct path *path) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); path->mnt = ovl_upper_mnt(ofs); path->dentry = ovl_dentry_upper(dentry); } void ovl_path_lower(struct dentry *dentry, struct path *path) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerpath = ovl_lowerstack(oe); if (ovl_numlower(oe)) { path->mnt = lowerpath->layer->mnt; path->dentry = lowerpath->dentry; } else { *path = (struct path) { }; } } void ovl_path_lowerdata(struct dentry *dentry, struct path *path) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerdata = ovl_lowerdata(oe); struct dentry *lowerdata_dentry = ovl_lowerdata_dentry(oe); if (lowerdata_dentry) { path->dentry = lowerdata_dentry; /* * Pairs with smp_wmb() in ovl_dentry_set_lowerdata(). * Make sure that if lowerdata->dentry is visible, then * datapath->layer is visible as well. */ smp_rmb(); path->mnt = READ_ONCE(lowerdata->layer)->mnt; } else { *path = (struct path) { }; } } enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) { enum ovl_path_type type = ovl_path_type(dentry); if (!OVL_TYPE_UPPER(type)) ovl_path_lower(dentry, path); else ovl_path_upper(dentry, path); return type; } enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path) { enum ovl_path_type type = ovl_path_type(dentry); WARN_ON_ONCE(d_is_dir(dentry)); if (!OVL_TYPE_UPPER(type) || OVL_TYPE_MERGE(type)) ovl_path_lowerdata(dentry, path); else ovl_path_upper(dentry, path); return type; } struct dentry *ovl_dentry_upper(struct dentry *dentry) { return ovl_upperdentry_dereference(OVL_I(d_inode(dentry))); } struct dentry *ovl_dentry_lower(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); return ovl_numlower(oe) ? ovl_lowerstack(oe)->dentry : NULL; } const struct ovl_layer *ovl_layer_lower(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); return ovl_numlower(oe) ? ovl_lowerstack(oe)->layer : NULL; } /* * ovl_dentry_lower() could return either a data dentry or metacopy dentry * depending on what is stored in lowerstack[0]. At times we need to find * lower dentry which has data (and not metacopy dentry). This helper * returns the lower data dentry. */ struct dentry *ovl_dentry_lowerdata(struct dentry *dentry) { return ovl_lowerdata_dentry(OVL_E(dentry)); } int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerdata = ovl_lowerdata(oe); struct dentry *datadentry = datapath->dentry; if (WARN_ON_ONCE(ovl_numlower(oe) <= 1)) return -EIO; WRITE_ONCE(lowerdata->layer, datapath->layer); /* * Pairs with smp_rmb() in ovl_path_lowerdata(). * Make sure that if lowerdata->dentry is visible, then * lowerdata->layer is visible as well. 
*/ smp_wmb(); WRITE_ONCE(lowerdata->dentry, dget(datadentry)); ovl_dentry_update_reval(dentry, datadentry); return 0; } struct dentry *ovl_dentry_real(struct dentry *dentry) { return ovl_dentry_upper(dentry) ?: ovl_dentry_lower(dentry); } struct dentry *ovl_i_dentry_upper(struct inode *inode) { return ovl_upperdentry_dereference(OVL_I(inode)); } struct inode *ovl_i_path_real(struct inode *inode, struct path *path) { struct ovl_path *lowerpath = ovl_lowerpath(OVL_I_E(inode)); path->dentry = ovl_i_dentry_upper(inode); if (!path->dentry) { path->dentry = lowerpath->dentry; path->mnt = lowerpath->layer->mnt; } else { path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb)); } return path->dentry ? d_inode_rcu(path->dentry) : NULL; } struct inode *ovl_inode_upper(struct inode *inode) { struct dentry *upperdentry = ovl_i_dentry_upper(inode); return upperdentry ? d_inode(upperdentry) : NULL; } struct inode *ovl_inode_lower(struct inode *inode) { struct ovl_path *lowerpath = ovl_lowerpath(OVL_I_E(inode)); return lowerpath ? d_inode(lowerpath->dentry) : NULL; } struct inode *ovl_inode_real(struct inode *inode) { return ovl_inode_upper(inode) ?: ovl_inode_lower(inode); } /* Return inode which contains lower data. Do not return metacopy */ struct inode *ovl_inode_lowerdata(struct inode *inode) { struct dentry *lowerdata = ovl_lowerdata_dentry(OVL_I_E(inode)); if (WARN_ON(!S_ISREG(inode->i_mode))) return NULL; return lowerdata ? d_inode(lowerdata) : NULL; } /* Return real inode which contains data. Does not return metacopy inode */ struct inode *ovl_inode_realdata(struct inode *inode) { struct inode *upperinode; upperinode = ovl_inode_upper(inode); if (upperinode && ovl_has_upperdata(inode)) return upperinode; return ovl_inode_lowerdata(inode); } const char *ovl_lowerdata_redirect(struct inode *inode) { return inode && S_ISREG(inode->i_mode) ? OVL_I(inode)->lowerdata_redirect : NULL; } struct ovl_dir_cache *ovl_dir_cache(struct inode *inode) { return inode && S_ISDIR(inode->i_mode) ? OVL_I(inode)->cache : NULL; } void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache) { OVL_I(inode)->cache = cache; } void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry) { set_bit(flag, OVL_E_FLAGS(dentry)); } void ovl_dentry_clear_flag(unsigned long flag, struct dentry *dentry) { clear_bit(flag, OVL_E_FLAGS(dentry)); } bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry) { return test_bit(flag, OVL_E_FLAGS(dentry)); } bool ovl_dentry_is_opaque(struct dentry *dentry) { return ovl_dentry_test_flag(OVL_E_OPAQUE, dentry); } bool ovl_dentry_is_whiteout(struct dentry *dentry) { return !dentry->d_inode && ovl_dentry_is_opaque(dentry); } void ovl_dentry_set_opaque(struct dentry *dentry) { ovl_dentry_set_flag(OVL_E_OPAQUE, dentry); } /* * For hard links and decoded file handles, it's possible for ovl_dentry_upper() * to return positive, while there's no actual upper alias for the inode. * Copy up code needs to know about the existence of the upper alias, so it * can't use ovl_dentry_upper(). 
*/ bool ovl_dentry_has_upper_alias(struct dentry *dentry) { return ovl_dentry_test_flag(OVL_E_UPPER_ALIAS, dentry); } void ovl_dentry_set_upper_alias(struct dentry *dentry) { ovl_dentry_set_flag(OVL_E_UPPER_ALIAS, dentry); } static bool ovl_should_check_upperdata(struct inode *inode) { if (!S_ISREG(inode->i_mode)) return false; if (!ovl_inode_lower(inode)) return false; return true; } bool ovl_has_upperdata(struct inode *inode) { if (!ovl_should_check_upperdata(inode)) return true; if (!ovl_test_flag(OVL_UPPERDATA, inode)) return false; /* * Pairs with smp_wmb() in ovl_set_upperdata(). Main user of * ovl_has_upperdata() is ovl_copy_up_meta_inode_data(). Make sure * if setting of OVL_UPPERDATA is visible, then effects of writes * before that are visible too. */ smp_rmb(); return true; } void ovl_set_upperdata(struct inode *inode) { /* * Pairs with smp_rmb() in ovl_has_upperdata(). Make sure * if OVL_UPPERDATA flag is visible, then effects of write operations * before it are visible as well. */ smp_wmb(); ovl_set_flag(OVL_UPPERDATA, inode); } /* Caller should hold ovl_inode->lock */ bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags) { if (!ovl_open_flags_need_copy_up(flags)) return false; return !ovl_test_flag(OVL_UPPERDATA, d_inode(dentry)); } bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags) { if (!ovl_open_flags_need_copy_up(flags)) return false; return !ovl_has_upperdata(d_inode(dentry)); } const char *ovl_dentry_get_redirect(struct dentry *dentry) { return OVL_I(d_inode(dentry))->redirect; } void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect) { struct ovl_inode *oi = OVL_I(d_inode(dentry)); kfree(oi->redirect); oi->redirect = redirect; } void ovl_inode_update(struct inode *inode, struct dentry *upperdentry) { struct inode *upperinode = d_inode(upperdentry); WARN_ON(OVL_I(inode)->__upperdentry); /* * Make sure upperdentry is consistent before making it visible */ smp_wmb(); OVL_I(inode)->__upperdentry = upperdentry; if (inode_unhashed(inode)) { inode->i_private = upperinode; __insert_inode_hash(inode, (unsigned long) upperinode); } } static void ovl_dir_version_inc(struct dentry *dentry, bool impurity) { struct inode *inode = d_inode(dentry); WARN_ON(!inode_is_locked(inode)); WARN_ON(!d_is_dir(dentry)); /* * Version is used by readdir code to keep cache consistent. * For merge dirs (or dirs with origin) all changes need to be noted. * For non-merge dirs, cache contains only impure entries (i.e. ones * which have been copied up and have origins), so only need to note * changes to impure entries. 
*/ if (!ovl_dir_is_real(inode) || impurity) OVL_I(inode)->version++; } void ovl_dir_modified(struct dentry *dentry, bool impurity) { /* Copy mtime/ctime */ ovl_copyattr(d_inode(dentry)); ovl_dir_version_inc(dentry, impurity); } u64 ovl_inode_version_get(struct inode *inode) { WARN_ON(!inode_is_locked(inode)); return OVL_I(inode)->version; } bool ovl_is_whiteout(struct dentry *dentry) { struct inode *inode = dentry->d_inode; return inode && IS_WHITEOUT(inode); } struct file *ovl_path_open(const struct path *path, int flags) { struct inode *inode = d_inode(path->dentry); struct mnt_idmap *real_idmap = mnt_idmap(path->mnt); int err, acc_mode; if (flags & ~(O_ACCMODE | O_LARGEFILE)) BUG(); switch (flags & O_ACCMODE) { case O_RDONLY: acc_mode = MAY_READ; break; case O_WRONLY: acc_mode = MAY_WRITE; break; default: BUG(); } err = inode_permission(real_idmap, inode, acc_mode | MAY_OPEN); if (err) return ERR_PTR(err); /* O_NOATIME is an optimization, don't fail if not permitted */ if (inode_owner_or_capable(real_idmap, inode)) flags |= O_NOATIME; return dentry_open(path, flags, current_cred()); } /* Caller should hold ovl_inode->lock */ static bool ovl_already_copied_up_locked(struct dentry *dentry, int flags) { bool disconnected = dentry->d_flags & DCACHE_DISCONNECTED; if (ovl_dentry_upper(dentry) && (ovl_dentry_has_upper_alias(dentry) || disconnected) && !ovl_dentry_needs_data_copy_up_locked(dentry, flags)) return true; return false; } bool ovl_already_copied_up(struct dentry *dentry, int flags) { bool disconnected = dentry->d_flags & DCACHE_DISCONNECTED; /* * Check if copy-up has happened as well as for upper alias (in * case of hard links) is there. * * Both checks are lockless: * - false negatives: will recheck under oi->lock * - false positives: * + ovl_dentry_upper() uses memory barriers to ensure the * upper dentry is up-to-date * + ovl_dentry_has_upper_alias() relies on locking of * upper parent i_rwsem to prevent reordering copy-up * with rename. */ if (ovl_dentry_upper(dentry) && (ovl_dentry_has_upper_alias(dentry) || disconnected) && !ovl_dentry_needs_data_copy_up(dentry, flags)) return true; return false; } int ovl_copy_up_start(struct dentry *dentry, int flags) { struct inode *inode = d_inode(dentry); int err; err = ovl_inode_lock_interruptible(inode); if (!err && ovl_already_copied_up_locked(dentry, flags)) { err = 1; /* Already copied up */ ovl_inode_unlock(inode); } return err; } void ovl_copy_up_end(struct dentry *dentry) { ovl_inode_unlock(d_inode(dentry)); } bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path) { int res; res = ovl_path_getxattr(ofs, path, OVL_XATTR_ORIGIN, NULL, 0); /* Zero size value means "copied up but origin unknown" */ if (res >= 0) return true; return false; } /* * Load persistent uuid from xattr into s_uuid if found, or store a new * random generated value in s_uuid and in xattr. */ bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs, const struct path *upperpath) { bool set = false; int res; /* Try to load existing persistent uuid */ res = ovl_path_getxattr(ofs, upperpath, OVL_XATTR_UUID, sb->s_uuid.b, UUID_SIZE); if (res == UUID_SIZE) return true; if (res != -ENODATA) goto fail; /* * With uuid=auto, if uuid xattr is found, it will be used. * If uuid xattrs is not found, generate a persistent uuid only on mount * of new overlays where upper root dir is not yet marked as impure. * An upper dir is marked as impure on copy up or lookup of its subdirs. 
*/ if (ofs->config.uuid == OVL_UUID_AUTO) { res = ovl_path_getxattr(ofs, upperpath, OVL_XATTR_IMPURE, NULL, 0); if (res > 0) { /* Any mount of old overlay - downgrade to uuid=null */ ofs->config.uuid = OVL_UUID_NULL; return true; } else if (res == -ENODATA) { /* First mount of new overlay - upgrade to uuid=on */ ofs->config.uuid = OVL_UUID_ON; } else if (res < 0) { goto fail; } } /* Generate overlay instance uuid */ uuid_gen(&sb->s_uuid); /* Try to store persistent uuid */ set = true; res = ovl_setxattr(ofs, upperpath->dentry, OVL_XATTR_UUID, sb->s_uuid.b, UUID_SIZE); if (res == 0) return true; fail: memset(sb->s_uuid.b, 0, UUID_SIZE); ofs->config.uuid = OVL_UUID_NULL; pr_warn("failed to %s uuid (%pd2, err=%i); falling back to uuid=null.\n", set ? "set" : "get", upperpath->dentry, res); return false; } bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path, enum ovl_xattr ox) { int res; char val; if (!d_is_dir(path->dentry)) return false; res = ovl_path_getxattr(ofs, path, ox, &val, 1); if (res == 1 && val == 'y') return true; return false; } #define OVL_XATTR_OPAQUE_POSTFIX "opaque" #define OVL_XATTR_REDIRECT_POSTFIX "redirect" #define OVL_XATTR_ORIGIN_POSTFIX "origin" #define OVL_XATTR_IMPURE_POSTFIX "impure" #define OVL_XATTR_NLINK_POSTFIX "nlink" #define OVL_XATTR_UPPER_POSTFIX "upper" #define OVL_XATTR_UUID_POSTFIX "uuid" #define OVL_XATTR_METACOPY_POSTFIX "metacopy" #define OVL_XATTR_PROTATTR_POSTFIX "protattr" #define OVL_XATTR_TAB_ENTRY(x) \ [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \ [true] = OVL_XATTR_USER_PREFIX x ## _POSTFIX } const char *const ovl_xattr_table[][2] = { OVL_XATTR_TAB_ENTRY(OVL_XATTR_OPAQUE), OVL_XATTR_TAB_ENTRY(OVL_XATTR_REDIRECT), OVL_XATTR_TAB_ENTRY(OVL_XATTR_ORIGIN), OVL_XATTR_TAB_ENTRY(OVL_XATTR_IMPURE), OVL_XATTR_TAB_ENTRY(OVL_XATTR_NLINK), OVL_XATTR_TAB_ENTRY(OVL_XATTR_UPPER), OVL_XATTR_TAB_ENTRY(OVL_XATTR_UUID), OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY), OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR), }; int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry, enum ovl_xattr ox, const void *value, size_t size, int xerr) { int err; if (ofs->noxattr) return xerr; err = ovl_setxattr(ofs, upperdentry, ox, value, size); if (err == -EOPNOTSUPP) { pr_warn("cannot set %s xattr on upper\n", ovl_xattr(ofs, ox)); ofs->noxattr = true; return xerr; } return err; } int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); int err; if (ovl_test_flag(OVL_IMPURE, d_inode(dentry))) return 0; /* * Do not fail when upper doesn't support xattrs. * Upper inodes won't have origin nor redirect xattr anyway. */ err = ovl_check_setxattr(ofs, upperdentry, OVL_XATTR_IMPURE, "y", 1, 0); if (!err) ovl_set_flag(OVL_IMPURE, d_inode(dentry)); return err; } #define OVL_PROTATTR_MAX 32 /* Reserved for future flags */ void ovl_check_protattr(struct inode *inode, struct dentry *upper) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); u32 iflags = inode->i_flags & OVL_PROT_I_FLAGS_MASK; char buf[OVL_PROTATTR_MAX+1]; int res, n; res = ovl_getxattr_upper(ofs, upper, OVL_XATTR_PROTATTR, buf, OVL_PROTATTR_MAX); if (res < 0) return; /* * Initialize inode flags from overlay.protattr xattr and upper inode * flags. If upper inode has those fileattr flags set (i.e. from old * kernel), we do not clear them on ovl_get_inode(), but we will clear * them on next fileattr_set(). 
*/ for (n = 0; n < res; n++) { if (buf[n] == 'a') iflags |= S_APPEND; else if (buf[n] == 'i') iflags |= S_IMMUTABLE; else break; } if (!res || n < res) { pr_warn_ratelimited("incompatible overlay.protattr format (%pd2, len=%d)\n", upper, res); } else { inode_set_flags(inode, iflags, OVL_PROT_I_FLAGS_MASK); } } int ovl_set_protattr(struct inode *inode, struct dentry *upper, struct fileattr *fa) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); char buf[OVL_PROTATTR_MAX]; int len = 0, err = 0; u32 iflags = 0; BUILD_BUG_ON(HWEIGHT32(OVL_PROT_FS_FLAGS_MASK) > OVL_PROTATTR_MAX); if (fa->flags & FS_APPEND_FL) { buf[len++] = 'a'; iflags |= S_APPEND; } if (fa->flags & FS_IMMUTABLE_FL) { buf[len++] = 'i'; iflags |= S_IMMUTABLE; } /* * Do not allow to set protection flags when upper doesn't support * xattrs, because we do not set those fileattr flags on upper inode. * Remove xattr if it exist and all protection flags are cleared. */ if (len) { err = ovl_check_setxattr(ofs, upper, OVL_XATTR_PROTATTR, buf, len, -EPERM); } else if (inode->i_flags & OVL_PROT_I_FLAGS_MASK) { err = ovl_removexattr(ofs, upper, OVL_XATTR_PROTATTR); if (err == -EOPNOTSUPP || err == -ENODATA) err = 0; } if (err) return err; inode_set_flags(inode, iflags, OVL_PROT_I_FLAGS_MASK); /* Mask out the fileattr flags that should not be set in upper inode */ fa->flags &= ~OVL_PROT_FS_FLAGS_MASK; fa->fsx_xflags &= ~OVL_PROT_FSX_FLAGS_MASK; return 0; } /** * Caller must hold a reference to inode to prevent it from being freed while * it is marked inuse. */ bool ovl_inuse_trylock(struct dentry *dentry) { struct inode *inode = d_inode(dentry); bool locked = false; spin_lock(&inode->i_lock); if (!(inode->i_state & I_OVL_INUSE)) { inode->i_state |= I_OVL_INUSE; locked = true; } spin_unlock(&inode->i_lock); return locked; } void ovl_inuse_unlock(struct dentry *dentry) { if (dentry) { struct inode *inode = d_inode(dentry); spin_lock(&inode->i_lock); WARN_ON(!(inode->i_state & I_OVL_INUSE)); inode->i_state &= ~I_OVL_INUSE; spin_unlock(&inode->i_lock); } } bool ovl_is_inuse(struct dentry *dentry) { struct inode *inode = d_inode(dentry); bool inuse; spin_lock(&inode->i_lock); inuse = (inode->i_state & I_OVL_INUSE); spin_unlock(&inode->i_lock); return inuse; } /* * Does this overlay dentry need to be indexed on copy up? */ bool ovl_need_index(struct dentry *dentry) { struct dentry *lower = ovl_dentry_lower(dentry); if (!lower || !ovl_indexdir(dentry->d_sb)) return false; /* Index all files for NFS export and consistency verification */ if (ovl_index_all(dentry->d_sb)) return true; /* Index only lower hardlinks on copy up */ if (!d_is_dir(lower) && d_inode(lower)->i_nlink > 1) return true; return false; } /* Caller must hold OVL_I(inode)->lock */ static void ovl_cleanup_index(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *indexdir = ovl_indexdir(dentry->d_sb); struct inode *dir = indexdir->d_inode; struct dentry *lowerdentry = ovl_dentry_lower(dentry); struct dentry *upperdentry = ovl_dentry_upper(dentry); struct dentry *index = NULL; struct inode *inode; struct qstr name = { }; int err; err = ovl_get_index_name(ofs, lowerdentry, &name); if (err) goto fail; inode = d_inode(upperdentry); if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) { pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n", upperdentry, inode->i_ino, inode->i_nlink); /* * We either have a bug with persistent union nlink or a lower * hardlink was added while overlay is mounted. 
Adding a lower * hardlink and then unlinking all overlay hardlinks would drop * overlay nlink to zero before all upper inodes are unlinked. * As a safety measure, when that situation is detected, set * the overlay nlink to the index inode nlink minus one for the * index entry itself. */ set_nlink(d_inode(dentry), inode->i_nlink - 1); ovl_set_nlink_upper(dentry); goto out; } inode_lock_nested(dir, I_MUTEX_PARENT); index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); err = PTR_ERR(index); if (IS_ERR(index)) { index = NULL; } else if (ovl_index_all(dentry->d_sb)) { /* Whiteout orphan index to block future open by handle */ err = ovl_cleanup_and_whiteout(OVL_FS(dentry->d_sb), dir, index); } else { /* Cleanup orphan index entries */ err = ovl_cleanup(ofs, dir, index); } inode_unlock(dir); if (err) goto fail; out: kfree(name.name); dput(index); return; fail: pr_err("cleanup index of '%pd2' failed (%i)\n", dentry, err); goto out; } /* * Operations that change overlay inode and upper inode nlink need to be * synchronized with copy up for persistent nlink accounting. */ int ovl_nlink_start(struct dentry *dentry) { struct inode *inode = d_inode(dentry); const struct cred *old_cred; int err; if (WARN_ON(!inode)) return -ENOENT; /* * With inodes index is enabled, we store the union overlay nlink * in an xattr on the index inode. When whiting out an indexed lower, * we need to decrement the overlay persistent nlink, but before the * first copy up, we have no upper index inode to store the xattr. * * As a workaround, before whiteout/rename over an indexed lower, * copy up to create the upper index. Creating the upper index will * initialize the overlay nlink, so it could be dropped if unlink * or rename succeeds. * * TODO: implement metadata only index copy up when called with * ovl_copy_up_flags(dentry, O_PATH). */ if (ovl_need_index(dentry) && !ovl_dentry_has_upper_alias(dentry)) { err = ovl_copy_up(dentry); if (err) return err; } err = ovl_inode_lock_interruptible(inode); if (err) return err; if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode)) goto out; old_cred = ovl_override_creds(dentry->d_sb); /* * The overlay inode nlink should be incremented/decremented IFF the * upper operation succeeds, along with nlink change of upper inode. * Therefore, before link/unlink/rename, we store the union nlink * value relative to the upper inode nlink in an upper inode xattr. */ err = ovl_set_nlink_upper(dentry); revert_creds(old_cred); out: if (err) ovl_inode_unlock(inode); return err; } void ovl_nlink_end(struct dentry *dentry) { struct inode *inode = d_inode(dentry); if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) { const struct cred *old_cred; old_cred = ovl_override_creds(dentry->d_sb); ovl_cleanup_index(dentry); revert_creds(old_cred); } ovl_inode_unlock(inode); } int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir) { /* Workdir should not be the same as upperdir */ if (workdir == upperdir) goto err; /* Workdir should not be subdir of upperdir and vice versa */ if (lock_rename(workdir, upperdir) != NULL) goto err_unlock; return 0; err_unlock: unlock_rename(workdir, upperdir); err: pr_err("failed to lock workdir+upperdir\n"); return -EIO; } /* * err < 0, 0 if no metacopy xattr, metacopy data size if xattr found. * an empty xattr returns OVL_METACOPY_MIN_SIZE to distinguish from no xattr value. 
*/ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, const struct path *path, struct ovl_metacopy *data) { int res; /* Only regular files can have metacopy xattr */ if (!S_ISREG(d_inode(path->dentry)->i_mode)) return 0; res = ovl_path_getxattr(ofs, path, OVL_XATTR_METACOPY, data, data ? OVL_METACOPY_MAX_SIZE : 0); if (res < 0) { if (res == -ENODATA || res == -EOPNOTSUPP) return 0; /* * getxattr on user.* may fail with EACCES in case there's no * read permission on the inode. Not much we can do, other than * tell the caller that this is not a metacopy inode. */ if (ofs->config.userxattr && res == -EACCES) return 0; goto out; } if (res == 0) { /* Emulate empty data for zero size metacopy xattr */ res = OVL_METACOPY_MIN_SIZE; if (data) { memset(data, 0, res); data->len = res; } } else if (res < OVL_METACOPY_MIN_SIZE) { pr_warn_ratelimited("metacopy file '%pd' has too small xattr\n", path->dentry); return -EIO; } else if (data) { if (data->version != 0) { pr_warn_ratelimited("metacopy file '%pd' has unsupported version\n", path->dentry); return -EIO; } if (res != data->len) { pr_warn_ratelimited("metacopy file '%pd' has invalid xattr size\n", path->dentry); return -EIO; } } return res; out: pr_warn_ratelimited("failed to get metacopy (%i)\n", res); return res; } int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d, struct ovl_metacopy *metacopy) { size_t len = metacopy->len; /* If no flags or digest fall back to empty metacopy file */ if (metacopy->version == 0 && metacopy->flags == 0 && metacopy->digest_algo == 0) len = 0; return ovl_check_setxattr(ofs, d, OVL_XATTR_METACOPY, metacopy, len, -EOPNOTSUPP); } bool ovl_is_metacopy_dentry(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); if (!d_is_reg(dentry)) return false; if (ovl_dentry_upper(dentry)) { if (!ovl_has_upperdata(d_inode(dentry))) return true; return false; } return (ovl_numlower(oe) > 1); } char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding) { int res; char *s, *next, *buf = NULL; res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, NULL, 0); if (res == -ENODATA || res == -EOPNOTSUPP) return NULL; if (res < 0) goto fail; if (res == 0) goto invalid; buf = kzalloc(res + padding + 1, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, buf, res); if (res < 0) goto fail; if (res == 0) goto invalid; if (buf[0] == '/') { for (s = buf; *s++ == '/'; s = next) { next = strchrnul(s, '/'); if (s == next) goto invalid; } } else { if (strchr(buf, '/') != NULL) goto invalid; } return buf; invalid: pr_warn_ratelimited("invalid redirect (%s)\n", buf); res = -EINVAL; goto err_free; fail: pr_warn_ratelimited("failed to get redirect (%i)\n", res); err_free: kfree(buf); return ERR_PTR(res); } /* Call with mounter creds as it may open the file */ int ovl_ensure_verity_loaded(struct path *datapath) { struct inode *inode = d_inode(datapath->dentry); struct file *filp; if (!fsverity_active(inode) && IS_VERITY(inode)) { /* * If this inode was not yet opened, the verity info hasn't been * loaded yet, so we need to do that here to force it into memory. 
*/ filp = kernel_file_open(datapath, O_RDONLY, inode, current_cred()); if (IS_ERR(filp)) return PTR_ERR(filp); fput(filp); } return 0; } int ovl_validate_verity(struct ovl_fs *ofs, struct path *metapath, struct path *datapath) { struct ovl_metacopy metacopy_data; u8 actual_digest[FS_VERITY_MAX_DIGEST_SIZE]; int xattr_digest_size, digest_size; int xattr_size, err; u8 verity_algo; if (!ofs->config.verity_mode || /* Verity only works on regular files */ !S_ISREG(d_inode(metapath->dentry)->i_mode)) return 0; xattr_size = ovl_check_metacopy_xattr(ofs, metapath, &metacopy_data); if (xattr_size < 0) return xattr_size; if (!xattr_size || !metacopy_data.digest_algo) { if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { pr_warn_ratelimited("metacopy file '%pd' has no digest specified\n", metapath->dentry); return -EIO; } return 0; } xattr_digest_size = ovl_metadata_digest_size(&metacopy_data); err = ovl_ensure_verity_loaded(datapath); if (err < 0) { pr_warn_ratelimited("lower file '%pd' failed to load fs-verity info\n", datapath->dentry); return -EIO; } digest_size = fsverity_get_digest(d_inode(datapath->dentry), actual_digest, &verity_algo, NULL); if (digest_size == 0) { pr_warn_ratelimited("lower file '%pd' has no fs-verity digest\n", datapath->dentry); return -EIO; } if (xattr_digest_size != digest_size || metacopy_data.digest_algo != verity_algo || memcmp(metacopy_data.digest, actual_digest, xattr_digest_size) != 0) { pr_warn_ratelimited("lower file '%pd' has the wrong fs-verity digest\n", datapath->dentry); return -EIO; } return 0; } int ovl_get_verity_digest(struct ovl_fs *ofs, struct path *src, struct ovl_metacopy *metacopy) { int err, digest_size; if (!ofs->config.verity_mode || !S_ISREG(d_inode(src->dentry)->i_mode)) return 0; err = ovl_ensure_verity_loaded(src); if (err < 0) { pr_warn_ratelimited("lower file '%pd' failed to load fs-verity info\n", src->dentry); return -EIO; } digest_size = fsverity_get_digest(d_inode(src->dentry), metacopy->digest, &metacopy->digest_algo, NULL); if (digest_size == 0 || WARN_ON_ONCE(digest_size > FS_VERITY_MAX_DIGEST_SIZE)) { if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { pr_warn_ratelimited("lower file '%pd' has no fs-verity digest\n", src->dentry); return -EIO; } return 0; } metacopy->len += digest_size; return 0; } /* * ovl_sync_status() - Check fs sync status for volatile mounts * * Returns 1 if this is not a volatile mount and a real sync is required. * * Returns 0 if syncing can be skipped because mount is volatile, and no errors * have occurred on the upperdir since the mount. * * Returns -errno if it is a volatile mount, and the error that occurred since * the last mount. If the error code changes, it'll return the latest error * code. */ int ovl_sync_status(struct ovl_fs *ofs) { struct vfsmount *mnt; if (ovl_should_sync(ofs)) return 1; mnt = ovl_upper_mnt(ofs); if (!mnt) return 0; return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq); } /* * ovl_copyattr() - copy inode attributes from layer to ovl inode * * When overlay copies inode information from an upper or lower layer to the * relevant overlay inode it will apply the idmapping of the upper or lower * layer when doing so ensuring that the ovl inode ownership will correctly * reflect the ownership of the idmapped upper or lower layer. For example, an * idmapped upper or lower layer mapping id 1001 to id 1000 will take care to * map any lower or upper inode owned by id 1001 to id 1000. These mapping * helpers are nops when the relevant layer isn't idmapped. 
*/ void ovl_copyattr(struct inode *inode) { struct path realpath; struct inode *realinode; struct mnt_idmap *real_idmap; vfsuid_t vfsuid; vfsgid_t vfsgid; realinode = ovl_i_path_real(inode, &realpath); real_idmap = mnt_idmap(realpath.mnt); vfsuid = i_uid_into_vfsuid(real_idmap, realinode); vfsgid = i_gid_into_vfsgid(real_idmap, realinode); inode->i_uid = vfsuid_into_kuid(vfsuid); inode->i_gid = vfsgid_into_kgid(vfsgid); inode->i_mode = realinode->i_mode; inode->i_atime = realinode->i_atime; inode->i_mtime = realinode->i_mtime; inode_set_ctime_to_ts(inode, inode_get_ctime(realinode)); i_size_write(inode, i_size_read(realinode)); }
(source: linux-master, fs/overlayfs/util.c)
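The ovl_dentry_set_lowerdata()/ovl_path_lowerdata() pair above is a textbook pointer-publication pattern: the writer fills in lowerdata->layer, issues smp_wmb(), and only then makes lowerdata->dentry visible, so any reader that observes the dentry (after smp_rmb()) is guaranteed to also see the layer it belongs to. Below is a minimal userspace sketch of the same pattern, assuming C11 release/acquire atomics in place of the kernel's barriers; the payload/slot/publish/read_layer names are illustrative, not overlayfs APIs.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-ins (not overlayfs types): one guarded field and one
 * pointer whose visibility "publishes" it. */
struct payload {
        int layer;                       /* must be visible once published */
};

struct slot {
        _Atomic(struct payload *) data;  /* NULL until published */
};

/* Writer side: initialize the payload, then store the pointer with
 * release semantics. This plays the role of the WRITE_ONCE(layer) +
 * smp_wmb() + WRITE_ONCE(dentry) sequence in ovl_dentry_set_lowerdata(). */
static void publish(struct slot *s, struct payload *p, int layer)
{
        p->layer = layer;
        atomic_store_explicit(&s->data, p, memory_order_release);
}

/* Reader side: load the pointer with acquire semantics; a non-NULL
 * result guarantees the payload fields are initialized, like the
 * READ_ONCE() + smp_rmb() pairing in ovl_path_lowerdata(). */
static int read_layer(struct slot *s)
{
        struct payload *p =
                atomic_load_explicit(&s->data, memory_order_acquire);

        return p ? p->layer : -1;
}

int main(void)
{
        static struct payload pl;
        struct slot sl = { NULL };

        printf("before publish: %d\n", read_layer(&sl));  /* -1 */
        publish(&sl, &pl, 42);
        printf("after publish:  %d\n", read_layer(&sl));  /* 42 */
        return 0;
}

The release store on the pointer is the idiomatic userspace equivalent of separating the two plain stores with a write barrier: either way, no reader that sees the pointer can see a stale payload.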
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/module.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include "overlayfs.h" static unsigned short ovl_redirect_max = 256; module_param_named(redirect_max, ovl_redirect_max, ushort, 0644); MODULE_PARM_DESC(redirect_max, "Maximum length of absolute redirect xattr value"); static int ovl_set_redirect(struct dentry *dentry, bool samedir); int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry) { int err; dget(wdentry); if (d_is_dir(wdentry)) err = ovl_do_rmdir(ofs, wdir, wdentry); else err = ovl_do_unlink(ofs, wdir, wdentry); dput(wdentry); if (err) { pr_err("cleanup of '%pd2' failed (%i)\n", wdentry, err); } return err; } struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir) { struct dentry *temp; char name[20]; static atomic_t temp_id = ATOMIC_INIT(0); /* counter is allowed to wrap, since temp dentries are ephemeral */ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id)); temp = ovl_lookup_upper(ofs, name, workdir, strlen(name)); if (!IS_ERR(temp) && temp->d_inode) { pr_err("workdir/%s already exists\n", name); dput(temp); temp = ERR_PTR(-EIO); } return temp; } /* caller holds i_mutex on workdir */ static struct dentry *ovl_whiteout(struct ovl_fs *ofs) { int err; struct dentry *whiteout; struct dentry *workdir = ofs->workdir; struct inode *wdir = workdir->d_inode; if (!ofs->whiteout) { whiteout = ovl_lookup_temp(ofs, workdir); if (IS_ERR(whiteout)) goto out; err = ovl_do_whiteout(ofs, wdir, whiteout); if (err) { dput(whiteout); whiteout = ERR_PTR(err); goto out; } ofs->whiteout = whiteout; } if (!ofs->no_shared_whiteout) { whiteout = ovl_lookup_temp(ofs, workdir); if (IS_ERR(whiteout)) goto out; err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout); if (!err) goto out; if (err != -EMLINK) { pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n", ofs->whiteout->d_inode->i_nlink, err); ofs->no_shared_whiteout = true; } dput(whiteout); } whiteout = ofs->whiteout; ofs->whiteout = NULL; out: return whiteout; } /* Caller must hold i_mutex on both workdir and dir */ int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { struct inode *wdir = ofs->workdir->d_inode; struct dentry *whiteout; int err; int flags = 0; whiteout = ovl_whiteout(ofs); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) return err; if (d_is_dir(dentry)) flags = RENAME_EXCHANGE; err = ovl_do_rename(ofs, wdir, whiteout, dir, dentry, flags); if (err) goto kill_whiteout; if (flags) ovl_cleanup(ofs, wdir, dentry); out: dput(whiteout); return err; kill_whiteout: ovl_cleanup(ofs, wdir, whiteout); goto out; } int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir, struct dentry **newdentry, umode_t mode) { int err; struct dentry *d, *dentry = *newdentry; err = ovl_do_mkdir(ofs, dir, dentry, mode); if (err) return err; if (likely(!d_unhashed(dentry))) return 0; /* * vfs_mkdir() may succeed and leave the dentry passed * to it unhashed and negative. If that happens, try to * lookup a new hashed and positive dentry. 
*/ d = ovl_lookup_upper(ofs, dentry->d_name.name, dentry->d_parent, dentry->d_name.len); if (IS_ERR(d)) { pr_warn("failed lookup after mkdir (%pd2, err=%i).\n", dentry, err); return PTR_ERR(d); } dput(dentry); *newdentry = d; return 0; } struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir, struct dentry *newdentry, struct ovl_cattr *attr) { int err; if (IS_ERR(newdentry)) return newdentry; err = -ESTALE; if (newdentry->d_inode) goto out; if (attr->hardlink) { err = ovl_do_link(ofs, attr->hardlink, dir, newdentry); } else { switch (attr->mode & S_IFMT) { case S_IFREG: err = ovl_do_create(ofs, dir, newdentry, attr->mode); break; case S_IFDIR: /* mkdir is special... */ err = ovl_mkdir_real(ofs, dir, &newdentry, attr->mode); break; case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: err = ovl_do_mknod(ofs, dir, newdentry, attr->mode, attr->rdev); break; case S_IFLNK: err = ovl_do_symlink(ofs, dir, newdentry, attr->link); break; default: err = -EPERM; } } if (!err && WARN_ON(!newdentry->d_inode)) { /* * Not quite sure if non-instantiated dentry is legal or not. * VFS doesn't seem to care so check and warn here. */ err = -EIO; } out: if (err) { dput(newdentry); return ERR_PTR(err); } return newdentry; } struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir, struct ovl_cattr *attr) { return ovl_create_real(ofs, d_inode(workdir), ovl_lookup_temp(ofs, workdir), attr); } static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, int xerr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); int err; err = ovl_check_setxattr(ofs, upper, OVL_XATTR_OPAQUE, "y", 1, xerr); if (!err) ovl_dentry_set_opaque(dentry); return err; } static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) { /* * Fail with -EIO when trying to create opaque dir and upper doesn't * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to * return a specific error for noxattr case. */ return ovl_set_opaque_xerr(dentry, upperdentry, -EIO); } /* * Common operations required to be done after creation of file on upper. * If @hardlink is false, then @inode is a pre-allocated inode, we may or * may not use to instantiate the new dentry. */ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, struct dentry *newdentry, bool hardlink) { struct ovl_inode_params oip = { .upperdentry = newdentry, .newinode = inode, }; ovl_dir_modified(dentry->d_parent, false); ovl_dentry_set_upper_alias(dentry); ovl_dentry_init_reval(dentry, newdentry, NULL); if (!hardlink) { /* * ovl_obtain_alias() can be called after ovl_create_real() * and before we get here, so we may get an inode from cache * with the same real upperdentry that is not the inode we * pre-allocated. In this case we will use the cached inode * to instantiate the new dentry. * * XXX: if we ever use ovl_obtain_alias() to decode directory * file handles, need to use ovl_get_inode_locked() and * d_instantiate_new() here to prevent from creating two * hashed directory inode aliases. 
*/ inode = ovl_get_inode(dentry->d_sb, &oip); if (IS_ERR(inode)) return PTR_ERR(inode); if (inode == oip.newinode) ovl_set_flag(OVL_UPPERDATA, inode); } else { WARN_ON(ovl_inode_real(inode) != d_inode(newdentry)); dput(newdentry); inc_nlink(inode); } d_instantiate(dentry, inode); if (inode != oip.newinode) { pr_warn_ratelimited("newly created inode found in cache (%pd2)\n", dentry); } /* Force lookup of new upper hardlink to find its lower */ if (hardlink) d_drop(dentry); return 0; } static bool ovl_type_merge(struct dentry *dentry) { return OVL_TYPE_MERGE(ovl_path_type(dentry)); } static bool ovl_type_origin(struct dentry *dentry) { return OVL_TYPE_ORIGIN(ovl_path_type(dentry)); } static int ovl_create_upper(struct dentry *dentry, struct inode *inode, struct ovl_cattr *attr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct dentry *newdentry; int err; if (!attr->hardlink && !IS_POSIXACL(udir)) attr->mode &= ~current_umask(); inode_lock_nested(udir, I_MUTEX_PARENT); newdentry = ovl_create_real(ofs, udir, ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len), attr); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_unlock; if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry) && !ovl_allow_offline_changes(ofs)) { /* Setting opaque here is just an optimization, allow to fail */ ovl_set_opaque(dentry, newdentry); } err = ovl_instantiate(dentry, inode, newdentry, !!attr->hardlink); if (err) goto out_cleanup; out_unlock: inode_unlock(udir); return err; out_cleanup: ovl_cleanup(ofs, udir, newdentry); dput(newdentry); goto out_unlock; } static struct dentry *ovl_clear_empty(struct dentry *dentry, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct path upperpath; struct dentry *upper; struct dentry *opaquedir; struct kstat stat; int err; if (WARN_ON(!workdir)) return ERR_PTR(-EROFS); err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; ovl_path_upper(dentry, &upperpath); err = vfs_getattr(&upperpath, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_unlock; err = -ESTALE; if (!S_ISDIR(stat.mode)) goto out_unlock; upper = upperpath.dentry; if (upper->d_parent->d_inode != udir) goto out_unlock; opaquedir = ovl_create_temp(ofs, workdir, OVL_CATTR(stat.mode)); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out_unlock; err = ovl_copy_xattr(dentry->d_sb, &upperpath, opaquedir); if (err) goto out_cleanup; err = ovl_set_opaque(dentry, opaquedir); if (err) goto out_cleanup; inode_lock(opaquedir->d_inode); err = ovl_set_attr(ofs, opaquedir, &stat); inode_unlock(opaquedir->d_inode); if (err) goto out_cleanup; err = ovl_do_rename(ofs, wdir, opaquedir, udir, upper, RENAME_EXCHANGE); if (err) goto out_cleanup; ovl_cleanup_whiteouts(ofs, upper, list); ovl_cleanup(ofs, wdir, upper); unlock_rename(workdir, upperdir); /* dentry's upper doesn't match now, get rid of it */ d_drop(dentry); return opaquedir; out_cleanup: ovl_cleanup(ofs, wdir, opaquedir); dput(opaquedir); out_unlock: unlock_rename(workdir, upperdir); out: return ERR_PTR(err); } static int ovl_set_upper_acl(struct ovl_fs *ofs, struct dentry *upperdentry, const char *acl_name, struct posix_acl *acl) { if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl) return 0; return 
ovl_do_set_acl(ofs, upperdentry, acl_name, acl); } static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, struct ovl_cattr *cattr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct dentry *upper; struct dentry *newdentry; int err; struct posix_acl *acl, *default_acl; bool hardlink = !!cattr->hardlink; if (WARN_ON(!workdir)) return -EROFS; if (!hardlink) { err = posix_acl_create(dentry->d_parent->d_inode, &cattr->mode, &default_acl, &acl); if (err) return err; } err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out_unlock; err = -ESTALE; if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper))) goto out_dput; newdentry = ovl_create_temp(ofs, workdir, cattr); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_dput; /* * mode could have been mutilated due to umask (e.g. sgid directory) */ if (!hardlink && !S_ISLNK(cattr->mode) && newdentry->d_inode->i_mode != cattr->mode) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = cattr->mode, }; inode_lock(newdentry->d_inode); err = ovl_do_notify_change(ofs, newdentry, &attr); inode_unlock(newdentry->d_inode); if (err) goto out_cleanup; } if (!hardlink) { err = ovl_set_upper_acl(ofs, newdentry, XATTR_NAME_POSIX_ACL_ACCESS, acl); if (err) goto out_cleanup; err = ovl_set_upper_acl(ofs, newdentry, XATTR_NAME_POSIX_ACL_DEFAULT, default_acl); if (err) goto out_cleanup; } if (!hardlink && S_ISDIR(cattr->mode)) { err = ovl_set_opaque(dentry, newdentry); if (err) goto out_cleanup; err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, RENAME_EXCHANGE); if (err) goto out_cleanup; ovl_cleanup(ofs, wdir, upper); } else { err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, 0); if (err) goto out_cleanup; } err = ovl_instantiate(dentry, inode, newdentry, hardlink); if (err) { ovl_cleanup(ofs, udir, newdentry); dput(newdentry); } out_dput: dput(upper); out_unlock: unlock_rename(workdir, upperdir); out: if (!hardlink) { posix_acl_release(acl); posix_acl_release(default_acl); } return err; out_cleanup: ovl_cleanup(ofs, wdir, newdentry); dput(newdentry); goto out_dput; } static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, struct ovl_cattr *attr, bool origin) { int err; const struct cred *old_cred; struct cred *override_cred; struct dentry *parent = dentry->d_parent; err = ovl_copy_up(parent); if (err) return err; old_cred = ovl_override_creds(dentry->d_sb); /* * When linking a file with copy up origin into a new parent, mark the * new parent dir "impure". */ if (origin) { err = ovl_set_impure(parent, ovl_dentry_upper(parent)); if (err) goto out_revert_creds; } if (!attr->hardlink) { err = -ENOMEM; override_cred = prepare_creds(); if (!override_cred) goto out_revert_creds; /* * In the creation cases(create, mkdir, mknod, symlink), * ovl should transfer current's fs{u,g}id to underlying * fs. Because underlying fs want to initialize its new * inode owner using current's fs{u,g}id. And in this * case, the @inode is a new inode that is initialized * in inode_init_owner() to current's fs{u,g}id. So use * the inode's i_{u,g}id to override the cred's fs{u,g}id. * * But in the other hardlink case, ovl_link() does not * create a new inode, so just use the ovl mounter's * fs{u,g}id. 
*/ override_cred->fsuid = inode->i_uid; override_cred->fsgid = inode->i_gid; err = security_dentry_create_files_as(dentry, attr->mode, &dentry->d_name, old_cred, override_cred); if (err) { put_cred(override_cred); goto out_revert_creds; } put_cred(override_creds(override_cred)); put_cred(override_cred); } if (!ovl_dentry_is_whiteout(dentry)) err = ovl_create_upper(dentry, inode, attr); else err = ovl_create_over_whiteout(dentry, inode, attr); out_revert_creds: revert_creds(old_cred); return err; } static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, const char *link) { int err; struct inode *inode; struct ovl_cattr attr = { .rdev = rdev, .link = link, }; err = ovl_want_write(dentry); if (err) goto out; /* Preallocate inode to be used by ovl_get_inode() */ err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, mode, rdev); if (!inode) goto out_drop_write; spin_lock(&inode->i_lock); inode->i_state |= I_CREATING; spin_unlock(&inode->i_lock); inode_init_owner(&nop_mnt_idmap, inode, dentry->d_parent->d_inode, mode); attr.mode = inode->i_mode; err = ovl_create_or_link(dentry, inode, &attr, false); /* Did we end up using the preallocated inode? */ if (inode != d_inode(dentry)) iput(inode); out_drop_write: ovl_drop_write(dentry); out: return err; } static int ovl_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); } static int ovl_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); } static int ovl_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { /* Don't allow creation of "whiteout" on overlay */ if (S_ISCHR(mode) && rdev == WHITEOUT_DEV) return -EPERM; return ovl_create_object(dentry, mode, rdev, NULL); } static int ovl_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *link) { return ovl_create_object(dentry, S_IFLNK, 0, link); } static int ovl_set_link_redirect(struct dentry *dentry) { const struct cred *old_cred; int err; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_set_redirect(dentry, false); revert_creds(old_cred); return err; } static int ovl_link(struct dentry *old, struct inode *newdir, struct dentry *new) { int err; struct inode *inode; err = ovl_want_write(old); if (err) goto out; err = ovl_copy_up(old); if (err) goto out_drop_write; err = ovl_copy_up(new->d_parent); if (err) goto out_drop_write; if (ovl_is_metacopy_dentry(old)) { err = ovl_set_link_redirect(old); if (err) goto out_drop_write; } err = ovl_nlink_start(old); if (err) goto out_drop_write; inode = d_inode(old); ihold(inode); err = ovl_create_or_link(new, inode, &(struct ovl_cattr) {.hardlink = ovl_dentry_upper(old)}, ovl_type_origin(old)); if (err) iput(inode); ovl_nlink_end(old); out_drop_write: ovl_drop_write(old); out: return err; } static bool ovl_matches_upper(struct dentry *dentry, struct dentry *upper) { return d_inode(ovl_dentry_upper(dentry)) == d_inode(upper); } static int ovl_remove_and_whiteout(struct dentry *dentry, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct dentry *upper; struct dentry *opaquedir = NULL; int err; if (WARN_ON(!workdir)) return -EROFS; if (!list_empty(list)) { opaquedir = ovl_clear_empty(dentry, list); err = 
PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; } err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out_dput; upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out_unlock; err = -ESTALE; if ((opaquedir && upper != opaquedir) || (!opaquedir && ovl_dentry_upper(dentry) && !ovl_matches_upper(dentry, upper))) { goto out_dput_upper; } err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper); if (err) goto out_d_drop; ovl_dir_modified(dentry->d_parent, true); out_d_drop: d_drop(dentry); out_dput_upper: dput(upper); out_unlock: unlock_rename(workdir, upperdir); out_dput: dput(opaquedir); out: return err; } static int ovl_remove_upper(struct dentry *dentry, bool is_dir, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *dir = upperdir->d_inode; struct dentry *upper; struct dentry *opaquedir = NULL; int err; if (!list_empty(list)) { opaquedir = ovl_clear_empty(dentry, list); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; } inode_lock_nested(dir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out_unlock; err = -ESTALE; if ((opaquedir && upper != opaquedir) || (!opaquedir && !ovl_matches_upper(dentry, upper))) goto out_dput_upper; if (is_dir) err = ovl_do_rmdir(ofs, dir, upper); else err = ovl_do_unlink(ofs, dir, upper); ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry)); /* * Keeping this dentry hashed would mean having to release * upperpath/lowerpath, which could only be done if we are the * sole user of this dentry. Too tricky... Just unhash for * now. */ if (!err) d_drop(dentry); out_dput_upper: dput(upper); out_unlock: inode_unlock(dir); dput(opaquedir); out: return err; } static bool ovl_pure_upper(struct dentry *dentry) { return !ovl_dentry_lower(dentry) && !ovl_test_flag(OVL_WHITEOUTS, d_inode(dentry)); } static void ovl_drop_nlink(struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct dentry *alias; /* Try to find another, hashed alias */ spin_lock(&inode->i_lock); hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { if (alias != dentry && !d_unhashed(alias)) break; } spin_unlock(&inode->i_lock); /* * Changes to underlying layers may cause i_nlink to lose sync with * reality. In this case prevent the link count from going to zero * prematurely. 
*/ if (inode->i_nlink > !!alias) drop_nlink(inode); } static int ovl_do_remove(struct dentry *dentry, bool is_dir) { int err; const struct cred *old_cred; bool lower_positive = ovl_lower_positive(dentry); LIST_HEAD(list); /* No need to clean pure upper removed by vfs_rmdir() */ if (is_dir && (lower_positive || !ovl_pure_upper(dentry))) { err = ovl_check_empty_dir(dentry, &list); if (err) goto out; } err = ovl_want_write(dentry); if (err) goto out; err = ovl_copy_up(dentry->d_parent); if (err) goto out_drop_write; err = ovl_nlink_start(dentry); if (err) goto out_drop_write; old_cred = ovl_override_creds(dentry->d_sb); if (!lower_positive) err = ovl_remove_upper(dentry, is_dir, &list); else err = ovl_remove_and_whiteout(dentry, &list); revert_creds(old_cred); if (!err) { if (is_dir) clear_nlink(dentry->d_inode); else ovl_drop_nlink(dentry); } ovl_nlink_end(dentry); /* * Copy ctime * * Note: we fail to update ctime if there was no copy-up, only a * whiteout */ if (ovl_dentry_upper(dentry)) ovl_copyattr(d_inode(dentry)); out_drop_write: ovl_drop_write(dentry); out: ovl_cache_free(&list); return err; } static int ovl_unlink(struct inode *dir, struct dentry *dentry) { return ovl_do_remove(dentry, false); } static int ovl_rmdir(struct inode *dir, struct dentry *dentry) { return ovl_do_remove(dentry, true); } static bool ovl_type_merge_or_lower(struct dentry *dentry) { enum ovl_path_type type = ovl_path_type(dentry); return OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type); } static bool ovl_can_move(struct dentry *dentry) { return ovl_redirect_dir(OVL_FS(dentry->d_sb)) || !d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry); } static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) { char *buf, *ret; struct dentry *d, *tmp; int buflen = ovl_redirect_max + 1; if (!abs_redirect) { ret = kstrndup(dentry->d_name.name, dentry->d_name.len, GFP_KERNEL); goto out; } buf = ret = kmalloc(buflen, GFP_KERNEL); if (!buf) goto out; buflen--; buf[buflen] = '\0'; for (d = dget(dentry); !IS_ROOT(d);) { const char *name; int thislen; spin_lock(&d->d_lock); name = ovl_dentry_get_redirect(d); if (name) { thislen = strlen(name); } else { name = d->d_name.name; thislen = d->d_name.len; } /* If path is too long, fall back to userspace move */ if (thislen + (name[0] != '/') > buflen) { ret = ERR_PTR(-EXDEV); spin_unlock(&d->d_lock); goto out_put; } buflen -= thislen; memcpy(&buf[buflen], name, thislen); spin_unlock(&d->d_lock); tmp = dget_parent(d); dput(d); d = tmp; /* Absolute redirect: finished */ if (buf[buflen] == '/') break; buflen--; buf[buflen] = '/'; } ret = kstrdup(&buf[buflen], GFP_KERNEL); out_put: dput(d); kfree(buf); out: return ret ? ret : ERR_PTR(-ENOMEM); } static bool ovl_need_absolute_redirect(struct dentry *dentry, bool samedir) { struct dentry *lowerdentry; if (!samedir) return true; if (d_is_dir(dentry)) return false; /* * For non-dir hardlinked files, we need absolute redirects * in general as two upper hardlinks could be in different * dirs. We could put a relative redirect now and convert * it to absolute redirect later. But when nlink > 1 and * indexing is on, that means relative redirect needs to be * converted to absolute during copy up of another lower * hardllink as well. * * So without optimizing too much, just check if lower is * a hard link or not. If lower is hard link, put absolute * redirect. 
*/ lowerdentry = ovl_dentry_lower(dentry); return (d_inode(lowerdentry)->i_nlink > 1); } static int ovl_set_redirect(struct dentry *dentry, bool samedir) { int err; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); const char *redirect = ovl_dentry_get_redirect(dentry); bool absolute_redirect = ovl_need_absolute_redirect(dentry, samedir); if (redirect && (!absolute_redirect || redirect[0] == '/')) return 0; redirect = ovl_get_redirect(dentry, absolute_redirect); if (IS_ERR(redirect)) return PTR_ERR(redirect); err = ovl_check_setxattr(ofs, ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, redirect, strlen(redirect), -EXDEV); if (!err) { spin_lock(&dentry->d_lock); ovl_dentry_set_redirect(dentry, redirect); spin_unlock(&dentry->d_lock); } else { kfree(redirect); pr_warn_ratelimited("failed to set redirect (%i)\n", err); /* Fall back to userspace copy-up */ err = -EXDEV; } return err; } static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *old, struct inode *newdir, struct dentry *new, unsigned int flags) { int err; struct dentry *old_upperdir; struct dentry *new_upperdir; struct dentry *olddentry; struct dentry *newdentry; struct dentry *trap; bool old_opaque; bool new_opaque; bool cleanup_whiteout = false; bool update_nlink = false; bool overwrite = !(flags & RENAME_EXCHANGE); bool is_dir = d_is_dir(old); bool new_is_dir = d_is_dir(new); bool samedir = olddir == newdir; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; struct ovl_fs *ofs = OVL_FS(old->d_sb); LIST_HEAD(list); err = -EINVAL; if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) goto out; flags &= ~RENAME_NOREPLACE; /* Don't copy up directory trees */ err = -EXDEV; if (!ovl_can_move(old)) goto out; if (!overwrite && !ovl_can_move(new)) goto out; if (overwrite && new_is_dir && !ovl_pure_upper(new)) { err = ovl_check_empty_dir(new, &list); if (err) goto out; } if (overwrite) { if (ovl_lower_positive(old)) { if (!ovl_dentry_is_whiteout(new)) { /* Whiteout source */ flags |= RENAME_WHITEOUT; } else { /* Switch whiteouts */ flags |= RENAME_EXCHANGE; } } else if (is_dir && ovl_dentry_is_whiteout(new)) { flags |= RENAME_EXCHANGE; cleanup_whiteout = true; } } err = ovl_want_write(old); if (err) goto out; err = ovl_copy_up(old); if (err) goto out_drop_write; err = ovl_copy_up(new->d_parent); if (err) goto out_drop_write; if (!overwrite) { err = ovl_copy_up(new); if (err) goto out_drop_write; } else if (d_inode(new)) { err = ovl_nlink_start(new); if (err) goto out_drop_write; update_nlink = true; } old_cred = ovl_override_creds(old->d_sb); if (!list_empty(&list)) { opaquedir = ovl_clear_empty(new, &list); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; goto out_revert_creds; } } old_upperdir = ovl_dentry_upper(old->d_parent); new_upperdir = ovl_dentry_upper(new->d_parent); if (!samedir) { /* * When moving a merge dir or non-dir with copy up origin into * a new parent, we are marking the new parent dir "impure". * When ovl_iterate() iterates an "impure" upper dir, it will * lookup the origin inodes of the entries to fill d_ino. 
*/ if (ovl_type_origin(old)) { err = ovl_set_impure(new->d_parent, new_upperdir); if (err) goto out_revert_creds; } if (!overwrite && ovl_type_origin(new)) { err = ovl_set_impure(old->d_parent, old_upperdir); if (err) goto out_revert_creds; } } trap = lock_rename(new_upperdir, old_upperdir); olddentry = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir, old->d_name.len); err = PTR_ERR(olddentry); if (IS_ERR(olddentry)) goto out_unlock; err = -ESTALE; if (!ovl_matches_upper(old, olddentry)) goto out_dput_old; newdentry = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir, new->d_name.len); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_dput_old; old_opaque = ovl_dentry_is_opaque(old); new_opaque = ovl_dentry_is_opaque(new); err = -ESTALE; if (d_inode(new) && ovl_dentry_upper(new)) { if (opaquedir) { if (newdentry != opaquedir) goto out_dput; } else { if (!ovl_matches_upper(new, newdentry)) goto out_dput; } } else { if (!d_is_negative(newdentry)) { if (!new_opaque || !ovl_is_whiteout(newdentry)) goto out_dput; } else { if (flags & RENAME_EXCHANGE) goto out_dput; } } if (olddentry == trap) goto out_dput; if (newdentry == trap) goto out_dput; if (olddentry->d_inode == newdentry->d_inode) goto out_dput; err = 0; if (ovl_type_merge_or_lower(old)) err = ovl_set_redirect(old, samedir); else if (is_dir && !old_opaque && ovl_type_merge(new->d_parent)) err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); if (err) goto out_dput; if (!overwrite && ovl_type_merge_or_lower(new)) err = ovl_set_redirect(new, samedir); else if (!overwrite && new_is_dir && !new_opaque && ovl_type_merge(old->d_parent)) err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); if (err) goto out_dput; err = ovl_do_rename(ofs, old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, flags); if (err) goto out_dput; if (cleanup_whiteout) ovl_cleanup(ofs, old_upperdir->d_inode, newdentry); if (overwrite && d_inode(new)) { if (new_is_dir) clear_nlink(d_inode(new)); else ovl_drop_nlink(new); } ovl_dir_modified(old->d_parent, ovl_type_origin(old) || (!overwrite && ovl_type_origin(new))); ovl_dir_modified(new->d_parent, ovl_type_origin(old) || (d_inode(new) && ovl_type_origin(new))); /* copy ctime: */ ovl_copyattr(d_inode(old)); if (d_inode(new) && ovl_dentry_upper(new)) ovl_copyattr(d_inode(new)); out_dput: dput(newdentry); out_dput_old: dput(olddentry); out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: revert_creds(old_cred); if (update_nlink) ovl_nlink_end(new); out_drop_write: ovl_drop_write(old); out: dput(opaquedir); ovl_cache_free(&list); return err; } const struct inode_operations ovl_dir_inode_operations = { .lookup = ovl_lookup, .mkdir = ovl_mkdir, .symlink = ovl_symlink, .unlink = ovl_unlink, .rmdir = ovl_rmdir, .rename = ovl_rename, .link = ovl_link, .setattr = ovl_setattr, .create = ovl_create, .mknod = ovl_mknod, .permission = ovl_permission, .getattr = ovl_getattr, .listxattr = ovl_listxattr, .get_inode_acl = ovl_get_inode_acl, .get_acl = ovl_get_acl, .set_acl = ovl_set_acl, .update_time = ovl_update_time, .fileattr_get = ovl_fileattr_get, .fileattr_set = ovl_fileattr_set, };
linux-master
fs/overlayfs/dir.c
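ovl_get_redirect() in dir.c above assembles the redirect xattr value right to left: starting from the renamed dentry, each ancestor's stored redirect (or name) is copied into the tail of a fixed buffer with a '/' prepended, stopping at an absolute redirect or at the root, and falling back to -EXDEV when the buffer runs out. Below is a minimal standalone sketch of just that buffer-filling technique; build_redirect() and the sample names are hypothetical illustrations, not kernel API, and the kernel's early stop on an inherited absolute redirect is omitted.

#include <stdio.h>
#include <string.h>

/*
 * Illustrative userspace sketch of the right-to-left buffer filling in
 * ovl_get_redirect(): walk the path components from leaf to root, copy
 * each one into the tail of a fixed buffer and prepend a '/' separator.
 * Returns NULL when the buffer is too small, mirroring the kernel's
 * fallback to -EXDEV (userspace move).
 */
static const char *build_redirect(const char *names[], int depth,
                                  char *buf, int buflen)
{
        buflen--;
        buf[buflen] = '\0';

        for (int i = 0; i < depth; i++) {       /* names[0] is the leaf */
                int thislen = strlen(names[i]);

                if (thislen + 1 > buflen)       /* +1 for the '/' separator */
                        return NULL;
                buflen -= thislen;
                memcpy(&buf[buflen], names[i], thislen);
                buflen--;
                buf[buflen] = '/';
        }
        return &buf[buflen];
}

int main(void)
{
        const char *names[] = { "file", "b", "a" };     /* leaf first: /a/b/file */
        char buf[32];
        const char *redirect = build_redirect(names, 3, buf, sizeof(buf));

        puts(redirect ? redirect : "too long: would fall back to -EXDEV");
        return 0;
}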
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/file.h> #include <linux/xattr.h> #include <linux/rbtree.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/ratelimit.h> #include "overlayfs.h" struct ovl_cache_entry { unsigned int len; unsigned int type; u64 real_ino; u64 ino; struct list_head l_node; struct rb_node node; struct ovl_cache_entry *next_maybe_whiteout; bool is_upper; bool is_whiteout; char name[]; }; struct ovl_dir_cache { long refcount; u64 version; struct list_head entries; struct rb_root root; }; struct ovl_readdir_data { struct dir_context ctx; struct dentry *dentry; bool is_lowest; struct rb_root *root; struct list_head *list; struct list_head middle; struct ovl_cache_entry *first_maybe_whiteout; int count; int err; bool is_upper; bool d_type_supported; }; struct ovl_dir_file { bool is_real; bool is_upper; struct ovl_dir_cache *cache; struct list_head *cursor; struct file *realfile; struct file *upperfile; }; static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) { return rb_entry(n, struct ovl_cache_entry, node); } static bool ovl_cache_entry_find_link(const char *name, int len, struct rb_node ***link, struct rb_node **parent) { bool found = false; struct rb_node **newp = *link; while (!found && *newp) { int cmp; struct ovl_cache_entry *tmp; *parent = *newp; tmp = ovl_cache_entry_from_node(*newp); cmp = strncmp(name, tmp->name, len); if (cmp > 0) newp = &tmp->node.rb_right; else if (cmp < 0 || len < tmp->len) newp = &tmp->node.rb_left; else found = true; } *link = newp; return found; } static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, const char *name, int len) { struct rb_node *node = root->rb_node; int cmp; while (node) { struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); cmp = strncmp(name, p->name, len); if (cmp > 0) node = p->node.rb_right; else if (cmp < 0 || len < p->len) node = p->node.rb_left; else return p; } return NULL; } static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd, struct ovl_cache_entry *p) { /* Don't care if not doing ovl_iter() */ if (!rdd->dentry) return false; /* Always recalc d_ino when remapping lower inode numbers */ if (ovl_xino_bits(OVL_FS(rdd->dentry->d_sb))) return true; /* Always recalc d_ino for parent */ if (strcmp(p->name, "..") == 0) return true; /* If this is lower, then native d_ino will do */ if (!rdd->is_upper) return false; /* * Recalc d_ino for '.' and for all entries if dir is impure (contains * copied up entries) */ if ((p->name[0] == '.' 
&& p->len == 1) || ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry))) return true; return false; } static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd, const char *name, int len, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); p = kmalloc(size, GFP_KERNEL); if (!p) return NULL; memcpy(p->name, name, len); p->name[len] = '\0'; p->len = len; p->type = d_type; p->real_ino = ino; p->ino = ino; /* Defer setting d_ino for upper entry to ovl_iterate() */ if (ovl_calc_d_ino(rdd, p)) p->ino = 0; p->is_upper = rdd->is_upper; p->is_whiteout = false; if (d_type == DT_CHR) { p->next_maybe_whiteout = rdd->first_maybe_whiteout; rdd->first_maybe_whiteout = p; } return p; } static bool ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, const char *name, int len, u64 ino, unsigned int d_type) { struct rb_node **newp = &rdd->root->rb_node; struct rb_node *parent = NULL; struct ovl_cache_entry *p; if (ovl_cache_entry_find_link(name, len, &newp, &parent)) return true; p = ovl_cache_entry_new(rdd, name, len, ino, d_type); if (p == NULL) { rdd->err = -ENOMEM; return false; } list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, rdd->root); return true; } static bool ovl_fill_lowest(struct ovl_readdir_data *rdd, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; p = ovl_cache_entry_find(rdd->root, name, namelen); if (p) { list_move_tail(&p->l_node, &rdd->middle); } else { p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type); if (p == NULL) rdd->err = -ENOMEM; else list_add_tail(&p->l_node, &rdd->middle); } return rdd->err == 0; } void ovl_cache_free(struct list_head *list) { struct ovl_cache_entry *p; struct ovl_cache_entry *n; list_for_each_entry_safe(p, n, list, l_node) kfree(p); INIT_LIST_HEAD(list); } void ovl_dir_cache_free(struct inode *inode) { struct ovl_dir_cache *cache = ovl_dir_cache(inode); if (cache) { ovl_cache_free(&cache->entries); kfree(cache); } } static void ovl_cache_put(struct ovl_dir_file *od, struct inode *inode) { struct ovl_dir_cache *cache = od->cache; WARN_ON(cache->refcount <= 0); cache->refcount--; if (!cache->refcount) { if (ovl_dir_cache(inode) == cache) ovl_set_dir_cache(inode, NULL); ovl_cache_free(&cache->entries); kfree(cache); } } static bool ovl_fill_merge(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); rdd->count++; if (!rdd->is_lowest) return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); else return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type); } static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data *rdd) { int err; struct ovl_cache_entry *p; struct dentry *dentry, *dir = path->dentry; const struct cred *old_cred; old_cred = ovl_override_creds(rdd->dentry->d_sb); err = down_write_killable(&dir->d_inode->i_rwsem); if (!err) { while (rdd->first_maybe_whiteout) { p = rdd->first_maybe_whiteout; rdd->first_maybe_whiteout = p->next_maybe_whiteout; dentry = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); if (!IS_ERR(dentry)) { p->is_whiteout = ovl_is_whiteout(dentry); dput(dentry); } } inode_unlock(dir->d_inode); } revert_creds(old_cred); return err; } static inline int ovl_dir_read(const struct path *realpath, struct ovl_readdir_data *rdd) { struct file *realfile; int err; realfile = 
ovl_path_open(realpath, O_RDONLY | O_LARGEFILE); if (IS_ERR(realfile)) return PTR_ERR(realfile); rdd->first_maybe_whiteout = NULL; rdd->ctx.pos = 0; do { rdd->count = 0; rdd->err = 0; err = iterate_dir(realfile, &rdd->ctx); if (err >= 0) err = rdd->err; } while (!err && rdd->count); if (!err && rdd->first_maybe_whiteout && rdd->dentry) err = ovl_check_whiteouts(realpath, rdd); fput(realfile); return err; } static void ovl_dir_reset(struct file *file) { struct ovl_dir_file *od = file->private_data; struct ovl_dir_cache *cache = od->cache; struct inode *inode = file_inode(file); bool is_real; if (cache && ovl_inode_version_get(inode) != cache->version) { ovl_cache_put(od, inode); od->cache = NULL; od->cursor = NULL; } is_real = ovl_dir_is_real(inode); if (od->is_real != is_real) { /* is_real can only become false when dir is copied up */ if (WARN_ON(is_real)) return; od->is_real = false; } } static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list, struct rb_root *root) { int err; struct path realpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .dentry = dentry, .list = list, .root = root, .is_lowest = false, }; int idx, next; for (idx = 0; idx != -1; idx = next) { next = ovl_path_next(idx, dentry, &realpath); rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry; if (next != -1) { err = ovl_dir_read(&realpath, &rdd); if (err) break; } else { /* * Insert lowest layer entries before upper ones, this * allows offsets to be reasonably constant */ list_add(&rdd.middle, rdd.list); rdd.is_lowest = true; err = ovl_dir_read(&realpath, &rdd); list_del(&rdd.middle); } } return err; } static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) { struct list_head *p; loff_t off = 0; list_for_each(p, &od->cache->entries) { if (off >= pos) break; off++; } /* Cursor is safe since the cache is stable */ od->cursor = p; } static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) { int res; struct ovl_dir_cache *cache; struct inode *inode = d_inode(dentry); cache = ovl_dir_cache(inode); if (cache && ovl_inode_version_get(inode) == cache->version) { WARN_ON(!cache->refcount); cache->refcount++; return cache; } ovl_set_dir_cache(d_inode(dentry), NULL); cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); if (!cache) return ERR_PTR(-ENOMEM); cache->refcount = 1; INIT_LIST_HEAD(&cache->entries); cache->root = RB_ROOT; res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root); if (res) { ovl_cache_free(&cache->entries); kfree(cache); return ERR_PTR(res); } cache->version = ovl_inode_version_get(inode); ovl_set_dir_cache(inode, cache); return cache; } /* Map inode number to lower fs unique range */ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid, const char *name, int namelen, bool warn) { unsigned int xinoshift = 64 - xinobits; if (unlikely(ino >> xinoshift)) { if (warn) { pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n", namelen, name, ino, xinobits); } return ino; } /* * The lowest xinobit is reserved for mapping the non-persistent inode * numbers range, but this range is only exposed via st_ino, not here. */ return ino | ((u64)fsid) << (xinoshift + 1); } /* * Set d_ino for upper entries. Non-upper entries should always report * the uppermost real inode ino and should not call this function. * * When not all layers are on the same fs, report real ino also for upper.
* * When all layers are on the same fs, and upper has a reference to * copy up origin, call vfs_getattr() on the overlay entry to make * sure that d_ino will be consistent with st_ino from stat(2). */ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry *p) { struct dentry *dir = path->dentry; struct ovl_fs *ofs = OVL_FS(dir->d_sb); struct dentry *this = NULL; enum ovl_path_type type; u64 ino = p->real_ino; int xinobits = ovl_xino_bits(ofs); int err = 0; if (!ovl_same_dev(ofs)) goto out; if (p->name[0] == '.') { if (p->len == 1) { this = dget(dir); goto get; } if (p->len == 2 && p->name[1] == '.') { /* we shall not be moved */ this = dget(dir->d_parent); goto get; } } this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); if (IS_ERR_OR_NULL(this) || !this->d_inode) { /* Mark a stale entry */ p->is_whiteout = true; if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; goto fail; } goto out; } get: type = ovl_path_type(this); if (OVL_TYPE_ORIGIN(type)) { struct kstat stat; struct path statpath = *path; statpath.dentry = this; err = vfs_getattr(&statpath, &stat, STATX_INO, 0); if (err) goto fail; /* * Directory inode is always on overlay st_dev. * Non-dir with ovl_same_dev() could be on pseudo st_dev in case * of xino bits overflow. */ WARN_ON_ONCE(S_ISDIR(stat.mode) && dir->d_sb->s_dev != stat.dev); ino = stat.ino; } else if (xinobits && !OVL_TYPE_UPPER(type)) { ino = ovl_remap_lower_ino(ino, xinobits, ovl_layer_lower(this)->fsid, p->name, p->len, ovl_xino_warn(ofs)); } out: p->ino = ino; dput(this); return err; fail: pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n", p->name, err); goto out; } static bool ovl_fill_plain(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); rdd->count++; p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type); if (p == NULL) { rdd->err = -ENOMEM; return false; } list_add_tail(&p->l_node, rdd->list); return true; } static int ovl_dir_read_impure(const struct path *path, struct list_head *list, struct rb_root *root) { int err; struct path realpath; struct ovl_cache_entry *p, *n; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = list, .root = root, }; INIT_LIST_HEAD(list); *root = RB_ROOT; ovl_path_upper(path->dentry, &realpath); err = ovl_dir_read(&realpath, &rdd); if (err) return err; list_for_each_entry_safe(p, n, list, l_node) { if (strcmp(p->name, ".") != 0 && strcmp(p->name, "..") != 0) { err = ovl_cache_update_ino(path, p); if (err) return err; } if (p->ino == p->real_ino) { list_del(&p->l_node); kfree(p); } else { struct rb_node **newp = &root->rb_node; struct rb_node *parent = NULL; if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len, &newp, &parent))) return -EIO; rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, root); } } return 0; } static struct ovl_dir_cache *ovl_cache_get_impure(const struct path *path) { int res; struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct ovl_dir_cache *cache; cache = ovl_dir_cache(inode); if (cache && ovl_inode_version_get(inode) == cache->version) return cache; /* Impure cache is not refcounted, free it here */ ovl_dir_cache_free(inode); ovl_set_dir_cache(inode, NULL); cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); if (!cache) return ERR_PTR(-ENOMEM); res = ovl_dir_read_impure(path, &cache->entries, 
&cache->root); if (res) { ovl_cache_free(&cache->entries); kfree(cache); return ERR_PTR(res); } if (list_empty(&cache->entries)) { /* * A good opportunity to get rid of an unneeded "impure" flag. * Removing the "impure" xattr is best effort. */ if (!ovl_want_write(dentry)) { ovl_removexattr(ofs, ovl_dentry_upper(dentry), OVL_XATTR_IMPURE); ovl_drop_write(dentry); } ovl_clear_flag(OVL_IMPURE, inode); kfree(cache); return NULL; } cache->version = ovl_inode_version_get(inode); ovl_set_dir_cache(inode, cache); return cache; } struct ovl_readdir_translate { struct dir_context *orig_ctx; struct ovl_dir_cache *cache; struct dir_context ctx; u64 parent_ino; int fsid; int xinobits; bool xinowarn; }; static bool ovl_fill_real(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_translate *rdt = container_of(ctx, struct ovl_readdir_translate, ctx); struct dir_context *orig_ctx = rdt->orig_ctx; if (rdt->parent_ino && strcmp(name, "..") == 0) { ino = rdt->parent_ino; } else if (rdt->cache) { struct ovl_cache_entry *p; p = ovl_cache_entry_find(&rdt->cache->root, name, namelen); if (p) ino = p->ino; } else if (rdt->xinobits) { ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid, name, namelen, rdt->xinowarn); } return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); } static bool ovl_is_impure_dir(struct file *file) { struct ovl_dir_file *od = file->private_data; struct inode *dir = file_inode(file); /* * Only upper dir can be impure, but if we are in the middle of * iterating a lower real dir, dir could be copied up and marked * impure. We only want the impure cache if we started iterating * a real upper dir to begin with. */ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir); } static int ovl_iterate_real(struct file *file, struct dir_context *ctx) { int err; struct ovl_dir_file *od = file->private_data; struct dentry *dir = file->f_path.dentry; struct ovl_fs *ofs = OVL_FS(dir->d_sb); const struct ovl_layer *lower_layer = ovl_layer_lower(dir); struct ovl_readdir_translate rdt = { .ctx.actor = ovl_fill_real, .orig_ctx = ctx, .xinobits = ovl_xino_bits(ofs), .xinowarn = ovl_xino_warn(ofs), }; if (rdt.xinobits && lower_layer) rdt.fsid = lower_layer->fsid; if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) { struct kstat stat; struct path statpath = file->f_path; statpath.dentry = dir->d_parent; err = vfs_getattr(&statpath, &stat, STATX_INO, 0); if (err) return err; WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev); rdt.parent_ino = stat.ino; } if (ovl_is_impure_dir(file)) { rdt.cache = ovl_cache_get_impure(&file->f_path); if (IS_ERR(rdt.cache)) return PTR_ERR(rdt.cache); } err = iterate_dir(od->realfile, &rdt.ctx); ctx->pos = rdt.ctx.pos; return err; } static int ovl_iterate(struct file *file, struct dir_context *ctx) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct ovl_cache_entry *p; const struct cred *old_cred; int err; old_cred = ovl_override_creds(dentry->d_sb); if (!ctx->pos) ovl_dir_reset(file); if (od->is_real) { /* * If parent is merge, then need to adjust d_ino for '..', if * dir is impure then need to adjust d_ino for copied up * entries. 
*/ if (ovl_xino_bits(ofs) || (ovl_same_fs(ofs) && (ovl_is_impure_dir(file) || OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) { err = ovl_iterate_real(file, ctx); } else { err = iterate_dir(od->realfile, ctx); } goto out; } if (!od->cache) { struct ovl_dir_cache *cache; cache = ovl_cache_get(dentry); err = PTR_ERR(cache); if (IS_ERR(cache)) goto out; od->cache = cache; ovl_seek_cursor(od, ctx->pos); } while (od->cursor != &od->cache->entries) { p = list_entry(od->cursor, struct ovl_cache_entry, l_node); if (!p->is_whiteout) { if (!p->ino) { err = ovl_cache_update_ino(&file->f_path, p); if (err) goto out; } } /* ovl_cache_update_ino() sets is_whiteout on stale entry */ if (!p->is_whiteout) { if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) break; } od->cursor = p->l_node.next; ctx->pos++; } err = 0; out: revert_creds(old_cred); return err; } static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) { loff_t res; struct ovl_dir_file *od = file->private_data; inode_lock(file_inode(file)); if (!file->f_pos) ovl_dir_reset(file); if (od->is_real) { res = vfs_llseek(od->realfile, offset, origin); file->f_pos = od->realfile->f_pos; } else { res = -EINVAL; switch (origin) { case SEEK_CUR: offset += file->f_pos; break; case SEEK_SET: break; default: goto out_unlock; } if (offset < 0) goto out_unlock; if (offset != file->f_pos) { file->f_pos = offset; if (od->cache) ovl_seek_cursor(od, offset); } res = offset; } out_unlock: inode_unlock(file_inode(file)); return res; } static struct file *ovl_dir_open_realfile(const struct file *file, const struct path *realpath) { struct file *res; const struct cred *old_cred; old_cred = ovl_override_creds(file_inode(file)->i_sb); res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE)); revert_creds(old_cred); return res; } /* * Like ovl_real_fdget(), returns upperfile if dir was copied up since open. * Unlike ovl_real_fdget(), this caches upperfile in file->private_data. * * TODO: use same abstract type for file->private_data of dir and file so * upperfile could also be cached for files as well. */ struct file *ovl_dir_real_file(const struct file *file, bool want_upper) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; struct file *old, *realfile = od->realfile; if (!OVL_TYPE_UPPER(ovl_path_type(dentry))) return want_upper ? 
NULL : realfile; /* * Need to check if we started out being a lower dir, but got copied up */ if (!od->is_upper) { realfile = READ_ONCE(od->upperfile); if (!realfile) { struct path upperpath; ovl_path_upper(dentry, &upperpath); realfile = ovl_dir_open_realfile(file, &upperpath); if (IS_ERR(realfile)) return realfile; old = cmpxchg_release(&od->upperfile, NULL, realfile); if (old) { fput(realfile); realfile = old; } } } return realfile; } static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct file *realfile; int err; err = ovl_sync_status(OVL_FS(file_inode(file)->i_sb)); if (err <= 0) return err; realfile = ovl_dir_real_file(file, true); err = PTR_ERR_OR_ZERO(realfile); /* Nothing to sync for lower */ if (!realfile || err) return err; return vfs_fsync_range(realfile, start, end, datasync); } static int ovl_dir_release(struct inode *inode, struct file *file) { struct ovl_dir_file *od = file->private_data; if (od->cache) { inode_lock(inode); ovl_cache_put(od, inode); inode_unlock(inode); } fput(od->realfile); if (od->upperfile) fput(od->upperfile); kfree(od); return 0; } static int ovl_dir_open(struct inode *inode, struct file *file) { struct path realpath; struct file *realfile; struct ovl_dir_file *od; enum ovl_path_type type; od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); if (!od) return -ENOMEM; type = ovl_path_real(file->f_path.dentry, &realpath); realfile = ovl_dir_open_realfile(file, &realpath); if (IS_ERR(realfile)) { kfree(od); return PTR_ERR(realfile); } od->realfile = realfile; od->is_real = ovl_dir_is_real(inode); od->is_upper = OVL_TYPE_UPPER(type); file->private_data = od; return 0; } WRAP_DIR_ITER(ovl_iterate) // FIXME! const struct file_operations ovl_dir_operations = { .read = generic_read_dir, .open = ovl_dir_open, .iterate_shared = shared_ovl_iterate, .llseek = ovl_dir_llseek, .fsync = ovl_dir_fsync, .release = ovl_dir_release, }; int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) { int err; struct ovl_cache_entry *p, *n; struct rb_root root = RB_ROOT; const struct cred *old_cred; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_dir_read_merged(dentry, list, &root); revert_creds(old_cred); if (err) return err; err = 0; list_for_each_entry_safe(p, n, list, l_node) { /* * Select whiteouts in upperdir, they should * be cleared when deleting this directory. */ if (p->is_whiteout) { if (p->is_upper) continue; goto del_entry; } if (p->name[0] == '.') { if (p->len == 1) goto del_entry; if (p->len == 2 && p->name[1] == '.') goto del_entry; } err = -ENOTEMPTY; break; del_entry: list_del(&p->l_node); kfree(p); } return err; } void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, struct list_head *list) { struct ovl_cache_entry *p; inode_lock_nested(upper->d_inode, I_MUTEX_CHILD); list_for_each_entry(p, list, l_node) { struct dentry *dentry; if (WARN_ON(!p->is_whiteout || !p->is_upper)) continue; dentry = ovl_lookup_upper(ofs, p->name, upper, p->len); if (IS_ERR(dentry)) { pr_err("lookup '%s/%.*s' failed (%i)\n", upper->d_name.name, p->len, p->name, (int) PTR_ERR(dentry)); continue; } if (dentry->d_inode) ovl_cleanup(ofs, upper->d_inode, dentry); dput(dentry); } inode_unlock(upper->d_inode); } static bool ovl_check_d_type(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); /* Even if d_type is not supported, DT_DIR is returned for . and .. 
*/ if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen)) return true; if (d_type != DT_UNKNOWN) rdd->d_type_supported = true; return true; } /* * Returns 1 if d_type is supported, 0 not supported/unknown. Negative values * if error is encountered. */ int ovl_check_d_type_supported(const struct path *realpath) { int err; struct ovl_readdir_data rdd = { .ctx.actor = ovl_check_d_type, .d_type_supported = false, }; err = ovl_dir_read(realpath, &rdd); if (err) return err; return rdd.d_type_supported; } #define OVL_INCOMPATDIR_NAME "incompat" static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *path, int level) { int err; struct inode *dir = path->dentry->d_inode; LIST_HEAD(list); struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = &list, }; bool incompat = false; /* * The "work/incompat" directory is treated specially - if it is not * empty, instead of printing a generic error and mounting read-only, * we will error about incompat features and fail the mount. * * When called from ovl_indexdir_cleanup(), path->dentry->d_name.name * starts with '#'. */ if (level == 2 && !strcmp(path->dentry->d_name.name, OVL_INCOMPATDIR_NAME)) incompat = true; err = ovl_dir_read(path, &rdd); if (err) goto out; inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { struct dentry *dentry; if (p->name[0] == '.') { if (p->len == 1) continue; if (p->len == 2 && p->name[1] == '.') continue; } else if (incompat) { pr_err("overlay with incompat feature '%s' cannot be mounted\n", p->name); err = -EINVAL; break; } dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len); if (IS_ERR(dentry)) continue; if (dentry->d_inode) err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level); dput(dentry); if (err) break; } inode_unlock(dir); out: ovl_cache_free(&list); return err; } int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir, struct vfsmount *mnt, struct dentry *dentry, int level) { int err; if (!d_is_dir(dentry) || level > 1) { return ovl_cleanup(ofs, dir, dentry); } err = ovl_do_rmdir(ofs, dir, dentry); if (err) { struct path path = { .mnt = mnt, .dentry = dentry }; inode_unlock(dir); err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1); inode_lock_nested(dir, I_MUTEX_PARENT); if (!err) err = ovl_cleanup(ofs, dir, dentry); } return err; } int ovl_indexdir_cleanup(struct ovl_fs *ofs) { int err; struct dentry *indexdir = ofs->indexdir; struct dentry *index = NULL; struct inode *dir = indexdir->d_inode; struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir }; LIST_HEAD(list); struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = &list, }; err = ovl_dir_read(&path, &rdd); if (err) goto out; inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { if (p->name[0] == '.') { if (p->len == 1) continue; if (p->len == 2 && p->name[1] == '.') continue; } index = ovl_lookup_upper(ofs, p->name, indexdir, p->len); if (IS_ERR(index)) { err = PTR_ERR(index); index = NULL; break; } /* Cleanup leftover from index create/cleanup attempt */ if (index->d_name.name[0] == '#') { err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1); if (err) break; goto next; } err = ovl_verify_index(ofs, index); if (!err) { goto next; } else if (err == -ESTALE) { /* Cleanup stale index entries */ err = ovl_cleanup(ofs, dir, index); } else if (err != -ENOENT) { /* * Abort mount to avoid corrupting the index if * an incompatible index entry was found or on out 
* of memory. */ break; } else if (ofs->config.nfs_export) { /* * Whiteout orphan index to block future open by * handle after overlay nlink dropped to zero. */ err = ovl_cleanup_and_whiteout(ofs, dir, index); } else { /* Cleanup orphan index entries */ err = ovl_cleanup(ofs, dir, index); } if (err) break; next: dput(index); index = NULL; } dput(index); inode_unlock(dir); out: ovl_cache_free(&list); if (err) pr_err("failed index dir cleanup (%i)\n", err); return err; }
linux-master
fs/overlayfs/readdir.c
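ovl_dir_read_merged() in readdir.c above merges the layers through a name-keyed rbtree: upper entries are inserted first, and a lower entry is only added if its name is not already claimed by a higher layer. A rough userspace analogue of that dedup step, using POSIX tsearch()/tfind() in place of the kernel rbtree; add_layer() and the sample entry lists are assumptions for illustration, and whiteout filtering is left out.

#include <stdio.h>
#include <string.h>
#include <search.h>     /* POSIX binary tree, standing in for the kernel rbtree */

static void *root;      /* tree of names seen so far, upper layer first */

static int cmp(const void *a, const void *b)
{
        return strcmp(a, b);
}

/* Emit each name unless a higher layer already claimed it */
static void add_layer(const char *names[], int n, const char *layer)
{
        for (int i = 0; i < n; i++) {
                if (tfind(names[i], &root, cmp))
                        continue;       /* shadowed by a higher layer */
                tsearch(names[i], &root, cmp);
                printf("%s\t(from %s)\n", names[i], layer);
        }
}

int main(void)
{
        const char *upper[] = { "a", "b" };
        const char *lower[] = { "b", "c" };     /* "b" is shadowed by upper */

        add_layer(upper, 2, "upper");
        add_layer(lower, 2, "lower");
        return 0;
}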
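ovl_remap_lower_ino() above (and the related ovl_map_ino()/ovl_map_dev_ino() in inode.c below) keeps st_ino unique across layers by folding the layer's fsid into the high xino bits of the real inode number, shifted one extra position because the lowest xino bit is reserved for the non-persistent range. A small standalone sketch of that packing arithmetic, with xinobits=8 chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the xino packing in ovl_remap_lower_ino(): when the real inode
 * number fits below the xino bits, fold the fsid into the high bits, shifted
 * one extra position because the lowest xino bit is reserved for the
 * non-persistent inode number range. On overflow the ino is returned
 * unchanged, which is when the kernel prints the "d_ino too big" warning.
 */
static uint64_t remap_lower_ino(uint64_t ino, int xinobits, int fsid)
{
        unsigned int xinoshift = 64 - xinobits;

        if (ino >> xinoshift)           /* does not fit: no remapping */
                return ino;
        return ino | ((uint64_t)fsid << (xinoshift + 1));
}

int main(void)
{
        /* xinobits = 8 is an arbitrary example; the kernel derives it */
        printf("fsid 3, ino 42  -> %#llx\n",
               (unsigned long long)remap_lower_ino(42, 8, 3));
        printf("fsid 3, big ino -> %#llx\n",
               (unsigned long long)remap_lower_ino(1ULL << 60, 8, 3));
        return 0;
}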
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/cred.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include <linux/ratelimit.h> #include <linux/fiemap.h> #include <linux/fileattr.h> #include <linux/security.h> #include <linux/namei.h> #include <linux/posix_acl_xattr.h> #include "overlayfs.h" int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { int err; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); bool full_copy_up = false; struct dentry *upperdentry; const struct cred *old_cred; err = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (err) return err; err = ovl_want_write(dentry); if (err) goto out; if (attr->ia_valid & ATTR_SIZE) { /* Truncate should trigger data copy up as well */ full_copy_up = true; } if (!full_copy_up) err = ovl_copy_up(dentry); else err = ovl_copy_up_with_data(dentry); if (!err) { struct inode *winode = NULL; upperdentry = ovl_dentry_upper(dentry); if (attr->ia_valid & ATTR_SIZE) { winode = d_inode(upperdentry); err = get_write_access(winode); if (err) goto out_drop_write; } if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) attr->ia_valid &= ~ATTR_MODE; /* * We might have to translate ovl file into real file object * once use cases emerge. For now, simply don't let underlying * filesystem rely on attr->ia_file */ attr->ia_valid &= ~ATTR_FILE; /* * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN * set. Overlayfs does not pass O_TRUNC flag to underlying * filesystem during open -> do not pass ATTR_OPEN. This * disables optimization in fuse which assumes open(O_TRUNC) * already set file size to 0. But we never passed O_TRUNC to * fuse. So by clearing ATTR_OPEN, fuse will be forced to send * setattr request to server. */ attr->ia_valid &= ~ATTR_OPEN; inode_lock(upperdentry->d_inode); old_cred = ovl_override_creds(dentry->d_sb); err = ovl_do_notify_change(ofs, upperdentry, attr); revert_creds(old_cred); if (!err) ovl_copyattr(dentry->d_inode); inode_unlock(upperdentry->d_inode); if (winode) put_write_access(winode); } out_drop_write: ovl_drop_write(dentry); out: return err; } static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); bool samefs = ovl_same_fs(ofs); unsigned int xinobits = ovl_xino_bits(ofs); unsigned int xinoshift = 64 - xinobits; if (samefs) { /* * When all layers are on the same fs, all real inode * numbers are unique, so we use the overlay st_dev, * which is friendly to du -x. */ stat->dev = dentry->d_sb->s_dev; return; } else if (xinobits) { /* * All inode numbers of underlying fs should not be using the * high xinobits, so we use high xinobits to partition the * overlay st_ino address space. The high bits hold the fsid * (upper fsid is 0). The lowest xinobit is reserved for mapping * the non-persistent inode numbers range in case of overflow. * This way all overlay inode numbers are unique and use the * overlay st_dev.
*/ if (likely(!(stat->ino >> xinoshift))) { stat->ino |= ((u64)fsid) << (xinoshift + 1); stat->dev = dentry->d_sb->s_dev; return; } else if (ovl_xino_warn(ofs)) { pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n", dentry, stat->ino, xinobits); } } /* The inode could not be mapped to a unified st_ino address space */ if (S_ISDIR(dentry->d_inode->i_mode)) { /* * Always use the overlay st_dev for directories, so 'find * -xdev' will scan the entire overlay mount and won't cross the * overlay mount boundaries. * * If not all layers are on the same fs the pair {real st_ino; * overlay st_dev} is not unique, so use the non-persistent * overlay st_ino for directories. */ stat->dev = dentry->d_sb->s_dev; stat->ino = dentry->d_inode->i_ino; } else { /* * For non-samefs setup, if we cannot map all layers' st_ino * to a unified address space, we need to make sure that st_dev * is unique per underlying fs, so we use the unique anonymous * bdev assigned to the underlying fs. */ stat->dev = ofs->fs[fsid].pseudo_dev; } } int ovl_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct dentry *dentry = path->dentry; enum ovl_path_type type; struct path realpath; const struct cred *old_cred; struct inode *inode = d_inode(dentry); bool is_dir = S_ISDIR(inode->i_mode); int fsid = 0; int err; bool metacopy_blocks = false; metacopy_blocks = ovl_is_metacopy_dentry(dentry); type = ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); err = vfs_getattr(&realpath, stat, request_mask, flags); if (err) goto out; /* Report the effective immutable/append-only STATX flags */ generic_fill_statx_attr(inode, stat); /* * For non-dir or same fs, we use st_ino of the copy up origin. * This guarantees constant st_dev/st_ino across copy up. * With xino feature and non-samefs, we use st_ino of the copy up * origin masked with high bits that represent the layer id. * * If lower filesystem supports NFS file handles, this also guarantees * persistent st_ino across mount cycle. */ if (!is_dir || ovl_same_dev(OVL_FS(dentry->d_sb))) { if (!OVL_TYPE_UPPER(type)) { fsid = ovl_layer_lower(dentry)->fsid; } else if (OVL_TYPE_ORIGIN(type)) { struct kstat lowerstat; u32 lowermask = STATX_INO | STATX_BLOCKS | (!is_dir ? STATX_NLINK : 0); ovl_path_lower(dentry, &realpath); err = vfs_getattr(&realpath, &lowerstat, lowermask, flags); if (err) goto out; /* * Lower hardlinks may be broken on copy up to different * upper files, so we cannot use the lower origin st_ino * for those different files, even for the same fs case. * * Similarly, several redirected dirs can point to the * same dir on a lower layer. With the "verify_lower" * feature, we do not use the lower origin st_ino, if * we haven't verified that this redirect is unique. * * With inodes index enabled, it is safe to use st_ino * of an indexed origin. The index validates that the * upper hardlink is not broken and that a redirected * dir is the only redirect to that origin. */ if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) || (!ovl_verify_lower(dentry->d_sb) && (is_dir || lowerstat.nlink == 1))) { fsid = ovl_layer_lower(dentry)->fsid; stat->ino = lowerstat.ino; } /* * If we are querying a metacopy dentry and lower * dentry is data dentry, then use the blocks we * queried just now. We don't have to do additional * vfs_getattr(). If lower itself is metacopy, then * additional vfs_getattr() is unavoidable.
*/ if (metacopy_blocks && realpath.dentry == ovl_dentry_lowerdata(dentry)) { stat->blocks = lowerstat.blocks; metacopy_blocks = false; } } if (metacopy_blocks) { /* * If lower is not same as lowerdata or if there was * no origin on upper, we can end up here. * With lazy lowerdata lookup, guess lowerdata blocks * from size to avoid lowerdata lookup on stat(2). */ struct kstat lowerdatastat; u32 lowermask = STATX_BLOCKS; ovl_path_lowerdata(dentry, &realpath); if (realpath.dentry) { err = vfs_getattr(&realpath, &lowerdatastat, lowermask, flags); if (err) goto out; } else { lowerdatastat.blocks = round_up(stat->size, stat->blksize) >> 9; } stat->blocks = lowerdatastat.blocks; } } ovl_map_dev_ino(dentry, stat, fsid); /* * It's probably not worth it to count subdirs to get the * correct link count. nlink=1 seems to pacify 'find' and * other utilities. */ if (is_dir && OVL_TYPE_MERGE(type)) stat->nlink = 1; /* * Return the overlay inode nlinks for indexed upper inodes. * Overlay inode nlink counts the union of the upper hardlinks * and non-covered lower hardlinks. It does not include the upper * index hardlink. */ if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry))) stat->nlink = dentry->d_inode->i_nlink; out: revert_creds(old_cred); return err; } int ovl_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { struct inode *upperinode = ovl_inode_upper(inode); struct inode *realinode; struct path realpath; const struct cred *old_cred; int err; /* Careful in RCU walk mode */ realinode = ovl_i_path_real(inode, &realpath); if (!realinode) { WARN_ON(!(mask & MAY_NOT_BLOCK)); return -ECHILD; } /* * Check overlay inode with the creds of task and underlying inode * with creds of mounter */ err = generic_permission(&nop_mnt_idmap, inode, mask); if (err) return err; old_cred = ovl_override_creds(inode->i_sb); if (!upperinode && !special_file(realinode->i_mode) && mask & MAY_WRITE) { mask &= ~(MAY_WRITE | MAY_APPEND); /* Make sure mounter can read file for copy up later */ mask |= MAY_READ; } err = inode_permission(mnt_idmap(realpath.mnt), realinode, mask); revert_creds(old_cred); return err; } static const char *ovl_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { const struct cred *old_cred; const char *p; if (!dentry) return ERR_PTR(-ECHILD); old_cred = ovl_override_creds(dentry->d_sb); p = vfs_get_link(ovl_dentry_real(dentry), done); revert_creds(old_cred); return p; } bool ovl_is_private_xattr(struct super_block *sb, const char *name) { struct ovl_fs *ofs = OVL_FS(sb); if (ofs->config.userxattr) return strncmp(name, OVL_XATTR_USER_PREFIX, sizeof(OVL_XATTR_USER_PREFIX) - 1) == 0; else return strncmp(name, OVL_XATTR_TRUSTED_PREFIX, sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1) == 0; } int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { int err; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdentry = ovl_i_dentry_upper(inode); struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry); struct path realpath; const struct cred *old_cred; err = ovl_want_write(dentry); if (err) goto out; if (!value && !upperdentry) { ovl_path_lower(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0); revert_creds(old_cred); if (err < 0) goto out_drop_write; } if (!upperdentry) { err = ovl_copy_up(dentry); if (err) goto out_drop_write; realdentry = ovl_dentry_upper(dentry); } old_cred = 
ovl_override_creds(dentry->d_sb); if (value) { err = ovl_do_setxattr(ofs, realdentry, name, value, size, flags); } else { WARN_ON(flags != XATTR_REPLACE); err = ovl_do_removexattr(ofs, realdentry, name); } revert_creds(old_cred); /* copy c/mtime */ ovl_copyattr(inode); out_drop_write: ovl_drop_write(dentry); out: return err; } int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name, void *value, size_t size) { ssize_t res; const struct cred *old_cred; struct path realpath; ovl_i_path_real(inode, &realpath); old_cred = ovl_override_creds(dentry->d_sb); res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size); revert_creds(old_cred); return res; } static bool ovl_can_list(struct super_block *sb, const char *s) { /* Never list private (.overlay) */ if (ovl_is_private_xattr(sb, s)) return false; /* List all non-trusted xattrs */ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) return true; /* list other trusted for superuser only */ return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) { struct dentry *realdentry = ovl_dentry_real(dentry); ssize_t res; size_t len; char *s; const struct cred *old_cred; old_cred = ovl_override_creds(dentry->d_sb); res = vfs_listxattr(realdentry, list, size); revert_creds(old_cred); if (res <= 0 || size == 0) return res; /* filter out private xattrs */ for (s = list, len = res; len;) { size_t slen = strnlen(s, len) + 1; /* underlying fs providing us with a broken xattr list? */ if (WARN_ON(slen > len)) return -EIO; len -= slen; if (!ovl_can_list(dentry->d_sb, s)) { res -= slen; memmove(s, s + slen, len); } else { s += slen; } } return res; } #ifdef CONFIG_FS_POSIX_ACL /* * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone * of the POSIX ACLs retrieved from the lower layer to this function to not * alter the POSIX ACLs for the underlying filesystem. */ static void ovl_idmap_posix_acl(const struct inode *realinode, struct mnt_idmap *idmap, struct posix_acl *acl) { struct user_namespace *fs_userns = i_user_ns(realinode); for (unsigned int i = 0; i < acl->a_count; i++) { vfsuid_t vfsuid; vfsgid_t vfsgid; struct posix_acl_entry *e = &acl->a_entries[i]; switch (e->e_tag) { case ACL_USER: vfsuid = make_vfsuid(idmap, fs_userns, e->e_uid); e->e_uid = vfsuid_into_kuid(vfsuid); break; case ACL_GROUP: vfsgid = make_vfsgid(idmap, fs_userns, e->e_gid); e->e_gid = vfsgid_into_kgid(vfsgid); break; } } } /* * The @noperm argument is used to skip permission checking and is a temporary * measure. Quoting Miklos from an earlier discussion: * * > So there are two paths to getting an acl: * > 1) permission checking and 2) retrieving the value via getxattr(2). * > This is a similar situation as reading a symlink vs. following it. * > When following a symlink overlayfs always reads the link on the * > underlying fs just as if it was a readlink(2) call, calling * > security_inode_readlink() instead of security_inode_follow_link(). * > This is logical: we are reading the link from the underlying storage, * > and following it on overlayfs. * > * > Applying the same logic to acl: we do need to call the * > security_inode_getxattr() on the underlying fs, even if just want to * > check permissions on overlay. This is currently not done, which is an * > inconsistency. * > * > Maybe adding the check to ovl_get_acl() is the right way to go, but * > I'm a little afraid of a performance regression. Will look into that.
* * Until we have made a decision allow this helper to take the @noperm * argument. We should hopefully be able to remove it soon. */ struct posix_acl *ovl_get_acl_path(const struct path *path, const char *acl_name, bool noperm) { struct posix_acl *real_acl, *clone; struct mnt_idmap *idmap; struct inode *realinode = d_inode(path->dentry); idmap = mnt_idmap(path->mnt); if (noperm) real_acl = get_inode_acl(realinode, posix_acl_type(acl_name)); else real_acl = vfs_get_acl(idmap, path->dentry, acl_name); if (IS_ERR_OR_NULL(real_acl)) return real_acl; if (!is_idmapped_mnt(path->mnt)) return real_acl; /* * We cannot alter the ACLs returned from the relevant layer as that * would alter the cached values filesystem wide for the lower * filesystem. Instead we can clone the ACLs and then apply the * relevant idmapping of the layer. */ clone = posix_acl_clone(real_acl, GFP_KERNEL); posix_acl_release(real_acl); /* release original acl */ if (!clone) return ERR_PTR(-ENOMEM); ovl_idmap_posix_acl(realinode, idmap, clone); return clone; } /* * When the relevant layer is an idmapped mount we need to take the idmapping * of the layer into account and translate any ACL_{GROUP,USER} values * according to the idmapped mount. * * We cannot alter the ACLs returned from the relevant layer as that would * alter the cached values filesystem wide for the lower filesystem. Instead we * can clone the ACLs and then apply the relevant idmapping of the layer. * * This is obviously only relevant when idmapped layers are used. */ struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap, struct inode *inode, int type, bool rcu, bool noperm) { struct inode *realinode; struct posix_acl *acl; struct path realpath; /* Careful in RCU walk mode */ realinode = ovl_i_path_real(inode, &realpath); if (!realinode) { WARN_ON(!rcu); return ERR_PTR(-ECHILD); } if (!IS_POSIXACL(realinode)) return NULL; if (rcu) { /* * If the layer is idmapped drop out of RCU path walk * so we can clone the ACLs. */ if (is_idmapped_mnt(realpath.mnt)) return ERR_PTR(-ECHILD); acl = get_cached_acl_rcu(realinode, type); } else { const struct cred *old_cred; old_cred = ovl_override_creds(inode->i_sb); acl = ovl_get_acl_path(&realpath, posix_acl_xattr_name(type), noperm); revert_creds(old_cred); } return acl; } static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode, struct posix_acl *acl, int type) { int err; struct path realpath; const char *acl_name; const struct cred *old_cred; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdentry = ovl_dentry_upper(dentry); struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry); err = ovl_want_write(dentry); if (err) return err; /* * If ACL is to be removed from a lower file, check if it exists in * the first place before copying it up. 
*/ acl_name = posix_acl_xattr_name(type); if (!acl && !upperdentry) { struct posix_acl *real_acl; ovl_path_lower(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); real_acl = vfs_get_acl(mnt_idmap(realpath.mnt), realdentry, acl_name); revert_creds(old_cred); if (IS_ERR(real_acl)) { err = PTR_ERR(real_acl); goto out_drop_write; } posix_acl_release(real_acl); } if (!upperdentry) { err = ovl_copy_up(dentry); if (err) goto out_drop_write; realdentry = ovl_dentry_upper(dentry); } old_cred = ovl_override_creds(dentry->d_sb); if (acl) err = ovl_do_set_acl(ofs, realdentry, acl_name, acl); else err = ovl_do_remove_acl(ofs, realdentry, acl_name); revert_creds(old_cred); /* copy c/mtime */ ovl_copyattr(inode); out_drop_write: ovl_drop_write(dentry); return err; } int ovl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { int err; struct inode *inode = d_inode(dentry); struct dentry *workdir = ovl_workdir(dentry); struct inode *realinode = ovl_inode_real(inode); if (!IS_POSIXACL(d_inode(workdir))) return -EOPNOTSUPP; if (!realinode->i_op->set_acl) return -EOPNOTSUPP; if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) return -EPERM; /* * Check if sgid bit needs to be cleared (actual setacl operation will * be done with mounter's capabilities and so that won't do it for us). */ if (unlikely(inode->i_mode & S_ISGID) && type == ACL_TYPE_ACCESS && !in_group_p(inode->i_gid) && !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID)) { struct iattr iattr = { .ia_valid = ATTR_KILL_SGID }; err = ovl_setattr(&nop_mnt_idmap, dentry, &iattr); if (err) return err; } return ovl_set_or_remove_acl(dentry, inode, acl, type); } #endif int ovl_update_time(struct inode *inode, int flags) { if (flags & S_ATIME) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = ovl_upperdentry_dereference(OVL_I(inode)), }; if (upperpath.dentry) { touch_atime(&upperpath); inode->i_atime = d_inode(upperpath.dentry)->i_atime; } } return 0; } static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { int err; struct inode *realinode = ovl_inode_realdata(inode); const struct cred *old_cred; if (!realinode) return -EIO; if (!realinode->i_op->fiemap) return -EOPNOTSUPP; old_cred = ovl_override_creds(inode->i_sb); err = realinode->i_op->fiemap(realinode, fieinfo, start, len); revert_creds(old_cred); return err; } /* * Work around the fact that security_file_ioctl() takes a file argument. * Introducing security_inode_fileattr_get/set() hooks would solve this issue * properly. */ static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa, bool set) { struct file *file; unsigned int cmd; int err; file = dentry_open(realpath, O_RDONLY, current_cred()); if (IS_ERR(file)) return PTR_ERR(file); if (set) cmd = fa->fsx_valid ? FS_IOC_FSSETXATTR : FS_IOC_SETFLAGS; else cmd = fa->fsx_valid ? 
FS_IOC_FSGETXATTR : FS_IOC_GETFLAGS; err = security_file_ioctl(file, cmd, 0); fput(file); return err; } int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa) { int err; err = ovl_security_fileattr(realpath, fa, true); if (err) return err; return vfs_fileattr_set(mnt_idmap(realpath->mnt), realpath->dentry, fa); } int ovl_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct path upperpath; const struct cred *old_cred; unsigned int flags; int err; err = ovl_want_write(dentry); if (err) goto out; err = ovl_copy_up(dentry); if (!err) { ovl_path_real(dentry, &upperpath); old_cred = ovl_override_creds(inode->i_sb); /* * Store immutable/append-only flags in xattr and clear them * in upper fileattr (in case they were set by older kernel) * so children of "ovl-immutable" directories lower aliases of * "ovl-immutable" hardlinks could be copied up. * Clear xattr when flags are cleared. */ err = ovl_set_protattr(inode, upperpath.dentry, fa); if (!err) err = ovl_real_fileattr_set(&upperpath, fa); revert_creds(old_cred); /* * Merge real inode flags with inode flags read from * overlay.protattr xattr */ flags = ovl_inode_real(inode)->i_flags & OVL_COPY_I_FLAGS_MASK; BUILD_BUG_ON(OVL_PROT_I_FLAGS_MASK & ~OVL_COPY_I_FLAGS_MASK); flags |= inode->i_flags & OVL_PROT_I_FLAGS_MASK; inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK); /* Update ctime */ ovl_copyattr(inode); } ovl_drop_write(dentry); out: return err; } /* Convert inode protection flags to fileattr flags */ static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa) { BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL); BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON); if (inode->i_flags & S_APPEND) { fa->flags |= FS_APPEND_FL; fa->fsx_xflags |= FS_XFLAG_APPEND; } if (inode->i_flags & S_IMMUTABLE) { fa->flags |= FS_IMMUTABLE_FL; fa->fsx_xflags |= FS_XFLAG_IMMUTABLE; } } int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa) { int err; err = ovl_security_fileattr(realpath, fa, false); if (err) return err; err = vfs_fileattr_get(realpath->dentry, fa); if (err == -ENOIOCTLCMD) err = -ENOTTY; return err; } int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct path realpath; const struct cred *old_cred; int err; ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(inode->i_sb); err = ovl_real_fileattr_get(&realpath, fa); ovl_fileattr_prot_flags(inode, fa); revert_creds(old_cred); return err; } static const struct inode_operations ovl_file_inode_operations = { .setattr = ovl_setattr, .permission = ovl_permission, .getattr = ovl_getattr, .listxattr = ovl_listxattr, .get_inode_acl = ovl_get_inode_acl, .get_acl = ovl_get_acl, .set_acl = ovl_set_acl, .update_time = ovl_update_time, .fiemap = ovl_fiemap, .fileattr_get = ovl_fileattr_get, .fileattr_set = ovl_fileattr_set, }; static const struct inode_operations ovl_symlink_inode_operations = { .setattr = ovl_setattr, .get_link = ovl_get_link, .getattr = ovl_getattr, .listxattr = ovl_listxattr, .update_time = ovl_update_time, }; static const struct inode_operations ovl_special_inode_operations = { .setattr = ovl_setattr, .permission = ovl_permission, .getattr = ovl_getattr, .listxattr = ovl_listxattr, .get_inode_acl = ovl_get_inode_acl, .get_acl = ovl_get_acl, .set_acl = ovl_set_acl, .update_time = ovl_update_time, }; static const struct address_space_operations ovl_aops = { /* For O_DIRECT 
dentry_open() checks f_mapping->a_ops->direct_IO */ .direct_IO = noop_direct_IO, }; /* * It is possible to stack overlayfs instance on top of another * overlayfs instance as lower layer. We need to annotate the * stackable i_mutex locks according to stack level of the super * block instance. An overlayfs instance can never be in stack * depth 0 (there is always a real fs below it). An overlayfs * inode lock will use the lockdep annotation ovl_i_mutex_key[depth]. * * For example, here is a snip from /proc/lockdep_chains after * dir_iterate of nested overlayfs: * * [...] &ovl_i_mutex_dir_key[depth] (stack_depth=2) * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1) * [...] &type->i_mutex_dir_key (stack_depth=0) * * Locking order w.r.t ovl_want_write() is important for nested overlayfs. * * This chain is valid: * - inode->i_rwsem (inode_lock[2]) * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0]) * - OVL_I(inode)->lock (ovl_inode_lock[2]) * - OVL_I(lowerinode)->lock (ovl_inode_lock[1]) * * And this chain is valid: * - inode->i_rwsem (inode_lock[2]) * - OVL_I(inode)->lock (ovl_inode_lock[2]) * - lowerinode->i_rwsem (inode_lock[1]) * - OVL_I(lowerinode)->lock (ovl_inode_lock[1]) * * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is * held, because it is in reverse order of the non-nested case using the same * upper fs: * - inode->i_rwsem (inode_lock[1]) * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0]) * - OVL_I(inode)->lock (ovl_inode_lock[1]) */ #define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode) { #ifdef CONFIG_LOCKDEP static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING]; static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING]; static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING]; int depth = inode->i_sb->s_stack_depth - 1; if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING)) depth = 0; if (S_ISDIR(inode->i_mode)) lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]); else lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]); lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]); #endif } static void ovl_next_ino(struct inode *inode) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); inode->i_ino = atomic_long_inc_return(&ofs->last_ino); if (unlikely(!inode->i_ino)) inode->i_ino = atomic_long_inc_return(&ofs->last_ino); } static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); int xinobits = ovl_xino_bits(ofs); unsigned int xinoshift = 64 - xinobits; /* * When d_ino is consistent with st_ino (samefs or i_ino has enough * bits to encode layer), set the same value used for st_ino to i_ino, * so inode numbers exposed via /proc/locks and the like will be * consistent with d_ino and st_ino values. An i_ino value inconsistent * with d_ino also causes nfsd readdirplus to fail. */ inode->i_ino = ino; if (ovl_same_fs(ofs)) { return; } else if (xinobits && likely(!(ino >> xinoshift))) { inode->i_ino |= (unsigned long)fsid << (xinoshift + 1); return; } /* * For directory inodes on non-samefs with xino disabled or xino * overflow, we allocate a non-persistent inode number, to be used for * resolving st_ino collisions in ovl_map_dev_ino(). * * To avoid ino collision with legitimate xino values from upper * layer (fsid 0), use the lowest xinobit to map the non * persistent inode numbers to the unified st_ino address space.
*/ if (S_ISDIR(inode->i_mode)) { ovl_next_ino(inode); if (xinobits) { inode->i_ino &= ~0UL >> xinobits; inode->i_ino |= 1UL << xinoshift; } } } void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip, unsigned long ino, int fsid) { struct inode *realinode; struct ovl_inode *oi = OVL_I(inode); oi->__upperdentry = oip->upperdentry; oi->oe = oip->oe; oi->redirect = oip->redirect; oi->lowerdata_redirect = oip->lowerdata_redirect; realinode = ovl_inode_real(inode); ovl_copyattr(inode); ovl_copyflags(realinode, inode); ovl_map_ino(inode, ino, fsid); } static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev) { inode->i_mode = mode; inode->i_flags |= S_NOCMTIME; #ifdef CONFIG_FS_POSIX_ACL inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE; #endif ovl_lockdep_annotate_inode_mutex_key(inode); switch (mode & S_IFMT) { case S_IFREG: inode->i_op = &ovl_file_inode_operations; inode->i_fop = &ovl_file_operations; inode->i_mapping->a_ops = &ovl_aops; break; case S_IFDIR: inode->i_op = &ovl_dir_inode_operations; inode->i_fop = &ovl_dir_operations; break; case S_IFLNK: inode->i_op = &ovl_symlink_inode_operations; break; default: inode->i_op = &ovl_special_inode_operations; init_special_inode(inode, mode, rdev); break; } } /* * With inodes index enabled, an overlay inode nlink counts the union of upper * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure * upper inode, the following nlink modifying operations can happen: * * 1. Lower hardlink copy up * 2. Upper hardlink created, unlinked or renamed over * 3. Lower hardlink whiteout or renamed over * * For the first, copy up case, the union nlink does not change, whether the * operation succeeds or fails, but the upper inode nlink may change. * Therefore, before copy up, we store the union nlink value relative to the * lower inode nlink in the index inode xattr .overlay.nlink. * * For the second, upper hardlink case, the union nlink should be incremented * or decremented IFF the operation succeeds, aligned with nlink change of the * upper inode. Therefore, before link/unlink/rename, we store the union nlink * value relative to the upper inode nlink in the index inode. * * For the last, lower cover up case, we simplify things by preceding the * whiteout or cover up with copy up. This makes sure that there is an index * upper inode where the nlink xattr can be stored before the copied up upper * entry is unlinked.
*/ #define OVL_NLINK_ADD_UPPER (1 << 0) /* * On-disk format for indexed nlink: * * nlink relative to the upper inode - "U[+-]NUM" * nlink relative to the lower inode - "L[+-]NUM" */ static int ovl_set_nlink_common(struct dentry *dentry, struct dentry *realdentry, const char *format) { struct inode *inode = d_inode(dentry); struct inode *realinode = d_inode(realdentry); char buf[13]; int len; len = snprintf(buf, sizeof(buf), format, (int) (inode->i_nlink - realinode->i_nlink)); if (WARN_ON(len >= sizeof(buf))) return -EIO; return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry), OVL_XATTR_NLINK, buf, len); } int ovl_set_nlink_upper(struct dentry *dentry) { return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i"); } int ovl_set_nlink_lower(struct dentry *dentry) { return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i"); } unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry, struct dentry *upperdentry, unsigned int fallback) { int nlink_diff; int nlink; char buf[13]; int err; if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1) return fallback; err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK, &buf, sizeof(buf) - 1); if (err < 0) goto fail; buf[err] = '\0'; if ((buf[0] != 'L' && buf[0] != 'U') || (buf[1] != '+' && buf[1] != '-')) goto fail; err = kstrtoint(buf + 1, 10, &nlink_diff); if (err < 0) goto fail; nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink; nlink += nlink_diff; if (nlink <= 0) goto fail; return nlink; fail: pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n", upperdentry, err); return fallback; } struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev) { struct inode *inode; inode = new_inode(sb); if (inode) ovl_fill_inode(inode, mode, rdev); return inode; } static int ovl_inode_test(struct inode *inode, void *data) { return inode->i_private == data; } static int ovl_inode_set(struct inode *inode, void *data) { inode->i_private = data; return 0; } static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry, struct dentry *upperdentry, bool strict) { /* * For directories, @strict verify from lookup path performs consistency * checks, so NULL lower/upper in dentry must match NULL lower/upper in * inode. Non @strict verify from NFS handle decode path passes NULL for * 'unknown' lower/upper. */ if (S_ISDIR(inode->i_mode) && strict) { /* Real lower dir moved to upper layer under us? */ if (!lowerdentry && ovl_inode_lower(inode)) return false; /* Lookup of an uncovered redirect origin? */ if (!upperdentry && ovl_inode_upper(inode)) return false; } /* * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL. * This happens when finding a copied up overlay inode for a renamed * or hardlinked overlay dentry and lower dentry cannot be followed * by origin because lower fs does not support file handles. */ if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry)) return false; /* * Allow non-NULL __upperdentry in inode even if upperdentry is NULL. * This happens when finding a lower alias for a copied up hard link. */ if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry)) return false; return true; } struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, bool is_upper) { struct inode *inode, *key = d_inode(real); inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key); if (!inode) return NULL; if (!ovl_verify_inode(inode, is_upper ? NULL : real, is_upper ? 
real : NULL, false)) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir) { struct inode *key = d_inode(dir); struct inode *trap; bool res; trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key); if (!trap) return false; res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) && !ovl_inode_lower(trap); iput(trap); return res; } /* * Create an inode cache entry for layer root dir, that will intentionally * fail ovl_verify_inode(), so any lookup that will find some layer root * will fail. */ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir) { struct inode *key = d_inode(dir); struct inode *trap; if (!d_is_dir(dir)) return ERR_PTR(-ENOTDIR); trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test, ovl_inode_set, key); if (!trap) return ERR_PTR(-ENOMEM); if (!(trap->i_state & I_NEW)) { /* Conflicting layer roots? */ iput(trap); return ERR_PTR(-ELOOP); } trap->i_mode = S_IFDIR; trap->i_flags = S_DEAD; unlock_new_inode(trap); return trap; } /* * Does overlay inode need to be hashed by lower inode? */ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper, struct dentry *lower, bool index) { struct ovl_fs *ofs = OVL_FS(sb); /* No, if pure upper */ if (!lower) return false; /* Yes, if already indexed */ if (index) return true; /* Yes, if won't be copied up */ if (!ovl_upper_mnt(ofs)) return true; /* No, if lower hardlink is or will be broken on copy up */ if ((upper || !ovl_indexdir(sb)) && !d_is_dir(lower) && d_inode(lower)->i_nlink > 1) return false; /* No, if non-indexed upper with NFS export */ if (ofs->config.nfs_export && upper) return false; /* Otherwise, hash by lower inode for fsnotify */ return true; } static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode, struct inode *key) { return newinode ? inode_insert5(newinode, (unsigned long) key, ovl_inode_test, ovl_inode_set, key) : iget5_locked(sb, (unsigned long) key, ovl_inode_test, ovl_inode_set, key); } struct inode *ovl_get_inode(struct super_block *sb, struct ovl_inode_params *oip) { struct ovl_fs *ofs = OVL_FS(sb); struct dentry *upperdentry = oip->upperdentry; struct ovl_path *lowerpath = ovl_lowerpath(oip->oe); struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; struct inode *inode; struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL; struct path realpath = { .dentry = upperdentry ?: lowerdentry, .mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt, }; bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, oip->index); int fsid = bylower ? lowerpath->layer->fsid : 0; bool is_dir; unsigned long ino = 0; int err = oip->newinode ? -EEXIST : -ENOMEM; if (!realinode) realinode = d_inode(lowerdentry); /* * Copy up origin (lower) may exist for non-indexed upper, but we must * not use lower as hash key if this is a broken hardlink. */ is_dir = S_ISDIR(realinode->i_mode); if (upperdentry || bylower) { struct inode *key = d_inode(bylower ? lowerdentry : upperdentry); unsigned int nlink = is_dir ? 1 : realinode->i_nlink; inode = ovl_iget5(sb, oip->newinode, key); if (!inode) goto out_err; if (!(inode->i_state & I_NEW)) { /* * Verify that the underlying files stored in the inode * match those in the dentry. 
*/ if (!ovl_verify_inode(inode, lowerdentry, upperdentry, true)) { iput(inode); err = -ESTALE; goto out_err; } dput(upperdentry); ovl_free_entry(oip->oe); kfree(oip->redirect); kfree(oip->lowerdata_redirect); goto out; } /* Recalculate nlink for non-dir due to indexing */ if (!is_dir) nlink = ovl_get_nlink(ofs, lowerdentry, upperdentry, nlink); set_nlink(inode, nlink); ino = key->i_ino; } else { /* Lower hardlink that will be broken on copy up */ inode = new_inode(sb); if (!inode) { err = -ENOMEM; goto out_err; } ino = realinode->i_ino; fsid = lowerpath->layer->fsid; } ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev); ovl_inode_init(inode, oip, ino, fsid); if (upperdentry && ovl_is_impuredir(sb, upperdentry)) ovl_set_flag(OVL_IMPURE, inode); if (oip->index) ovl_set_flag(OVL_INDEX, inode); if (bylower) ovl_set_flag(OVL_CONST_INO, inode); /* Check for non-merge dir that may have whiteouts */ if (is_dir) { if (((upperdentry && lowerdentry) || ovl_numlower(oip->oe) > 1) || ovl_path_check_origin_xattr(ofs, &realpath)) { ovl_set_flag(OVL_WHITEOUTS, inode); } } /* Check for immutable/append-only inode flags in xattr */ if (upperdentry) ovl_check_protattr(inode, upperdentry); if (inode->i_state & I_NEW) unlock_new_inode(inode); out: return inode; out_err: pr_warn_ratelimited("failed to get inode (%i)\n", err); inode = ERR_PTR(err); goto out; }
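/*
 * A minimal userspace sketch (illustration only, not kernel code) of the
 * xino packing that ovl_map_ino() above applies when layers are on
 * different filesystems: the top xinobits of the 64-bit inode number
 * carry the layer fsid, and bit (64 - xinobits) is reserved for marking
 * non-persistent directory inode numbers. The helper name xino_pack() is
 * invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t xino_pack(uint64_t real_ino, unsigned int fsid,
			  unsigned int xinobits)
{
	unsigned int xinoshift = 64 - xinobits;

	/* samefs or xino overflow: expose the real inode number as-is */
	if (!xinobits || (real_ino >> xinoshift))
		return real_ino;

	/* place fsid above the reserved non-persistent marker bit */
	return real_ino | ((uint64_t)fsid << (xinoshift + 1));
}

int main(void)
{
	/* with 3 xino bits, fsid 1 lands in the top bits of the inode number */
	printf("0x%llx\n", (unsigned long long)xino_pack(42, 1, 3));
	return 0;
}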
linux-master
fs/overlayfs/inode.c
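/*
 * A minimal userspace sketch (illustration only, not kernel code) of the
 * nlink xattr on-disk format documented in fs/overlayfs/inode.c above:
 * "U[+-]NUM" stores the union nlink relative to the upper inode nlink,
 * "L[+-]NUM" relative to the lower inode nlink, mirroring what
 * ovl_get_nlink() reconstructs. All names below are invented for the
 * example.
 */
#include <stdio.h>
#include <stdlib.h>

static int ovl_nlink_decode(const char *buf, unsigned int upper_nlink,
			    unsigned int lower_nlink, unsigned int *nlink)
{
	long base, diff;
	char *end;

	if (buf[0] == 'U')
		base = upper_nlink;
	else if (buf[0] == 'L')
		base = lower_nlink;
	else
		return -1;

	if (buf[1] != '+' && buf[1] != '-')
		return -1;

	diff = strtol(buf + 1, &end, 10);
	if (*end != '\0' || base + diff <= 0)
		return -1;	/* corrupt xattr, caller falls back */

	*nlink = (unsigned int)(base + diff);
	return 0;
}

int main(void)
{
	unsigned int nlink;

	/* index recorded "L+1": one upper hardlink beyond the lower nlink */
	if (!ovl_nlink_decode("L+1", 1, 2, &nlink))
		printf("union nlink = %u\n", nlink);	/* prints 3 */
	return 0;
}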
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fileattr.h> #include <linux/splice.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/uaccess.h> #include <linux/sched/signal.h> #include <linux/cred.h> #include <linux/namei.h> #include <linux/fdtable.h> #include <linux/ratelimit.h> #include <linux/exportfs.h> #include "overlayfs.h" #define OVL_COPY_UP_CHUNK_SIZE (1 << 20) static int ovl_ccup_set(const char *buf, const struct kernel_param *param) { pr_warn("\"check_copy_up\" module option is obsolete\n"); return 0; } static int ovl_ccup_get(char *buf, const struct kernel_param *param) { return sprintf(buf, "N\n"); } module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644); MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing"); static bool ovl_must_copy_xattr(const char *name) { return !strcmp(name, XATTR_POSIX_ACL_ACCESS) || !strcmp(name, XATTR_POSIX_ACL_DEFAULT) || !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); } static int ovl_copy_acl(struct ovl_fs *ofs, const struct path *path, struct dentry *dentry, const char *acl_name) { int err; struct posix_acl *clone, *real_acl = NULL; real_acl = ovl_get_acl_path(path, acl_name, false); if (!real_acl) return 0; if (IS_ERR(real_acl)) { err = PTR_ERR(real_acl); if (err == -ENODATA || err == -EOPNOTSUPP) return 0; return err; } clone = posix_acl_clone(real_acl, GFP_KERNEL); posix_acl_release(real_acl); /* release original acl */ if (!clone) return -ENOMEM; err = ovl_do_set_acl(ofs, dentry, acl_name, clone); /* release cloned acl */ posix_acl_release(clone); return err; } int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct dentry *new) { struct dentry *old = oldpath->dentry; ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; int error = 0; size_t slen; if (!old->d_inode->i_op->listxattr || !new->d_inode->i_op->listxattr) return 0; list_size = vfs_listxattr(old, NULL, 0); if (list_size <= 0) { if (list_size == -EOPNOTSUPP) return 0; return list_size; } buf = kvzalloc(list_size, GFP_KERNEL); if (!buf) return -ENOMEM; list_size = vfs_listxattr(old, buf, list_size); if (list_size <= 0) { error = list_size; goto out; } for (name = buf; list_size; name += slen) { slen = strnlen(name, list_size) + 1; /* underlying fs providing us with a broken xattr list? */ if (WARN_ON(slen > list_size)) { error = -EIO; break; } list_size -= slen; if (ovl_is_private_xattr(sb, name)) continue; error = security_inode_copy_up_xattr(name); if (error < 0 && error != -EOPNOTSUPP) break; if (error == 1) { error = 0; continue; /* Discard */ } if (is_posix_acl_xattr(name)) { error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name); if (!error) continue; /* POSIX ACLs must be copied.
*/ break; } retry: size = ovl_do_getxattr(oldpath, name, value, value_size); if (size == -ERANGE) size = ovl_do_getxattr(oldpath, name, NULL, 0); if (size < 0) { error = size; break; } if (size > value_size) { void *new; new = kvmalloc(size, GFP_KERNEL); if (!new) { error = -ENOMEM; break; } kvfree(value); value = new; value_size = size; goto retry; } error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0); if (error) { if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name)) break; /* Ignore failure to copy unknown xattrs */ error = 0; } } kvfree(value); out: kvfree(buf); return error; } static int ovl_copy_fileattr(struct inode *inode, const struct path *old, const struct path *new) { struct fileattr oldfa = { .flags_valid = true }; struct fileattr newfa = { .flags_valid = true }; int err; err = ovl_real_fileattr_get(old, &oldfa); if (err) { /* Ntfs-3g returns -EINVAL for "no fileattr support" */ if (err == -ENOTTY || err == -EINVAL) return 0; pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n", old->dentry, err); return err; } /* * We cannot set immutable and append-only flags on upper inode, * because we would not be able to link upper inode to upper dir * nor set overlay private xattr on upper inode. * Store these flags in overlay.protattr xattr instead. */ if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) { err = ovl_set_protattr(inode, new->dentry, &oldfa); if (err == -EPERM) pr_warn_once("copying fileattr: no xattr on upper\n"); else if (err) return err; } /* Don't bother copying flags if none are set */ if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK)) return 0; err = ovl_real_fileattr_get(new, &newfa); if (err) { /* * Returning an error if upper doesn't support fileattr will * result in a regression, so revert to the old behavior. */ if (err == -ENOTTY || err == -EINVAL) { pr_warn_once("copying fileattr: no support on upper\n"); return 0; } pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n", new->dentry, err); return err; } BUILD_BUG_ON(OVL_COPY_FS_FLAGS_MASK & ~FS_COMMON_FL); newfa.flags &= ~OVL_COPY_FS_FLAGS_MASK; newfa.flags |= (oldfa.flags & OVL_COPY_FS_FLAGS_MASK); BUILD_BUG_ON(OVL_COPY_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON); newfa.fsx_xflags &= ~OVL_COPY_FSX_FLAGS_MASK; newfa.fsx_xflags |= (oldfa.fsx_xflags & OVL_COPY_FSX_FLAGS_MASK); return ovl_real_fileattr_set(new, &newfa); } static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry, struct file *new_file, loff_t len) { struct path datapath; struct file *old_file; loff_t old_pos = 0; loff_t new_pos = 0; loff_t cloned; loff_t data_pos = -1; loff_t hole_len; bool skip_hole = false; int error = 0; ovl_path_lowerdata(dentry, &datapath); if (WARN_ON(datapath.dentry == NULL)) return -EIO; old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY); if (IS_ERR(old_file)) return PTR_ERR(old_file); /* Try to use clone_file_range to clone up within the same fs */ cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0); if (cloned == len) goto out_fput; /* Couldn't clone, so now we try to copy the data */ /* Check if lower fs supports seek operation */ if (old_file->f_mode & FMODE_LSEEK) skip_hole = true; while (len) { size_t this_len = OVL_COPY_UP_CHUNK_SIZE; long bytes; if (len < this_len) this_len = len; if (signal_pending_state(TASK_KILLABLE, current)) { error = -EINTR; break; } /* * Filling holes with zeros would cost unnecessary disk space * and slow down the copy-up speed, so we optimize for holes * during copy-up. This relies on the SEEK_DATA implementation * in the lower fs, so if the lower fs does
not support it, copy-up will behave as before. * * The detailed hole-detection logic is as follows: when the next * data position is larger than the current position, we skip that * hole; otherwise we copy data in chunks of OVL_COPY_UP_CHUNK_SIZE. * This may not recognize all kinds of holes and sometimes only * skips part of a hole area, but it is good enough for most use * cases. */ if (skip_hole && data_pos < old_pos) { data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA); if (data_pos > old_pos) { hole_len = data_pos - old_pos; len -= hole_len; old_pos = new_pos = data_pos; continue; } else if (data_pos == -ENXIO) { break; } else if (data_pos < 0) { skip_hole = false; } } bytes = do_splice_direct(old_file, &old_pos, new_file, &new_pos, this_len, SPLICE_F_MOVE); if (bytes <= 0) { error = bytes; break; } WARN_ON(old_pos != new_pos); len -= bytes; } if (!error && ovl_should_sync(ofs)) error = vfs_fsync(new_file, 0); out_fput: fput(old_file); return error; } static int ovl_set_size(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { struct iattr attr = { .ia_valid = ATTR_SIZE, .ia_size = stat->size, }; return ovl_do_notify_change(ofs, upperdentry, &attr); } static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { struct iattr attr = { .ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, .ia_atime = stat->atime, .ia_mtime = stat->mtime, }; return ovl_do_notify_change(ofs, upperdentry, &attr); } int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { int err = 0; if (!S_ISLNK(stat->mode)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = stat->mode, }; err = ovl_do_notify_change(ofs, upperdentry, &attr); } if (!err) { struct iattr attr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_vfsuid = VFSUIDT_INIT(stat->uid), .ia_vfsgid = VFSGIDT_INIT(stat->gid), }; err = ovl_do_notify_change(ofs, upperdentry, &attr); } if (!err) ovl_set_timestamps(ofs, upperdentry, stat); return err; } struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, bool is_upper) { struct ovl_fh *fh; int fh_type, dwords; int buflen = MAX_HANDLE_SZ; uuid_t *uuid = &real->d_sb->s_uuid; int err; /* Make sure the real fid stays 32bit aligned */ BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4); BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255); fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL); if (!fh) return ERR_PTR(-ENOMEM); /* * We encode a non-connectable file handle for non-dir, because we * only need to find the lower inode number and we don't want to pay * the price of reconnecting the dentry. */ dwords = buflen >> 2; fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0); buflen = (dwords << 2); err = -EIO; if (WARN_ON(fh_type < 0) || WARN_ON(buflen > MAX_HANDLE_SZ) || WARN_ON(fh_type == FILEID_INVALID)) goto out_err; fh->fb.version = OVL_FH_VERSION; fh->fb.magic = OVL_FH_MAGIC; fh->fb.type = fh_type; fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN; /* * When we want to decode an overlay dentry from this handle * and all layers are on the same fs, if we get a disconnected real * dentry when we decode the fid, the only way to tell if we should assign * it to upperdentry or to lowerstack is by checking this flag.
*/ if (is_upper) fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER; fh->fb.len = sizeof(fh->fb) + buflen; if (ovl_origin_uuid(ofs)) fh->fb.uuid = *uuid; return fh; out_err: kfree(fh); return ERR_PTR(err); } int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower, struct dentry *upper) { const struct ovl_fh *fh = NULL; int err; /* * When the lower layer doesn't support export operations, store a 'null' * fh, so we can use the overlay.origin xattr to distinguish between a * copy up and a pure upper inode. */ if (ovl_can_decode_fh(lower->d_sb)) { fh = ovl_encode_real_fh(ofs, lower, false); if (IS_ERR(fh)) return PTR_ERR(fh); } /* * Do not fail when upper doesn't support xattrs. */ err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh ? fh->buf : NULL, fh ? fh->fb.len : 0, 0); kfree(fh); /* Ignore -EPERM from setting "user.*" on symlink/special */ return err == -EPERM ? 0 : err; } /* Store file handle of @upper dir in @index dir entry */ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper, struct dentry *index) { const struct ovl_fh *fh; int err; fh = ovl_encode_real_fh(ofs, upper, true); if (IS_ERR(fh)) return PTR_ERR(fh); err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len); kfree(fh); return err; } /* * Create and install index entry. * * Caller must hold i_mutex on indexdir. */ static int ovl_create_index(struct dentry *dentry, struct dentry *origin, struct dentry *upper) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *indexdir = ovl_indexdir(dentry->d_sb); struct inode *dir = d_inode(indexdir); struct dentry *index = NULL; struct dentry *temp = NULL; struct qstr name = { }; int err; /* * For now this is only used for creating index entry for directories, * because non-dirs are copied up directly to index and then hardlinked * to upper dir. * * TODO: implement create index for non-dir, so we can call it when * encoding file handle for non-dir in case index does not exist.
*/ if (WARN_ON(!d_is_dir(dentry))) return -EIO; /* Directory not expected to be indexed before copy up */ if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry)))) return -EIO; err = ovl_get_index_name(ofs, origin, &name); if (err) return err; temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) goto free_name; err = ovl_set_upper_fh(ofs, upper, temp); if (err) goto out; index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); if (IS_ERR(index)) { err = PTR_ERR(index); } else { err = ovl_do_rename(ofs, dir, temp, dir, index, 0); dput(index); } out: if (err) ovl_cleanup(ofs, dir, temp); dput(temp); free_name: kfree(name.name); return err; } struct ovl_copy_up_ctx { struct dentry *parent; struct dentry *dentry; struct path lowerpath; struct kstat stat; struct kstat pstat; const char *link; struct dentry *destdir; struct qstr destname; struct dentry *workdir; bool origin; bool indexed; bool metacopy; bool metacopy_digest; }; static int ovl_link_up(struct ovl_copy_up_ctx *c) { int err; struct dentry *upper; struct dentry *upperdir = ovl_dentry_upper(c->parent); struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(upperdir); /* Mark parent "impure" because it may now contain non-pure upper */ err = ovl_set_impure(c->parent, upperdir); if (err) return err; err = ovl_set_nlink_lower(c->dentry); if (err) return err; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir, c->dentry->d_name.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); dput(upper); if (!err) { /* Restore timestamps on parent (best effort) */ ovl_set_timestamps(ofs, upperdir, &c->pstat); ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } } inode_unlock(udir); if (err) return err; err = ovl_set_nlink_upper(c->dentry); return err; } static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct file *new_file; int err; if (!S_ISREG(c->stat.mode) || c->metacopy || !c->stat.size) return 0; new_file = ovl_path_open(temp, O_LARGEFILE | O_WRONLY); if (IS_ERR(new_file)) return PTR_ERR(new_file); err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size); fput(new_file); return err; } static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode = d_inode(c->dentry); struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = temp }; int err; err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp); if (err) return err; if (inode->i_flags & OVL_COPY_I_FLAGS_MASK && (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) { /* * Copy the fileattr inode flags that are the source of already * copied i_flags */ err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath); if (err) return err; } /* * Store identifier of lower inode in upper inode xattr to * allow lookup of the copy up origin inode. * * Don't set origin when we are breaking the association with a lower * hard link. 
*/ if (c->origin) { err = ovl_set_origin(ofs, c->lowerpath.dentry, temp); if (err) return err; } if (c->metacopy) { struct path lowerdatapath; struct ovl_metacopy metacopy_data = OVL_METACOPY_INIT; ovl_path_lowerdata(c->dentry, &lowerdatapath); if (WARN_ON_ONCE(lowerdatapath.dentry == NULL)) return -EIO; err = ovl_get_verity_digest(ofs, &lowerdatapath, &metacopy_data); if (err) return err; if (metacopy_data.digest_algo) c->metacopy_digest = true; err = ovl_set_metacopy_xattr(ofs, temp, &metacopy_data); if (err) return err; } inode_lock(temp->d_inode); if (S_ISREG(c->stat.mode)) err = ovl_set_size(ofs, temp, &c->stat); if (!err) err = ovl_set_attr(ofs, temp, &c->stat); inode_unlock(temp->d_inode); return err; } struct ovl_cu_creds { const struct cred *old; struct cred *new; }; static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc) { int err; cc->old = cc->new = NULL; err = security_inode_copy_up(dentry, &cc->new); if (err < 0) return err; if (cc->new) cc->old = override_creds(cc->new); return 0; } static void ovl_revert_cu_creds(struct ovl_cu_creds *cc) { if (cc->new) { revert_creds(cc->old); put_cred(cc->new); } } /* * Copyup using workdir to prepare temp file. Used when copying up directories, * special files or when upper fs doesn't support O_TMPFILE. */ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode; struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir); struct path path = { .mnt = ovl_upper_mnt(ofs) }; struct dentry *temp, *upper; struct ovl_cu_creds cc; int err; struct ovl_cattr cattr = { /* Can't properly set mode on creation because of the umask */ .mode = c->stat.mode & S_IFMT, .rdev = c->stat.rdev, .link = c->link }; /* workdir and destdir could be the same when copying up to indexdir */ err = -EIO; if (lock_rename(c->workdir, c->destdir) != NULL) goto unlock; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) goto unlock; temp = ovl_create_temp(ofs, c->workdir, &cattr); ovl_revert_cu_creds(&cc); err = PTR_ERR(temp); if (IS_ERR(temp)) goto unlock; /* * Copy up data first and then xattrs. Writing data after * xattrs will remove security.capability xattr automatically. 
*/ path.dentry = temp; err = ovl_copy_up_data(c, &path); if (err) goto cleanup; err = ovl_copy_up_metadata(c, temp); if (err) goto cleanup; if (S_ISDIR(c->stat.mode) && c->indexed) { err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp); if (err) goto cleanup; } upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto cleanup; err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0); dput(upper); if (err) goto cleanup; inode = d_inode(c->dentry); if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, inode); else ovl_clear_flag(OVL_HAS_DIGEST, inode); ovl_clear_flag(OVL_VERIFIED_DIGEST, inode); if (!c->metacopy) ovl_set_upperdata(inode); ovl_inode_update(inode, temp); if (S_ISDIR(inode->i_mode)) ovl_set_flag(OVL_WHITEOUTS, inode); unlock: unlock_rename(c->workdir, c->destdir); return err; cleanup: ovl_cleanup(ofs, wdir, temp); dput(temp); goto unlock; } /* Copyup using O_TMPFILE which does not require cross dir locking */ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(c->destdir); struct dentry *temp, *upper; struct file *tmpfile; struct ovl_cu_creds cc; int err; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) return err; tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode); ovl_revert_cu_creds(&cc); if (IS_ERR(tmpfile)) return PTR_ERR(tmpfile); temp = tmpfile->f_path.dentry; if (!c->metacopy && c->stat.size) { err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size); if (err) goto out_fput; } err = ovl_copy_up_metadata(c, temp); if (err) goto out_fput; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, temp, udir, upper); dput(upper); } inode_unlock(udir); if (err) goto out_fput; if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); else ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); if (!c->metacopy) ovl_set_upperdata(d_inode(c->dentry)); ovl_inode_update(d_inode(c->dentry), dget(temp)); out_fput: fput(tmpfile); return err; } /* * Copy up a single dentry * * All renames start with copy up of source if necessary. The actual * rename will only proceed once the copy up was successful. Copy up uses * upper parent i_mutex for exclusion. Since rename can change d_parent it * is possible that the copy up will lock the old parent. At that point * the file will have already been copied up anyway. */ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) { int err; struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); bool to_index = false; /* * Indexed non-dir is copied up directly to the index entry and then * hardlinked to upper dir. Indexed dir is copied up to indexdir, * then index entry is created and then copied up dir installed. * Copying dir up to indexdir instead of workdir simplifies locking. 
*/ if (ovl_need_index(c->dentry)) { c->indexed = true; if (S_ISDIR(c->stat.mode)) c->workdir = ovl_indexdir(c->dentry->d_sb); else to_index = true; } if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) c->origin = true; if (to_index) { c->destdir = ovl_indexdir(c->dentry->d_sb); err = ovl_get_index_name(ofs, c->lowerpath.dentry, &c->destname); if (err) return err; } else if (WARN_ON(!c->parent)) { /* Disconnected dentry must be copied up to index dir */ return -EIO; } else { /* * Mark parent "impure" because it may now contain non-pure * upper */ err = ovl_set_impure(c->parent, c->destdir); if (err) return err; } /* Should we copyup with O_TMPFILE or with workdir? */ if (S_ISREG(c->stat.mode) && ofs->tmpfile) err = ovl_copy_up_tmpfile(c); else err = ovl_copy_up_workdir(c); if (err) goto out; if (c->indexed) ovl_set_flag(OVL_INDEX, d_inode(c->dentry)); if (to_index) { /* Initialize nlink for copy up of disconnected dentry */ err = ovl_set_nlink_upper(c->dentry); } else { struct inode *udir = d_inode(c->destdir); /* Restore timestamps on parent (best effort) */ inode_lock(udir); ovl_set_timestamps(ofs, c->destdir, &c->pstat); inode_unlock(udir); ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry)); } out: if (to_index) kfree(c->destname.name); return err; } static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode, int flags) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); if (!ofs->config.metacopy) return false; if (!S_ISREG(mode)) return false; if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC))) return false; /* Fall back to full copy if no fsverity on source data and we require verity */ if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { struct path lowerdata; ovl_path_lowerdata(dentry, &lowerdata); if (WARN_ON_ONCE(lowerdata.dentry == NULL) || ovl_ensure_verity_loaded(&lowerdata) || !fsverity_active(d_inode(lowerdata.dentry))) { return false; } } return true; } static ssize_t ovl_getxattr_value(const struct path *path, char *name, char **value) { ssize_t res; char *buf; res = ovl_do_getxattr(path, name, NULL, 0); if (res == -ENODATA || res == -EOPNOTSUPP) res = 0; if (res > 0) { buf = kzalloc(res, GFP_KERNEL); if (!buf) return -ENOMEM; res = ovl_do_getxattr(path, name, buf, res); if (res < 0) kfree(buf); else *value = buf; } return res; } /* Copy up data of an inode which was copied up metadata only in the past. */ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct path upperpath; int err; char *capability = NULL; ssize_t cap_size; ovl_path_upper(c->dentry, &upperpath); if (WARN_ON(upperpath.dentry == NULL)) return -EIO; if (c->stat.size) { err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS, &capability); if (cap_size < 0) goto out; } err = ovl_copy_up_data(c, &upperpath); if (err) goto out_free; /* * Writing to upper file will clear security.capability xattr. We * don't want that to happen for normal copy-up operation. 
*/ if (capability) { err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS, capability, cap_size, 0); if (err) goto out_free; } err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY); if (err) goto out_free; ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); ovl_set_upperdata(d_inode(c->dentry)); out_free: kfree(capability); out: return err; } static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, int flags) { int err; DEFINE_DELAYED_CALL(done); struct path parentpath; struct ovl_copy_up_ctx ctx = { .parent = parent, .dentry = dentry, .workdir = ovl_workdir(dentry), }; if (WARN_ON(!ctx.workdir)) return -EROFS; ovl_path_lower(dentry, &ctx.lowerpath); err = vfs_getattr(&ctx.lowerpath, &ctx.stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) return err; if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) || !kgid_has_mapping(current_user_ns(), ctx.stat.gid)) return -EOVERFLOW; ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags); if (parent) { ovl_path_upper(parent, &parentpath); ctx.destdir = parentpath.dentry; ctx.destname = dentry->d_name; err = vfs_getattr(&parentpath, &ctx.pstat, STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); if (err) return err; } /* maybe truncate regular file. this has no effect on dirs */ if (flags & O_TRUNC) ctx.stat.size = 0; if (S_ISLNK(ctx.stat.mode)) { ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done); if (IS_ERR(ctx.link)) return PTR_ERR(ctx.link); } err = ovl_copy_up_start(dentry, flags); /* err < 0: interrupted, err > 0: raced with another copy-up */ if (unlikely(err)) { if (err > 0) err = 0; } else { if (!ovl_dentry_upper(dentry)) err = ovl_do_copy_up(&ctx); if (!err && parent && !ovl_dentry_has_upper_alias(dentry)) err = ovl_link_up(&ctx); if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags)) err = ovl_copy_up_meta_inode_data(&ctx); ovl_copy_up_end(dentry); } do_delayed_call(&done); return err; } static int ovl_copy_up_flags(struct dentry *dentry, int flags) { int err = 0; const struct cred *old_cred; bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED); /* * With NFS export, copy up can get called for a disconnected non-dir. * In this case, we will copy up lower inode to index dir without * linking it to upper dir. */ if (WARN_ON(disconnected && d_is_dir(dentry))) return -EIO; /* * We may not need lowerdata if we are only doing metacopy up, but it is * not very important to optimize this case, so do lazy lowerdata lookup * before any copy up, so we can do it before taking ovl_inode_lock(). 
*/ err = ovl_verify_lowerdata(dentry); if (err) return err; old_cred = ovl_override_creds(dentry->d_sb); while (!err) { struct dentry *next; struct dentry *parent = NULL; if (ovl_already_copied_up(dentry, flags)) break; next = dget(dentry); /* find the topmost dentry not yet copied up */ for (; !disconnected;) { parent = dget_parent(next); if (ovl_dentry_upper(parent)) break; dput(next); next = parent; } err = ovl_copy_up_one(parent, next, flags); dput(parent); dput(next); } revert_creds(old_cred); return err; } static bool ovl_open_need_copy_up(struct dentry *dentry, int flags) { /* Copy up of disconnected dentry does not set upper alias */ if (ovl_already_copied_up(dentry, flags)) return false; if (special_file(d_inode(dentry)->i_mode)) return false; if (!ovl_open_flags_need_copy_up(flags)) return false; return true; } int ovl_maybe_copy_up(struct dentry *dentry, int flags) { int err = 0; if (ovl_open_need_copy_up(dentry, flags)) { err = ovl_want_write(dentry); if (!err) { err = ovl_copy_up_flags(dentry, flags); ovl_drop_write(dentry); } } return err; } int ovl_copy_up_with_data(struct dentry *dentry) { return ovl_copy_up_flags(dentry, O_WRONLY); } int ovl_copy_up(struct dentry *dentry) { return ovl_copy_up_flags(dentry, 0); }
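/*
 * A minimal userspace sketch (illustration only, not kernel code) of the
 * chunked copy with hole skipping performed by ovl_copy_up_file() above:
 * jump over holes with SEEK_DATA where supported, otherwise fall back to
 * plain chunked copying. The kernel code additionally tries
 * clone_file_range first and caches the last SEEK_DATA result; both are
 * omitted here for brevity, and copy_up_data() is a name invented for
 * this example.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

#define CHUNK (1 << 20)	/* mirrors OVL_COPY_UP_CHUNK_SIZE */

static int copy_up_data(int src, int dst, off_t len)
{
	static char buf[CHUNK];
	off_t pos = 0;
	int skip_hole = 1;

	while (pos < len) {
		size_t want = (size_t)(len - pos < CHUNK ? len - pos : CHUNK);
		ssize_t n;

		if (skip_hole) {
			off_t data = lseek(src, pos, SEEK_DATA);

			if (data > pos) {
				pos = data;	/* skip the hole */
				continue;
			}
			if (data < 0 && errno == ENXIO)
				break;		/* hole extends to EOF */
			if (data < 0)
				skip_hole = 0;	/* no SEEK_DATA support */
		}

		n = pread(src, buf, want, pos);
		if (n <= 0)
			return n ? -1 : 0;
		/* pwrite() at explicit offsets leaves skipped ranges sparse */
		if (pwrite(dst, buf, (size_t)n, pos) != n)
			return -1;
		pos += n;
	}
	return 0;
}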
linux-master
fs/overlayfs/copy_up.c
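/*
 * A minimal userspace sketch (illustration only, not kernel code) of the
 * index entry naming scheme used by ovl_get_index_name_fh() in
 * fs/overlayfs/namei.c below: the index directory entry name is simply
 * the hex encoding of the origin file handle bytes (the kernel uses
 * bin2hex() for this). fh_to_index_name() is a name invented for this
 * example.
 */
#include <stdio.h>

static void fh_to_index_name(const unsigned char *fh, size_t fh_len,
			     char *name, size_t name_len)
{
	static const char hex[] = "0123456789abcdef";
	size_t i;

	/* needs 2 output chars per fh byte, plus a terminator */
	if (name_len < fh_len * 2 + 1) {
		name[0] = '\0';
		return;
	}
	for (i = 0; i < fh_len; i++) {
		name[2 * i] = hex[fh[i] >> 4];
		name[2 * i + 1] = hex[fh[i] & 0xf];
	}
	name[2 * fh_len] = '\0';
}

int main(void)
{
	const unsigned char fh[] = { 0x8f, 0x00, 0x2a };
	char name[16];

	fh_to_index_name(fh, sizeof(fh), name, sizeof(name));
	printf("%s\n", name);	/* prints "8f002a" */
	return 0;
}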
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 Novell Inc. * Copyright (C) 2016 Red Hat, Inc. */ #include <linux/fs.h> #include <linux/cred.h> #include <linux/ctype.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/ratelimit.h> #include <linux/mount.h> #include <linux/exportfs.h> #include "overlayfs.h" #include "../internal.h" /* for vfs_path_lookup */ struct ovl_lookup_data { struct super_block *sb; struct vfsmount *mnt; struct qstr name; bool is_dir; bool opaque; bool stop; bool last; char *redirect; int metacopy; /* Referring to last redirect xattr */ bool absolute_redirect; }; static int ovl_check_redirect(const struct path *path, struct ovl_lookup_data *d, size_t prelen, const char *post) { int res; char *buf; struct ovl_fs *ofs = OVL_FS(d->sb); d->absolute_redirect = false; buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post)); if (IS_ERR_OR_NULL(buf)) return PTR_ERR(buf); if (buf[0] == '/') { d->absolute_redirect = true; /* * One of the ancestor path elements in an absolute path * lookup in ovl_lookup_layer() could have been opaque and * that will stop further lookup in lower layers (d->stop=true) * But we have found an absolute redirect in descendant path * element and that should force continue lookup in lower * layers (reset d->stop). */ d->stop = false; } else { res = strlen(buf) + 1; memmove(buf + prelen, buf, res); memcpy(buf, d->name.name, prelen); } strcat(buf, post); kfree(d->redirect); d->redirect = buf; d->name.name = d->redirect; d->name.len = strlen(d->redirect); return 0; } static int ovl_acceptable(void *ctx, struct dentry *dentry) { /* * A non-dir origin may be disconnected, which is fine, because * we only need it for its unique inode number. */ if (!d_is_dir(dentry)) return 1; /* Don't decode a deleted empty directory */ if (d_unhashed(dentry)) return 0; /* Check if directory belongs to the layer we are decoding from */ return is_subdir(dentry, ((struct vfsmount *)ctx)->mnt_root); } /* * Check validity of an overlay file handle buffer. * * Return 0 for a valid file handle. * Return -ENODATA for "origin unknown". * Return <0 for an invalid file handle. 
*/ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len) { if (fb_len < sizeof(struct ovl_fb) || fb_len < fb->len) return -EINVAL; if (fb->magic != OVL_FH_MAGIC) return -EINVAL; /* Treat larger version and unknown flags as "origin unknown" */ if (fb->version > OVL_FH_VERSION || fb->flags & ~OVL_FH_FLAG_ALL) return -ENODATA; /* Treat endianness mismatch as "origin unknown" */ if (!(fb->flags & OVL_FH_FLAG_ANY_ENDIAN) && (fb->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN) return -ENODATA; return 0; } static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *upperdentry, enum ovl_xattr ox) { int res, err; struct ovl_fh *fh = NULL; res = ovl_getxattr_upper(ofs, upperdentry, ox, NULL, 0); if (res < 0) { if (res == -ENODATA || res == -EOPNOTSUPP) return NULL; goto fail; } /* Zero size value means "copied up but origin unknown" */ if (res == 0) return NULL; fh = kzalloc(res + OVL_FH_WIRE_OFFSET, GFP_KERNEL); if (!fh) return ERR_PTR(-ENOMEM); res = ovl_getxattr_upper(ofs, upperdentry, ox, fh->buf, res); if (res < 0) goto fail; err = ovl_check_fb_len(&fh->fb, res); if (err < 0) { if (err == -ENODATA) goto out; goto invalid; } return fh; out: kfree(fh); return NULL; fail: pr_warn_ratelimited("failed to get origin (%i)\n", res); goto out; invalid: pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh); goto out; } struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh, struct vfsmount *mnt, bool connected) { struct dentry *real; int bytes; if (!capable(CAP_DAC_READ_SEARCH)) return NULL; /* * Make sure that the stored uuid matches the uuid of the lower * layer where file handle will be decoded. * In case of uuid=off option just make sure that stored uuid is null. */ if (ovl_origin_uuid(ofs) ? !uuid_equal(&fh->fb.uuid, &mnt->mnt_sb->s_uuid) : !uuid_is_null(&fh->fb.uuid)) return NULL; bytes = (fh->fb.len - offsetof(struct ovl_fb, fid)); real = exportfs_decode_fh(mnt, (struct fid *)fh->fb.fid, bytes >> 2, (int)fh->fb.type, connected ? ovl_acceptable : NULL, mnt); if (IS_ERR(real)) { /* * Treat stale file handle to lower file as "origin unknown". * upper file handle could become stale when upper file is * unlinked and this information is needed to handle stale * index entries correctly. 
*/ if (real == ERR_PTR(-ESTALE) && !(fh->fb.flags & OVL_FH_FLAG_PATH_UPPER)) real = NULL; return real; } if (ovl_dentry_weird(real)) { dput(real); return NULL; } return real; } static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path) { return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE); } static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d, const char *name, struct dentry *base, int len, bool drop_negative) { struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len); if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { if (drop_negative && ret->d_lockref.count == 1) { spin_lock(&ret->d_lock); /* Recheck condition under lock */ if (d_is_negative(ret) && ret->d_lockref.count == 1) __d_drop(ret); spin_unlock(&ret->d_lock); } dput(ret); ret = ERR_PTR(-ENOENT); } return ret; } static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, const char *name, unsigned int namelen, size_t prelen, const char *post, struct dentry **ret, bool drop_negative) { struct dentry *this; struct path path; int err; bool last_element = !post[0]; this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative); if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; if (err == -ENOENT || err == -ENAMETOOLONG) goto out; goto out_err; } if (ovl_dentry_weird(this)) { /* Don't support traversing automounts and other weirdness */ err = -EREMOTE; goto out_err; } if (ovl_is_whiteout(this)) { d->stop = d->opaque = true; goto put_and_out; } /* * This dentry should be a regular file if previous layer lookup * found a metacopy dentry. */ if (last_element && d->metacopy && !d_is_reg(this)) { d->stop = true; goto put_and_out; } path.dentry = this; path.mnt = d->mnt; if (!d_can_lookup(this)) { if (d->is_dir || !last_element) { d->stop = true; goto put_and_out; } err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL); if (err < 0) goto out_err; d->metacopy = err; d->stop = !d->metacopy; if (!d->metacopy || d->last) goto out; } else { if (ovl_lookup_trap_inode(d->sb, this)) { /* Caught in a trap of overlapping layers */ err = -ELOOP; goto out_err; } if (last_element) d->is_dir = true; if (d->last) goto out; if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) { d->stop = true; if (last_element) d->opaque = true; goto out; } } err = ovl_check_redirect(&path, d, prelen, post); if (err) goto out_err; out: *ret = this; return 0; put_and_out: dput(this); this = NULL; goto out; out_err: dput(this); return err; } static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, struct dentry **ret, bool drop_negative) { /* Counting down from the end, since the prefix can change */ size_t rem = d->name.len - 1; struct dentry *dentry = NULL; int err; if (d->name.name[0] != '/') return ovl_lookup_single(base, d, d->name.name, d->name.len, 0, "", ret, drop_negative); while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) { const char *s = d->name.name + d->name.len - rem; const char *next = strchrnul(s, '/'); size_t thislen = next - s; bool end = !next[0]; /* Verify we did not go off the rails */ if (WARN_ON(s[-1] != '/')) return -EIO; err = ovl_lookup_single(base, d, s, thislen, d->name.len - rem, next, &base, drop_negative); dput(dentry); if (err) return err; dentry = base; if (end) break; rem -= thislen + 1; if (WARN_ON(rem >= d->name.len)) return -EIO; } *ret = dentry; return 0; } static int ovl_lookup_data_layer(struct dentry *dentry, const char *redirect, const struct ovl_layer *layer, struct path *datapath) { int err; 
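/* Resolve the stored absolute redirect relative to this data-only layer's root: LOOKUP_BENEATH, LOOKUP_NO_SYMLINKS and LOOKUP_NO_XDEV confine the walk to this layer, so a crafted redirect cannot escape it via "..", symlinks or mount crossings. */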
err = vfs_path_lookup(layer->mnt->mnt_root, layer->mnt, redirect, LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | LOOKUP_NO_XDEV, datapath); pr_debug("lookup lowerdata (%pd2, redirect=\"%s\", layer=%d, err=%i)\n", dentry, redirect, layer->idx, err); if (err) return err; err = -EREMOTE; if (ovl_dentry_weird(datapath->dentry)) goto out_path_put; err = -ENOENT; /* Only regular file is acceptable as lower data */ if (!d_is_reg(datapath->dentry)) goto out_path_put; return 0; out_path_put: path_put(datapath); return err; } /* Lookup in data-only layers by absolute redirect to layer root */ static int ovl_lookup_data_layers(struct dentry *dentry, const char *redirect, struct ovl_path *lowerdata) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); const struct ovl_layer *layer; struct path datapath; int err = -ENOENT; int i; layer = &ofs->layers[ofs->numlayer - ofs->numdatalayer]; for (i = 0; i < ofs->numdatalayer; i++, layer++) { err = ovl_lookup_data_layer(dentry, redirect, layer, &datapath); if (!err) { mntput(datapath.mnt); lowerdata->dentry = datapath.dentry; lowerdata->layer = layer; return 0; } } return err; } int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected, struct dentry *upperdentry, struct ovl_path **stackp) { struct dentry *origin = NULL; int i; for (i = 1; i <= ovl_numlowerlayer(ofs); i++) { /* * If lower fs uuid is not unique among lower fs we cannot match * fh->uuid to layer. */ if (ofs->layers[i].fsid && ofs->layers[i].fs->bad_uuid) continue; origin = ovl_decode_real_fh(ofs, fh, ofs->layers[i].mnt, connected); if (origin) break; } if (!origin) return -ESTALE; else if (IS_ERR(origin)) return PTR_ERR(origin); if (upperdentry && !ovl_is_whiteout(upperdentry) && inode_wrong_type(d_inode(upperdentry), d_inode(origin)->i_mode)) goto invalid; if (!*stackp) *stackp = kmalloc(sizeof(struct ovl_path), GFP_KERNEL); if (!*stackp) { dput(origin); return -ENOMEM; } **stackp = (struct ovl_path){ .dentry = origin, .layer = &ofs->layers[i] }; return 0; invalid: pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n", upperdentry, d_inode(upperdentry)->i_mode & S_IFMT, d_inode(origin)->i_mode & S_IFMT); dput(origin); return -ESTALE; } static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry, struct ovl_path **stackp) { struct ovl_fh *fh = ovl_get_fh(ofs, upperdentry, OVL_XATTR_ORIGIN); int err; if (IS_ERR_OR_NULL(fh)) return PTR_ERR(fh); err = ovl_check_origin_fh(ofs, fh, false, upperdentry, stackp); kfree(fh); if (err) { if (err == -ESTALE) return 0; return err; } return 0; } /* * Verify that @fh matches the file handle stored in xattr @name. * Return 0 on match, -ESTALE on mismatch, < 0 on error. */ static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox, const struct ovl_fh *fh) { struct ovl_fh *ofh = ovl_get_fh(ofs, dentry, ox); int err = 0; if (!ofh) return -ENODATA; if (IS_ERR(ofh)) return PTR_ERR(ofh); if (fh->fb.len != ofh->fb.len || memcmp(&fh->fb, &ofh->fb, fh->fb.len)) err = -ESTALE; kfree(ofh); return err; } /* * Verify that @real dentry matches the file handle stored in xattr @name. * * If @set is true and there is no stored file handle, encode @real and store * file handle in xattr @name. * * Return 0 on match, -ESTALE on mismatch, -ENODATA on no xattr, < 0 on error. 
*/ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox, struct dentry *real, bool is_upper, bool set) { struct inode *inode; struct ovl_fh *fh; int err; fh = ovl_encode_real_fh(ofs, real, is_upper); err = PTR_ERR(fh); if (IS_ERR(fh)) { fh = NULL; goto fail; } err = ovl_verify_fh(ofs, dentry, ox, fh); if (set && err == -ENODATA) err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len); if (err) goto fail; out: kfree(fh); return err; fail: inode = d_inode(real); pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n", is_upper ? "upper" : "origin", real, inode ? inode->i_ino : 0, err); goto out; } /* Get upper dentry from index */ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index, bool connected) { struct ovl_fh *fh; struct dentry *upper; if (!d_is_dir(index)) return dget(index); fh = ovl_get_fh(ofs, index, OVL_XATTR_UPPER); if (IS_ERR_OR_NULL(fh)) return ERR_CAST(fh); upper = ovl_decode_real_fh(ofs, fh, ovl_upper_mnt(ofs), connected); kfree(fh); if (IS_ERR_OR_NULL(upper)) return upper ?: ERR_PTR(-ESTALE); if (!d_is_dir(upper)) { pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n", index, upper); dput(upper); return ERR_PTR(-EIO); } return upper; } /* * Verify that an index entry name matches the origin file handle stored in * OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path. * Return 0 on match, -ESTALE on mismatch or stale origin, < 0 on error. */ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index) { struct ovl_fh *fh = NULL; size_t len; struct ovl_path origin = { }; struct ovl_path *stack = &origin; struct dentry *upper = NULL; int err; if (!d_inode(index)) return 0; err = -EINVAL; if (index->d_name.len < sizeof(struct ovl_fb)*2) goto fail; err = -ENOMEM; len = index->d_name.len / 2; fh = kzalloc(len + OVL_FH_WIRE_OFFSET, GFP_KERNEL); if (!fh) goto fail; err = -EINVAL; if (hex2bin(fh->buf, index->d_name.name, len)) goto fail; err = ovl_check_fb_len(&fh->fb, len); if (err) goto fail; /* * Whiteout index entries are used as an indication that an exported * overlay file handle should be treated as stale (i.e. after unlink * of the overlay inode). These entries contain no origin xattr. */ if (ovl_is_whiteout(index)) goto out; /* * Verifying directory index entries are not stale is expensive, so * only verify stale dir index if NFS export is enabled. */ if (d_is_dir(index) && !ofs->config.nfs_export) goto out; /* * Directory index entries should have 'upper' xattr pointing to the * real upper dir. Non-dir index entries are hardlinks to the upper * real inode. For non-dir index, we can read the copy up origin xattr * directly from the index dentry, but for dir index we first need to * decode the upper directory. */ upper = ovl_index_upper(ofs, index, false); if (IS_ERR_OR_NULL(upper)) { err = PTR_ERR(upper); /* * Directory index entries with no 'upper' xattr need to be * removed. When dir index entry has a stale 'upper' xattr, * we assume that upper dir was removed and we treat the dir * index as orphan entry that needs to be whited out. 
*/ if (err == -ESTALE) goto orphan; else if (!err) err = -ESTALE; goto fail; } err = ovl_verify_fh(ofs, upper, OVL_XATTR_ORIGIN, fh); dput(upper); if (err) goto fail; /* Check if non-dir index is orphan and don't warn before cleaning it */ if (!d_is_dir(index) && d_inode(index)->i_nlink == 1) { err = ovl_check_origin_fh(ofs, fh, false, index, &stack); if (err) goto fail; if (ovl_get_nlink(ofs, origin.dentry, index, 0) == 0) goto orphan; } out: dput(origin.dentry); kfree(fh); return err; fail: pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n", index, d_inode(index)->i_mode & S_IFMT, err); goto out; orphan: pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n", index, d_inode(index)->i_mode & S_IFMT, d_inode(index)->i_nlink); err = -ENOENT; goto out; } static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name) { char *n, *s; n = kcalloc(fh->fb.len, 2, GFP_KERNEL); if (!n) return -ENOMEM; s = bin2hex(n, fh->buf, fh->fb.len); *name = (struct qstr) QSTR_INIT(n, s - n); return 0; } /* * Lookup in indexdir for the index entry of a lower real inode or a copy up * origin inode. The index entry name is the hex representation of the lower * inode file handle. * * If the index dentry is negative, then either no lower aliases have been * copied up yet, or aliases have been copied up in older kernels and are * not indexed. * * If the index dentry for a copy up origin inode is positive, but points * to an inode different than the upper inode, then either the upper inode * has been copied up and not indexed or it was indexed, but since then * index dir was cleared. Either way, that index cannot be used to identify * the overlay inode. */ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin, struct qstr *name) { struct ovl_fh *fh; int err; fh = ovl_encode_real_fh(ofs, origin, false); if (IS_ERR(fh)) return PTR_ERR(fh); err = ovl_get_index_name_fh(fh, name); kfree(fh); return err; } /* Lookup index by file handle for NFS export */ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh) { struct dentry *index; struct qstr name; int err; err = ovl_get_index_name_fh(fh, &name); if (err) return ERR_PTR(err); index = lookup_positive_unlocked(name.name, ofs->indexdir, name.len); kfree(name.name); if (IS_ERR(index)) { if (PTR_ERR(index) == -ENOENT) index = NULL; return index; } if (ovl_is_whiteout(index)) err = -ESTALE; else if (ovl_dentry_weird(index)) err = -EIO; else return index; dput(index); return ERR_PTR(err); } struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper, struct dentry *origin, bool verify) { struct dentry *index; struct inode *inode; struct qstr name; bool is_dir = d_is_dir(origin); int err; err = ovl_get_index_name(ofs, origin, &name); if (err) return ERR_PTR(err); index = lookup_one_positive_unlocked(ovl_upper_mnt_idmap(ofs), name.name, ofs->indexdir, name.len); if (IS_ERR(index)) { err = PTR_ERR(index); if (err == -ENOENT) { index = NULL; goto out; } pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n" "overlayfs: mount with '-o index=off' to disable inodes index.\n", d_inode(origin)->i_ino, name.len, name.name, err); goto out; } inode = d_inode(index); if (ovl_is_whiteout(index) && !verify) { /* * When index lookup is called with !verify for decoding an * overlay file handle, a whiteout index implies that decode * should treat the file handle as stale, and there is no need * to print a warning about it.
*/ dput(index); index = ERR_PTR(-ESTALE); goto out; } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) || inode_wrong_type(inode, d_inode(origin)->i_mode)) { /* * Index should always be of the same file type as origin * except for the case of a whiteout index. A whiteout * index should only exist if all lower aliases have been * unlinked, which means that finding a lower origin on lookup * whose index is a whiteout should be treated as an error. */ pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n", index, d_inode(index)->i_mode & S_IFMT, d_inode(origin)->i_mode & S_IFMT); goto fail; } else if (is_dir && verify) { if (!upper) { pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n", origin, index); goto fail; } /* Verify that dir index 'upper' xattr points to upper dir */ err = ovl_verify_upper(ofs, index, upper, false); if (err) { if (err == -ESTALE) { pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n", upper, origin, index); } goto fail; } } else if (upper && d_inode(upper) != inode) { goto out_dput; } out: kfree(name.name); return index; out_dput: dput(index); index = NULL; goto out; fail: dput(index); index = ERR_PTR(-EIO); goto out; } /* * Returns next layer in stack starting from top. * Returns -1 if this is the last layer. */ int ovl_path_next(int idx, struct dentry *dentry, struct path *path) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerstack = ovl_lowerstack(oe); BUG_ON(idx < 0); if (idx == 0) { ovl_path_upper(dentry, path); if (path->dentry) return ovl_numlower(oe) ? 1 : -1; idx++; } BUG_ON(idx > ovl_numlower(oe)); path->dentry = lowerstack[idx - 1].dentry; path->mnt = lowerstack[idx - 1].layer->mnt; return (idx < ovl_numlower(oe)) ? 
idx + 1 : -1; } /* Fix missing 'origin' xattr */ static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry, struct dentry *lower, struct dentry *upper) { int err; if (ovl_check_origin_xattr(ofs, upper)) return 0; err = ovl_want_write(dentry); if (err) return err; err = ovl_set_origin(ofs, lower, upper); if (!err) err = ovl_set_impure(dentry->d_parent, upper->d_parent); ovl_drop_write(dentry); return err; } static int ovl_maybe_validate_verity(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct inode *inode = d_inode(dentry); struct path datapath, metapath; int err; if (!ofs->config.verity_mode || !ovl_is_metacopy_dentry(dentry) || ovl_test_flag(OVL_VERIFIED_DIGEST, inode)) return 0; if (!ovl_test_flag(OVL_HAS_DIGEST, inode)) { if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { pr_warn_ratelimited("metacopy file '%pd' has no digest specified\n", dentry); return -EIO; } return 0; } ovl_path_lowerdata(dentry, &datapath); if (!datapath.dentry) return -EIO; ovl_path_real(dentry, &metapath); if (!metapath.dentry) return -EIO; err = ovl_inode_lock_interruptible(inode); if (err) return err; if (!ovl_test_flag(OVL_VERIFIED_DIGEST, inode)) { const struct cred *old_cred; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_validate_verity(ofs, &metapath, &datapath); if (err == 0) ovl_set_flag(OVL_VERIFIED_DIGEST, inode); revert_creds(old_cred); } ovl_inode_unlock(inode); return err; } /* Lazy lookup of lowerdata */ static int ovl_maybe_lookup_lowerdata(struct dentry *dentry) { struct inode *inode = d_inode(dentry); const char *redirect = ovl_lowerdata_redirect(inode); struct ovl_path datapath = {}; const struct cred *old_cred; int err; if (!redirect || ovl_dentry_lowerdata(dentry)) return 0; if (redirect[0] != '/') return -EIO; err = ovl_inode_lock_interruptible(inode); if (err) return err; err = 0; /* Someone got here before us? */ if (ovl_dentry_lowerdata(dentry)) goto out; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_lookup_data_layers(dentry, redirect, &datapath); revert_creds(old_cred); if (err) goto out_err; err = ovl_dentry_set_lowerdata(dentry, &datapath); if (err) goto out_err; out: ovl_inode_unlock(inode); dput(datapath.dentry); return err; out_err: pr_warn_ratelimited("lazy lowerdata lookup failed (%pd2, err=%i)\n", dentry, err); goto out; } int ovl_verify_lowerdata(struct dentry *dentry) { int err; err = ovl_maybe_lookup_lowerdata(dentry); if (err) return err; return ovl_maybe_validate_verity(dentry); } struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ovl_entry *oe = NULL; const struct cred *old_cred; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct ovl_entry *poe = OVL_E(dentry->d_parent); struct ovl_entry *roe = OVL_E(dentry->d_sb->s_root); struct ovl_path *stack = NULL, *origin_path = NULL; struct dentry *upperdir, *upperdentry = NULL; struct dentry *origin = NULL; struct dentry *index = NULL; unsigned int ctr = 0; struct inode *inode = NULL; bool upperopaque = false; char *upperredirect = NULL; struct dentry *this; unsigned int i; int err; bool uppermetacopy = false; int metacopy_size = 0; struct ovl_lookup_data d = { .sb = dentry->d_sb, .name = dentry->d_name, .is_dir = false, .opaque = false, .stop = false, .last = ovl_redirect_follow(ofs) ? 
false : !ovl_numlower(poe), .redirect = NULL, .metacopy = 0, }; if (dentry->d_name.len > ofs->namelen) return ERR_PTR(-ENAMETOOLONG); old_cred = ovl_override_creds(dentry->d_sb); upperdir = ovl_dentry_upper(dentry->d_parent); if (upperdir) { d.mnt = ovl_upper_mnt(ofs); err = ovl_lookup_layer(upperdir, &d, &upperdentry, true); if (err) goto out; if (upperdentry && upperdentry->d_flags & DCACHE_OP_REAL) { dput(upperdentry); err = -EREMOTE; goto out; } if (upperdentry && !d.is_dir) { /* * Lookup copy up origin by decoding origin file handle. * We may get a disconnected dentry, which is fine, * because we only need to hold the origin inode in * cache and use its inode number. We may even get a * connected dentry, that is not under any of the lower * layers root. That is also fine for using it's inode * number - it's the same as if we held a reference * to a dentry in lower layer that was moved under us. */ err = ovl_check_origin(ofs, upperdentry, &origin_path); if (err) goto out_put_upper; if (d.metacopy) uppermetacopy = true; metacopy_size = d.metacopy; } if (d.redirect) { err = -ENOMEM; upperredirect = kstrdup(d.redirect, GFP_KERNEL); if (!upperredirect) goto out_put_upper; if (d.redirect[0] == '/') poe = roe; } upperopaque = d.opaque; } if (!d.stop && ovl_numlower(poe)) { err = -ENOMEM; stack = ovl_stack_alloc(ofs->numlayer - 1); if (!stack) goto out_put_upper; } for (i = 0; !d.stop && i < ovl_numlower(poe); i++) { struct ovl_path lower = ovl_lowerstack(poe)[i]; if (!ovl_redirect_follow(ofs)) d.last = i == ovl_numlower(poe) - 1; else if (d.is_dir || !ofs->numdatalayer) d.last = lower.layer->idx == ovl_numlower(roe); d.mnt = lower.layer->mnt; err = ovl_lookup_layer(lower.dentry, &d, &this, false); if (err) goto out_put; if (!this) continue; if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) { dput(this); err = -EPERM; pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry); goto out_put; } /* * If no origin fh is stored in upper of a merge dir, store fh * of lower dir and set upper parent "impure". */ if (upperdentry && !ctr && !ofs->noxattr && d.is_dir) { err = ovl_fix_origin(ofs, dentry, this, upperdentry); if (err) { dput(this); goto out_put; } } /* * When "verify_lower" feature is enabled, do not merge with a * lower dir that does not match a stored origin xattr. In any * case, only verified origin is used for index lookup. * * For non-dir dentry, if index=on, then ensure origin * matches the dentry found using path based lookup, * otherwise error out. */ if (upperdentry && !ctr && ((d.is_dir && ovl_verify_lower(dentry->d_sb)) || (!d.is_dir && ofs->config.index && origin_path))) { err = ovl_verify_origin(ofs, upperdentry, this, false); if (err) { dput(this); if (d.is_dir) break; goto out_put; } origin = this; } if (!upperdentry && !d.is_dir && !ctr && d.metacopy) metacopy_size = d.metacopy; if (d.metacopy && ctr) { /* * Do not store intermediate metacopy dentries in * lower chain, except top most lower metacopy dentry. * Continue the loop so that if there is an absolute * redirect on this dentry, poe can be reset to roe. */ dput(this); this = NULL; } else { stack[ctr].dentry = this; stack[ctr].layer = lower.layer; ctr++; } /* * Following redirects can have security consequences: it's like * a symlink into the lower layer without the permission checks. * This is only a problem if the upper layer is untrusted (e.g * comes from an USB drive). This can allow a non-readable file * or directory to become readable. 
* * Only following redirects when redirects are enabled disables * this attack vector when not necessary. */ err = -EPERM; if (d.redirect && !ovl_redirect_follow(ofs)) { pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n", dentry); goto out_put; } if (d.stop) break; if (d.redirect && d.redirect[0] == '/' && poe != roe) { poe = roe; /* Find the current layer on the root dentry */ i = lower.layer->idx - 1; } } /* Defer lookup of lowerdata in data-only layers to first access */ if (d.metacopy && ctr && ofs->numdatalayer && d.absolute_redirect) { d.metacopy = 0; ctr++; } /* * For regular non-metacopy upper dentries, there is no lower * path based lookup, hence ctr will be zero. If a dentry is found * using ORIGIN xattr on upper, install it in stack. * * For metacopy dentry, path based lookup will find lower dentries. * Just make sure a corresponding data dentry has been found. */ if (d.metacopy || (uppermetacopy && !ctr)) { pr_warn_ratelimited("metacopy with no lower data found - abort lookup (%pd2)\n", dentry); err = -EIO; goto out_put; } else if (!d.is_dir && upperdentry && !ctr && origin_path) { if (WARN_ON(stack != NULL)) { err = -EIO; goto out_put; } stack = origin_path; ctr = 1; origin = origin_path->dentry; origin_path = NULL; } /* * Always lookup index if there is no-upperdentry. * * For the case of upperdentry, we have set origin by now if it * needed to be set. There are basically three cases. * * For directories, lookup index by lower inode and verify it matches * upper inode. We only trust dir index if we verified that lower dir * matches origin, otherwise dir index entries may be inconsistent * and we ignore them. * * For regular upper, we already set origin if upper had ORIGIN * xattr. There is no verification though as there is no path * based dentry lookup in lower in this case. * * For metacopy upper, we set a verified origin already if index * is enabled and if upper had an ORIGIN xattr. * */ if (!upperdentry && ctr) origin = stack[0].dentry; if (origin && ovl_indexdir(dentry->d_sb) && (!d.is_dir || ovl_index_all(dentry->d_sb))) { index = ovl_lookup_index(ofs, upperdentry, origin, true); if (IS_ERR(index)) { err = PTR_ERR(index); index = NULL; goto out_put; } } if (ctr) { oe = ovl_alloc_entry(ctr); err = -ENOMEM; if (!oe) goto out_put; ovl_stack_cpy(ovl_lowerstack(oe), stack, ctr); } if (upperopaque) ovl_dentry_set_opaque(dentry); if (upperdentry) ovl_dentry_set_upper_alias(dentry); else if (index) { struct path upperpath = { .dentry = upperdentry = dget(index), .mnt = ovl_upper_mnt(ofs), }; /* * It's safe to assign upperredirect here: the previous * assignment of happens only if upperdentry is non-NULL, and * this one only if upperdentry is NULL. 
*/ upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0); if (IS_ERR(upperredirect)) { err = PTR_ERR(upperredirect); upperredirect = NULL; goto out_free_oe; } err = ovl_check_metacopy_xattr(ofs, &upperpath, NULL); if (err < 0) goto out_free_oe; uppermetacopy = err; metacopy_size = err; } if (upperdentry || ctr) { struct ovl_inode_params oip = { .upperdentry = upperdentry, .oe = oe, .index = index, .redirect = upperredirect, }; /* Store lowerdata redirect for lazy lookup */ if (ctr > 1 && !d.is_dir && !stack[ctr - 1].dentry) { oip.lowerdata_redirect = d.redirect; d.redirect = NULL; } inode = ovl_get_inode(dentry->d_sb, &oip); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free_oe; if (upperdentry && !uppermetacopy) ovl_set_flag(OVL_UPPERDATA, inode); if (metacopy_size > OVL_METACOPY_MIN_SIZE) ovl_set_flag(OVL_HAS_DIGEST, inode); } ovl_dentry_init_reval(dentry, upperdentry, OVL_I_E(inode)); revert_creds(old_cred); if (origin_path) { dput(origin_path->dentry); kfree(origin_path); } dput(index); ovl_stack_free(stack, ctr); kfree(d.redirect); return d_splice_alias(inode, dentry); out_free_oe: ovl_free_entry(oe); out_put: dput(index); ovl_stack_free(stack, ctr); out_put_upper: if (origin_path) { dput(origin_path->dentry); kfree(origin_path); } dput(upperdentry); kfree(upperredirect); out: kfree(d.redirect); revert_creds(old_cred); return ERR_PTR(err); } bool ovl_lower_positive(struct dentry *dentry) { struct ovl_entry *poe = OVL_E(dentry->d_parent); const struct qstr *name = &dentry->d_name; const struct cred *old_cred; unsigned int i; bool positive = false; bool done = false; /* * If dentry is negative, then lower is positive iff this is a * whiteout. */ if (!dentry->d_inode) return ovl_dentry_is_opaque(dentry); /* Negative upper -> positive lower */ if (!ovl_dentry_upper(dentry)) return true; old_cred = ovl_override_creds(dentry->d_sb); /* Positive upper -> have to look up lower to see whether it exists */ for (i = 0; !done && !positive && i < ovl_numlower(poe); i++) { struct dentry *this; struct ovl_path *parentpath = &ovl_lowerstack(poe)[i]; this = lookup_one_positive_unlocked( mnt_idmap(parentpath->layer->mnt), name->name, parentpath->dentry, name->len); if (IS_ERR(this)) { switch (PTR_ERR(this)) { case -ENOENT: case -ENAMETOOLONG: break; default: /* * Assume something is there, we just couldn't * access it. */ positive = true; break; } } else { positive = !ovl_is_whiteout(this); done = true; dput(this); } } revert_creds(old_cred); return positive; }
linux-master
fs/overlayfs/namei.c
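A note on the index naming scheme implemented by ovl_get_index_name_fh() above: the index entry name is simply the lowercase hex dump (bin2hex()) of the origin file handle. The following is a rough userspace illustration of that transformation, using made-up handle bytes and a hypothetical helper name; the real name is computed over the whole struct ovl_fh (overlayfs header plus encoded fid), so this sketch will not reproduce actual index entry names byte for byte.

#include <stdio.h>
#include <stddef.h>

/*
 * Illustration only (not part of the kernel source above): hex-encode a
 * buffer of file handle bytes the way overlayfs names index entries.
 */
static void fh_to_index_name(const unsigned char *fh, size_t len,
                             char *name /* at least 2 * len + 1 bytes */)
{
        static const char hexdigits[] = "0123456789abcdef";
        size_t i;

        for (i = 0; i < len; i++) {
                name[2 * i]     = hexdigits[fh[i] >> 4];
                name[2 * i + 1] = hexdigits[fh[i] & 0xf];
        }
        name[2 * len] = '\0';
}

int main(void)
{
        const unsigned char fh[] = { 0x81, 0xfb, 0x00, 0x2a }; /* made-up bytes */
        char name[2 * sizeof(fh) + 1];

        fh_to_index_name(fh, sizeof(fh), name);
        printf("index name: %s\n", name); /* prints "81fb002a" */
        return 0;
}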
// SPDX-License-Identifier: GPL-2.0-only /* * Overlayfs NFS export support. * * Amir Goldstein <[email protected]> * * Copyright (C) 2017-2018 CTERA Networks. All Rights Reserved. */ #include <linux/fs.h> #include <linux/cred.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/exportfs.h> #include <linux/ratelimit.h> #include "overlayfs.h" static int ovl_encode_maybe_copy_up(struct dentry *dentry) { int err; if (ovl_dentry_upper(dentry)) return 0; err = ovl_want_write(dentry); if (!err) { err = ovl_copy_up(dentry); ovl_drop_write(dentry); } if (err) { pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n", dentry, err); } return err; } /* * Before encoding a non-upper directory file handle from real layer N, we need * to check if it will be possible to reconnect an overlay dentry from the real * lower decoded dentry. This is done by following the overlay ancestry up to a * "layer N connected" ancestor and verifying that all parents along the way are * "layer N connectable". If an ancestor that is NOT "layer N connectable" is * found, we need to copy up an ancestor, which is "layer N connectable", thus * making that ancestor "layer N connected". For example: * * layer 1: /a * layer 2: /a/b/c * * The overlay dentry /a is NOT "layer 2 connectable", because if dir /a is * copied up and renamed, upper dir /a will be indexed by lower dir /a from * layer 1. The dir /a from layer 2 will never be indexed, so the algorithm (*) * in ovl_lookup_real_ancestor() will not be able to lookup a connected overlay * dentry from the connected lower dentry /a/b/c. * * To avoid this problem on decode time, we need to copy up an ancestor of * /a/b/c, which is "layer 2 connectable", on encode time. That ancestor is * /a/b. After copy up (and index) of /a/b, it will become "layer 2 connected" * and when the time comes to decode the file handle from lower dentry /a/b/c, * ovl_lookup_real_ancestor() will find the indexed ancestor /a/b and decoding * a connected overlay dentry will be accomplished. * * (*) the algorithm in ovl_lookup_real_ancestor() can be improved to lookup an * entry /a in the lower layers above layer N and find the indexed dir /a from * layer 1. If that improvement is made, then the check for "layer N connected" * will need to verify there are no redirects in lower layers above N. In the * example above, /a will be "layer 2 connectable". However, if layer 2 dir /a * is a target of a layer 1 redirect, then /a will NOT be "layer 2 connectable": * * layer 1: /A (redirect = /a) * layer 2: /a/b/c */ /* Return the lowest layer for encoding a connectable file handle */ static int ovl_connectable_layer(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); /* We can get overlay root from root of any layer */ if (dentry == dentry->d_sb->s_root) return ovl_numlower(oe); /* * If it's an unindexed merge dir, then it's not connectable with any * lower layer */ if (ovl_dentry_upper(dentry) && !ovl_test_flag(OVL_INDEX, d_inode(dentry))) return 0; /* We can get upper/overlay path from indexed/lower dentry */ return ovl_lowerstack(oe)->layer->idx; } /* * @dentry is "connected" if all ancestors up to root or a "connected" ancestor * have the same uppermost lower layer as the origin's layer. We may need to * copy up a "connectable" ancestor to make it "connected". A "connected" dentry * cannot become non "connected", so cache positive result in dentry flags. * * Return the connected origin layer or < 0 on error. 
*/ static int ovl_connect_layer(struct dentry *dentry) { struct dentry *next, *parent = NULL; struct ovl_entry *oe = OVL_E(dentry); int origin_layer; int err = 0; if (WARN_ON(dentry == dentry->d_sb->s_root) || WARN_ON(!ovl_dentry_lower(dentry))) return -EIO; origin_layer = ovl_lowerstack(oe)->layer->idx; if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry)) return origin_layer; /* Find the topmost origin layer connectable ancestor of @dentry */ next = dget(dentry); for (;;) { parent = dget_parent(next); if (WARN_ON(parent == next)) { err = -EIO; break; } /* * If @parent is not origin layer connectable, then copy up * @next which is origin layer connectable and we are done. */ if (ovl_connectable_layer(parent) < origin_layer) { err = ovl_encode_maybe_copy_up(next); break; } /* If @parent is connected or indexed we are done */ if (ovl_dentry_test_flag(OVL_E_CONNECTED, parent) || ovl_test_flag(OVL_INDEX, d_inode(parent))) break; dput(next); next = parent; } dput(parent); dput(next); if (!err) ovl_dentry_set_flag(OVL_E_CONNECTED, dentry); return err ?: origin_layer; } /* * We only need to encode origin if there is a chance that the same object was * encoded pre copy up and then we need to stay consistent with the same * encoding also after copy up. If non-pure upper is not indexed, then it was * copied up before NFS export was enabled. In that case we don't need to worry * about staying consistent with pre copy up encoding and we encode an upper * file handle. Overlay root dentry is a private case of non-indexed upper. * * The following table summarizes the different file handle encodings used for * different overlay object types: * * Object type | Encoding * -------------------------------- * Pure upper | U * Non-indexed upper | U * Indexed upper | L (*) * Non-upper | L (*) * * U = upper file handle * L = lower file handle * * (*) Decoding a connected overlay dir from real lower dentry is not always * possible when there are redirects in lower layers and non-indexed merge dirs. * To mitigate those case, we may copy up the lower dir ancestor before encode * of a decodable file handle for non-upper dir. * * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error. */ static int ovl_check_encode_origin(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); bool decodable = ofs->config.nfs_export; /* Lower file handle for non-upper non-decodable */ if (!ovl_dentry_upper(dentry) && !decodable) return 0; /* Upper file handle for pure upper */ if (!ovl_dentry_lower(dentry)) return 0; /* * Root is never indexed, so if there's an upper layer, encode upper for * root. */ if (dentry == dentry->d_sb->s_root) return 0; /* * Upper decodable file handle for non-indexed upper. */ if (ovl_dentry_upper(dentry) && decodable && !ovl_test_flag(OVL_INDEX, d_inode(dentry))) return 0; /* * Decoding a merge dir, whose origin's ancestor is under a redirected * lower dir or under a non-indexed upper is not always possible. * ovl_connect_layer() will try to make origin's layer "connected" by * copying up a "connectable" ancestor. */ if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable) return ovl_connect_layer(dentry); /* Lower file handle for indexed and non-upper dir/non-dir */ return 1; } static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry, u32 *fid, int buflen) { struct ovl_fh *fh = NULL; int err, enc_lower; int len; /* * Check if we should encode a lower or upper file handle and maybe * copy up an ancestor to make lower file handle connectable. 
*/ err = enc_lower = ovl_check_encode_origin(dentry); if (enc_lower < 0) goto fail; /* Encode an upper or lower file handle */ fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) : ovl_dentry_upper(dentry), !enc_lower); if (IS_ERR(fh)) return PTR_ERR(fh); len = OVL_FH_LEN(fh); if (len <= buflen) memcpy(fid, fh, len); err = len; out: kfree(fh); return err; fail: pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n", dentry, err); goto out; } static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, struct inode *parent) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); struct dentry *dentry; int bytes, buflen = *max_len << 2; /* TODO: encode connectable file handles */ if (parent) return FILEID_INVALID; dentry = d_find_any_alias(inode); if (!dentry) return FILEID_INVALID; bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen); dput(dentry); if (bytes <= 0) return FILEID_INVALID; *max_len = bytes >> 2; if (bytes > buflen) return FILEID_INVALID; return OVL_FILEID_V1; } /* * Find or instantiate an overlay dentry from real dentries and index. */ static struct dentry *ovl_obtain_alias(struct super_block *sb, struct dentry *upper_alias, struct ovl_path *lowerpath, struct dentry *index) { struct dentry *lower = lowerpath ? lowerpath->dentry : NULL; struct dentry *upper = upper_alias ?: index; struct dentry *dentry; struct inode *inode = NULL; struct ovl_entry *oe; struct ovl_inode_params oip = { .index = index, }; /* We get overlay directory dentries with ovl_lookup_real() */ if (d_is_dir(upper ?: lower)) return ERR_PTR(-EIO); oe = ovl_alloc_entry(!!lower); if (!oe) return ERR_PTR(-ENOMEM); oip.upperdentry = dget(upper); if (lower) { ovl_lowerstack(oe)->dentry = dget(lower); ovl_lowerstack(oe)->layer = lowerpath->layer; } oip.oe = oe; inode = ovl_get_inode(sb, &oip); if (IS_ERR(inode)) { ovl_free_entry(oe); dput(upper); return ERR_CAST(inode); } if (upper) ovl_set_flag(OVL_UPPERDATA, inode); dentry = d_find_any_alias(inode); if (dentry) goto out_iput; dentry = d_alloc_anon(inode->i_sb); if (unlikely(!dentry)) goto nomem; if (upper_alias) ovl_dentry_set_upper_alias(dentry); ovl_dentry_init_reval(dentry, upper, OVL_I_E(inode)); return d_instantiate_anon(dentry, inode); nomem: dput(dentry); dentry = ERR_PTR(-ENOMEM); out_iput: iput(inode); return dentry; } /* Get the upper or lower dentry in stack whose on layer @idx */ static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerstack = ovl_lowerstack(oe); int i; if (!idx) return ovl_dentry_upper(dentry); for (i = 0; i < ovl_numlower(oe); i++) { if (lowerstack[i].layer->idx == idx) return lowerstack[i].dentry; } return NULL; } /* * Lookup a child overlay dentry to get a connected overlay dentry whose real * dentry is @real. If @real is on upper layer, we lookup a child overlay * dentry with the same name as the real dentry. Otherwise, we need to consult * index for lookup. */ static struct dentry *ovl_lookup_real_one(struct dentry *connected, struct dentry *real, const struct ovl_layer *layer) { struct inode *dir = d_inode(connected); struct dentry *this, *parent = NULL; struct name_snapshot name; int err; /* * Lookup child overlay dentry by real name. The dir mutex protects us * from racing with overlay rename. If the overlay dentry that is above * real has already been moved to a parent that is not under the * connected overlay dir, we return -ECHILD and restart the lookup of * connected real path from the top. 
inode_lock_nested(dir, I_MUTEX_PARENT); err = -ECHILD; parent = dget_parent(real); if (ovl_dentry_real_at(connected, layer->idx) != parent) goto fail; /* * We also need to take a snapshot of the real dentry name to protect us * from racing with an underlying layer rename. In this case, we don't * care about returning ESTALE, only about dereferencing a freed name * pointer, because we hold no lock on the real dentry. */ take_dentry_name_snapshot(&name, real); /* * No idmap handling here: it's an internal lookup. Could skip * permission checking altogether, but for now just use non-idmap * transformed ids. */ this = lookup_one_len(name.name.name, connected, name.name.len); release_dentry_name_snapshot(&name); err = PTR_ERR(this); if (IS_ERR(this)) { goto fail; } else if (!this || !this->d_inode) { dput(this); err = -ENOENT; goto fail; } else if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); err = -ESTALE; goto fail; } out: dput(parent); inode_unlock(dir); return this; fail: pr_warn_ratelimited("failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n", real, layer->idx, connected, err); this = ERR_PTR(err); goto out; } static struct dentry *ovl_lookup_real(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer); /* * Lookup an indexed or hashed overlay dentry by real inode. */ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct ovl_fs *ofs = OVL_FS(sb); struct dentry *index = NULL; struct dentry *this = NULL; struct inode *inode; /* * Decoding upper dir from index is expensive, so first try to lookup * overlay dentry in inode/dcache. */ inode = ovl_lookup_inode(sb, real, !layer->idx); if (IS_ERR(inode)) return ERR_CAST(inode); if (inode) { this = d_find_any_alias(inode); iput(inode); } /* * For a decoded lower dir file handle, lookup index by origin to check * if the lower dir was copied up and/or removed. */ if (!this && layer->idx && ofs->indexdir && !WARN_ON(!d_is_dir(real))) { index = ovl_lookup_index(ofs, NULL, real, false); if (IS_ERR(index)) return index; } /* Get connected upper overlay dir from index */ if (index) { struct dentry *upper = ovl_index_upper(ofs, index, true); dput(index); if (IS_ERR_OR_NULL(upper)) return upper; /* * ovl_lookup_real() in a lower layer may recurse once into * ovl_lookup_real() in the upper layer. The first level call walks * back lower parents to the topmost indexed parent. The second * recursive call walks back from the indexed upper to the topmost * connected/hashed upper parent (or up to root). */ this = ovl_lookup_real(sb, upper, &ofs->layers[0]); dput(upper); } if (IS_ERR_OR_NULL(this)) return this; if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); this = ERR_PTR(-EIO); } return this; } /* * Lookup an indexed or hashed overlay dentry, whose real dentry is an * ancestor of @real. */ static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct dentry *next, *parent = NULL; struct dentry *ancestor = ERR_PTR(-EIO); if (real == layer->mnt->mnt_root) return dget(sb->s_root); /* Find the topmost indexed or hashed ancestor */ next = dget(real); for (;;) { parent = dget_parent(next); /* * Lookup a matching overlay dentry in the inode/dentry * cache or in the index by real inode.
*/ ancestor = ovl_lookup_real_inode(sb, next, layer); if (ancestor) break; if (parent == layer->mnt->mnt_root) { ancestor = dget(sb->s_root); break; } /* * If @real has been moved out of the layer root directory, * we will eventually hit the real fs root. This cannot happen * by a legit overlay rename, so we return an error in that case. */ if (parent == next) { ancestor = ERR_PTR(-EXDEV); break; } dput(next); next = parent; } dput(parent); dput(next); return ancestor; } /* * Lookup a connected overlay dentry whose real dentry is @real. * If @real is on the upper layer, we lookup a child overlay dentry with the * same path as the real dentry. Otherwise, we need to consult the index for * lookup. */ static struct dentry *ovl_lookup_real(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct dentry *connected; int err = 0; connected = ovl_lookup_real_ancestor(sb, real, layer); if (IS_ERR(connected)) return connected; while (!err) { struct dentry *next, *this; struct dentry *parent = NULL; struct dentry *real_connected = ovl_dentry_real_at(connected, layer->idx); if (real_connected == real) break; /* Find the topmost dentry not yet connected */ next = dget(real); for (;;) { parent = dget_parent(next); if (parent == real_connected) break; /* * If real has been moved out of 'real_connected', * we will not find 'real_connected' and hit the layer * root. In that case, we need to restart connecting. * This game can go on forever in the worst case. We * may want to consider taking s_vfs_rename_mutex if * this happens more than once. */ if (parent == layer->mnt->mnt_root) { dput(connected); connected = dget(sb->s_root); break; } /* * If the real file has been moved out of the layer root * directory, we will eventually hit the real fs root. * This cannot happen by a legit overlay rename, so we * return an error in that case. */ if (parent == next) { err = -EXDEV; break; } dput(next); next = parent; } if (!err) { this = ovl_lookup_real_one(connected, next, layer); if (IS_ERR(this)) err = PTR_ERR(this); /* * Lookup of a child in overlay can fail when racing with an * overlay rename of the child away from the 'connected' parent. * In this case, we need to restart the lookup from the * top, because we cannot trust that 'real_connected' is * still an ancestor of 'real'. There is a good chance * that the renamed overlay ancestor is now in cache, so * ovl_lookup_real_ancestor() will find it and we can * continue to connect exactly from where lookup failed. */ if (err == -ECHILD) { this = ovl_lookup_real_ancestor(sb, real, layer); err = PTR_ERR_OR_ZERO(this); } if (!err) { dput(connected); connected = this; } } dput(parent); dput(next); } if (err) goto fail; return connected; fail: pr_warn_ratelimited("failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n", real, layer->idx, connected, err); dput(connected); return ERR_PTR(err); } /* * Get an overlay dentry from upper/lower real dentries and index. */ static struct dentry *ovl_get_dentry(struct super_block *sb, struct dentry *upper, struct ovl_path *lowerpath, struct dentry *index) { struct ovl_fs *ofs = OVL_FS(sb); const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer; struct dentry *real = upper ?: (index ?: lowerpath->dentry); /* * Obtain a disconnected overlay dentry from a non-dir real dentry * and index. */ if (!d_is_dir(real)) return ovl_obtain_alias(sb, upper, lowerpath, index); /* Removed empty directory?
*/ if ((real->d_flags & DCACHE_DISCONNECTED) || d_unhashed(real)) return ERR_PTR(-ENOENT); /* * If real dentry is connected and hashed, get a connected overlay * dentry whose real dentry is @real. */ return ovl_lookup_real(sb, real, layer); } static struct dentry *ovl_upper_fh_to_d(struct super_block *sb, struct ovl_fh *fh) { struct ovl_fs *ofs = OVL_FS(sb); struct dentry *dentry; struct dentry *upper; if (!ovl_upper_mnt(ofs)) return ERR_PTR(-EACCES); upper = ovl_decode_real_fh(ofs, fh, ovl_upper_mnt(ofs), true); if (IS_ERR_OR_NULL(upper)) return upper; dentry = ovl_get_dentry(sb, upper, NULL, NULL); dput(upper); return dentry; } static struct dentry *ovl_lower_fh_to_d(struct super_block *sb, struct ovl_fh *fh) { struct ovl_fs *ofs = OVL_FS(sb); struct ovl_path origin = { }; struct ovl_path *stack = &origin; struct dentry *dentry = NULL; struct dentry *index = NULL; struct inode *inode; int err; /* First lookup overlay inode in inode cache by origin fh */ err = ovl_check_origin_fh(ofs, fh, false, NULL, &stack); if (err) return ERR_PTR(err); if (!d_is_dir(origin.dentry) || !(origin.dentry->d_flags & DCACHE_DISCONNECTED)) { inode = ovl_lookup_inode(sb, origin.dentry, false); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_err; if (inode) { dentry = d_find_any_alias(inode); iput(inode); if (dentry) goto out; } } /* Then lookup indexed upper/whiteout by origin fh */ if (ofs->indexdir) { index = ovl_get_index_fh(ofs, fh); err = PTR_ERR(index); if (IS_ERR(index)) { index = NULL; goto out_err; } } /* Then try to get a connected upper dir by index */ if (index && d_is_dir(index)) { struct dentry *upper = ovl_index_upper(ofs, index, true); err = PTR_ERR(upper); if (IS_ERR_OR_NULL(upper)) goto out_err; dentry = ovl_get_dentry(sb, upper, NULL, NULL); dput(upper); goto out; } /* Find origin.dentry again with ovl_acceptable() layer check */ if (d_is_dir(origin.dentry)) { dput(origin.dentry); origin.dentry = NULL; err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack); if (err) goto out_err; } if (index) { err = ovl_verify_origin(ofs, index, origin.dentry, false); if (err) goto out_err; } /* Get a connected non-upper dir or disconnected non-dir */ dentry = ovl_get_dentry(sb, NULL, &origin, index); out: dput(origin.dentry); dput(index); return dentry; out_err: dentry = ERR_PTR(err); goto out; } static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type) { struct ovl_fh *fh; /* If on-wire inner fid is aligned - nothing to do */ if (fh_type == OVL_FILEID_V1) return (struct ovl_fh *)fid; if (fh_type != OVL_FILEID_V0) return ERR_PTR(-EINVAL); if (buflen <= OVL_FH_WIRE_OFFSET) return ERR_PTR(-EINVAL); fh = kzalloc(buflen, GFP_KERNEL); if (!fh) return ERR_PTR(-ENOMEM); /* Copy unaligned inner fh into aligned buffer */ memcpy(fh->buf, fid, buflen - OVL_FH_WIRE_OFFSET); return fh; } static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct dentry *dentry = NULL; struct ovl_fh *fh = NULL; int len = fh_len << 2; unsigned int flags = 0; int err; fh = ovl_fid_to_fh(fid, len, fh_type); err = PTR_ERR(fh); if (IS_ERR(fh)) goto out_err; err = ovl_check_fh_len(fh, len); if (err) goto out_err; flags = fh->fb.flags; dentry = (flags & OVL_FH_FLAG_PATH_UPPER) ? 
ovl_upper_fh_to_d(sb, fh) : ovl_lower_fh_to_d(sb, fh); err = PTR_ERR(dentry); if (IS_ERR(dentry) && err != -ESTALE) goto out_err; out: /* We may have needed to re-align OVL_FILEID_V0 */ if (!IS_ERR_OR_NULL(fh) && fh != (void *)fid) kfree(fh); return dentry; out_err: pr_warn_ratelimited("failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n", fh_len, fh_type, flags, err); dentry = ERR_PTR(err); goto out; } static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { pr_warn_ratelimited("connectable file handles not supported; use 'no_subtree_check' exportfs option.\n"); return ERR_PTR(-EACCES); } static int ovl_get_name(struct dentry *parent, char *name, struct dentry *child) { /* * ovl_fh_to_dentry() returns connected dir overlay dentries and * ovl_fh_to_parent() is not implemented, so we should not get here. */ WARN_ON_ONCE(1); return -EIO; } static struct dentry *ovl_get_parent(struct dentry *dentry) { /* * ovl_fh_to_dentry() returns connected dir overlay dentries, so we * should not get here. */ WARN_ON_ONCE(1); return ERR_PTR(-EIO); } const struct export_operations ovl_export_operations = { .encode_fh = ovl_encode_fh, .fh_to_dentry = ovl_fh_to_dentry, .fh_to_parent = ovl_fh_to_parent, .get_name = ovl_get_name, .get_parent = ovl_get_parent, }; /* encode_fh() encodes non-decodable file handles with nfs_export=off */ const struct export_operations ovl_export_fid_operations = { .encode_fh = ovl_encode_fh, };
linux-master
fs/overlayfs/export.c
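The export_operations table above is reached through the generic file handle syscalls: name_to_handle_at(2) ends up in ovl_encode_fh() and open_by_handle_at(2) ends up in ovl_fh_to_dentry(). Below is a minimal userspace sketch of that round trip, assuming a hypothetical overlay mounted at /merged with nfs_export=on; open_by_handle_at() also requires CAP_DAC_READ_SEARCH.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical path on an overlay mounted with nfs_export=on. */
        const char *path = "/merged/file";
        struct file_handle *fhp;
        int mount_id, mount_fd, fd;

        fhp = malloc(sizeof(*fhp) + MAX_HANDLE_SZ);
        if (!fhp)
                return 1;
        fhp->handle_bytes = MAX_HANDLE_SZ;

        /* Encode: the kernel ends up in ovl_encode_fh(). */
        if (name_to_handle_at(AT_FDCWD, path, fhp, &mount_id, 0) == -1) {
                perror("name_to_handle_at");
                return 1;
        }
        printf("handle: %u bytes, type %d\n", fhp->handle_bytes,
               fhp->handle_type);

        /* Decode: the kernel ends up in ovl_fh_to_dentry(). */
        mount_fd = open("/merged", O_RDONLY | O_DIRECTORY);
        if (mount_fd == -1) {
                perror("open");
                return 1;
        }
        fd = open_by_handle_at(mount_fd, fhp, O_RDONLY);
        if (fd == -1)
                perror("open_by_handle_at"); /* needs CAP_DAC_READ_SEARCH */
        else
                close(fd);
        free(fhp);
        return 0;
}

If the origin is unlinked or the index is cleared between encode and decode, the decode step is expected to fail with ESTALE, matching the -ESTALE paths in ovl_fh_to_dentry() above.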
// SPDX-License-Identifier: GPL-2.0-only #include <linux/fs.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/posix_acl_xattr.h> #include <linux/seq_file.h> #include <linux/xattr.h> #include "overlayfs.h" #include "params.h" static bool ovl_redirect_dir_def = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_DIR); module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644); MODULE_PARM_DESC(redirect_dir, "Default to on or off for the redirect_dir feature"); static bool ovl_redirect_always_follow = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW); module_param_named(redirect_always_follow, ovl_redirect_always_follow, bool, 0644); MODULE_PARM_DESC(redirect_always_follow, "Follow redirects even if redirect_dir feature is turned off"); static bool ovl_xino_auto_def = IS_ENABLED(CONFIG_OVERLAY_FS_XINO_AUTO); module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644); MODULE_PARM_DESC(xino_auto, "Auto enable xino feature"); static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX); module_param_named(index, ovl_index_def, bool, 0644); MODULE_PARM_DESC(index, "Default to on or off for the inodes index feature"); static bool ovl_nfs_export_def = IS_ENABLED(CONFIG_OVERLAY_FS_NFS_EXPORT); module_param_named(nfs_export, ovl_nfs_export_def, bool, 0644); MODULE_PARM_DESC(nfs_export, "Default to on or off for the NFS export feature"); static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY); module_param_named(metacopy, ovl_metacopy_def, bool, 0644); MODULE_PARM_DESC(metacopy, "Default to on or off for the metadata only copy up feature"); enum { Opt_lowerdir, Opt_upperdir, Opt_workdir, Opt_default_permissions, Opt_redirect_dir, Opt_index, Opt_uuid, Opt_nfs_export, Opt_userxattr, Opt_xino, Opt_metacopy, Opt_verity, Opt_volatile, }; static const struct constant_table ovl_parameter_bool[] = { { "on", true }, { "off", false }, {} }; static const struct constant_table ovl_parameter_uuid[] = { { "off", OVL_UUID_OFF }, { "null", OVL_UUID_NULL }, { "auto", OVL_UUID_AUTO }, { "on", OVL_UUID_ON }, {} }; static const char *ovl_uuid_mode(struct ovl_config *config) { return ovl_parameter_uuid[config->uuid].name; } static int ovl_uuid_def(void) { return OVL_UUID_AUTO; } static const struct constant_table ovl_parameter_xino[] = { { "off", OVL_XINO_OFF }, { "auto", OVL_XINO_AUTO }, { "on", OVL_XINO_ON }, {} }; const char *ovl_xino_mode(struct ovl_config *config) { return ovl_parameter_xino[config->xino].name; } static int ovl_xino_def(void) { return ovl_xino_auto_def ? OVL_XINO_AUTO : OVL_XINO_OFF; } const struct constant_table ovl_parameter_redirect_dir[] = { { "off", OVL_REDIRECT_OFF }, { "follow", OVL_REDIRECT_FOLLOW }, { "nofollow", OVL_REDIRECT_NOFOLLOW }, { "on", OVL_REDIRECT_ON }, {} }; static const char *ovl_redirect_mode(struct ovl_config *config) { return ovl_parameter_redirect_dir[config->redirect_mode].name; } static int ovl_redirect_mode_def(void) { return ovl_redirect_dir_def ? OVL_REDIRECT_ON : ovl_redirect_always_follow ? 
OVL_REDIRECT_FOLLOW : OVL_REDIRECT_NOFOLLOW; } static const struct constant_table ovl_parameter_verity[] = { { "off", OVL_VERITY_OFF }, { "on", OVL_VERITY_ON }, { "require", OVL_VERITY_REQUIRE }, {} }; static const char *ovl_verity_mode(struct ovl_config *config) { return ovl_parameter_verity[config->verity_mode].name; } static int ovl_verity_mode_def(void) { return OVL_VERITY_OFF; } #define fsparam_string_empty(NAME, OPT) \ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) const struct fs_parameter_spec ovl_parameter_spec[] = { fsparam_string_empty("lowerdir", Opt_lowerdir), fsparam_string("upperdir", Opt_upperdir), fsparam_string("workdir", Opt_workdir), fsparam_flag("default_permissions", Opt_default_permissions), fsparam_enum("redirect_dir", Opt_redirect_dir, ovl_parameter_redirect_dir), fsparam_enum("index", Opt_index, ovl_parameter_bool), fsparam_enum("uuid", Opt_uuid, ovl_parameter_uuid), fsparam_enum("nfs_export", Opt_nfs_export, ovl_parameter_bool), fsparam_flag("userxattr", Opt_userxattr), fsparam_enum("xino", Opt_xino, ovl_parameter_xino), fsparam_enum("metacopy", Opt_metacopy, ovl_parameter_bool), fsparam_enum("verity", Opt_verity, ovl_parameter_verity), fsparam_flag("volatile", Opt_volatile), {} }; static ssize_t ovl_parse_param_split_lowerdirs(char *str) { ssize_t nr_layers = 1, nr_colons = 0; char *s, *d; for (s = d = str;; s++, d++) { if (*s == '\\') { s++; } else if (*s == ':') { bool next_colon = (*(s + 1) == ':'); nr_colons++; if (nr_colons == 2 && next_colon) { pr_err("only single ':' or double '::' sequences of unescaped colons in lowerdir mount option allowed.\n"); return -EINVAL; } /* count layers, not colons */ if (!next_colon) nr_layers++; *d = '\0'; continue; } *d = *s; if (!*s) { /* trailing colons */ if (nr_colons) { pr_err("unescaped trailing colons in lowerdir mount option.\n"); return -EINVAL; } break; } nr_colons = 0; } return nr_layers; } static int ovl_mount_dir_noesc(const char *name, struct path *path) { int err = -EINVAL; if (!*name) { pr_err("empty lowerdir\n"); goto out; } err = kern_path(name, LOOKUP_FOLLOW, path); if (err) { pr_err("failed to resolve '%s': %i\n", name, err); goto out; } err = -EINVAL; if (ovl_dentry_weird(path->dentry)) { pr_err("filesystem on '%s' not supported\n", name); goto out_put; } if (!d_is_dir(path->dentry)) { pr_err("'%s' not a directory\n", name); goto out_put; } return 0; out_put: path_put_init(path); out: return err; } static void ovl_unescape(char *s) { char *d = s; for (;; s++, d++) { if (*s == '\\') s++; *d = *s; if (!*s) break; } } static int ovl_mount_dir(const char *name, struct path *path) { int err = -ENOMEM; char *tmp = kstrdup(name, GFP_KERNEL); if (tmp) { ovl_unescape(tmp); err = ovl_mount_dir_noesc(tmp, path); if (!err && path->dentry->d_flags & DCACHE_OP_REAL) { pr_err("filesystem on '%s' not supported as upperdir\n", tmp); path_put_init(path); err = -EINVAL; } kfree(tmp); } return err; } static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc, bool workdir) { int err; struct ovl_fs *ofs = fc->s_fs_info; struct ovl_config *config = &ofs->config; struct ovl_fs_context *ctx = fc->fs_private; struct path path; char *dup; err = ovl_mount_dir(name, &path); if (err) return err; /* * Check whether upper path is read-only here to report failures * early. Don't forget to recheck when the superblock is created * as the mount attributes could change. 
*/ if (__mnt_is_readonly(path.mnt)) { path_put(&path); return -EINVAL; } dup = kstrdup(name, GFP_KERNEL); if (!dup) { path_put(&path); return -ENOMEM; } if (workdir) { kfree(config->workdir); config->workdir = dup; path_put(&ctx->work); ctx->work = path; } else { kfree(config->upperdir); config->upperdir = dup; path_put(&ctx->upper); ctx->upper = path; } return 0; } static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx) { for (size_t nr = 0; nr < ctx->nr; nr++) { path_put(&ctx->lower[nr].path); kfree(ctx->lower[nr].name); ctx->lower[nr].name = NULL; } ctx->nr = 0; ctx->nr_data = 0; } /* * Parse lowerdir= mount option: * * (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2 * Set "/lower1", "/lower2", and "/lower3" as lower layers and * "/data1" and "/data2" as data lower layers. Any existing lower * layers are replaced. * (2) lowerdir=:/lower4 * Append "/lower4" to current stack of lower layers. This requires * that there already is at least one lower layer configured. * (3) lowerdir=::/lower5 * Append data "/lower5" as data lower layer. This requires that * there's at least one regular lower layer present. */ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) { int err; struct ovl_fs_context *ctx = fc->fs_private; struct ovl_fs_context_layer *l; char *dup = NULL, *dup_iter; ssize_t nr_lower = 0, nr = 0, nr_data = 0; bool append = false, data_layer = false; /* * Ensure we're backwards compatible with mount(2) * by allowing relative paths. */ /* drop all existing lower layers */ if (!*name) { ovl_parse_param_drop_lowerdir(ctx); return 0; } if (strncmp(name, "::", 2) == 0) { /* * This is a data layer. * There must be at least one regular lower layer * specified. */ if (ctx->nr == 0) { pr_err("data lower layers without regular lower layers not allowed"); return -EINVAL; } /* Skip the leading "::". */ name += 2; data_layer = true; /* * A data layer is automatically an append as there * must've been at least one regular lower layer. */ append = true; } else if (*name == ':') { /* * This is a regular lower layer. * If users want to append a layer enforce that they * have already specified a first layer before. It's * better to be strict. */ if (ctx->nr == 0) { pr_err("cannot append layer if no previous layer has been specified"); return -EINVAL; } /* * Once a sequence of data layers has started regular * lower layers are forbidden. */ if (ctx->nr_data > 0) { pr_err("regular lower layers cannot follow data lower layers"); return -EINVAL; } /* Skip the leading ":". */ name++; append = true; } dup = kstrdup(name, GFP_KERNEL); if (!dup) return -ENOMEM; err = -EINVAL; nr_lower = ovl_parse_param_split_lowerdirs(dup); if (nr_lower < 0) goto out_err; if ((nr_lower > OVL_MAX_STACK) || (append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) { pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK); goto out_err; } if (!append) ovl_parse_param_drop_lowerdir(ctx); /* * (1) append * * We want nr <= nr_lower <= capacity We know nr > 0 and nr <= * capacity. If nr == 0 this wouldn't be append. If nr + * nr_lower is <= capacity then nr <= nr_lower <= capacity * already holds. If nr + nr_lower exceeds capacity, we realloc. * * (2) replace * * Ensure we're backwards compatible with mount(2) which allows * "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last * specified lowerdir mount option to win. * * We want nr <= nr_lower <= capacity We know either (i) nr == 0 * or (ii) nr > 0. We also know nr_lower > 0. 
The capacity * could've been changed multiple times already so we only know * nr <= capacity. If nr + nr_lower > capacity we realloc, * otherwise nr <= nr_lower <= capacity holds already. */ nr_lower += ctx->nr; if (nr_lower > ctx->capacity) { err = -ENOMEM; l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower), GFP_KERNEL_ACCOUNT); if (!l) goto out_err; ctx->lower = l; ctx->capacity = nr_lower; } /* * (3) By (1) and (2) we know nr <= nr_lower <= capacity. * (4) If ctx->nr == 0 => replace * We have verified above that the lowerdir mount option * isn't an append, i.e., the lowerdir mount option * doesn't start with ":" or "::". * (4.1) The lowerdir mount options only contains regular lower * layers ":". * => Nothing to verify. * (4.2) The lowerdir mount options contains regular ":" and * data "::" layers. * => We need to verify that data lower layers "::" aren't * followed by regular ":" lower layers * (5) If ctx->nr > 0 => append * We know that there's at least one regular layer * otherwise we would've failed when parsing the previous * lowerdir mount option. * (5.1) The lowerdir mount option is a regular layer ":" append * => We need to verify that no data layers have been * specified before. * (5.2) The lowerdir mount option is a data layer "::" append * We know that there's at least one regular layer or * other data layers. => There's nothing to verify. */ dup_iter = dup; for (nr = ctx->nr; nr < nr_lower; nr++) { l = &ctx->lower[nr]; memset(l, 0, sizeof(*l)); err = ovl_mount_dir_noesc(dup_iter, &l->path); if (err) goto out_put; err = -ENOMEM; l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT); if (!l->name) goto out_put; if (data_layer) nr_data++; /* Calling strchr() again would overrun. */ if ((nr + 1) == nr_lower) break; err = -EINVAL; dup_iter = strchr(dup_iter, '\0') + 1; if (*dup_iter) { /* * This is a regular layer so we require that * there are no data layers. */ if ((ctx->nr_data + nr_data) > 0) { pr_err("regular lower layers cannot follow data lower layers"); goto out_put; } data_layer = false; continue; } /* This is a data lower layer. */ data_layer = true; dup_iter++; } ctx->nr = nr_lower; ctx->nr_data += nr_data; kfree(dup); return 0; out_put: /* * We know nr >= ctx->nr < nr_lower. If we failed somewhere * we want to undo until nr == ctx->nr. This is correct for * both ctx->nr == 0 and ctx->nr > 0. */ for (; nr >= ctx->nr; nr--) { l = &ctx->lower[nr]; kfree(l->name); l->name = NULL; path_put(&l->path); /* don't overflow */ if (nr == 0) break; } out_err: kfree(dup); /* Intentionally don't realloc to a smaller size. */ return err; } static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param) { int err = 0; struct fs_parse_result result; struct ovl_fs *ofs = fc->s_fs_info; struct ovl_config *config = &ofs->config; struct ovl_fs_context *ctx = fc->fs_private; int opt; if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { /* * On remount overlayfs has always ignored all mount * options no matter if malformed or not so for * backwards compatibility we do the same here. */ if (fc->oldapi) return 0; /* * Give us the freedom to allow changing mount options * with the new mount api in the future. So instead of * silently ignoring everything we report a proper * error. This is only visible for users of the new * mount api. 
*/ return invalfc(fc, "No changes allowed in reconfigure"); } opt = fs_parse(fc, ovl_parameter_spec, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_lowerdir: err = ovl_parse_param_lowerdir(param->string, fc); break; case Opt_upperdir: fallthrough; case Opt_workdir: err = ovl_parse_param_upperdir(param->string, fc, (Opt_workdir == opt)); break; case Opt_default_permissions: config->default_permissions = true; break; case Opt_redirect_dir: config->redirect_mode = result.uint_32; if (config->redirect_mode == OVL_REDIRECT_OFF) { config->redirect_mode = ovl_redirect_always_follow ? OVL_REDIRECT_FOLLOW : OVL_REDIRECT_NOFOLLOW; } ctx->set.redirect = true; break; case Opt_index: config->index = result.uint_32; ctx->set.index = true; break; case Opt_uuid: config->uuid = result.uint_32; break; case Opt_nfs_export: config->nfs_export = result.uint_32; ctx->set.nfs_export = true; break; case Opt_xino: config->xino = result.uint_32; break; case Opt_metacopy: config->metacopy = result.uint_32; ctx->set.metacopy = true; break; case Opt_verity: config->verity_mode = result.uint_32; break; case Opt_volatile: config->ovl_volatile = true; break; case Opt_userxattr: config->userxattr = true; break; default: pr_err("unrecognized mount option \"%s\" or missing value\n", param->key); return -EINVAL; } return err; } static int ovl_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, ovl_fill_super); } static inline void ovl_fs_context_free(struct ovl_fs_context *ctx) { ovl_parse_param_drop_lowerdir(ctx); path_put(&ctx->upper); path_put(&ctx->work); kfree(ctx->lower); kfree(ctx); } static void ovl_free(struct fs_context *fc) { struct ovl_fs *ofs = fc->s_fs_info; struct ovl_fs_context *ctx = fc->fs_private; /* * ofs is stored in the fs_context when it is initialized. * ofs is transferred to the superblock on a successful mount, * but if an error occurs before the transfer we have to free * it here. */ if (ofs) ovl_free_fs(ofs); if (ctx) ovl_fs_context_free(ctx); } static int ovl_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; struct ovl_fs *ofs = OVL_FS(sb); struct super_block *upper_sb; int ret = 0; if (!(fc->sb_flags & SB_RDONLY) && ovl_force_readonly(ofs)) return -EROFS; if (fc->sb_flags & SB_RDONLY && !sb_rdonly(sb)) { upper_sb = ovl_upper_mnt(ofs)->mnt_sb; if (ovl_should_sync(ofs)) { down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); } } return ret; } static const struct fs_context_operations ovl_context_ops = { .parse_param = ovl_parse_param, .get_tree = ovl_get_tree, .reconfigure = ovl_reconfigure, .free = ovl_free, }; /* * This is called during fsopen() and will record the user namespace of * the caller in fc->user_ns since we've raised FS_USERNS_MOUNT. We'll * need it when we actually create the superblock to verify that the * process creating the superblock is in the same user namespace as * process that called fsopen(). */ int ovl_init_fs_context(struct fs_context *fc) { struct ovl_fs_context *ctx; struct ovl_fs *ofs; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (!ctx) return -ENOMEM; /* * By default we allocate for three lower layers. It's likely * that it'll cover most users. 
*/ ctx->lower = kmalloc_array(3, sizeof(*ctx->lower), GFP_KERNEL_ACCOUNT); if (!ctx->lower) goto out_err; ctx->capacity = 3; ofs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL); if (!ofs) goto out_err; ofs->config.redirect_mode = ovl_redirect_mode_def(); ofs->config.index = ovl_index_def; ofs->config.uuid = ovl_uuid_def(); ofs->config.nfs_export = ovl_nfs_export_def; ofs->config.xino = ovl_xino_def(); ofs->config.metacopy = ovl_metacopy_def; fc->s_fs_info = ofs; fc->fs_private = ctx; fc->ops = &ovl_context_ops; return 0; out_err: ovl_fs_context_free(ctx); return -ENOMEM; } void ovl_free_fs(struct ovl_fs *ofs) { struct vfsmount **mounts; unsigned i; iput(ofs->workbasedir_trap); iput(ofs->indexdir_trap); iput(ofs->workdir_trap); dput(ofs->whiteout); dput(ofs->indexdir); dput(ofs->workdir); if (ofs->workdir_locked) ovl_inuse_unlock(ofs->workbasedir); dput(ofs->workbasedir); if (ofs->upperdir_locked) ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root); /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */ mounts = (struct vfsmount **) ofs->layers; for (i = 0; i < ofs->numlayer; i++) { iput(ofs->layers[i].trap); mounts[i] = ofs->layers[i].mnt; kfree(ofs->layers[i].name); } kern_unmount_array(mounts, ofs->numlayer); kfree(ofs->layers); for (i = 0; i < ofs->numfs; i++) free_anon_bdev(ofs->fs[i].pseudo_dev); kfree(ofs->fs); kfree(ofs->config.upperdir); kfree(ofs->config.workdir); if (ofs->creator_cred) put_cred(ofs->creator_cred); kfree(ofs); } int ovl_fs_params_verify(const struct ovl_fs_context *ctx, struct ovl_config *config) { struct ovl_opt_set set = ctx->set; if (ctx->nr_data > 0 && !config->metacopy) { pr_err("lower data-only dirs require metacopy support.\n"); return -EINVAL; } /* Workdir/index are useless in non-upper mount */ if (!config->upperdir) { if (config->workdir) { pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n", config->workdir); kfree(config->workdir); config->workdir = NULL; } if (config->index && set.index) { pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n"); set.index = false; } config->index = false; } if (!config->upperdir && config->ovl_volatile) { pr_info("option \"volatile\" is meaningless in a non-upper mount, ignoring it.\n"); config->ovl_volatile = false; } if (!config->upperdir && config->uuid == OVL_UUID_ON) { pr_info("option \"uuid=on\" requires an upper fs, falling back to uuid=null.\n"); config->uuid = OVL_UUID_NULL; } /* Resolve verity -> metacopy dependency */ if (config->verity_mode && !config->metacopy) { /* Don't allow explicit specified conflicting combinations */ if (set.metacopy) { pr_err("conflicting options: metacopy=off,verity=%s\n", ovl_verity_mode(config)); return -EINVAL; } /* Otherwise automatically enable metacopy. */ config->metacopy = true; } /* * This is to make the logic below simpler. It doesn't make any other * difference, since redirect_dir=on is only used for upper. */ if (!config->upperdir && config->redirect_mode == OVL_REDIRECT_FOLLOW) config->redirect_mode = OVL_REDIRECT_ON; /* Resolve verity -> metacopy -> redirect_dir dependency */ if (config->metacopy && config->redirect_mode != OVL_REDIRECT_ON) { if (set.metacopy && set.redirect) { pr_err("conflicting options: metacopy=on,redirect_dir=%s\n", ovl_redirect_mode(config)); return -EINVAL; } if (config->verity_mode && set.redirect) { pr_err("conflicting options: verity=%s,redirect_dir=%s\n", ovl_verity_mode(config), ovl_redirect_mode(config)); return -EINVAL; } if (set.redirect) { /* * There was an explicit redirect_dir=... 
that resulted * in this conflict. */ pr_info("disabling metacopy due to redirect_dir=%s\n", ovl_redirect_mode(config)); config->metacopy = false; } else { /* Automatically enable redirect otherwise. */ config->redirect_mode = OVL_REDIRECT_ON; } } /* Resolve nfs_export -> index dependency */ if (config->nfs_export && !config->index) { if (!config->upperdir && config->redirect_mode != OVL_REDIRECT_NOFOLLOW) { pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); config->nfs_export = false; } else if (set.nfs_export && set.index) { pr_err("conflicting options: nfs_export=on,index=off\n"); return -EINVAL; } else if (set.index) { /* * There was an explicit index=off that resulted * in this conflict. */ pr_info("disabling nfs_export due to index=off\n"); config->nfs_export = false; } else { /* Automatically enable index otherwise. */ config->index = true; } } /* Resolve nfs_export -> !metacopy && !verity dependency */ if (config->nfs_export && config->metacopy) { if (set.nfs_export && set.metacopy) { pr_err("conflicting options: nfs_export=on,metacopy=on\n"); return -EINVAL; } if (set.metacopy) { /* * There was an explicit metacopy=on that resulted * in this conflict. */ pr_info("disabling nfs_export due to metacopy=on\n"); config->nfs_export = false; } else if (config->verity_mode) { /* * There was an explicit verity=.. that resulted * in this conflict. */ pr_info("disabling nfs_export due to verity=%s\n", ovl_verity_mode(config)); config->nfs_export = false; } else { /* * There was an explicit nfs_export=on that resulted * in this conflict. */ pr_info("disabling metacopy due to nfs_export=on\n"); config->metacopy = false; } } /* Resolve userxattr -> !redirect && !metacopy && !verity dependency */ if (config->userxattr) { if (set.redirect && config->redirect_mode != OVL_REDIRECT_NOFOLLOW) { pr_err("conflicting options: userxattr,redirect_dir=%s\n", ovl_redirect_mode(config)); return -EINVAL; } if (config->metacopy && set.metacopy) { pr_err("conflicting options: userxattr,metacopy=on\n"); return -EINVAL; } if (config->verity_mode) { pr_err("conflicting options: userxattr,verity=%s\n", ovl_verity_mode(config)); return -EINVAL; } /* * Silently disable default setting of redirect and metacopy. * This shall be the default in the future as well: these * options must be explicitly enabled if used together with * userxattr. */ config->redirect_mode = OVL_REDIRECT_NOFOLLOW; config->metacopy = false; } return 0; } /** * ovl_show_options * @m: the seq_file handle * @dentry: The dentry to query * * Prints the mount options for a given superblock. * Returns zero; does not fail. 
*/ int ovl_show_options(struct seq_file *m, struct dentry *dentry) { struct super_block *sb = dentry->d_sb; struct ovl_fs *ofs = OVL_FS(sb); size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer; const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower]; /* ofs->layers[0] is the upper layer */ seq_printf(m, ",lowerdir=%s", ofs->layers[1].name); /* dump regular lower layers */ for (nr = 2; nr < nr_merged_lower; nr++) seq_printf(m, ":%s", ofs->layers[nr].name); /* dump data lower layers */ for (nr = 0; nr < ofs->numdatalayer; nr++) seq_printf(m, "::%s", data_layers[nr].name); if (ofs->config.upperdir) { seq_show_option(m, "upperdir", ofs->config.upperdir); seq_show_option(m, "workdir", ofs->config.workdir); } if (ofs->config.default_permissions) seq_puts(m, ",default_permissions"); if (ofs->config.redirect_mode != ovl_redirect_mode_def()) seq_printf(m, ",redirect_dir=%s", ovl_redirect_mode(&ofs->config)); if (ofs->config.index != ovl_index_def) seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off"); if (ofs->config.uuid != ovl_uuid_def()) seq_printf(m, ",uuid=%s", ovl_uuid_mode(&ofs->config)); if (ofs->config.nfs_export != ovl_nfs_export_def) seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ? "on" : "off"); if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(ofs)) seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config)); if (ofs->config.metacopy != ovl_metacopy_def) seq_printf(m, ",metacopy=%s", ofs->config.metacopy ? "on" : "off"); if (ofs->config.ovl_volatile) seq_puts(m, ",volatile"); if (ofs->config.userxattr) seq_puts(m, ",userxattr"); if (ofs->config.verity_mode != ovl_verity_mode_def()) seq_printf(m, ",verity=%s", ovl_verity_mode(&ofs->config)); return 0; }
linux-master
fs/overlayfs/params.c
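A minimal userspace sketch (an illustration, not kernel code) of the precedence rule ovl_fs_params_verify() applies to the metacopy/redirect_dir pair above: explicitly conflicting options fail with an error, while options left at their defaults are adjusted silently to satisfy the dependency.

#include <stdbool.h>
#include <stdio.h>

struct opts {
	bool metacopy, redirect_on;      /* effective values */
	bool set_metacopy, set_redirect; /* explicitly given by the user? */
};

static int resolve(struct opts *o)
{
	if (o->metacopy && !o->redirect_on) {
		if (o->set_metacopy && o->set_redirect)
			return -1;             /* explicit conflict: error out */
		if (o->set_redirect)
			o->metacopy = false;   /* user chose a redirect mode */
		else
			o->redirect_on = true; /* auto-enable the dependency */
	}
	return 0;
}

int main(void)
{
	struct opts o = { .metacopy = true, .set_metacopy = true };

	if (resolve(&o))
		fprintf(stderr, "conflicting options\n");
	else
		printf("metacopy=%d redirect_dir=%d\n", o.metacopy, o.redirect_on);
	return 0;
}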
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Red Hat, Inc. */ #include <linux/cred.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/xattr.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/splice.h> #include <linux/security.h> #include <linux/mm.h> #include <linux/fs.h> #include "overlayfs.h" struct ovl_aio_req { struct kiocb iocb; refcount_t ref; struct kiocb *orig_iocb; }; static struct kmem_cache *ovl_aio_request_cachep; static char ovl_whatisit(struct inode *inode, struct inode *realinode) { if (realinode != ovl_inode_upper(inode)) return 'l'; if (ovl_has_upperdata(inode)) return 'u'; else return 'm'; } /* No atime modification on underlying */ #define OVL_OPEN_FLAGS (O_NOATIME) static struct file *ovl_open_realfile(const struct file *file, const struct path *realpath) { struct inode *realinode = d_inode(realpath->dentry); struct inode *inode = file_inode(file); struct mnt_idmap *real_idmap; struct file *realfile; const struct cred *old_cred; int flags = file->f_flags | OVL_OPEN_FLAGS; int acc_mode = ACC_MODE(flags); int err; if (flags & O_APPEND) acc_mode |= MAY_APPEND; old_cred = ovl_override_creds(inode->i_sb); real_idmap = mnt_idmap(realpath->mnt); err = inode_permission(real_idmap, realinode, MAY_OPEN | acc_mode); if (err) { realfile = ERR_PTR(err); } else { if (!inode_owner_or_capable(real_idmap, realinode)) flags &= ~O_NOATIME; realfile = backing_file_open(&file->f_path, flags, realpath, current_cred()); } revert_creds(old_cred); pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n", file, file, ovl_whatisit(inode, realinode), file->f_flags, realfile, IS_ERR(realfile) ? 0 : realfile->f_flags); return realfile; } #define OVL_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT) static int ovl_change_flags(struct file *file, unsigned int flags) { struct inode *inode = file_inode(file); int err; flags &= OVL_SETFL_MASK; if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode)) return -EPERM; if ((flags & O_DIRECT) && !(file->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; if (file->f_op->check_flags) { err = file->f_op->check_flags(flags); if (err) return err; } spin_lock(&file->f_lock); file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags; file->f_iocb_flags = iocb_flags(file); spin_unlock(&file->f_lock); return 0; } static int ovl_real_fdget_meta(const struct file *file, struct fd *real, bool allow_meta) { struct dentry *dentry = file_dentry(file); struct path realpath; int err; real->flags = 0; real->file = file->private_data; if (allow_meta) { ovl_path_real(dentry, &realpath); } else { /* lazy lookup and verify of lowerdata */ err = ovl_verify_lowerdata(dentry); if (err) return err; ovl_path_realdata(dentry, &realpath); } if (!realpath.dentry) return -EIO; /* Has it been copied up since we'd opened it? */ if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) { real->flags = FDPUT_FPUT; real->file = ovl_open_realfile(file, &realpath); return PTR_ERR_OR_ZERO(real->file); } /* Did the flags change since open? 
*/ if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS)) return ovl_change_flags(real->file, file->f_flags); return 0; } static int ovl_real_fdget(const struct file *file, struct fd *real) { if (d_is_dir(file_dentry(file))) { real->flags = 0; real->file = ovl_dir_real_file(file, false); return PTR_ERR_OR_ZERO(real->file); } return ovl_real_fdget_meta(file, real, false); } static int ovl_open(struct inode *inode, struct file *file) { struct dentry *dentry = file_dentry(file); struct file *realfile; struct path realpath; int err; /* lazy lookup and verify lowerdata */ err = ovl_verify_lowerdata(dentry); if (err) return err; err = ovl_maybe_copy_up(dentry, file->f_flags); if (err) return err; /* No longer need these flags, so don't pass them on to underlying fs */ file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); ovl_path_realdata(dentry, &realpath); if (!realpath.dentry) return -EIO; realfile = ovl_open_realfile(file, &realpath); if (IS_ERR(realfile)) return PTR_ERR(realfile); file->private_data = realfile; return 0; } static int ovl_release(struct inode *inode, struct file *file) { fput(file->private_data); return 0; } static loff_t ovl_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file_inode(file); struct fd real; const struct cred *old_cred; loff_t ret; /* * The two special cases below do not need to involve real fs, * so we can optimize for concurrent callers. */ if (offset == 0) { if (whence == SEEK_CUR) return file->f_pos; if (whence == SEEK_SET) return vfs_setpos(file, 0, 0); } ret = ovl_real_fdget(file, &real); if (ret) return ret; /* * Overlay file f_pos is the master copy that is preserved * through copy up and modified on read/write, but only real * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose * limitations that are more strict than ->s_maxbytes for specific * files, so we use the real file to perform seeks. 
*/ ovl_inode_lock(inode); real.file->f_pos = file->f_pos; old_cred = ovl_override_creds(inode->i_sb); ret = vfs_llseek(real.file, offset, whence); revert_creds(old_cred); file->f_pos = real.file->f_pos; ovl_inode_unlock(inode); fdput(real); return ret; } static void ovl_file_accessed(struct file *file) { struct inode *inode, *upperinode; struct timespec64 ctime, uctime; if (file->f_flags & O_NOATIME) return; inode = file_inode(file); upperinode = ovl_inode_upper(inode); if (!upperinode) return; ctime = inode_get_ctime(inode); uctime = inode_get_ctime(upperinode); if ((!timespec64_equal(&inode->i_mtime, &upperinode->i_mtime) || !timespec64_equal(&ctime, &uctime))) { inode->i_mtime = upperinode->i_mtime; inode_set_ctime_to_ts(inode, uctime); } touch_atime(&file->f_path); } static rwf_t ovl_iocb_to_rwf(int ifl) { rwf_t flags = 0; if (ifl & IOCB_NOWAIT) flags |= RWF_NOWAIT; if (ifl & IOCB_HIPRI) flags |= RWF_HIPRI; if (ifl & IOCB_DSYNC) flags |= RWF_DSYNC; if (ifl & IOCB_SYNC) flags |= RWF_SYNC; return flags; } static inline void ovl_aio_put(struct ovl_aio_req *aio_req) { if (refcount_dec_and_test(&aio_req->ref)) { fput(aio_req->iocb.ki_filp); kmem_cache_free(ovl_aio_request_cachep, aio_req); } } static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req) { struct kiocb *iocb = &aio_req->iocb; struct kiocb *orig_iocb = aio_req->orig_iocb; if (iocb->ki_flags & IOCB_WRITE) { struct inode *inode = file_inode(orig_iocb->ki_filp); kiocb_end_write(iocb); ovl_copyattr(inode); } orig_iocb->ki_pos = iocb->ki_pos; ovl_aio_put(aio_req); } static void ovl_aio_rw_complete(struct kiocb *iocb, long res) { struct ovl_aio_req *aio_req = container_of(iocb, struct ovl_aio_req, iocb); struct kiocb *orig_iocb = aio_req->orig_iocb; ovl_aio_cleanup_handler(aio_req); orig_iocb->ki_complete(orig_iocb, res); } static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct fd real; const struct cred *old_cred; ssize_t ret; if (!iov_iter_count(iter)) return 0; ret = ovl_real_fdget(file, &real); if (ret) return ret; ret = -EINVAL; if (iocb->ki_flags & IOCB_DIRECT && !(real.file->f_mode & FMODE_CAN_ODIRECT)) goto out_fdput; old_cred = ovl_override_creds(file_inode(file)->i_sb); if (is_sync_kiocb(iocb)) { ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, ovl_iocb_to_rwf(iocb->ki_flags)); } else { struct ovl_aio_req *aio_req; ret = -ENOMEM; aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL); if (!aio_req) goto out; real.flags = 0; aio_req->orig_iocb = iocb; kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter); ovl_aio_put(aio_req); if (ret != -EIOCBQUEUED) ovl_aio_cleanup_handler(aio_req); } out: revert_creds(old_cred); ovl_file_accessed(file); out_fdput: fdput(real); return ret; } static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct fd real; const struct cred *old_cred; ssize_t ret; int ifl = iocb->ki_flags; if (!iov_iter_count(iter)) return 0; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); ret = file_remove_privs(file); if (ret) goto out_unlock; ret = ovl_real_fdget(file, &real); if (ret) goto out_unlock; ret = -EINVAL; if (iocb->ki_flags & IOCB_DIRECT && !(real.file->f_mode & FMODE_CAN_ODIRECT)) goto out_fdput; if (!ovl_should_sync(OVL_FS(inode->i_sb))) ifl &= ~(IOCB_DSYNC | IOCB_SYNC); old_cred = 
ovl_override_creds(file_inode(file)->i_sb); if (is_sync_kiocb(iocb)) { file_start_write(real.file); ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, ovl_iocb_to_rwf(ifl)); file_end_write(real.file); /* Update size */ ovl_copyattr(inode); } else { struct ovl_aio_req *aio_req; ret = -ENOMEM; aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL); if (!aio_req) goto out; real.flags = 0; aio_req->orig_iocb = iocb; kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); aio_req->iocb.ki_flags = ifl; aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); kiocb_start_write(&aio_req->iocb); ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter); ovl_aio_put(aio_req); if (ret != -EIOCBQUEUED) ovl_aio_cleanup_handler(aio_req); } out: revert_creds(old_cred); out_fdput: fdput(real); out_unlock: inode_unlock(inode); return ret; } static ssize_t ovl_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { const struct cred *old_cred; struct fd real; ssize_t ret; ret = ovl_real_fdget(in, &real); if (ret) return ret; old_cred = ovl_override_creds(file_inode(in)->i_sb); ret = vfs_splice_read(real.file, ppos, pipe, len, flags); revert_creds(old_cred); ovl_file_accessed(in); fdput(real); return ret; } /* * Calling iter_file_splice_write() directly from overlay's f_op may deadlock * due to lock order inversion between pipe->mutex in iter_file_splice_write() * and file_start_write(real.file) in ovl_write_iter(). * * So do everything ovl_write_iter() does and call iter_file_splice_write() on * the real file. */ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct fd real; const struct cred *old_cred; struct inode *inode = file_inode(out); ssize_t ret; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); ret = file_remove_privs(out); if (ret) goto out_unlock; ret = ovl_real_fdget(out, &real); if (ret) goto out_unlock; old_cred = ovl_override_creds(inode->i_sb); file_start_write(real.file); ret = iter_file_splice_write(pipe, real.file, ppos, len, flags); file_end_write(real.file); /* Update size */ ovl_copyattr(inode); revert_creds(old_cred); fdput(real); out_unlock: inode_unlock(inode); return ret; } static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct fd real; const struct cred *old_cred; int ret; ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb)); if (ret <= 0) return ret; ret = ovl_real_fdget_meta(file, &real, !datasync); if (ret) return ret; /* Don't sync lower file for fear of receiving EROFS error */ if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) { old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fsync_range(real.file, start, end, datasync); revert_creds(old_cred); } fdput(real); return ret; } static int ovl_mmap(struct file *file, struct vm_area_struct *vma) { struct file *realfile = file->private_data; const struct cred *old_cred; int ret; if (!realfile->f_op->mmap) return -ENODEV; if (WARN_ON(file != vma->vm_file)) return -EIO; vma_set_file(vma, realfile); old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = call_mmap(vma->vm_file, vma); revert_creds(old_cred); ovl_file_accessed(file); return ret; } static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct fd real; const struct cred *old_cred; int ret; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); ret = 
file_remove_privs(file); if (ret) goto out_unlock; ret = ovl_real_fdget(file, &real); if (ret) goto out_unlock; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fallocate(real.file, mode, offset, len); revert_creds(old_cred); /* Update size */ ovl_copyattr(inode); fdput(real); out_unlock: inode_unlock(inode); return ret; } static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice) { struct fd real; const struct cred *old_cred; int ret; ret = ovl_real_fdget(file, &real); if (ret) return ret; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fadvise(real.file, offset, len, advice); revert_creds(old_cred); fdput(real); return ret; } enum ovl_copyop { OVL_COPY, OVL_CLONE, OVL_DEDUPE, }; static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int flags, enum ovl_copyop op) { struct inode *inode_out = file_inode(file_out); struct fd real_in, real_out; const struct cred *old_cred; loff_t ret; inode_lock(inode_out); if (op != OVL_DEDUPE) { /* Update mode */ ovl_copyattr(inode_out); ret = file_remove_privs(file_out); if (ret) goto out_unlock; } ret = ovl_real_fdget(file_out, &real_out); if (ret) goto out_unlock; ret = ovl_real_fdget(file_in, &real_in); if (ret) { fdput(real_out); goto out_unlock; } old_cred = ovl_override_creds(file_inode(file_out)->i_sb); switch (op) { case OVL_COPY: ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file, pos_out, len, flags); break; case OVL_CLONE: ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file, pos_out, len, flags); break; case OVL_DEDUPE: ret = vfs_dedupe_file_range_one(real_in.file, pos_in, real_out.file, pos_out, len, flags); break; } revert_creds(old_cred); /* Update size */ ovl_copyattr(inode_out); fdput(real_in); fdput(real_out); out_unlock: inode_unlock(inode_out); return ret; } static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len, unsigned int flags) { return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, flags, OVL_COPY); } static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags) { enum ovl_copyop op; if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) return -EINVAL; if (remap_flags & REMAP_FILE_DEDUP) op = OVL_DEDUPE; else op = OVL_CLONE; /* * Don't copy up because of a dedupe request, this wouldn't make sense * most of the time (data would be duplicated instead of deduplicated). 
*/ if (op == OVL_DEDUPE && (!ovl_inode_upper(file_inode(file_in)) || !ovl_inode_upper(file_inode(file_out)))) return -EPERM; return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, remap_flags, op); } static int ovl_flush(struct file *file, fl_owner_t id) { struct fd real; const struct cred *old_cred; int err; err = ovl_real_fdget(file, &real); if (err) return err; if (real.file->f_op->flush) { old_cred = ovl_override_creds(file_inode(file)->i_sb); err = real.file->f_op->flush(real.file, id); revert_creds(old_cred); } fdput(real); return err; } const struct file_operations ovl_file_operations = { .open = ovl_open, .release = ovl_release, .llseek = ovl_llseek, .read_iter = ovl_read_iter, .write_iter = ovl_write_iter, .fsync = ovl_fsync, .mmap = ovl_mmap, .fallocate = ovl_fallocate, .fadvise = ovl_fadvise, .flush = ovl_flush, .splice_read = ovl_splice_read, .splice_write = ovl_splice_write, .copy_file_range = ovl_copy_file_range, .remap_file_range = ovl_remap_file_range, }; int __init ovl_aio_request_cache_init(void) { ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req", sizeof(struct ovl_aio_req), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ovl_aio_request_cachep) return -ENOMEM; return 0; } void ovl_aio_request_cache_destroy(void) { kmem_cache_destroy(ovl_aio_request_cachep); }
linux-master
fs/overlayfs/file.c
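A hypothetical userspace demonstration of the copy-up that ovl_open() performs via ovl_maybe_copy_up(): opening a lower-only file for write through the overlay materializes a copy in the upper layer before any data is written. The mount point and layer paths below are assumptions chosen for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumes an overlay mounted at /mnt/merged with upperdir=/mnt/upper */
	int fd = open("/mnt/merged/lower-only.txt", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* after a successful open for write, the upper copy should exist */
	if (access("/mnt/upper/lower-only.txt", F_OK) == 0)
		printf("file was copied up to the upper layer\n");
	close(fd);
	return 0;
}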
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Red Hat, Inc. * Copyright (C) 2012 Jeremy Kerr <[email protected]> */ #include <linux/ctype.h> #include <linux/efi.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/ucs2_string.h> #include <linux/slab.h> #include <linux/magic.h> #include <linux/statfs.h> #include "internal.h" LIST_HEAD(efivarfs_list); static void efivarfs_evict_inode(struct inode *inode) { clear_inode(inode); } static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf) { const u32 attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS; u64 storage_space, remaining_space, max_variable_size; efi_status_t status; /* Some UEFI firmware does not implement QueryVariableInfo() */ storage_space = remaining_space = 0; if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) { status = efivar_query_variable_info(attr, &storage_space, &remaining_space, &max_variable_size); if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n", status); } /* * This is not a normal filesystem, so no point in pretending it has a block * size; we declare f_bsize to 1, so that we can then report the exact value * sent by EFI QueryVariableInfo in f_blocks and f_bfree */ buf->f_bsize = 1; buf->f_namelen = NAME_MAX; buf->f_blocks = storage_space; buf->f_bfree = remaining_space; buf->f_type = dentry->d_sb->s_magic; /* * In f_bavail we declare the free space that the kernel will allow writing * when the storage_paranoia x86 quirk is active. To use more, users * should boot the kernel with efi_no_storage_paranoia. */ if (remaining_space > efivar_reserved_space()) buf->f_bavail = remaining_space - efivar_reserved_space(); else buf->f_bavail = 0; return 0; } static const struct super_operations efivarfs_ops = { .statfs = efivarfs_statfs, .drop_inode = generic_delete_inode, .evict_inode = efivarfs_evict_inode, }; /* * Compare two efivarfs file names. * * An efivarfs filename is composed of two parts, * * 1. A case-sensitive variable name * 2. A case-insensitive GUID * * So we need to perform a case-sensitive match on part 1 and a * case-insensitive match on part 2. */ static int efivarfs_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { int guid = len - EFI_VARIABLE_GUID_LEN; if (name->len != len) return 1; /* Case-sensitive compare for the variable name */ if (memcmp(str, name->name, guid)) return 1; /* Case-insensitive compare for the GUID */ return strncasecmp(name->name + guid, str + guid, EFI_VARIABLE_GUID_LEN); } static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr) { unsigned long hash = init_name_hash(dentry); const unsigned char *s = qstr->name; unsigned int len = qstr->len; if (!efivarfs_valid_name(s, len)) return -EINVAL; while (len-- > EFI_VARIABLE_GUID_LEN) hash = partial_name_hash(*s++, hash); /* GUID is case-insensitive. 
*/ while (len--) hash = partial_name_hash(tolower(*s++), hash); qstr->hash = end_name_hash(hash); return 0; } static const struct dentry_operations efivarfs_d_ops = { .d_compare = efivarfs_d_compare, .d_hash = efivarfs_d_hash, .d_delete = always_delete_dentry, }; static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) { struct dentry *d; struct qstr q; int err; q.name = name; q.len = strlen(name); err = efivarfs_d_hash(parent, &q); if (err) return ERR_PTR(err); d = d_alloc(parent, &q); if (d) return d; return ERR_PTR(-ENOMEM); } static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, unsigned long name_size, void *data) { struct super_block *sb = (struct super_block *)data; struct efivar_entry *entry; struct inode *inode = NULL; struct dentry *dentry, *root = sb->s_root; unsigned long size = 0; char *name; int len; int err = -ENOMEM; bool is_removable = false; if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) return 0; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return err; memcpy(entry->var.VariableName, name16, name_size); memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); len = ucs2_utf8size(entry->var.VariableName); /* name, plus '-', plus GUID, plus NUL*/ name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); if (!name) goto fail; ucs2_as_utf8(name, entry->var.VariableName, len); if (efivar_variable_is_removable(entry->var.VendorGuid, name, len)) is_removable = true; name[len] = '-'; efi_guid_to_str(&entry->var.VendorGuid, name + len + 1); name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */ strreplace(name, '/', '!'); inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0, is_removable); if (!inode) goto fail_name; dentry = efivarfs_alloc_dentry(root, name); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto fail_inode; } __efivar_entry_get(entry, NULL, &size, NULL); __efivar_entry_add(entry, &efivarfs_list); /* copied by the above to local storage in the dentry. 
*/ kfree(name); inode_lock(inode); inode->i_private = entry; i_size_write(inode, size + sizeof(entry->var.Attributes)); inode_unlock(inode); d_add(dentry, inode); return 0; fail_inode: iput(inode); fail_name: kfree(name); fail: kfree(entry); return err; } static int efivarfs_destroy(struct efivar_entry *entry, void *data) { efivar_entry_remove(entry); kfree(entry); return 0; } static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode = NULL; struct dentry *root; int err; if (!efivar_is_available()) return -EOPNOTSUPP; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = EFIVARFS_MAGIC; sb->s_op = &efivarfs_ops; sb->s_d_op = &efivarfs_d_ops; sb->s_time_gran = 1; if (!efivar_supports_writes()) sb->s_flags |= SB_RDONLY; inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); if (!inode) return -ENOMEM; inode->i_op = &efivarfs_dir_inode_operations; root = d_make_root(inode); sb->s_root = root; if (!root) return -ENOMEM; INIT_LIST_HEAD(&efivarfs_list); err = efivar_init(efivarfs_callback, (void *)sb, true, &efivarfs_list); if (err) efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL); return err; } static int efivarfs_get_tree(struct fs_context *fc) { return get_tree_single(fc, efivarfs_fill_super); } static const struct fs_context_operations efivarfs_context_ops = { .get_tree = efivarfs_get_tree, }; static int efivarfs_init_fs_context(struct fs_context *fc) { fc->ops = &efivarfs_context_ops; return 0; } static void efivarfs_kill_sb(struct super_block *sb) { kill_litter_super(sb); if (!efivar_is_available()) return; /* Remove all entries and destroy */ efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL); } static struct file_system_type efivarfs_type = { .owner = THIS_MODULE, .name = "efivarfs", .init_fs_context = efivarfs_init_fs_context, .kill_sb = efivarfs_kill_sb, }; static __init int efivarfs_init(void) { return register_filesystem(&efivarfs_type); } static __exit void efivarfs_exit(void) { unregister_filesystem(&efivarfs_type); } MODULE_AUTHOR("Matthew Garrett, Jeremy Kerr"); MODULE_DESCRIPTION("EFI Variable Filesystem"); MODULE_LICENSE("GPL"); MODULE_ALIAS_FS("efivarfs"); module_init(efivarfs_init); module_exit(efivarfs_exit);
linux-master
fs/efivarfs/super.c
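A standalone sketch of the split comparison efivarfs_d_compare() performs: the variable-name prefix is matched case-sensitively and the trailing GUID case-insensitively. GUID_LEN mirrors EFI_VARIABLE_GUID_LEN (36 characters); the helper name is an invention for this example.

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define GUID_LEN 36

static int efivar_name_cmp(const char *a, const char *b, size_t len)
{
	size_t guid = len - GUID_LEN;

	if (strlen(a) != len || strlen(b) != len)
		return 1;
	if (memcmp(a, b, guid))           /* case-sensitive name part */
		return 1;
	return strncasecmp(a + guid, b + guid, GUID_LEN); /* GUID part */
}

int main(void)
{
	const char *x = "Boot0001-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	const char *y = "Boot0001-8BE4DF61-93CA-11D2-AA0D-00E098032B8C";

	printf("%s\n", efivar_name_cmp(x, y, strlen(x)) ? "differ" : "same");
	return 0;
}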
// SPDX-License-Identifier: GPL-2.0+ /* * Originally from efivars.c * * Copyright (C) 2001,2003,2004 Dell <[email protected]> * Copyright (C) 2004 Intel Corporation <[email protected]> */ #include <linux/capability.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> #include <linux/smp.h> #include <linux/efi.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/ucs2_string.h> #include "internal.h" MODULE_IMPORT_NS(EFIVAR); static bool validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { struct efi_generic_dev_path *node; int offset = 0; node = (struct efi_generic_dev_path *)buffer; if (len < sizeof(*node)) return false; while (offset <= len - sizeof(*node) && node->length >= sizeof(*node) && node->length <= len - offset) { offset += node->length; if ((node->type == EFI_DEV_END_PATH || node->type == EFI_DEV_END_PATH2) && node->sub_type == EFI_DEV_END_ENTIRE) return true; node = (struct efi_generic_dev_path *)(buffer + offset); } /* * If we're here then either node->length pointed past the end * of the buffer or we reached the end of the buffer without * finding a device path end node. */ return false; } static bool validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { /* An array of 16-bit integers */ if ((len % 2) != 0) return false; return true; } static bool validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { u16 filepathlength; int i, desclength = 0, namelen; namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); /* Either "Boot" or "Driver" followed by four digits of hex */ for (i = match; i < match+4; i++) { if (var_name[i] > 127 || hex_to_bin(var_name[i] & 0xff) < 0) return true; } /* Reject it if there's 4 digits of hex and then further content */ if (namelen > match + 4) return false; /* A valid entry must be at least 8 bytes */ if (len < 8) return false; filepathlength = buffer[4] | buffer[5] << 8; /* * There's no stored length for the description, so it has to be * found by hand */ desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; /* Each boot entry must have a descriptor */ if (!desclength) return false; /* * If the sum of the length of the description, the claimed filepath * length and the original header are greater than the length of the * variable, it's malformed */ if ((desclength + filepathlength + 6) > len) return false; /* * And, finally, check the filepath */ return validate_device_path(var_name, match, buffer + desclength + 6, filepathlength); } static bool validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { /* A single 16-bit integer */ if (len != 2) return false; return true; } static bool validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { int i; for (i = 0; i < len; i++) { if (buffer[i] > 127) return false; if (buffer[i] == 0) return true; } return false; } struct variable_validate { efi_guid_t vendor; char *name; bool (*validate)(efi_char16_t *var_name, int match, u8 *data, unsigned long len); }; /* * This is the list of variables we need to validate, as well as the * whitelist for what we think is safe not to default to immutable. * * If it has a validate() method that's not NULL, it'll go into the * validation routine. If not, it is assumed valid, but still used for * whitelisting. 
* * Note that it's sorted by {vendor,name}, but globbed names must come after * any other name with the same prefix. */ static const struct variable_validate variable_validate[] = { { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, { LINUX_EFI_CRASH_GUID, "*", NULL }, { NULL_GUID, "", NULL }, }; /* * Check if @var_name matches the pattern given in @match_name. * * @var_name: an array of @len non-NUL characters. * @match_name: a NUL-terminated pattern string, optionally ending in "*". A * final "*" character matches any trailing characters @var_name, * including the case when there are none left in @var_name. * @match: on output, the number of non-wildcard characters in @match_name * that @var_name matches, regardless of the return value. * @return: whether @var_name fully matches @match_name. */ static bool variable_matches(const char *var_name, size_t len, const char *match_name, int *match) { for (*match = 0; ; (*match)++) { char c = match_name[*match]; switch (c) { case '*': /* Wildcard in @match_name means we've matched. */ return true; case '\0': /* @match_name has ended. Has @var_name too? */ return (*match == len); default: /* * We've reached a non-wildcard char in @match_name. * Continue only if there's an identical character in * @var_name. */ if (*match < len && c == var_name[*match]) continue; return false; } } } bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, unsigned long data_size) { int i; unsigned long utf8_size; u8 *utf8_name; utf8_size = ucs2_utf8size(var_name); utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); if (!utf8_name) return false; ucs2_as_utf8(utf8_name, var_name, utf8_size); utf8_name[utf8_size] = '\0'; for (i = 0; variable_validate[i].name[0] != '\0'; i++) { const char *name = variable_validate[i].name; int match = 0; if (efi_guidcmp(vendor, variable_validate[i].vendor)) continue; if (variable_matches(utf8_name, utf8_size+1, name, &match)) { if (variable_validate[i].validate == NULL) break; kfree(utf8_name); return variable_validate[i].validate(var_name, match, data, data_size); } } kfree(utf8_name); return true; } bool efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, size_t len) { int i; bool found = false; int match = 0; /* * Check if our variable is in the validated variables list */ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { if (efi_guidcmp(variable_validate[i].vendor, vendor)) continue; if (variable_matches(var_name, len, variable_validate[i].name, &match)) { found = true; break; } } /* * If it's in our list, it is removable. 
*/ return found; } static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, struct list_head *head) { struct efivar_entry *entry, *n; unsigned long strsize1, strsize2; bool found = false; strsize1 = ucs2_strsize(variable_name, 1024); list_for_each_entry_safe(entry, n, head, list) { strsize2 = ucs2_strsize(entry->var.VariableName, 1024); if (strsize1 == strsize2 && !memcmp(variable_name, &(entry->var.VariableName), strsize2) && !efi_guidcmp(entry->var.VendorGuid, *vendor)) { found = true; break; } } return found; } /* * Returns the size of variable_name, in bytes, including the * terminating NULL character, or variable_name_size if no NULL * character is found among the first variable_name_size bytes. */ static unsigned long var_name_strnsize(efi_char16_t *variable_name, unsigned long variable_name_size) { unsigned long len; efi_char16_t c; /* * The variable name is, by definition, a NULL-terminated * string, so make absolutely sure that variable_name_size is * the value we expect it to be. If not, return the real size. */ for (len = 2; len <= variable_name_size; len += sizeof(c)) { c = variable_name[(len / sizeof(c)) - 1]; if (!c) break; } return min(len, variable_name_size); } /* * Print a warning when duplicate EFI variables are encountered and * disable the sysfs workqueue since the firmware is buggy. */ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, unsigned long len16) { size_t i, len8 = len16 / sizeof(efi_char16_t); char *str8; str8 = kzalloc(len8, GFP_KERNEL); if (!str8) return; for (i = 0; i < len8; i++) str8[i] = str16[i]; printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", str8, vendor_guid); kfree(str8); } /** * efivar_init - build the initial list of EFI variables * @func: callback function to invoke for every variable * @data: function-specific data to pass to @func * @duplicates: error if we encounter duplicates on @head? * @head: initialised head of variable list * * Get every EFI variable from the firmware and invoke @func. @func * should call efivar_entry_add() to build the list of variables. * * Returns 0 on success, or a kernel error code on failure. */ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head) { unsigned long variable_name_size = 1024; efi_char16_t *variable_name; efi_status_t status; efi_guid_t vendor_guid; int err = 0; variable_name = kzalloc(variable_name_size, GFP_KERNEL); if (!variable_name) { printk(KERN_ERR "efivars: Memory allocation failed.\n"); return -ENOMEM; } err = efivar_lock(); if (err) goto free; /* * Per EFI spec, the maximum storage allocated for both * the variable name and variable data is 1024 bytes. */ do { variable_name_size = 1024; status = efivar_get_next_variable(&variable_name_size, variable_name, &vendor_guid); switch (status) { case EFI_SUCCESS: variable_name_size = var_name_strnsize(variable_name, variable_name_size); /* * Some firmware implementations return the * same variable name on multiple calls to * get_next_variable(). Terminate the loop * immediately as there is no guarantee that * we'll ever see a different variable name, * and may end up looping here forever. 
*/ if (duplicates && variable_is_present(variable_name, &vendor_guid, head)) { dup_variable_bug(variable_name, &vendor_guid, variable_name_size); status = EFI_NOT_FOUND; } else { err = func(variable_name, vendor_guid, variable_name_size, data); if (err) status = EFI_NOT_FOUND; } break; case EFI_UNSUPPORTED: err = -EOPNOTSUPP; status = EFI_NOT_FOUND; break; case EFI_NOT_FOUND: break; default: printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n", status); status = EFI_NOT_FOUND; break; } } while (status != EFI_NOT_FOUND); efivar_unlock(); free: kfree(variable_name); return err; } /** * efivar_entry_add - add entry to variable list * @entry: entry to add to list * @head: list head * * Returns 0 on success, or a kernel error code on failure. */ int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) { int err; err = efivar_lock(); if (err) return err; list_add(&entry->list, head); efivar_unlock(); return 0; } /** * __efivar_entry_add - add entry to variable list * @entry: entry to add to list * @head: list head */ void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head) { list_add(&entry->list, head); } /** * efivar_entry_remove - remove entry from variable list * @entry: entry to remove from list * * Returns 0 on success, or a kernel error code on failure. */ void efivar_entry_remove(struct efivar_entry *entry) { list_del(&entry->list); } /* * efivar_entry_list_del_unlock - remove entry from variable list * @entry: entry to remove * * Remove @entry from the variable list and release the list lock. * * NOTE: slightly weird locking semantics here - we expect to be * called with the efivars lock already held, and we release it before * returning. This is because this function is usually called after * set_variable() while the lock is still held. */ static void efivar_entry_list_del_unlock(struct efivar_entry *entry) { list_del(&entry->list); efivar_unlock(); } /** * efivar_entry_delete - delete variable and remove entry from list * @entry: entry containing variable to delete * * Delete the variable from the firmware and remove @entry from the * variable list. It is the caller's responsibility to free @entry * once we return. * * Returns 0 on success, -EINTR if we can't grab the semaphore, * converted EFI status code if set_variable() fails. */ int efivar_entry_delete(struct efivar_entry *entry) { efi_status_t status; int err; err = efivar_lock(); if (err) return err; status = efivar_set_variable_locked(entry->var.VariableName, &entry->var.VendorGuid, 0, 0, NULL, false); if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) { efivar_unlock(); return efi_status_to_err(status); } efivar_entry_list_del_unlock(entry); return 0; } /** * efivar_entry_size - obtain the size of a variable * @entry: entry for this variable * @size: location to store the variable's size */ int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) { efi_status_t status; int err; *size = 0; err = efivar_lock(); if (err) return err; status = efivar_get_variable(entry->var.VariableName, &entry->var.VendorGuid, NULL, size, NULL); efivar_unlock(); if (status != EFI_BUFFER_TOO_SMALL) return efi_status_to_err(status); return 0; } /** * __efivar_entry_get - call get_variable() * @entry: read data for this variable * @attributes: variable attributes * @size: size of @data buffer * @data: buffer to store variable data * * The caller MUST call efivar_entry_iter_begin() and * efivar_entry_iter_end() before and after the invocation of this * function, respectively. 
*/ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, unsigned long *size, void *data) { efi_status_t status; status = efivar_get_variable(entry->var.VariableName, &entry->var.VendorGuid, attributes, size, data); return efi_status_to_err(status); } /** * efivar_entry_get - call get_variable() * @entry: read data for this variable * @attributes: variable attributes * @size: size of @data buffer * @data: buffer to store variable data */ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, unsigned long *size, void *data) { int err; err = efivar_lock(); if (err) return err; err = __efivar_entry_get(entry, attributes, size, data); efivar_unlock(); return 0; } /** * efivar_entry_set_get_size - call set_variable() and get new size (atomic) * @entry: entry containing variable to set and get * @attributes: attributes of variable to be written * @size: size of data buffer * @data: buffer containing data to write * @set: did the set_variable() call succeed? * * This is a pretty special (complex) function. See efivarfs_file_write(). * * Atomically call set_variable() for @entry and if the call is * successful, return the new size of the variable from get_variable() * in @size. The success of set_variable() is indicated by @set. * * Returns 0 on success, -EINVAL if the variable data is invalid, * -ENOSPC if the firmware does not have enough available space, or a * converted EFI status code if either of set_variable() or * get_variable() fail. * * If the EFI variable does not exist when calling set_variable() * (EFI_NOT_FOUND), @entry is removed from the variable list. */ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, unsigned long *size, void *data, bool *set) { efi_char16_t *name = entry->var.VariableName; efi_guid_t *vendor = &entry->var.VendorGuid; efi_status_t status; int err; *set = false; if (efivar_validate(*vendor, name, data, *size) == false) return -EINVAL; /* * The lock here protects the get_variable call, the conditional * set_variable call, and removal of the variable from the efivars * list (in the case of an authenticated delete). */ err = efivar_lock(); if (err) return err; status = efivar_set_variable_locked(name, vendor, attributes, *size, data, false); if (status != EFI_SUCCESS) { err = efi_status_to_err(status); goto out; } *set = true; /* * Writing to the variable may have caused a change in size (which * could either be an append or an overwrite), or the variable to be * deleted. Perform a GetVariable() so we can tell what actually * happened. */ *size = 0; status = efivar_get_variable(entry->var.VariableName, &entry->var.VendorGuid, NULL, size, NULL); if (status == EFI_NOT_FOUND) efivar_entry_list_del_unlock(entry); else efivar_unlock(); if (status && status != EFI_BUFFER_TOO_SMALL) return efi_status_to_err(status); return 0; out: efivar_unlock(); return err; } /** * efivar_entry_iter - iterate over variable list * @func: callback function * @head: head of variable list * @data: function-specific data to pass to callback * * Iterate over the list of EFI variables and call @func with every * entry on the list. It is safe for @func to remove entries in the * list via efivar_entry_delete() while iterating. 
* * Some notes for the callback function: * - a non-zero return value indicates an error and terminates the loop * - @func is called from atomic context */ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), struct list_head *head, void *data) { struct efivar_entry *entry, *n; int err = 0; err = efivar_lock(); if (err) return err; list_for_each_entry_safe(entry, n, head, list) { err = func(entry, data); if (err) break; } efivar_unlock(); return err; }
linux-master
fs/efivarfs/vars.c
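A userspace restatement of the variable_matches() semantics documented above: a trailing '*' in the pattern matches any remainder of the name (including nothing), and everything before it must match exactly. This is a sketch for clarity, not the kernel helper itself.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool matches(const char *name, size_t len, const char *pattern)
{
	size_t i;

	for (i = 0; ; i++) {
		char c = pattern[i];

		if (c == '*')
			return true;      /* wildcard: rest always matches */
		if (c == '\0')
			return i == len;  /* both strings must end together */
		if (i >= len || c != name[i])
			return false;
	}
}

int main(void)
{
	const char *name = "Boot0001";

	printf("Boot*:     %d\n", matches(name, strlen(name), "Boot*"));
	printf("BootOrder: %d\n", matches(name, strlen(name), "BootOrder"));
	return 0;
}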
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Red Hat, Inc. * Copyright (C) 2012 Jeremy Kerr <[email protected]> */ #include <linux/efi.h> #include <linux/fs.h> #include <linux/ctype.h> #include <linux/kmemleak.h> #include <linux/slab.h> #include <linux/uuid.h> #include <linux/fileattr.h> #include "internal.h" static const struct inode_operations efivarfs_file_inode_operations; struct inode *efivarfs_get_inode(struct super_block *sb, const struct inode *dir, int mode, dev_t dev, bool is_removable) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); inode->i_flags = is_removable ? 0 : S_IMMUTABLE; switch (mode & S_IFMT) { case S_IFREG: inode->i_op = &efivarfs_file_inode_operations; inode->i_fop = &efivarfs_file_operations; break; case S_IFDIR: inode->i_op = &efivarfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; inc_nlink(inode); break; } } return inode; } /* * Return true if 'str' is a valid efivarfs filename of the form, * * VariableName-12345678-1234-1234-1234-1234567891bc */ bool efivarfs_valid_name(const char *str, int len) { const char *s = str + len - EFI_VARIABLE_GUID_LEN; /* * We need a GUID, plus at least one letter for the variable name, * plus the '-' separator */ if (len < EFI_VARIABLE_GUID_LEN + 2) return false; /* GUID must be preceded by a '-' */ if (*(s - 1) != '-') return false; /* * Validate that 's' is of the correct format, e.g. * * 12345678-1234-1234-1234-123456789abc */ return uuid_is_valid(s); } static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode = NULL; struct efivar_entry *var; int namelen, i = 0, err = 0; bool is_removable = false; if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) return -EINVAL; var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); if (!var) return -ENOMEM; /* length of the variable name itself: remove GUID and separator */ namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid); if (err) goto out; if (guid_equal(&var->var.VendorGuid, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) { err = -EPERM; goto out; } if (efivar_variable_is_removable(var->var.VendorGuid, dentry->d_name.name, namelen)) is_removable = true; inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); if (!inode) { err = -ENOMEM; goto out; } for (i = 0; i < namelen; i++) var->var.VariableName[i] = dentry->d_name.name[i]; var->var.VariableName[i] = '\0'; inode->i_private = var; kmemleak_ignore(var); err = efivar_entry_add(var, &efivarfs_list); if (err) goto out; d_instantiate(dentry, inode); dget(dentry); out: if (err) { kfree(var); if (inode) iput(inode); } return err; } static int efivarfs_unlink(struct inode *dir, struct dentry *dentry) { struct efivar_entry *var = d_inode(dentry)->i_private; if (efivar_entry_delete(var)) return -EINVAL; drop_nlink(d_inode(dentry)); dput(dentry); return 0; }; const struct inode_operations efivarfs_dir_inode_operations = { .lookup = simple_lookup, .unlink = efivarfs_unlink, .create = efivarfs_create, }; static int efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { unsigned int i_flags; unsigned int flags = 0; i_flags = d_inode(dentry)->i_flags; if (i_flags & S_IMMUTABLE) flags |= FS_IMMUTABLE_FL; fileattr_fill_flags(fa, flags); return 0; } static int efivarfs_fileattr_set(struct mnt_idmap *idmap, 
struct dentry *dentry, struct fileattr *fa) { unsigned int i_flags = 0; if (fileattr_has_fsx(fa)) return -EOPNOTSUPP; if (fa->flags & ~FS_IMMUTABLE_FL) return -EOPNOTSUPP; if (fa->flags & FS_IMMUTABLE_FL) i_flags |= S_IMMUTABLE; inode_set_flags(d_inode(dentry), i_flags, S_IMMUTABLE); return 0; } static const struct inode_operations efivarfs_file_inode_operations = { .fileattr_get = efivarfs_fileattr_get, .fileattr_set = efivarfs_fileattr_set, };
linux-master
fs/efivarfs/inode.c
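The fileattr hooks above back the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls, i.e. what chattr(1) uses. A sketch of clearing the immutable bit on an efivarfs file before deleting it; the variable path is an assumed example.

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/firmware/efi/efivars/Example-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	int flags, fd = open(path, O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	flags &= ~FS_IMMUTABLE_FL;               /* equivalent of chattr -i */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("FS_IOC_SETFLAGS");
	close(fd);
	return 0;
}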
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Red Hat, Inc. * Copyright (C) 2012 Jeremy Kerr <[email protected]> */ #include <linux/efi.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/mount.h> #include "internal.h" static ssize_t efivarfs_file_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct efivar_entry *var = file->private_data; void *data; u32 attributes; struct inode *inode = file->f_mapping->host; unsigned long datasize = count - sizeof(attributes); ssize_t bytes; bool set = false; if (count < sizeof(attributes)) return -EINVAL; if (copy_from_user(&attributes, userbuf, sizeof(attributes))) return -EFAULT; if (attributes & ~(EFI_VARIABLE_MASK)) return -EINVAL; data = memdup_user(userbuf + sizeof(attributes), datasize); if (IS_ERR(data)) return PTR_ERR(data); bytes = efivar_entry_set_get_size(var, attributes, &datasize, data, &set); if (!set && bytes) { if (bytes == -ENOENT) bytes = -EIO; goto out; } if (bytes == -ENOENT) { drop_nlink(inode); d_delete(file->f_path.dentry); dput(file->f_path.dentry); } else { inode_lock(inode); i_size_write(inode, datasize + sizeof(attributes)); inode->i_mtime = inode_set_ctime_current(inode); inode_unlock(inode); } bytes = count; out: kfree(data); return bytes; } static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct efivar_entry *var = file->private_data; unsigned long datasize = 0; u32 attributes; void *data; ssize_t size = 0; int err; while (!__ratelimit(&file->f_cred->user->ratelimit)) msleep(50); err = efivar_entry_size(var, &datasize); /* * efivarfs represents uncommitted variables with * zero-length files. Reading them should return EOF. */ if (err == -ENOENT) return 0; else if (err) return err; data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL); if (!data) return -ENOMEM; size = efivar_entry_get(var, &attributes, &datasize, data + sizeof(attributes)); if (size) goto out_free; memcpy(data, &attributes, sizeof(attributes)); size = simple_read_from_buffer(userbuf, count, ppos, data, datasize + sizeof(attributes)); out_free: kfree(data); return size; } const struct file_operations efivarfs_file_operations = { .open = simple_open, .read = efivarfs_file_read, .write = efivarfs_file_write, .llseek = no_llseek, };
linux-master
fs/efivarfs/file.c
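A sketch of the on-read layout efivarfs_file_read() produces: a 4-byte attribute word followed by the raw variable payload. BootCurrent under the EFI global-variable GUID is used as the example variable.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint32_t attributes;
	unsigned char buf[4096];
	ssize_t n;
	int fd = open("/sys/firmware/efi/efivars/"
		      "BootCurrent-8be4df61-93ca-11d2-aa0d-00e098032b8c",
		      O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n >= (ssize_t)sizeof(attributes)) {
		memcpy(&attributes, buf, sizeof(attributes));
		printf("attributes=0x%x, %zd payload bytes\n",
		       attributes, n - (ssize_t)sizeof(attributes));
	}
	close(fd);
	return 0;
}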
// SPDX-License-Identifier: GPL-2.0 /* * QNX4 file system, Linux implementation. * * Version : 0.2.1 * * Using parts of the xiafs filesystem. * * History : * * 28-05-1998 by Richard Frowijn : first release. * 20-06-1998 by Frank Denis : basic optimisations. * 25-06-1998 by Frank Denis : qnx4_is_free, qnx4_set_bitmap, qnx4_bmap . * 28-06-1998 by Frank Denis : qnx4_free_inode (to be fixed) . */ #include <linux/buffer_head.h> #include <linux/bitops.h> #include "qnx4.h" unsigned long qnx4_count_free_blocks(struct super_block *sb) { int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1; int total = 0; int total_free = 0; int offset = 0; int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size); struct buffer_head *bh; while (total < size) { int bytes = min(size - total, QNX4_BLOCK_SIZE); if ((bh = sb_bread(sb, start + offset)) == NULL) { printk(KERN_ERR "qnx4: I/O error in counting free blocks\n"); break; } total_free += bytes * BITS_PER_BYTE - memweight(bh->b_data, bytes); brelse(bh); total += bytes; offset++; } return total_free; }
linux-master
fs/qnx4/bitmap.c
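The free-block count above is "bits in the bitmap minus bits set"; memweight() is the kernel's population count over a buffer. A userspace equivalent, assuming a GCC/Clang builtin for the per-byte popcount:

#include <stddef.h>
#include <stdio.h>

static size_t memweight(const unsigned char *buf, size_t bytes)
{
	size_t i, w = 0;

	for (i = 0; i < bytes; i++)
		w += __builtin_popcount(buf[i]); /* set bits in this byte */
	return w;
}

int main(void)
{
	unsigned char bitmap[] = { 0xff, 0x0f, 0x00 }; /* 12 of 24 bits set */
	size_t bits = sizeof(bitmap) * 8;

	printf("free blocks: %zu\n", bits - memweight(bitmap, sizeof(bitmap)));
	return 0;
}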
// SPDX-License-Identifier: GPL-2.0 /* * QNX4 file system, Linux implementation. * * Version : 0.2.1 * * Using parts of the xiafs filesystem. * * History : * * 28-05-1998 by Richard Frowijn : first release. * 20-06-1998 by Frank Denis : Linux 2.1.99+ & dcache support. */ #include <linux/buffer_head.h> #include "qnx4.h" /* * A qnx4 directory entry is an inode entry or link info * depending on the status field in the last byte. The * first byte is where the name start either way, and a * zero means it's empty. * * Also, due to a bug in gcc, we don't want to use the * real (differently sized) name arrays in the inode and * link entries, but always the 'de_name[]' one in the * fake struct entry. * * See * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6 * * for details, but basically gcc will take the size of the * 'name' array from one of the used union entries randomly. * * This use of 'de_name[]' (48 bytes) avoids the false positive * warnings that would happen if gcc decides to use 'inode.di_name' * (16 bytes) even when the pointer and size were to come from * 'link.dl_name' (48 bytes). * * In all cases the actual name pointer itself is the same, it's * only the gcc internal 'what is the size of this field' logic * that can get confused. */ union qnx4_directory_entry { struct { const char de_name[48]; u8 de_pad[15]; u8 de_status; }; struct qnx4_inode_entry inode; struct qnx4_link_info link; }; static int qnx4_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); unsigned int offset; struct buffer_head *bh; unsigned long blknum; int ix, ino; int size; QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size)); QNX4DEBUG((KERN_INFO "pos = %ld\n", (long) ctx->pos)); while (ctx->pos < inode->i_size) { blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS); bh = sb_bread(inode->i_sb, blknum); if (bh == NULL) { printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum); return 0; } ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { union qnx4_directory_entry *de; offset = ix * QNX4_DIR_ENTRY_SIZE; de = (union qnx4_directory_entry *) (bh->b_data + offset); if (!de->de_name[0]) continue; if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) continue; if (!(de->de_status & QNX4_FILE_LINK)) { size = sizeof(de->inode.di_fname); ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; } else { size = sizeof(de->link.dl_fname); ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) * QNX4_INODES_PER_BLOCK + de->link.dl_inode_ndx; } size = strnlen(de->de_name, size); QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name)); if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) { brelse(bh); return 0; } } brelse(bh); } return 0; } const struct file_operations qnx4_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = qnx4_readdir, .fsync = generic_file_fsync, }; const struct inode_operations qnx4_dir_inode_operations = { .lookup = qnx4_lookup, };
linux-master
fs/qnx4/dir.c
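A sketch of the inode-number arithmetic qnx4_readdir() uses: a regular entry's number is derived from its own block and slot, while a link entry carries the block and index of the inode it points at. The constant below mirrors the qnx4 value (512-byte blocks / 64-byte entries); treat it as an assumption of this example.

#include <stdint.h>
#include <stdio.h>

#define QNX4_INODES_PER_BLOCK 8

static long ino_of_entry(unsigned long blknum, int ix)
{
	return blknum * QNX4_INODES_PER_BLOCK + ix - 1;
}

static long ino_of_link(uint32_t dl_inode_blk, uint8_t dl_inode_ndx)
{
	return (long)(dl_inode_blk - 1) * QNX4_INODES_PER_BLOCK + dl_inode_ndx;
}

int main(void)
{
	printf("entry: %ld\n", ino_of_entry(10, 3)); /* 10*8 + 3 - 1 = 82 */
	printf("link:  %ld\n", ino_of_link(10, 3));  /* (10-1)*8 + 3 = 75 */
	return 0;
}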
// SPDX-License-Identifier: GPL-2.0-only /* * QNX4 file system, Linux implementation. * * Version : 0.2.1 * * Using parts of the xiafs filesystem. * * History : * * 01-06-1998 by Richard Frowijn : first release. * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc. * 30-06-1998 by Frank Denis : first step to write inodes. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/statfs.h> #include "qnx4.h" #define QNX4_VERSION 4 #define QNX4_BMNAME ".bitmap" static const struct super_operations qnx4_sops; static struct inode *qnx4_alloc_inode(struct super_block *sb); static void qnx4_free_inode(struct inode *inode); static int qnx4_remount(struct super_block *sb, int *flags, char *data); static int qnx4_statfs(struct dentry *, struct kstatfs *); static const struct super_operations qnx4_sops = { .alloc_inode = qnx4_alloc_inode, .free_inode = qnx4_free_inode, .statfs = qnx4_statfs, .remount_fs = qnx4_remount, }; static int qnx4_remount(struct super_block *sb, int *flags, char *data) { struct qnx4_sb_info *qs; sync_filesystem(sb); qs = qnx4_sb(sb); qs->Version = QNX4_VERSION; *flags |= SB_RDONLY; return 0; } static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create ) { unsigned long phys; QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock)); phys = qnx4_block_map( inode, iblock ); if ( phys ) { // logical block is before EOF map_bh(bh, inode->i_sb, phys); } return 0; } static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset) { u32 size = le32_to_cpu(extent->xtnt_size); if (*offset < size) return le32_to_cpu(extent->xtnt_blk) + *offset - 1; *offset -= size; return 0; } unsigned long qnx4_block_map( struct inode *inode, long iblock ) { int ix; long i_xblk; struct buffer_head *bh = NULL; struct qnx4_xblk *xblk = NULL; struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts); u32 offset = iblock; u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset); if (block) { // iblock is in the first extent. This is easy. } else { // iblock is beyond first extent. We have to follow the extent chain. i_xblk = le32_to_cpu(qnx4_inode->di_xblk); ix = 0; while ( --nxtnt > 0 ) { if ( ix == 0 ) { // read next xtnt block. bh = sb_bread(inode->i_sb, i_xblk - 1); if ( !bh ) { QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1)); return -EIO; } xblk = (struct qnx4_xblk*)bh->b_data; if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) { QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk)); return -EIO; } } block = try_extent(&xblk->xblk_xtnts[ix], &offset); if (block) { // got it! 
break; } if ( ++ix >= xblk->xblk_num_xtnts ) { i_xblk = le32_to_cpu(xblk->xblk_next_xblk); ix = 0; brelse( bh ); bh = NULL; } } if ( bh ) brelse( bh ); } QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block)); return block; } static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8; buf->f_bfree = qnx4_count_free_blocks(sb); buf->f_bavail = buf->f_bfree; buf->f_namelen = QNX4_NAME_MAX; buf->f_fsid = u64_to_fsid(id); return 0; } /* * Check the root directory of the filesystem to make sure * it really _is_ a qnx4 filesystem, and to check the size * of the directory entry. */ static const char *qnx4_checkroot(struct super_block *sb, struct qnx4_super_block *s) { struct buffer_head *bh; struct qnx4_inode_entry *rootdir; int rd, rl; int i, j; if (s->RootDir.di_fname[0] != '/' || s->RootDir.di_fname[1] != '\0') return "no qnx4 filesystem (no root dir)."; QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id)); rd = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_blk) - 1; rl = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_size); for (j = 0; j < rl; j++) { bh = sb_bread(sb, rd + j); /* root dir, first block */ if (bh == NULL) return "unable to read root entry."; rootdir = (struct qnx4_inode_entry *) bh->b_data; for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) { QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname)); if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0) continue; qnx4_sb(sb)->BitMap = kmemdup(rootdir, sizeof(struct qnx4_inode_entry), GFP_KERNEL); brelse(bh); if (!qnx4_sb(sb)->BitMap) return "not enough memory for bitmap inode"; /* keep bitmap inode known */ return NULL; } brelse(bh); } return "bitmap file not found."; } static int qnx4_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; struct inode *root; const char *errmsg; struct qnx4_sb_info *qs; qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); if (!qs) return -ENOMEM; s->s_fs_info = qs; sb_set_blocksize(s, QNX4_BLOCK_SIZE); s->s_op = &qnx4_sops; s->s_magic = QNX4_SUPER_MAGIC; s->s_flags |= SB_RDONLY; /* Yup, read-only yet */ s->s_time_min = 0; s->s_time_max = U32_MAX; /* Check the superblock signature. Since the qnx4 code is dangerous, we should leave as quickly as possible if we don't belong here... */ bh = sb_bread(s, 1); if (!bh) { printk(KERN_ERR "qnx4: unable to read the superblock\n"); return -EINVAL; } /* check before allocating dentries, inodes, .. */ errmsg = qnx4_checkroot(s, (struct qnx4_super_block *) bh->b_data); brelse(bh); if (errmsg != NULL) { if (!silent) printk(KERN_ERR "qnx4: %s\n", errmsg); return -EINVAL; } /* does root not have inode number QNX4_ROOT_INO ?? 
*/ root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK); if (IS_ERR(root)) { printk(KERN_ERR "qnx4: get inode failed\n"); return PTR_ERR(root); } s->s_root = d_make_root(root); if (s->s_root == NULL) return -ENOMEM; return 0; } static void qnx4_kill_sb(struct super_block *sb) { struct qnx4_sb_info *qs = qnx4_sb(sb); kill_block_super(sb); if (qs) { kfree(qs->BitMap); kfree(qs); } } static int qnx4_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, qnx4_get_block); } static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,qnx4_get_block); } static const struct address_space_operations qnx4_aops = { .read_folio = qnx4_read_folio, .bmap = qnx4_bmap }; struct inode *qnx4_iget(struct super_block *sb, unsigned long ino) { struct buffer_head *bh; struct qnx4_inode_entry *raw_inode; int block; struct qnx4_inode_entry *qnx4_inode; struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; qnx4_inode = qnx4_raw_inode(inode); inode->i_mode = 0; QNX4DEBUG((KERN_INFO "reading inode : [%d]\n", ino)); if (!ino) { printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is " "out of range\n", sb->s_id, ino); iget_failed(inode); return ERR_PTR(-EIO); } block = ino / QNX4_INODES_PER_BLOCK; if (!(bh = sb_bread(sb, block))) { printk(KERN_ERR "qnx4: major problem: unable to read inode from dev " "%s\n", sb->s_id); iget_failed(inode); return ERR_PTR(-EIO); } raw_inode = ((struct qnx4_inode_entry *) bh->b_data) + (ino % QNX4_INODES_PER_BLOCK); inode->i_mode = le16_to_cpu(raw_inode->di_mode); i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid)); i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid)); set_nlink(inode, le16_to_cpu(raw_inode->di_nlink)); inode->i_size = le32_to_cpu(raw_inode->di_size); inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime); inode->i_atime.tv_nsec = 0; inode_set_ctime(inode, le32_to_cpu(raw_inode->di_ctime), 0); inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size); memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE); if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; inode->i_mapping->a_ops = &qnx4_aops; qnx4_i(inode)->mmu_private = inode->i_size; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &qnx4_dir_inode_operations; inode->i_fop = &qnx4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &qnx4_aops; qnx4_i(inode)->mmu_private = inode->i_size; } else { printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n", ino, sb->s_id); iget_failed(inode); brelse(bh); return ERR_PTR(-EIO); } brelse(bh); unlock_new_inode(inode); return inode; } static struct kmem_cache *qnx4_inode_cachep; static struct inode *qnx4_alloc_inode(struct super_block *sb) { struct qnx4_inode_info *ei; ei = alloc_inode_sb(sb, qnx4_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void qnx4_free_inode(struct inode *inode) { kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); } static void init_once(void *foo) { struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache", sizeof(struct qnx4_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if 
(qnx4_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(qnx4_inode_cachep); } static struct dentry *qnx4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, qnx4_fill_super); } static struct file_system_type qnx4_fs_type = { .owner = THIS_MODULE, .name = "qnx4", .mount = qnx4_mount, .kill_sb = qnx4_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("qnx4"); static int __init init_qnx4_fs(void) { int err; err = init_inodecache(); if (err) return err; err = register_filesystem(&qnx4_fs_type); if (err) { destroy_inodecache(); return err; } printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n"); return 0; } static void __exit exit_qnx4_fs(void) { unregister_filesystem(&qnx4_fs_type); destroy_inodecache(); } module_init(init_qnx4_fs) module_exit(exit_qnx4_fs) MODULE_LICENSE("GPL");
linux-master
fs/qnx4/inode.c
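The extent walk in fs/qnx4/inode.c above is the heart of qnx4_block_map(): an extent is a run of contiguous blocks described by a 1-based start block and a length, and try_extent() either resolves the remaining offset inside the current extent or consumes that extent's size and signals a miss with 0, so the caller chains from the inline first extent through the xtnt blocks. A minimal user-space sketch of just that resolution step (the struct is a simplified stand-in, not the on-disk layout):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for qnx4_xtnt_t: a run of contiguous blocks
 * starting at 1-based disk block `blk` and spanning `size` blocks. */
struct xtnt { uint32_t blk; uint32_t size; };

/* Mirror of try_extent(): if *offset falls inside this extent, return
 * the 0-based physical block (hence the -1 on the 1-based start);
 * otherwise subtract the extent's size and return 0 so the caller
 * moves on to the next extent in the chain. */
static uint32_t try_extent(const struct xtnt *x, uint32_t *offset)
{
	if (*offset < x->size)
		return x->blk + *offset - 1;
	*offset -= x->size;
	return 0;
}

int main(void)
{
	/* A file made of two extents: disk blocks 10..13, then 40..41. */
	const struct xtnt chain[] = { { 10, 4 }, { 40, 2 } };
	for (uint32_t iblock = 0; iblock < 6; iblock++) {
		uint32_t off = iblock, phys = 0;
		for (size_t i = 0; i < 2 && !phys; i++)
			phys = try_extent(&chain[i], &off);
		printf("logical %u -> physical %u\n",
		       (unsigned)iblock, (unsigned)phys);
	}
	return 0;
}

As in the driver, a result of 0 doubles as "not mapped", which is why qnx4_get_block() only calls map_bh() for a non-zero result.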
// SPDX-License-Identifier: GPL-2.0 /* * QNX4 file system, Linux implementation. * * Version : 0.2.1 * * Using parts of the xiafs filesystem. * * History : * * 01-06-1998 by Richard Frowijn : first release. * 21-06-1998 by Frank Denis : dcache support, fixed error codes. * 04-07-1998 by Frank Denis : first step for rmdir/unlink. */ #include <linux/buffer_head.h> #include "qnx4.h" /* * Check if the filename is correct. For some obscure reason, qnx writes a * new file twice to the directory entry: first with all possible options at 0, * and a second time the way it really is; it seems they want us not to access * the qnx filesystem while we are using linux. */ static int qnx4_match(int len, const char *name, struct buffer_head *bh, unsigned long *offset) { struct qnx4_inode_entry *de; int namelen, thislen; if (bh == NULL) { printk(KERN_WARNING "qnx4: matching unassigned buffer !\n"); return 0; } de = (struct qnx4_inode_entry *) (bh->b_data + *offset); *offset += QNX4_DIR_ENTRY_SIZE; if ((de->di_status & QNX4_FILE_LINK) != 0) { namelen = QNX4_NAME_MAX; } else { namelen = QNX4_SHORT_NAME_MAX; } thislen = strlen( de->di_fname ); if ( thislen > namelen ) thislen = namelen; if (len != thislen) { return 0; } if (strncmp(name, de->di_fname, len) == 0) { if ((de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)) != 0) { return 1; } } return 0; } static struct buffer_head *qnx4_find_entry(int len, struct inode *dir, const char *name, struct qnx4_inode_entry **res_dir, int *ino) { unsigned long block, offset, blkofs; struct buffer_head *bh; *res_dir = NULL; bh = NULL; block = offset = blkofs = 0; while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) { if (!bh) { block = qnx4_block_map(dir, blkofs); if (block) bh = sb_bread(dir->i_sb, block); if (!bh) { blkofs++; continue; } } *res_dir = (struct qnx4_inode_entry *) (bh->b_data + offset); if (qnx4_match(len, name, bh, &offset)) { *ino = block * QNX4_INODES_PER_BLOCK + (offset / QNX4_DIR_ENTRY_SIZE) - 1; return bh; } if (offset < bh->b_size) { continue; } brelse(bh); bh = NULL; offset = 0; blkofs++; } brelse(bh); *res_dir = NULL; return NULL; } struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { int ino; struct qnx4_inode_entry *de; struct qnx4_link_info *lnk; struct buffer_head *bh; const char *name = dentry->d_name.name; int len = dentry->d_name.len; struct inode *foundinode = NULL; if (!(bh = qnx4_find_entry(len, dir, name, &de, &ino))) goto out; /* The entry is linked, let's get the real info */ if ((de->di_status & QNX4_FILE_LINK) == QNX4_FILE_LINK) { lnk = (struct qnx4_link_info *) de; ino = (le32_to_cpu(lnk->dl_inode_blk) - 1) * QNX4_INODES_PER_BLOCK + lnk->dl_inode_ndx; } brelse(bh); foundinode = qnx4_iget(dir->i_sb, ino); if (IS_ERR(foundinode)) QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n", PTR_ERR(foundinode))); out: return d_splice_alias(foundinode, dentry); }
linux-master
fs/qnx4/namei.c
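The directory scan in fs/qnx4/namei.c above recovers the inode number from where the match happened: each block holds QNX4_INODES_PER_BLOCK directory entries, and because qnx4_match() advances the offset past the entry before reporting success, the matching entry's index is offset / QNX4_DIR_ENTRY_SIZE - 1. A small sketch of that arithmetic (the constant values assume the driver's 512-byte blocks and 64-byte entries):

#include <stdio.h>

#define BLOCK_SIZE        512	/* QNX4_BLOCK_SIZE */
#define DIR_ENTRY_SIZE     64	/* QNX4_DIR_ENTRY_SIZE */
#define INODES_PER_BLOCK  (BLOCK_SIZE / DIR_ENTRY_SIZE)	/* 8 */

/* Reconstruct an inode number the way qnx4_find_entry() does: the
 * match routine has already stepped `offset` past the entry, so the
 * matching entry's index within the block is offset/DIR_ENTRY_SIZE - 1. */
static unsigned long entry_ino(unsigned long block, unsigned long offset_after)
{
	return block * INODES_PER_BLOCK + offset_after / DIR_ENTRY_SIZE - 1;
}

int main(void)
{
	/* Entry #3 (0-based) in directory block 25: offset sits one
	 * entry past it after the match. */
	printf("ino = %lu\n", entry_ino(25, 4 * DIR_ENTRY_SIZE));	/* 25*8 + 3 = 203 */
	return 0;
}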
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2021 Samsung Electronics Co., Ltd. * Author(s): Namjae Jeon <[email protected]> */ #include <linux/fs.h> #include "glob.h" #include "ndr.h" static inline char *ndr_get_field(struct ndr *n) { return n->data + n->offset; } static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz) { char *data; data = krealloc(n->data, n->offset + sz + 1024, GFP_KERNEL); if (!data) return -ENOMEM; n->data = data; n->length += 1024; memset(n->data + n->offset, 0, 1024); return 0; } static int ndr_write_int16(struct ndr *n, __u16 value) { if (n->length <= n->offset + sizeof(value)) { int ret; ret = try_to_realloc_ndr_blob(n, sizeof(value)); if (ret) return ret; } *(__le16 *)ndr_get_field(n) = cpu_to_le16(value); n->offset += sizeof(value); return 0; } static int ndr_write_int32(struct ndr *n, __u32 value) { if (n->length <= n->offset + sizeof(value)) { int ret; ret = try_to_realloc_ndr_blob(n, sizeof(value)); if (ret) return ret; } *(__le32 *)ndr_get_field(n) = cpu_to_le32(value); n->offset += sizeof(value); return 0; } static int ndr_write_int64(struct ndr *n, __u64 value) { if (n->length <= n->offset + sizeof(value)) { int ret; ret = try_to_realloc_ndr_blob(n, sizeof(value)); if (ret) return ret; } *(__le64 *)ndr_get_field(n) = cpu_to_le64(value); n->offset += sizeof(value); return 0; } static int ndr_write_bytes(struct ndr *n, void *value, size_t sz) { if (n->length <= n->offset + sz) { int ret; ret = try_to_realloc_ndr_blob(n, sz); if (ret) return ret; } memcpy(ndr_get_field(n), value, sz); n->offset += sz; return 0; } static int ndr_write_string(struct ndr *n, char *value) { size_t sz; sz = strlen(value) + 1; if (n->length <= n->offset + sz) { int ret; ret = try_to_realloc_ndr_blob(n, sz); if (ret) return ret; } memcpy(ndr_get_field(n), value, sz); n->offset += sz; n->offset = ALIGN(n->offset, 2); return 0; } static int ndr_read_string(struct ndr *n, void *value, size_t sz) { int len; if (n->offset + sz > n->length) return -EINVAL; len = strnlen(ndr_get_field(n), sz); if (value) memcpy(value, ndr_get_field(n), len); len++; n->offset += len; n->offset = ALIGN(n->offset, 2); return 0; } static int ndr_read_bytes(struct ndr *n, void *value, size_t sz) { if (n->offset + sz > n->length) return -EINVAL; if (value) memcpy(value, ndr_get_field(n), sz); n->offset += sz; return 0; } static int ndr_read_int16(struct ndr *n, __u16 *value) { if (n->offset + sizeof(__u16) > n->length) return -EINVAL; if (value) *value = le16_to_cpu(*(__le16 *)ndr_get_field(n)); n->offset += sizeof(__u16); return 0; } static int ndr_read_int32(struct ndr *n, __u32 *value) { if (n->offset + sizeof(__u32) > n->length) return -EINVAL; if (value) *value = le32_to_cpu(*(__le32 *)ndr_get_field(n)); n->offset += sizeof(__u32); return 0; } static int ndr_read_int64(struct ndr *n, __u64 *value) { if (n->offset + sizeof(__u64) > n->length) return -EINVAL; if (value) *value = le64_to_cpu(*(__le64 *)ndr_get_field(n)); n->offset += sizeof(__u64); return 0; } int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da) { char hex_attr[12] = {0}; int ret; n->offset = 0; n->length = 1024; n->data = kzalloc(n->length, GFP_KERNEL); if (!n->data) return -ENOMEM; if (da->version == 3) { snprintf(hex_attr, 10, "0x%x", da->attr); ret = ndr_write_string(n, hex_attr); } else { ret = ndr_write_string(n, ""); } if (ret) return ret; ret = ndr_write_int16(n, da->version); if (ret) return ret; ret = ndr_write_int32(n, da->version); if (ret) return ret; ret = ndr_write_int32(n, da->flags); if 
(ret) return ret; ret = ndr_write_int32(n, da->attr); if (ret) return ret; if (da->version == 3) { ret = ndr_write_int32(n, da->ea_size); if (ret) return ret; ret = ndr_write_int64(n, da->size); if (ret) return ret; ret = ndr_write_int64(n, da->alloc_size); } else { ret = ndr_write_int64(n, da->itime); } if (ret) return ret; ret = ndr_write_int64(n, da->create_time); if (ret) return ret; if (da->version == 3) ret = ndr_write_int64(n, da->change_time); return ret; } int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da) { char hex_attr[12]; unsigned int version2; int ret; n->offset = 0; ret = ndr_read_string(n, hex_attr, sizeof(hex_attr)); if (ret) return ret; ret = ndr_read_int16(n, &da->version); if (ret) return ret; if (da->version != 3 && da->version != 4) { ksmbd_debug(VFS, "v%d version is not supported\n", da->version); return -EINVAL; } ret = ndr_read_int32(n, &version2); if (ret) return ret; if (da->version != version2) { ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n", da->version, version2); return -EINVAL; } ret = ndr_read_int32(n, NULL); if (ret) return ret; ret = ndr_read_int32(n, &da->attr); if (ret) return ret; if (da->version == 4) { ret = ndr_read_int64(n, &da->itime); if (ret) return ret; ret = ndr_read_int64(n, &da->create_time); } else { ret = ndr_read_int32(n, NULL); if (ret) return ret; ret = ndr_read_int64(n, NULL); if (ret) return ret; ret = ndr_read_int64(n, NULL); if (ret) return ret; ret = ndr_read_int64(n, &da->create_time); if (ret) return ret; ret = ndr_read_int64(n, NULL); } return ret; } static int ndr_encode_posix_acl_entry(struct ndr *n, struct xattr_smb_acl *acl) { int i, ret; ret = ndr_write_int32(n, acl->count); if (ret) return ret; n->offset = ALIGN(n->offset, 8); ret = ndr_write_int32(n, acl->count); if (ret) return ret; ret = ndr_write_int32(n, 0); if (ret) return ret; for (i = 0; i < acl->count; i++) { n->offset = ALIGN(n->offset, 8); ret = ndr_write_int16(n, acl->entries[i].type); if (ret) return ret; ret = ndr_write_int16(n, acl->entries[i].type); if (ret) return ret; if (acl->entries[i].type == SMB_ACL_USER) { n->offset = ALIGN(n->offset, 8); ret = ndr_write_int64(n, acl->entries[i].uid); } else if (acl->entries[i].type == SMB_ACL_GROUP) { n->offset = ALIGN(n->offset, 8); ret = ndr_write_int64(n, acl->entries[i].gid); } if (ret) return ret; /* push permission */ ret = ndr_write_int32(n, acl->entries[i].perm); } return ret; } int ndr_encode_posix_acl(struct ndr *n, struct mnt_idmap *idmap, struct inode *inode, struct xattr_smb_acl *acl, struct xattr_smb_acl *def_acl) { unsigned int ref_id = 0x00020000; int ret; vfsuid_t vfsuid; vfsgid_t vfsgid; n->offset = 0; n->length = 1024; n->data = kzalloc(n->length, GFP_KERNEL); if (!n->data) return -ENOMEM; if (acl) { /* ACL ACCESS */ ret = ndr_write_int32(n, ref_id); ref_id += 4; } else { ret = ndr_write_int32(n, 0); } if (ret) return ret; if (def_acl) { /* DEFAULT ACL ACCESS */ ret = ndr_write_int32(n, ref_id); ref_id += 4; } else { ret = ndr_write_int32(n, 0); } if (ret) return ret; vfsuid = i_uid_into_vfsuid(idmap, inode); ret = ndr_write_int64(n, from_kuid(&init_user_ns, vfsuid_into_kuid(vfsuid))); if (ret) return ret; vfsgid = i_gid_into_vfsgid(idmap, inode); ret = ndr_write_int64(n, from_kgid(&init_user_ns, vfsgid_into_kgid(vfsgid))); if (ret) return ret; ret = ndr_write_int32(n, inode->i_mode); if (ret) return ret; if (acl) { ret = ndr_encode_posix_acl_entry(n, acl); if (def_acl && !ret) ret = ndr_encode_posix_acl_entry(n, def_acl); } return ret; } int 
ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl) { unsigned int ref_id = 0x00020004; int ret; n->offset = 0; n->length = 2048; n->data = kzalloc(n->length, GFP_KERNEL); if (!n->data) return -ENOMEM; ret = ndr_write_int16(n, acl->version); if (ret) return ret; ret = ndr_write_int32(n, acl->version); if (ret) return ret; ret = ndr_write_int16(n, 2); if (ret) return ret; ret = ndr_write_int32(n, ref_id); if (ret) return ret; /* push hash type and hash 64bytes */ ret = ndr_write_int16(n, acl->hash_type); if (ret) return ret; ret = ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE); if (ret) return ret; ret = ndr_write_bytes(n, acl->desc, acl->desc_len); if (ret) return ret; ret = ndr_write_int64(n, acl->current_time); if (ret) return ret; ret = ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE); if (ret) return ret; /* push ndr for security descriptor */ ret = ndr_write_bytes(n, acl->sd_buf, acl->sd_size); return ret; } int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl) { unsigned int version2; int ret; n->offset = 0; ret = ndr_read_int16(n, &acl->version); if (ret) return ret; if (acl->version != 4) { ksmbd_debug(VFS, "v%d version is not supported\n", acl->version); return -EINVAL; } ret = ndr_read_int32(n, &version2); if (ret) return ret; if (acl->version != version2) { ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n", acl->version, version2); return -EINVAL; } /* Read Level */ ret = ndr_read_int16(n, NULL); if (ret) return ret; /* Read Ref Id */ ret = ndr_read_int32(n, NULL); if (ret) return ret; ret = ndr_read_int16(n, &acl->hash_type); if (ret) return ret; ret = ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE); if (ret) return ret; ndr_read_bytes(n, acl->desc, 10); if (strncmp(acl->desc, "posix_acl", 9)) { pr_err("Invalid acl description : %s\n", acl->desc); return -EINVAL; } /* Read Time */ ret = ndr_read_int64(n, NULL); if (ret) return ret; /* Read Posix ACL hash */ ret = ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE); if (ret) return ret; acl->sd_size = n->length - n->offset; acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL); if (!acl->sd_buf) return -ENOMEM; ret = ndr_read_bytes(n, acl->sd_buf, acl->sd_size); return ret; }
linux-master
fs/smb/server/ndr.c
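The helpers in fs/smb/server/ndr.c above form a little-endian push encoder over a heap blob: every write checks the remaining room, grows the buffer with 1 KiB of headroom when needed, and strings go out NUL-terminated with the cursor then rounded up to 2-byte alignment. A user-space sketch of the same pattern (error handling condensed; the kernel helper grows `length` by a fixed 1 KiB per step, while this sketch tracks the capacity exactly):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct ndr { uint8_t *data; size_t offset, length; };

/* Grow the blob when a write of sz bytes would not fit, in the spirit
 * of try_to_realloc_ndr_blob() (1 KiB of headroom per growth step). */
static int ndr_reserve(struct ndr *n, size_t sz)
{
	uint8_t *p;

	if (n->length > n->offset + sz)
		return 0;
	p = realloc(n->data, n->offset + sz + 1024);
	if (!p)
		return -1;
	memset(p + n->offset, 0, sz + 1024);	/* zero the new tail */
	n->data = p;
	n->length = n->offset + sz + 1024;
	return 0;
}

static int ndr_write_int32(struct ndr *n, uint32_t v)
{
	if (ndr_reserve(n, sizeof(v)))
		return -1;
	for (int i = 0; i < 4; i++)	/* explicit little-endian byte order */
		n->data[n->offset + i] = (uint8_t)(v >> (8 * i));
	n->offset += sizeof(v);
	return 0;
}

/* Strings are stored NUL-terminated and the cursor is then 2-byte
 * aligned, exactly as ndr_write_string() does. */
static int ndr_write_string(struct ndr *n, const char *s)
{
	size_t sz = strlen(s) + 1;

	if (ndr_reserve(n, sz))
		return -1;
	memcpy(n->data + n->offset, s, sz);
	n->offset = (n->offset + sz + 1) & ~(size_t)1;	/* ALIGN(offset, 2) */
	return 0;
}

int main(void)
{
	struct ndr n = { calloc(1, 64), 0, 64 };

	if (!n.data)
		return 1;
	ndr_write_string(&n, "0x20");	/* e.g. the hex attr string of a v3 blob */
	ndr_write_int32(&n, 3);		/* version field */
	printf("encoded %zu bytes\n", n.offset);
	free(n.data);
	return 0;
}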
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2019 Samsung Electronics Co., Ltd. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/sched.h> #include "glob.h" #include "crypto_ctx.h" struct crypto_ctx_list { spinlock_t ctx_lock; int avail_ctx; struct list_head idle_ctx; wait_queue_head_t ctx_wait; }; static struct crypto_ctx_list ctx_list; static inline void free_aead(struct crypto_aead *aead) { if (aead) crypto_free_aead(aead); } static void free_shash(struct shash_desc *shash) { if (shash) { crypto_free_shash(shash->tfm); kfree(shash); } } static struct crypto_aead *alloc_aead(int id) { struct crypto_aead *tfm = NULL; switch (id) { case CRYPTO_AEAD_AES_GCM: tfm = crypto_alloc_aead("gcm(aes)", 0, 0); break; case CRYPTO_AEAD_AES_CCM: tfm = crypto_alloc_aead("ccm(aes)", 0, 0); break; default: pr_err("Does not support encrypt ahead(id : %d)\n", id); return NULL; } if (IS_ERR(tfm)) { pr_err("Failed to alloc encrypt aead : %ld\n", PTR_ERR(tfm)); return NULL; } return tfm; } static struct shash_desc *alloc_shash_desc(int id) { struct crypto_shash *tfm = NULL; struct shash_desc *shash; switch (id) { case CRYPTO_SHASH_HMACMD5: tfm = crypto_alloc_shash("hmac(md5)", 0, 0); break; case CRYPTO_SHASH_HMACSHA256: tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); break; case CRYPTO_SHASH_CMACAES: tfm = crypto_alloc_shash("cmac(aes)", 0, 0); break; case CRYPTO_SHASH_SHA256: tfm = crypto_alloc_shash("sha256", 0, 0); break; case CRYPTO_SHASH_SHA512: tfm = crypto_alloc_shash("sha512", 0, 0); break; default: return NULL; } if (IS_ERR(tfm)) return NULL; shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!shash) crypto_free_shash(tfm); else shash->tfm = tfm; return shash; } static void ctx_free(struct ksmbd_crypto_ctx *ctx) { int i; for (i = 0; i < CRYPTO_SHASH_MAX; i++) free_shash(ctx->desc[i]); for (i = 0; i < CRYPTO_AEAD_MAX; i++) free_aead(ctx->ccmaes[i]); kfree(ctx); } static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void) { struct ksmbd_crypto_ctx *ctx; while (1) { spin_lock(&ctx_list.ctx_lock); if (!list_empty(&ctx_list.idle_ctx)) { ctx = list_entry(ctx_list.idle_ctx.next, struct ksmbd_crypto_ctx, list); list_del(&ctx->list); spin_unlock(&ctx_list.ctx_lock); return ctx; } if (ctx_list.avail_ctx > num_online_cpus()) { spin_unlock(&ctx_list.ctx_lock); wait_event(ctx_list.ctx_wait, !list_empty(&ctx_list.idle_ctx)); continue; } ctx_list.avail_ctx++; spin_unlock(&ctx_list.ctx_lock); ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL); if (!ctx) { spin_lock(&ctx_list.ctx_lock); ctx_list.avail_ctx--; spin_unlock(&ctx_list.ctx_lock); wait_event(ctx_list.ctx_wait, !list_empty(&ctx_list.idle_ctx)); continue; } break; } return ctx; } void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx) { if (!ctx) return; spin_lock(&ctx_list.ctx_lock); if (ctx_list.avail_ctx <= num_online_cpus()) { list_add(&ctx->list, &ctx_list.idle_ctx); spin_unlock(&ctx_list.ctx_lock); wake_up(&ctx_list.ctx_wait); return; } ctx_list.avail_ctx--; spin_unlock(&ctx_list.ctx_lock); ctx_free(ctx); } static struct ksmbd_crypto_ctx *____crypto_shash_ctx_find(int id) { struct ksmbd_crypto_ctx *ctx; if (id >= CRYPTO_SHASH_MAX) return NULL; ctx = ksmbd_find_crypto_ctx(); if (ctx->desc[id]) return ctx; ctx->desc[id] = alloc_shash_desc(id); if (ctx->desc[id]) return ctx; ksmbd_release_crypto_ctx(ctx); return NULL; } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void) { return 
____crypto_shash_ctx_find(CRYPTO_SHASH_HMACMD5); } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void) { return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACSHA256); } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void) { return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES); } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void) { return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA256); } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void) { return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512); } static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id) { struct ksmbd_crypto_ctx *ctx; if (id >= CRYPTO_AEAD_MAX) return NULL; ctx = ksmbd_find_crypto_ctx(); if (ctx->ccmaes[id]) return ctx; ctx->ccmaes[id] = alloc_aead(id); if (ctx->ccmaes[id]) return ctx; ksmbd_release_crypto_ctx(ctx); return NULL; } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void) { return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_GCM); } struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void) { return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_CCM); } void ksmbd_crypto_destroy(void) { struct ksmbd_crypto_ctx *ctx; while (!list_empty(&ctx_list.idle_ctx)) { ctx = list_entry(ctx_list.idle_ctx.next, struct ksmbd_crypto_ctx, list); list_del(&ctx->list); ctx_free(ctx); } } int ksmbd_crypto_create(void) { struct ksmbd_crypto_ctx *ctx; spin_lock_init(&ctx_list.ctx_lock); INIT_LIST_HEAD(&ctx_list.idle_ctx); init_waitqueue_head(&ctx_list.ctx_wait); ctx_list.avail_ctx = 1; ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; list_add(&ctx->list, &ctx_list.idle_ctx); return 0; }
linux-master
fs/smb/server/crypto_ctx.c
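fs/smb/server/crypto_ctx.c above implements a bounded object pool: a taker reuses an idle context when one is queued, may allocate a fresh one while the population is within the num_online_cpus() cap, and otherwise sleeps on ctx_wait until ksmbd_release_crypto_ctx() hands one back. A pthread-based sketch of the same discipline (the limit value and names are illustrative; the kernel variant also frees surplus contexts on release instead of queueing them):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { struct ctx *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct ctx *idle_list;
static int avail, limit = 4;	/* stand-in for the num_online_cpus() cap */

static struct ctx *pool_get(void)
{
	struct ctx *c = NULL;

	pthread_mutex_lock(&lock);
	for (;;) {
		if (idle_list) {		/* fast path: reuse an idle ctx */
			c = idle_list;
			idle_list = c->next;
			break;
		}
		if (avail < limit) {		/* below the cap: allocate one */
			avail++;
			pthread_mutex_unlock(&lock);
			c = calloc(1, sizeof(*c));
			if (c)
				return c;
			pthread_mutex_lock(&lock);
			avail--;		/* OOM: drop the reservation, wait */
		}
		pthread_cond_wait(&cond, &lock);	/* wait_event(ctx_wait, ...) */
	}
	pthread_mutex_unlock(&lock);
	return c;
}

static void pool_put(struct ctx *c)
{
	pthread_mutex_lock(&lock);
	c->next = idle_list;		/* back onto the idle list ... */
	idle_list = c;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);	/* ... and wake one waiter */
}

int main(void)
{
	struct ctx *a = pool_get(), *b = pool_get();

	pool_put(a);
	pool_put(b);
	printf("contexts ever allocated: %d\n", avail);
	while (idle_list) {		/* drain, like ksmbd_crypto_destroy() */
		struct ctx *c = idle_list;
		idle_list = c->next;
		free(c);
	}
	return 0;
}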
// SPDX-License-Identifier: GPL-2.0-or-later /* * Some of the source code in this file came from fs/cifs/cifs_unicode.c * * Copyright (c) International Business Machines Corp., 2000,2009 * Modified by Steve French ([email protected]) * Modified by Namjae Jeon ([email protected]) */ #include <linux/fs.h> #include <linux/slab.h> #include <asm/unaligned.h> #include "glob.h" #include "unicode.h" #include "smb_common.h" /* * smb_utf16_bytes() - how long will a string be after conversion? * @from: pointer to input string * @maxbytes: don't go past this many bytes of input string * @codepage: destination codepage * * Walk a utf16le string and return the number of bytes that the string will * be after being converted to the given charset, not including any null * termination required. Don't walk past maxbytes in the source buffer. * * Return: string length after conversion */ static int smb_utf16_bytes(const __le16 *from, int maxbytes, const struct nls_table *codepage) { int i; int charlen, outlen = 0; int maxwords = maxbytes / 2; char tmp[NLS_MAX_CHARSET_SIZE]; __u16 ftmp; for (i = 0; i < maxwords; i++) { ftmp = get_unaligned_le16(&from[i]); if (ftmp == 0) break; charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE); if (charlen > 0) outlen += charlen; else outlen++; } return outlen; } /* * cifs_mapchar() - convert a host-endian char to proper char in codepage * @target: where converted character should be copied * @src_char: 2 byte host-endian source character * @cp: codepage to which character should be converted * @mapchar: should character be mapped according to mapchars mount option? * * This function handles the conversion of a single character. It is the * responsibility of the caller to ensure that the target buffer is large * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). * * Return: string length after conversion */ static int cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, bool mapchar) { int len = 1; if (!mapchar) goto cp_convert; /* * BB: Cannot handle remapping UNI_SLASH until all the calls to * build_path_from_dentry are modified, as they use slash as * separator. */ switch (src_char) { case UNI_COLON: *target = ':'; break; case UNI_ASTERISK: *target = '*'; break; case UNI_QUESTION: *target = '?'; break; case UNI_PIPE: *target = '|'; break; case UNI_GRTRTHAN: *target = '>'; break; case UNI_LESSTHAN: *target = '<'; break; default: goto cp_convert; } out: return len; cp_convert: len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); if (len <= 0) { *target = '?'; len = 1; } goto out; } /* * smb_from_utf16() - convert utf16le string to local charset * @to: destination buffer * @from: source buffer * @tolen: destination buffer size (in bytes) * @fromlen: source buffer size (in bytes) * @codepage: codepage to which characters should be converted * @mapchar: should characters be remapped according to the mapchars option? * * Convert a little-endian utf16le string (as sent by the server) to a string * in the provided codepage. The tolen and fromlen parameters are to ensure * that the code doesn't walk off of the end of the buffer (which is always * a danger if the alignment of the source buffer is off). The destination * string is always properly null terminated and fits in the destination * buffer. Returns the length of the destination string in bytes (including * null terminator). * * Note that some windows versions actually send multiword UTF-16 characters * instead of straight UTF16-2. 
The linux nls routines however aren't able to * deal with those characters properly. In the event that we get some of * those characters, they won't be translated properly. * * Return: string length after conversion */ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, const struct nls_table *codepage, bool mapchar) { int i, charlen, safelen; int outlen = 0; int nullsize = nls_nullsize(codepage); int fromwords = fromlen / 2; char tmp[NLS_MAX_CHARSET_SIZE]; __u16 ftmp; /* * because the chars can be of varying widths, we need to take care * not to overflow the destination buffer when we get close to the * end of it. Until we get to this offset, we don't need to check * for overflow however. */ safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); for (i = 0; i < fromwords; i++) { ftmp = get_unaligned_le16(&from[i]); if (ftmp == 0) break; /* * check to see if converting this character might make the * conversion bleed into the null terminator */ if (outlen >= safelen) { charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar); if ((outlen + charlen) > (tolen - nullsize)) break; } /* put converted char into 'to' buffer */ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar); outlen += charlen; } /* properly null-terminate string */ for (i = 0; i < nullsize; i++) to[outlen++] = 0; return outlen; } /* * smb_strtoUTF16() - Convert character string to unicode string * @to: destination buffer * @from: source buffer * @len: destination buffer size (in bytes) * @codepage: codepage to which characters should be converted * * Return: string length after conversion */ int smb_strtoUTF16(__le16 *to, const char *from, int len, const struct nls_table *codepage) { int charlen; int i; wchar_t wchar_to; /* needed to quiet sparse */ /* special case for utf8 to handle no plane0 chars */ if (!strcmp(codepage->charset, "utf8")) { /* * convert utf8 -> utf16, we assume we have enough space * as caller should have assumed conversion does not overflow * in destination len is length in wchar_t units (16bits) */ i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN, (wchar_t *)to, len); /* if success terminate and exit */ if (i >= 0) goto success; /* * if fails fall back to UCS encoding as this * function should not return negative values * currently can fail only if source contains * invalid encoded characters */ } for (i = 0; len > 0 && *from; i++, from += charlen, len -= charlen) { charlen = codepage->char2uni(from, len, &wchar_to); if (charlen < 1) { /* A question mark */ wchar_to = 0x003f; charlen = 1; } put_unaligned_le16(wchar_to, &to[i]); } success: put_unaligned_le16(0, &to[i]); return i; } /* * smb_strndup_from_utf16() - copy a string from wire format to the local * codepage * @src: source string * @maxlen: don't walk past this many bytes in the source string * @is_unicode: is this a unicode string? * @codepage: destination codepage * * Take a string given by the server, convert it to the local codepage and * put it in a new buffer. Returns a pointer to the new string or NULL on * error. 
* * Return: destination string buffer or error ptr */ char *smb_strndup_from_utf16(const char *src, const int maxlen, const bool is_unicode, const struct nls_table *codepage) { int len, ret; char *dst; if (is_unicode) { len = smb_utf16_bytes((__le16 *)src, maxlen, codepage); len += nls_nullsize(codepage); dst = kmalloc(len, GFP_KERNEL); if (!dst) return ERR_PTR(-ENOMEM); ret = smb_from_utf16(dst, (__le16 *)src, len, maxlen, codepage, false); if (ret < 0) { kfree(dst); return ERR_PTR(-EINVAL); } } else { len = strnlen(src, maxlen); len++; dst = kmalloc(len, GFP_KERNEL); if (!dst) return ERR_PTR(-ENOMEM); strscpy(dst, src, len); } return dst; } /* * Convert 16 bit Unicode pathname to wire format from string in current code * page. Conversion may involve remapping up the six characters that are * only legal in POSIX-like OS (if they are present in the string). Path * names are little endian 16 bit Unicode on the wire */ /* * smbConvertToUTF16() - convert string from local charset to utf16 * @target: destination buffer * @source: source buffer * @srclen: source buffer size (in bytes) * @cp: codepage to which characters should be converted * @mapchar: should characters be remapped according to the mapchars option? * * Convert 16 bit Unicode pathname to wire format from string in current code * page. Conversion may involve remapping up the six characters that are * only legal in POSIX-like OS (if they are present in the string). Path * names are little endian 16 bit Unicode on the wire * * Return: char length after conversion */ int smbConvertToUTF16(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int mapchars) { int i, j, charlen; char src_char; __le16 dst_char; wchar_t tmp; if (!mapchars) return smb_strtoUTF16(target, source, srclen, cp); for (i = 0, j = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; switch (src_char) { case 0: put_unaligned(0, &target[j]); return j; case ':': dst_char = cpu_to_le16(UNI_COLON); break; case '*': dst_char = cpu_to_le16(UNI_ASTERISK); break; case '?': dst_char = cpu_to_le16(UNI_QUESTION); break; case '<': dst_char = cpu_to_le16(UNI_LESSTHAN); break; case '>': dst_char = cpu_to_le16(UNI_GRTRTHAN); break; case '|': dst_char = cpu_to_le16(UNI_PIPE); break; /* * FIXME: We can not handle remapping backslash (UNI_SLASH) * until all the calls to build_path_from_dentry are modified, * as they use backslash as separator. */ default: charlen = cp->char2uni(source + i, srclen - i, &tmp); dst_char = cpu_to_le16(tmp); /* * if no match, use question mark, which at least in * some cases serves as wild card */ if (charlen < 1) { dst_char = cpu_to_le16(0x003f); charlen = 1; } } /* * character may take more than one byte in the source string, * but will take exactly two bytes in the target string */ i += charlen; put_unaligned(dst_char, &target[j]); } return j; }
linux-master
fs/smb/server/unicode.c
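The mapchar paths in fs/smb/server/unicode.c above remap the six filename characters that POSIX allows but Windows reserves (colon, asterisk, question mark, pipe, and the angle brackets) to dedicated UTF-16 code points and back. A round-trip sketch, assuming the convention used by the cifs/ksmbd headers that UNI_COLON and friends are the ASCII code plus 0xF000 in the private-use area:

#include <stdio.h>
#include <stdint.h>

#define UNI_BASE 0xF000		/* assumed PUA offset, e.g. UNI_COLON == 0xF000 + ':' */

static const char reserved[] = ":*?<>|";

/* smbConvertToUTF16() direction: reserved chars get the PUA code
 * point, everything else would go through the nls codepage (plain
 * ASCII shown here). */
static uint16_t to_utf16_mapped(char c)
{
	for (const char *p = reserved; *p; p++)
		if (*p == c)
			return (uint16_t)(UNI_BASE + (unsigned char)c);
	return (uint16_t)(unsigned char)c;
}

/* cifs_mapchar() direction: undo the remapping for the six cases,
 * otherwise fall back (a real decoder calls the nls codepage here). */
static char from_utf16_mapped(uint16_t u)
{
	if (u > UNI_BASE && u < UNI_BASE + 0x80) {
		char c = (char)(u - UNI_BASE);
		for (const char *p = reserved; *p; p++)
			if (*p == c)
				return c;
	}
	return (char)u;		/* ASCII-only fallback for this demo */
}

int main(void)
{
	const char *name = "a:b*c";
	for (const char *p = name; *p; p++) {
		uint16_t u = to_utf16_mapped(*p);
		printf("'%c' -> U+%04X -> '%c'\n", *p, u, from_utf16_mapped(u));
	}
	return 0;
}

Note the FIXME in the real code: backslash (UNI_SLASH) is deliberately left unmapped because the path-building helpers still use it as a separator.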
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Samsung Electronics Co., Ltd. * Copyright (C) 2018 Namjae Jeon <[email protected]> */ #include <linux/user_namespace.h> #include "smb_common.h" #include "server.h" #include "misc.h" #include "smbstatus.h" #include "connection.h" #include "ksmbd_work.h" #include "mgmt/user_session.h" #include "mgmt/user_config.h" #include "mgmt/tree_connect.h" #include "mgmt/share_config.h" /*for shortname implementation */ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%"; #define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1) #define MAGIC_CHAR '~' #define PERIOD '.' #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE])) struct smb_protocol { int index; char *name; char *prot; __u16 prot_id; }; static struct smb_protocol smb1_protos[] = { { SMB21_PROT, "\2SMB 2.1", "SMB2_10", SMB21_PROT_ID }, { SMB2X_PROT, "\2SMB 2.???", "SMB2_22", SMB2X_PROT_ID }, }; static struct smb_protocol smb2_protos[] = { { SMB21_PROT, "\2SMB 2.1", "SMB2_10", SMB21_PROT_ID }, { SMB30_PROT, "\2SMB 3.0", "SMB3_00", SMB30_PROT_ID }, { SMB302_PROT, "\2SMB 3.02", "SMB3_02", SMB302_PROT_ID }, { SMB311_PROT, "\2SMB 3.1.1", "SMB3_11", SMB311_PROT_ID }, }; unsigned int ksmbd_server_side_copy_max_chunk_count(void) { return 256; } unsigned int ksmbd_server_side_copy_max_chunk_size(void) { return (2U << 30) - 1; } unsigned int ksmbd_server_side_copy_max_total_size(void) { return (2U << 30) - 1; } inline int ksmbd_min_protocol(void) { return SMB21_PROT; } inline int ksmbd_max_protocol(void) { return SMB311_PROT; } int ksmbd_lookup_protocol_idx(char *str) { int offt = ARRAY_SIZE(smb1_protos) - 1; int len = strlen(str); while (offt >= 0) { if (!strncmp(str, smb1_protos[offt].prot, len)) { ksmbd_debug(SMB, "selected %s dialect idx = %d\n", smb1_protos[offt].prot, offt); return smb1_protos[offt].index; } offt--; } offt = ARRAY_SIZE(smb2_protos) - 1; while (offt >= 0) { if (!strncmp(str, smb2_protos[offt].prot, len)) { ksmbd_debug(SMB, "selected %s dialect idx = %d\n", smb2_protos[offt].prot, offt); return smb2_protos[offt].index; } offt--; } return -1; } /** * ksmbd_verify_smb_message() - check for valid smb2 request header * @work: smb work * * check for valid smb signature and packet direction(request/response) * * Return: 0 on success, otherwise -EINVAL */ int ksmbd_verify_smb_message(struct ksmbd_work *work) { struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work); struct smb_hdr *hdr; if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER) return ksmbd_smb2_check_message(work); hdr = work->request_buf; if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER && hdr->Command == SMB_COM_NEGOTIATE) { work->conn->outstanding_credits++; return 0; } return -EINVAL; } /** * ksmbd_smb_request() - check for valid smb request type * @conn: connection instance * * Return: true on success, otherwise false */ bool ksmbd_smb_request(struct ksmbd_conn *conn) { __le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf); if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) { pr_err_ratelimited("smb2 compression not support yet"); return false; } if (*proto != SMB1_PROTO_NUMBER && *proto != SMB2_PROTO_NUMBER && *proto != SMB2_TRANSFORM_PROTO_NUM) return false; return true; } static bool supported_protocol(int idx) { if (idx == SMB2X_PROT && (server_conf.min_protocol >= SMB21_PROT || server_conf.max_protocol <= SMB311_PROT)) return true; return (server_conf.min_protocol <= idx && idx <= server_conf.max_protocol); } static char *next_dialect(char *dialect, int *next_off, int bcount) { 
dialect = dialect + *next_off; *next_off = strnlen(dialect, bcount); if (dialect[*next_off] != '\0') return NULL; return dialect; } static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count) { int i, seq_num, bcount, next; char *dialect; for (i = ARRAY_SIZE(smb1_protos) - 1; i >= 0; i--) { seq_num = 0; next = 0; dialect = cli_dialects; bcount = le16_to_cpu(byte_count); do { dialect = next_dialect(dialect, &next, bcount); if (!dialect) break; ksmbd_debug(SMB, "client requested dialect %s\n", dialect); if (!strcmp(dialect, smb1_protos[i].name)) { if (supported_protocol(smb1_protos[i].index)) { ksmbd_debug(SMB, "selected %s dialect\n", smb1_protos[i].name); if (smb1_protos[i].index == SMB1_PROT) return seq_num; return smb1_protos[i].prot_id; } } seq_num++; bcount -= (++next); } while (bcount > 0); } return BAD_PROT_ID; } int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count) { int i; int count; for (i = ARRAY_SIZE(smb2_protos) - 1; i >= 0; i--) { count = le16_to_cpu(dialects_count); while (--count >= 0) { ksmbd_debug(SMB, "client requested dialect 0x%x\n", le16_to_cpu(cli_dialects[count])); if (le16_to_cpu(cli_dialects[count]) != smb2_protos[i].prot_id) continue; if (supported_protocol(smb2_protos[i].index)) { ksmbd_debug(SMB, "selected %s dialect\n", smb2_protos[i].name); return smb2_protos[i].prot_id; } } } return BAD_PROT_ID; } static int ksmbd_negotiate_smb_dialect(void *buf) { int smb_buf_length = get_rfc1002_len(buf); __le32 proto = ((struct smb2_hdr *)smb2_get_msg(buf))->ProtocolId; if (proto == SMB2_PROTO_NUMBER) { struct smb2_negotiate_req *req; int smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); req = (struct smb2_negotiate_req *)smb2_get_msg(buf); if (smb2_neg_size > smb_buf_length) goto err_out; if (struct_size(req, Dialects, le16_to_cpu(req->DialectCount)) > smb_buf_length) goto err_out; return ksmbd_lookup_dialect_by_id(req->Dialects, req->DialectCount); } proto = *(__le32 *)((struct smb_hdr *)buf)->Protocol; if (proto == SMB1_PROTO_NUMBER) { struct smb_negotiate_req *req; req = (struct smb_negotiate_req *)buf; if (le16_to_cpu(req->ByteCount) < 2) goto err_out; if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 + le16_to_cpu(req->ByteCount) > smb_buf_length) { goto err_out; } return ksmbd_lookup_dialect_by_name(req->DialectsArray, req->ByteCount); } err_out: return BAD_PROT_ID; } #define SMB_COM_NEGOTIATE_EX 0x0 /** * get_smb1_cmd_val() - get smb command value from smb header * @work: smb work containing smb header * * Return: smb command value */ static u16 get_smb1_cmd_val(struct ksmbd_work *work) { return SMB_COM_NEGOTIATE_EX; } /** * init_smb1_rsp_hdr() - initialize smb negotiate response header * @work: smb work containing smb request * * Return: 0 on success, otherwise -EINVAL */ static int init_smb1_rsp_hdr(struct ksmbd_work *work) { struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf; struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf; rsp_hdr->Command = SMB_COM_NEGOTIATE; *(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER; rsp_hdr->Flags = SMBFLG_RESPONSE; rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS | SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME; rsp_hdr->Pid = rcv_hdr->Pid; rsp_hdr->Mid = rcv_hdr->Mid; return 0; } /** * smb1_check_user_session() - check for valid session for a user * @work: smb work containing smb request buffer * * Return: 0 on success, otherwise error */ static int smb1_check_user_session(struct ksmbd_work *work) { unsigned int cmd = 
work->conn->ops->get_cmd_val(work); if (cmd == SMB_COM_NEGOTIATE_EX) return 0; return -EINVAL; } /** * smb1_allocate_rsp_buf() - allocate response buffer for a command * @work: smb work containing smb request * * Return: 0 on success, otherwise -ENOMEM */ static int smb1_allocate_rsp_buf(struct ksmbd_work *work) { work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL); work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE; if (!work->response_buf) { pr_err("Failed to allocate %u bytes buffer\n", MAX_CIFS_SMALL_BUFFER_SIZE); return -ENOMEM; } return 0; } static struct smb_version_ops smb1_server_ops = { .get_cmd_val = get_smb1_cmd_val, .init_rsp_hdr = init_smb1_rsp_hdr, .allocate_rsp_buf = smb1_allocate_rsp_buf, .check_user_session = smb1_check_user_session, }; static int smb1_negotiate(struct ksmbd_work *work) { return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE); } static struct smb_version_cmds smb1_server_cmds[1] = { [SMB_COM_NEGOTIATE_EX] = { .proc = smb1_negotiate, }, }; static int init_smb1_server(struct ksmbd_conn *conn) { conn->ops = &smb1_server_ops; conn->cmds = smb1_server_cmds; conn->max_cmds = ARRAY_SIZE(smb1_server_cmds); return 0; } int ksmbd_init_smb_server(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; __le32 proto; proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol; if (conn->need_neg == false) { if (proto == SMB1_PROTO_NUMBER) return -EINVAL; return 0; } if (proto == SMB1_PROTO_NUMBER) return init_smb1_server(conn); return init_smb3_11_server(conn); } int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level, struct ksmbd_file *dir, struct ksmbd_dir_info *d_info, char *search_pattern, int (*fn)(struct ksmbd_conn *, int, struct ksmbd_dir_info *, struct ksmbd_kstat *)) { int i, rc = 0; struct ksmbd_conn *conn = work->conn; struct mnt_idmap *idmap = file_mnt_idmap(dir->filp); for (i = 0; i < 2; i++) { struct kstat kstat; struct ksmbd_kstat ksmbd_kstat; struct dentry *dentry; if (!dir->dot_dotdot[i]) { /* fill dot entry info */ if (i == 0) { d_info->name = "."; d_info->name_len = 1; dentry = dir->filp->f_path.dentry; } else { d_info->name = ".."; d_info->name_len = 2; dentry = dir->filp->f_path.dentry->d_parent; } if (!match_pattern(d_info->name, d_info->name_len, search_pattern)) { dir->dot_dotdot[i] = 1; continue; } ksmbd_kstat.kstat = &kstat; ksmbd_vfs_fill_dentry_attrs(work, idmap, dentry, &ksmbd_kstat); rc = fn(conn, info_level, d_info, &ksmbd_kstat); if (rc) break; if (d_info->out_buf_len <= 0) break; dir->dot_dotdot[i] = 1; if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY) { d_info->out_buf_len = 0; break; } } } return rc; } /** * ksmbd_extract_shortname() - get shortname from long filename * @conn: connection instance * @longname: source long filename * @shortname: destination short filename * * Return: shortname length or 0 when source long name is '.' or '..' * TODO: Though this function comforms the restriction of 8.3 Filename spec, * but the result is different with Windows 7's one. need to check. 
*/ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname, char *shortname) { const char *p; char base[9], extension[4]; char out[13] = {0}; int baselen = 0; int extlen = 0, len = 0; unsigned int csum = 0; const unsigned char *ptr; bool dot_present = true; p = longname; if ((*p == '.') || (!(strcmp(p, "..")))) { /*no mangling required */ return 0; } p = strrchr(longname, '.'); if (p == longname) { /*name starts with a dot*/ strscpy(extension, "___", strlen("___")); } else { if (p) { p++; while (*p && extlen < 3) { if (*p != '.') extension[extlen++] = toupper(*p); p++; } extension[extlen] = '\0'; } else { dot_present = false; } } p = longname; if (*p == '.') { p++; longname++; } while (*p && (baselen < 5)) { if (*p != '.') base[baselen++] = toupper(*p); p++; } base[baselen] = MAGIC_CHAR; memcpy(out, base, baselen + 1); ptr = longname; len = strlen(longname); for (; len > 0; len--, ptr++) csum += *ptr; csum = csum % (MANGLE_BASE * MANGLE_BASE); out[baselen + 1] = mangle(csum / MANGLE_BASE); out[baselen + 2] = mangle(csum); out[baselen + 3] = PERIOD; if (dot_present) memcpy(out + baselen + 4, extension, 4); else out[baselen + 4] = '\0'; smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX, conn->local_nls, 0); len = strlen(out) * 2; return len; } static int __smb2_negotiate(struct ksmbd_conn *conn) { return (conn->dialect >= SMB20_PROT_ID && conn->dialect <= SMB311_PROT_ID); } static int smb_handle_negotiate(struct ksmbd_work *work) { struct smb_negotiate_rsp *neg_rsp = work->response_buf; ksmbd_debug(SMB, "Unsupported SMB1 protocol\n"); if (ksmbd_iov_pin_rsp(work, (void *)neg_rsp, sizeof(struct smb_negotiate_rsp) - 4)) return -ENOMEM; neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS; neg_rsp->hdr.WordCount = 1; neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect); neg_rsp->ByteCount = 0; return 0; } int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command) { struct ksmbd_conn *conn = work->conn; int ret; conn->dialect = ksmbd_negotiate_smb_dialect(work->request_buf); ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect); if (command == SMB2_NEGOTIATE_HE) { ret = smb2_handle_negotiate(work); return ret; } if (command == SMB_COM_NEGOTIATE) { if (__smb2_negotiate(conn)) { init_smb3_11_server(conn); init_smb2_neg_rsp(work); ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n"); return 0; } return smb_handle_negotiate(work); } pr_err("Unknown SMB negotiation command: %u\n", command); return -EINVAL; } enum SHARED_MODE_ERRORS { SHARE_DELETE_ERROR, SHARE_READ_ERROR, SHARE_WRITE_ERROR, FILE_READ_ERROR, FILE_WRITE_ERROR, FILE_DELETE_ERROR, }; static const char * const shared_mode_errors[] = { "Current access mode does not permit SHARE_DELETE", "Current access mode does not permit SHARE_READ", "Current access mode does not permit SHARE_WRITE", "Desired access mode does not permit FILE_READ", "Desired access mode does not permit FILE_WRITE", "Desired access mode does not permit FILE_DELETE", }; static void smb_shared_mode_error(int error, struct ksmbd_file *prev_fp, struct ksmbd_file *curr_fp) { ksmbd_debug(SMB, "%s\n", shared_mode_errors[error]); ksmbd_debug(SMB, "Current mode: 0x%x Desired mode: 0x%x\n", prev_fp->saccess, curr_fp->daccess); } int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp) { int rc = 0; struct ksmbd_file *prev_fp; /* * Lookup fp in master fp list, and check desired access and * shared mode between previous open and current open. 
*/ read_lock(&curr_fp->f_ci->m_lock); list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) { if (file_inode(filp) != file_inode(prev_fp->filp)) continue; if (filp == prev_fp->filp) continue; if (ksmbd_stream_fd(prev_fp) && ksmbd_stream_fd(curr_fp)) if (strcmp(prev_fp->stream.name, curr_fp->stream.name)) continue; if (prev_fp->attrib_only != curr_fp->attrib_only) continue; if (!(prev_fp->saccess & FILE_SHARE_DELETE_LE) && curr_fp->daccess & FILE_DELETE_LE) { smb_shared_mode_error(SHARE_DELETE_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } /* * Only check FILE_SHARE_DELETE if stream opened and * normal file opened. */ if (ksmbd_stream_fd(prev_fp) && !ksmbd_stream_fd(curr_fp)) continue; if (!(prev_fp->saccess & FILE_SHARE_READ_LE) && curr_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE)) { smb_shared_mode_error(SHARE_READ_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } if (!(prev_fp->saccess & FILE_SHARE_WRITE_LE) && curr_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) { smb_shared_mode_error(SHARE_WRITE_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } if (prev_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE) && !(curr_fp->saccess & FILE_SHARE_READ_LE)) { smb_shared_mode_error(FILE_READ_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } if (prev_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE) && !(curr_fp->saccess & FILE_SHARE_WRITE_LE)) { smb_shared_mode_error(FILE_WRITE_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } if (prev_fp->daccess & FILE_DELETE_LE && !(curr_fp->saccess & FILE_SHARE_DELETE_LE)) { smb_shared_mode_error(FILE_DELETE_ERROR, prev_fp, curr_fp); rc = -EPERM; break; } } read_unlock(&curr_fp->f_ci->m_lock); return rc; } bool is_asterisk(char *p) { return p && p[0] == '*'; } int ksmbd_override_fsids(struct ksmbd_work *work) { struct ksmbd_session *sess = work->sess; struct ksmbd_share_config *share = work->tcon->share_conf; struct cred *cred; struct group_info *gi; unsigned int uid; unsigned int gid; uid = user_uid(sess->user); gid = user_gid(sess->user); if (share->force_uid != KSMBD_SHARE_INVALID_UID) uid = share->force_uid; if (share->force_gid != KSMBD_SHARE_INVALID_GID) gid = share->force_gid; cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; cred->fsuid = make_kuid(&init_user_ns, uid); cred->fsgid = make_kgid(&init_user_ns, gid); gi = groups_alloc(0); if (!gi) { abort_creds(cred); return -ENOMEM; } set_groups(cred, gi); put_group_info(gi); if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID)) cred->cap_effective = cap_drop_fs_set(cred->cap_effective); WARN_ON(work->saved_cred); work->saved_cred = override_creds(cred); if (!work->saved_cred) { abort_creds(cred); return -EINVAL; } return 0; } void ksmbd_revert_fsids(struct ksmbd_work *work) { const struct cred *cred; WARN_ON(!work->saved_cred); cred = current_cred(); revert_creds(work->saved_cred); put_cred(cred); work->saved_cred = NULL; } __le32 smb_map_generic_desired_access(__le32 daccess) { if (daccess & FILE_GENERIC_READ_LE) { daccess |= cpu_to_le32(GENERIC_READ_FLAGS); daccess &= ~FILE_GENERIC_READ_LE; } if (daccess & FILE_GENERIC_WRITE_LE) { daccess |= cpu_to_le32(GENERIC_WRITE_FLAGS); daccess &= ~FILE_GENERIC_WRITE_LE; } if (daccess & FILE_GENERIC_EXECUTE_LE) { daccess |= cpu_to_le32(GENERIC_EXECUTE_FLAGS); daccess &= ~FILE_GENERIC_EXECUTE_LE; } if (daccess & FILE_GENERIC_ALL_LE) { daccess |= cpu_to_le32(GENERIC_ALL_FLAGS); daccess &= ~FILE_GENERIC_ALL_LE; } return daccess; }
linux-master
fs/smb/server/smb_common.c
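ksmbd_extract_shortname() in fs/smb/server/smb_common.c above builds an 8.3 name from up to five uppercased base characters, a '~', and two checksum characters: the checksum is the byte sum of the whole long name reduced modulo 42*42 and spelled with two digits of a 42-symbol alphabet. A condensed sketch of that mangling (the kernel version additionally special-cases leading dots and dotless names, and converts the result to UTF-16):

#include <stdio.h>
#include <string.h>
#include <ctype.h>

static const char basechars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
#define MANGLE_BASE 42		/* one less than the alphabet size, as in the driver */
#define mangle(v) (basechars[(v) % MANGLE_BASE])

static void shortname(const char *longname, char *out /* >= 13 bytes */)
{
	unsigned int csum = 0;
	int baselen = 0, extlen = 0, i;
	const char *dot = strrchr(longname, '.');

	for (const char *p = longname; *p; p++)
		csum += (unsigned char)*p;	/* byte sum of the long name */
	csum %= MANGLE_BASE * MANGLE_BASE;	/* two base-42 digits */

	for (const char *p = longname; *p && baselen < 5; p++)
		if (*p != '.')
			out[baselen++] = toupper((unsigned char)*p);
	out[baselen] = '~';			/* MAGIC_CHAR */
	out[baselen + 1] = mangle(csum / MANGLE_BASE);
	out[baselen + 2] = mangle(csum);
	i = baselen + 3;
	if (dot && dot != longname) {		/* up to 3 extension chars */
		out[i++] = '.';
		for (const char *p = dot + 1; *p && extlen < 3; p++, extlen++)
			out[i++] = toupper((unsigned char)*p);
	}
	out[i] = '\0';
}

int main(void)
{
	char out[13];

	shortname("longfilename.txt", out);
	printf("%s\n", out);	/* e.g. LONGF~xx.TXT */
	return 0;
}

As the TODO in the driver notes, this scheme satisfies the 8.3 format restrictions but does not reproduce the exact short names Windows itself would generate.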
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #include <linux/uio.h> #include <linux/xattr.h> #include <crypto/hash.h> #include <crypto/aead.h> #include <linux/random.h> #include <linux/scatterlist.h> #include "auth.h" #include "glob.h" #include <linux/fips.h> #include <crypto/des.h> #include "server.h" #include "smb_common.h" #include "connection.h" #include "mgmt/user_session.h" #include "mgmt/user_config.h" #include "crypto_ctx.h" #include "transport_ipc.h" #include "../common/arc4.h" /* * Fixed format data defining GSS header and fixed string * "not_defined_in_RFC4178@please_ignore". * So sec blob data in neg phase could be generated statically. */ static char NEGOTIATE_GSS_HEADER[AUTH_GSS_LENGTH] = { #ifdef CONFIG_SMB_SERVER_KERBEROS5 0x60, 0x5e, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x02, 0xa0, 0x54, 0x30, 0x52, 0xa0, 0x24, 0x30, 0x22, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x82, 0xf7, 0x12, 0x01, 0x02, 0x02, 0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a, 0x30, 0x28, 0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f, 0x74, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43, 0x34, 0x31, 0x37, 0x38, 0x40, 0x70, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65 #else 0x60, 0x48, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x02, 0xa0, 0x3e, 0x30, 0x3c, 0xa0, 0x0e, 0x30, 0x0c, 0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a, 0x30, 0x28, 0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f, 0x74, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43, 0x34, 0x31, 0x37, 0x38, 0x40, 0x70, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65 #endif }; void ksmbd_copy_gss_neg_header(void *buf) { memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH); } /** * ksmbd_gen_sess_key() - function to generate session key * @sess: session of connection * @hash: source hash value to be used for find session key * @hmac: source hmac value to be used for finding session key * */ static int ksmbd_gen_sess_key(struct ksmbd_session *sess, char *hash, char *hmac) { struct ksmbd_crypto_ctx *ctx; int rc; ctx = ksmbd_crypto_ctx_find_hmacmd5(); if (!ctx) { ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n"); return -ENOMEM; } rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx), hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { ksmbd_debug(AUTH, "hmacmd5 set key fail error %d\n", rc); goto out; } rc = crypto_shash_init(CRYPTO_HMACMD5(ctx)); if (rc) { ksmbd_debug(AUTH, "could not init hmacmd5 error %d\n", rc); goto out; } rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), hmac, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) { ksmbd_debug(AUTH, "Could not update with response error %d\n", rc); goto out; } rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), sess->sess_key); if (rc) { ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n", rc); goto out; } out: ksmbd_release_crypto_ctx(ctx); return rc; } static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess, char *ntlmv2_hash, char *dname) { int ret, len, conv_len; wchar_t *domain = NULL; __le16 *uniname = NULL; struct ksmbd_crypto_ctx *ctx; ctx = ksmbd_crypto_ctx_find_hmacmd5(); if (!ctx) { ksmbd_debug(AUTH, "can't generate 
ntlmv2 hash\n"); return -ENOMEM; } ret = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx), user_passkey(sess->user), CIFS_ENCPWD_SIZE); if (ret) { ksmbd_debug(AUTH, "Could not set NT Hash as a key\n"); goto out; } ret = crypto_shash_init(CRYPTO_HMACMD5(ctx)); if (ret) { ksmbd_debug(AUTH, "could not init hmacmd5\n"); goto out; } /* convert user_name to unicode */ len = strlen(user_name(sess->user)); uniname = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL); if (!uniname) { ret = -ENOMEM; goto out; } conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len, conn->local_nls); if (conv_len < 0 || conv_len > len) { ret = -EINVAL; goto out; } UniStrupr(uniname); ret = crypto_shash_update(CRYPTO_HMACMD5(ctx), (char *)uniname, UNICODE_LEN(conv_len)); if (ret) { ksmbd_debug(AUTH, "Could not update with user\n"); goto out; } /* Convert domain name or conn name to unicode and uppercase */ len = strlen(dname); domain = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL); if (!domain) { ret = -ENOMEM; goto out; } conv_len = smb_strtoUTF16((__le16 *)domain, dname, len, conn->local_nls); if (conv_len < 0 || conv_len > len) { ret = -EINVAL; goto out; } ret = crypto_shash_update(CRYPTO_HMACMD5(ctx), (char *)domain, UNICODE_LEN(conv_len)); if (ret) { ksmbd_debug(AUTH, "Could not update with domain\n"); goto out; } ret = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_hash); if (ret) ksmbd_debug(AUTH, "Could not generate md5 hash\n"); out: kfree(uniname); kfree(domain); ksmbd_release_crypto_ctx(ctx); return ret; } /** * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler * @sess: session of connection * @ntlmv2: NTLMv2 challenge response * @blen: NTLMv2 blob length * @domain_name: domain name * * Return: 0 on success, error number on error */ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2, int blen, char *domain_name, char *cryptkey) { char ntlmv2_hash[CIFS_ENCPWD_SIZE]; char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE]; struct ksmbd_crypto_ctx *ctx = NULL; char *construct = NULL; int rc, len; rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name); if (rc) { ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc); goto out; } ctx = ksmbd_crypto_ctx_find_hmacmd5(); if (!ctx) { ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n"); return -ENOMEM; } rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx), ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { ksmbd_debug(AUTH, "Could not set NTLMV2 Hash as a key\n"); goto out; } rc = crypto_shash_init(CRYPTO_HMACMD5(ctx)); if (rc) { ksmbd_debug(AUTH, "Could not init hmacmd5\n"); goto out; } len = CIFS_CRYPTO_KEY_SIZE + blen; construct = kzalloc(len, GFP_KERNEL); if (!construct) { rc = -ENOMEM; goto out; } memcpy(construct, cryptkey, CIFS_CRYPTO_KEY_SIZE); memcpy(construct + CIFS_CRYPTO_KEY_SIZE, &ntlmv2->blob_signature, blen); rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), construct, len); if (rc) { ksmbd_debug(AUTH, "Could not update with response\n"); goto out; } rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_rsp); if (rc) { ksmbd_debug(AUTH, "Could not generate md5 hash\n"); goto out; } ksmbd_release_crypto_ctx(ctx); ctx = NULL; rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp); if (rc) { ksmbd_debug(AUTH, "Could not generate sess key\n"); goto out; } if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0) rc = -EINVAL; out: if (ctx) ksmbd_release_crypto_ctx(ctx); kfree(construct); return rc; } /** * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct * authenticate blob * @authblob: authenticate 
blob source pointer * @usr: user details * @sess: session of connection * * Return: 0 on success, error number on error */ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob, int blob_len, struct ksmbd_conn *conn, struct ksmbd_session *sess) { char *domain_name; unsigned int nt_off, dn_off; unsigned short nt_len, dn_len; int ret; if (blob_len < sizeof(struct authenticate_message)) { ksmbd_debug(AUTH, "negotiate blob len %d too small\n", blob_len); return -EINVAL; } if (memcmp(authblob->Signature, "NTLMSSP", 8)) { ksmbd_debug(AUTH, "blob signature incorrect %s\n", authblob->Signature); return -EINVAL; } nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset); nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length); dn_off = le32_to_cpu(authblob->DomainName.BufferOffset); dn_len = le16_to_cpu(authblob->DomainName.Length); if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len || nt_len < CIFS_ENCPWD_SIZE) return -EINVAL; /* TODO : use domain name that imported from configuration file */ domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off, dn_len, true, conn->local_nls); if (IS_ERR(domain_name)) return PTR_ERR(domain_name); /* process NTLMv2 authentication */ ksmbd_debug(AUTH, "decode_ntlmssp_authenticate_blob dname%s\n", domain_name); ret = ksmbd_auth_ntlmv2(conn, sess, (struct ntlmv2_resp *)((char *)authblob + nt_off), nt_len - CIFS_ENCPWD_SIZE, domain_name, conn->ntlmssp.cryptkey); kfree(domain_name); /* The recovered secondary session key */ if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) { struct arc4_ctx *ctx_arc4; unsigned int sess_key_off, sess_key_len; sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset); sess_key_len = le16_to_cpu(authblob->SessionKey.Length); if (blob_len < (u64)sess_key_off + sess_key_len) return -EINVAL; if (sess_key_len > CIFS_KEY_SIZE) return -EINVAL; ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL); if (!ctx_arc4) return -ENOMEM; cifs_arc4_setkey(ctx_arc4, sess->sess_key, SMB2_NTLMV2_SESSKEY_SIZE); cifs_arc4_crypt(ctx_arc4, sess->sess_key, (char *)authblob + sess_key_off, sess_key_len); kfree_sensitive(ctx_arc4); } return ret; } /** * ksmbd_decode_ntlmssp_neg_blob() - helper function to construct * negotiate blob * @negblob: negotiate blob source pointer * @rsp: response header pointer to be updated * @sess: session of connection * */ int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob, int blob_len, struct ksmbd_conn *conn) { if (blob_len < sizeof(struct negotiate_message)) { ksmbd_debug(AUTH, "negotiate blob len %d too small\n", blob_len); return -EINVAL; } if (memcmp(negblob->Signature, "NTLMSSP", 8)) { ksmbd_debug(AUTH, "blob signature incorrect %s\n", negblob->Signature); return -EINVAL; } conn->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags); return 0; } /** * ksmbd_build_ntlmssp_challenge_blob() - helper function to construct * challenge blob * @chgblob: challenge blob source pointer to initialize * @rsp: response header pointer to be updated * @sess: session of connection * */ unsigned int ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob, struct ksmbd_conn *conn) { struct target_info *tinfo; wchar_t *name; __u8 *target_name; unsigned int flags, blob_off, blob_len, type, target_info_len = 0; int len, uni_len, conv_len; int cflags = conn->ntlmssp.client_flags; memcpy(chgblob->Signature, NTLMSSP_SIGNATURE, 8); chgblob->MessageType = NtLmChallenge; flags = NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | 
NTLMSSP_TARGET_TYPE_SERVER | NTLMSSP_NEGOTIATE_TARGET_INFO; if (cflags & NTLMSSP_NEGOTIATE_SIGN) { flags |= NTLMSSP_NEGOTIATE_SIGN; flags |= cflags & (NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_56); } if (cflags & NTLMSSP_NEGOTIATE_SEAL && smb3_encryption_negotiated(conn)) flags |= NTLMSSP_NEGOTIATE_SEAL; if (cflags & NTLMSSP_NEGOTIATE_ALWAYS_SIGN) flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; if (cflags & NTLMSSP_REQUEST_TARGET) flags |= NTLMSSP_REQUEST_TARGET; if (conn->use_spnego && (cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; chgblob->NegotiateFlags = cpu_to_le32(flags); len = strlen(ksmbd_netbios_name()); name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL); if (!name) return -ENOMEM; conv_len = smb_strtoUTF16((__le16 *)name, ksmbd_netbios_name(), len, conn->local_nls); if (conv_len < 0 || conv_len > len) { kfree(name); return -EINVAL; } uni_len = UNICODE_LEN(conv_len); blob_off = sizeof(struct challenge_message); blob_len = blob_off + uni_len; chgblob->TargetName.Length = cpu_to_le16(uni_len); chgblob->TargetName.MaximumLength = cpu_to_le16(uni_len); chgblob->TargetName.BufferOffset = cpu_to_le32(blob_off); /* Initialize random conn challenge */ get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64)); memcpy(chgblob->Challenge, conn->ntlmssp.cryptkey, CIFS_CRYPTO_KEY_SIZE); /* Add Target Information to security buffer */ chgblob->TargetInfoArray.BufferOffset = cpu_to_le32(blob_len); target_name = (__u8 *)chgblob + blob_off; memcpy(target_name, name, uni_len); tinfo = (struct target_info *)(target_name + uni_len); chgblob->TargetInfoArray.Length = 0; /* Add target info list for NetBIOS/DNS settings */ for (type = NTLMSSP_AV_NB_COMPUTER_NAME; type <= NTLMSSP_AV_DNS_DOMAIN_NAME; type++) { tinfo->Type = cpu_to_le16(type); tinfo->Length = cpu_to_le16(uni_len); memcpy(tinfo->Content, name, uni_len); tinfo = (struct target_info *)((char *)tinfo + 4 + uni_len); target_info_len += 4 + uni_len; } /* Add terminator subblock */ tinfo->Type = 0; tinfo->Length = 0; target_info_len += 4; chgblob->TargetInfoArray.Length = cpu_to_le16(target_info_len); chgblob->TargetInfoArray.MaximumLength = cpu_to_le16(target_info_len); blob_len += target_info_len; kfree(name); ksmbd_debug(AUTH, "NTLMSSP SecurityBufferLength %d\n", blob_len); return blob_len; } #ifdef CONFIG_SMB_SERVER_KERBEROS5 int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob, int in_len, char *out_blob, int *out_len) { struct ksmbd_spnego_authen_response *resp; struct ksmbd_user *user = NULL; int retval; resp = ksmbd_ipc_spnego_authen_request(in_blob, in_len); if (!resp) { ksmbd_debug(AUTH, "SPNEGO_AUTHEN_REQUEST failure\n"); return -EINVAL; } if (!(resp->login_response.status & KSMBD_USER_FLAG_OK)) { ksmbd_debug(AUTH, "krb5 authentication failure\n"); retval = -EPERM; goto out; } if (*out_len <= resp->spnego_blob_len) { ksmbd_debug(AUTH, "buf len %d, but blob len %d\n", *out_len, resp->spnego_blob_len); retval = -EINVAL; goto out; } if (resp->session_key_len > sizeof(sess->sess_key)) { ksmbd_debug(AUTH, "session key is too long\n"); retval = -EINVAL; goto out; } user = ksmbd_alloc_user(&resp->login_response); if (!user) { ksmbd_debug(AUTH, "login failure\n"); retval = -ENOMEM; goto out; } sess->user = user; memcpy(sess->sess_key, resp->payload, resp->session_key_len); memcpy(out_blob, resp->payload + resp->session_key_len, resp->spnego_blob_len); *out_len = resp->spnego_blob_len; retval = 0; out: kvfree(resp); return 
retval; } #else int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob, int in_len, char *out_blob, int *out_len) { return -EOPNOTSUPP; } #endif /** * ksmbd_sign_smb2_pdu() - function to generate packet signing * @conn: connection * @key: signing key * @iov: buffer iov array * @n_vec: number of iovecs * @sig: signature value generated for client request packet * */ int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov, int n_vec, char *sig) { struct ksmbd_crypto_ctx *ctx; int rc, i; ctx = ksmbd_crypto_ctx_find_hmacsha256(); if (!ctx) { ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n"); return -ENOMEM; } rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx), key, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) goto out; rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx)); if (rc) { ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc); goto out; } for (i = 0; i < n_vec; i++) { rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), iov[i].iov_base, iov[i].iov_len); if (rc) { ksmbd_debug(AUTH, "hmacsha256 update error %d\n", rc); goto out; } } rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), sig); if (rc) ksmbd_debug(AUTH, "hmacsha256 generation error %d\n", rc); out: ksmbd_release_crypto_ctx(ctx); return rc; } /** * ksmbd_sign_smb3_pdu() - function to generate packet signing * @conn: connection * @key: signing key * @iov: buffer iov array * @n_vec: number of iovecs * @sig: signature value generated for client request packet * */ int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov, int n_vec, char *sig) { struct ksmbd_crypto_ctx *ctx; int rc, i; ctx = ksmbd_crypto_ctx_find_cmacaes(); if (!ctx) { ksmbd_debug(AUTH, "could not crypto alloc cmac\n"); return -ENOMEM; } rc = crypto_shash_setkey(CRYPTO_CMACAES_TFM(ctx), key, SMB2_CMACAES_SIZE); if (rc) goto out; rc = crypto_shash_init(CRYPTO_CMACAES(ctx)); if (rc) { ksmbd_debug(AUTH, "cmaces init error %d\n", rc); goto out; } for (i = 0; i < n_vec; i++) { rc = crypto_shash_update(CRYPTO_CMACAES(ctx), iov[i].iov_base, iov[i].iov_len); if (rc) { ksmbd_debug(AUTH, "cmaces update error %d\n", rc); goto out; } } rc = crypto_shash_final(CRYPTO_CMACAES(ctx), sig); if (rc) ksmbd_debug(AUTH, "cmaces generation error %d\n", rc); out: ksmbd_release_crypto_ctx(ctx); return rc; } struct derivation { struct kvec label; struct kvec context; bool binding; }; static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess, struct kvec label, struct kvec context, __u8 *key, unsigned int key_size) { unsigned char zero = 0x0; __u8 i[4] = {0, 0, 0, 1}; __u8 L128[4] = {0, 0, 0, 128}; __u8 L256[4] = {0, 0, 1, 0}; int rc; unsigned char prfhash[SMB2_HMACSHA256_SIZE]; unsigned char *hashptr = prfhash; struct ksmbd_crypto_ctx *ctx; memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE); memset(key, 0x0, key_size); ctx = ksmbd_crypto_ctx_find_hmacsha256(); if (!ctx) { ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n"); return -ENOMEM; } rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx), sess->sess_key, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) goto smb3signkey_ret; rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx)); if (rc) { ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc); goto smb3signkey_ret; } rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), i, 4); if (rc) { ksmbd_debug(AUTH, "could not update with n\n"); goto smb3signkey_ret; } rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), label.iov_base, label.iov_len); if (rc) { ksmbd_debug(AUTH, "could not update with label\n"); goto smb3signkey_ret; } rc = 
crypto_shash_update(CRYPTO_HMACSHA256(ctx), &zero, 1); if (rc) { ksmbd_debug(AUTH, "could not update with zero\n"); goto smb3signkey_ret; } rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), context.iov_base, context.iov_len); if (rc) { ksmbd_debug(AUTH, "could not update with context\n"); goto smb3signkey_ret; } if (key_size == SMB3_ENC_DEC_KEY_SIZE && (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4); else rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4); if (rc) { ksmbd_debug(AUTH, "could not update with L\n"); goto smb3signkey_ret; } rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), hashptr); if (rc) { ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n", rc); goto smb3signkey_ret; } memcpy(key, hashptr, key_size); smb3signkey_ret: ksmbd_release_crypto_ctx(ctx); return rc; } static int generate_smb3signingkey(struct ksmbd_session *sess, struct ksmbd_conn *conn, const struct derivation *signing) { int rc; struct channel *chann; char *key; chann = lookup_chann_list(sess, conn); if (!chann) return 0; if (conn->dialect >= SMB30_PROT_ID && signing->binding) key = chann->smb3signingkey; else key = sess->smb3signingkey; rc = generate_key(conn, sess, signing->label, signing->context, key, SMB3_SIGN_KEY_SIZE); if (rc) return rc; if (!(conn->dialect >= SMB30_PROT_ID && signing->binding)) memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE); ksmbd_debug(AUTH, "dumping generated AES signing keys\n"); ksmbd_debug(AUTH, "Session Id %llu\n", sess->id); ksmbd_debug(AUTH, "Session Key %*ph\n", SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key); ksmbd_debug(AUTH, "Signing Key %*ph\n", SMB3_SIGN_KEY_SIZE, key); return 0; } int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess, struct ksmbd_conn *conn) { struct derivation d; d.label.iov_base = "SMB2AESCMAC"; d.label.iov_len = 12; d.context.iov_base = "SmbSign"; d.context.iov_len = 8; d.binding = conn->binding; return generate_smb3signingkey(sess, conn, &d); } int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess, struct ksmbd_conn *conn) { struct derivation d; d.label.iov_base = "SMBSigningKey"; d.label.iov_len = 14; if (conn->binding) { struct preauth_session *preauth_sess; preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id); if (!preauth_sess) return -ENOENT; d.context.iov_base = preauth_sess->Preauth_HashValue; } else { d.context.iov_base = sess->Preauth_HashValue; } d.context.iov_len = 64; d.binding = conn->binding; return generate_smb3signingkey(sess, conn, &d); } struct derivation_twin { struct derivation encryption; struct derivation decryption; }; static int generate_smb3encryptionkey(struct ksmbd_conn *conn, struct ksmbd_session *sess, const struct derivation_twin *ptwin) { int rc; rc = generate_key(conn, sess, ptwin->encryption.label, ptwin->encryption.context, sess->smb3encryptionkey, SMB3_ENC_DEC_KEY_SIZE); if (rc) return rc; rc = generate_key(conn, sess, ptwin->decryption.label, ptwin->decryption.context, sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE); if (rc) return rc; ksmbd_debug(AUTH, "dumping generated AES encryption keys\n"); ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type); ksmbd_debug(AUTH, "Session Id %llu\n", sess->id); ksmbd_debug(AUTH, "Session Key %*ph\n", SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key); if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) { ksmbd_debug(AUTH, "ServerIn Key %*ph\n", SMB3_GCM256_CRYPTKEY_SIZE, 
sess->smb3encryptionkey); ksmbd_debug(AUTH, "ServerOut Key %*ph\n", SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey); } else { ksmbd_debug(AUTH, "ServerIn Key %*ph\n", SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey); ksmbd_debug(AUTH, "ServerOut Key %*ph\n", SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey); } return 0; } int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn, struct ksmbd_session *sess) { struct derivation_twin twin; struct derivation *d; d = &twin.encryption; d->label.iov_base = "SMB2AESCCM"; d->label.iov_len = 11; d->context.iov_base = "ServerOut"; d->context.iov_len = 10; d = &twin.decryption; d->label.iov_base = "SMB2AESCCM"; d->label.iov_len = 11; d->context.iov_base = "ServerIn "; d->context.iov_len = 10; return generate_smb3encryptionkey(conn, sess, &twin); } int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn, struct ksmbd_session *sess) { struct derivation_twin twin; struct derivation *d; d = &twin.encryption; d->label.iov_base = "SMBS2CCipherKey"; d->label.iov_len = 16; d->context.iov_base = sess->Preauth_HashValue; d->context.iov_len = 64; d = &twin.decryption; d->label.iov_base = "SMBC2SCipherKey"; d->label.iov_len = 16; d->context.iov_base = sess->Preauth_HashValue; d->context.iov_len = 64; return generate_smb3encryptionkey(conn, sess, &twin); } int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf, __u8 *pi_hash) { int rc; struct smb2_hdr *rcv_hdr = smb2_get_msg(buf); char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId; int msg_size = get_rfc1002_len(buf); struct ksmbd_crypto_ctx *ctx = NULL; if (conn->preauth_info->Preauth_HashId != SMB2_PREAUTH_INTEGRITY_SHA512) return -EINVAL; ctx = ksmbd_crypto_ctx_find_sha512(); if (!ctx) { ksmbd_debug(AUTH, "could not alloc sha512\n"); return -ENOMEM; } rc = crypto_shash_init(CRYPTO_SHA512(ctx)); if (rc) { ksmbd_debug(AUTH, "could not init shashn"); goto out; } rc = crypto_shash_update(CRYPTO_SHA512(ctx), pi_hash, 64); if (rc) { ksmbd_debug(AUTH, "could not update with n\n"); goto out; } rc = crypto_shash_update(CRYPTO_SHA512(ctx), all_bytes_msg, msg_size); if (rc) { ksmbd_debug(AUTH, "could not update with n\n"); goto out; } rc = crypto_shash_final(CRYPTO_SHA512(ctx), pi_hash); if (rc) { ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc); goto out; } out: ksmbd_release_crypto_ctx(ctx); return rc; } int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len, __u8 *pi_hash) { int rc; struct ksmbd_crypto_ctx *ctx = NULL; ctx = ksmbd_crypto_ctx_find_sha256(); if (!ctx) { ksmbd_debug(AUTH, "could not alloc sha256\n"); return -ENOMEM; } rc = crypto_shash_init(CRYPTO_SHA256(ctx)); if (rc) { ksmbd_debug(AUTH, "could not init shashn"); goto out; } rc = crypto_shash_update(CRYPTO_SHA256(ctx), sd_buf, len); if (rc) { ksmbd_debug(AUTH, "could not update with n\n"); goto out; } rc = crypto_shash_final(CRYPTO_SHA256(ctx), pi_hash); if (rc) { ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc); goto out; } out: ksmbd_release_crypto_ctx(ctx); return rc; } static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id, int enc, u8 *key) { struct ksmbd_session *sess; u8 *ses_enc_key; if (enc) sess = work->sess; else sess = ksmbd_session_lookup_all(work->conn, ses_id); if (!sess) return -EINVAL; ses_enc_key = enc ? 
sess->smb3encryptionkey : sess->smb3decryptionkey; memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); return 0; } static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { void *addr; if (is_vmalloc_addr(buf)) addr = vmalloc_to_page(buf); else addr = virt_to_page(buf); sg_set_page(sg, addr, buflen, offset_in_page(buf)); } static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec, u8 *sign) { struct scatterlist *sg; unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20; int i, *nr_entries, total_entries = 0, sg_idx = 0; if (!nvec) return NULL; nr_entries = kcalloc(nvec, sizeof(int), GFP_KERNEL); if (!nr_entries) return NULL; for (i = 0; i < nvec - 1; i++) { unsigned long kaddr = (unsigned long)iov[i + 1].iov_base; if (is_vmalloc_addr(iov[i + 1].iov_base)) { nr_entries[i] = ((kaddr + iov[i + 1].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT) - (kaddr >> PAGE_SHIFT); } else { nr_entries[i]++; } total_entries += nr_entries[i]; } /* Add two entries for transform header and signature */ total_entries += 2; sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL); if (!sg) { kfree(nr_entries); return NULL; } sg_init_table(sg, total_entries); smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len); for (i = 0; i < nvec - 1; i++) { void *data = iov[i + 1].iov_base; int len = iov[i + 1].iov_len; if (is_vmalloc_addr(data)) { int j, offset = offset_in_page(data); for (j = 0; j < nr_entries[i]; j++) { unsigned int bytes = PAGE_SIZE - offset; if (!len) break; if (bytes > len) bytes = len; sg_set_page(&sg[sg_idx++], vmalloc_to_page(data), bytes, offset_in_page(data)); data += bytes; len -= bytes; offset = 0; } } else { sg_set_page(&sg[sg_idx++], virt_to_page(data), len, offset_in_page(data)); } } smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE); kfree(nr_entries); return sg; } int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov, unsigned int nvec, int enc) { struct ksmbd_conn *conn = work->conn; struct smb2_transform_hdr *tr_hdr = smb2_get_msg(iov[0].iov_base); unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20; int rc; struct scatterlist *sg; u8 sign[SMB2_SIGNATURE_SIZE] = {}; u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; char *iv; unsigned int iv_len; struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); struct ksmbd_crypto_ctx *ctx; rc = ksmbd_get_encryption_key(work, le64_to_cpu(tr_hdr->SessionId), enc, key); if (rc) { pr_err("Could not get %scryption key\n", enc ? 
"en" : "de"); return rc; } if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) ctx = ksmbd_crypto_ctx_find_gcm(); else ctx = ksmbd_crypto_ctx_find_ccm(); if (!ctx) { pr_err("crypto alloc failed\n"); return -ENOMEM; } if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) tfm = CRYPTO_GCM(ctx); else tfm = CRYPTO_CCM(ctx); if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); else rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE); if (rc) { pr_err("Failed to set aead key %d\n", rc); goto free_ctx; } rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE); if (rc) { pr_err("Failed to set authsize %d\n", rc); goto free_ctx; } req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { rc = -ENOMEM; goto free_ctx; } if (!enc) { memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); crypt_len += SMB2_SIGNATURE_SIZE; } sg = ksmbd_init_sg(iov, nvec, sign); if (!sg) { pr_err("Failed to init sg\n"); rc = -ENOMEM; goto free_req; } iv_len = crypto_aead_ivsize(tfm); iv = kzalloc(iv_len, GFP_KERNEL); if (!iv) { rc = -ENOMEM; goto free_sg; } if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM || conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) { memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE); } else { iv[0] = 3; memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE); } aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); if (enc) rc = crypto_aead_encrypt(req); else rc = crypto_aead_decrypt(req); if (rc) goto free_iv; if (enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); free_iv: kfree(iv); free_sg: kfree(sg); free_req: kfree(req); free_ctx: ksmbd_release_crypto_ctx(ctx); return rc; }
linux-master
fs/smb/server/auth.c
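/*
 * Illustrative userspace sketches (not ksmbd code) of the two crypto
 * constructions at the heart of fs/smb/server/auth.c above. Both link
 * against OpenSSL (-lcrypto); the helper names ntlmv2_verify() and
 * smb3_kdf() are ours, not kernel APIs.
 *
 * 1) The NTLMv2 proof checked by ksmbd_auth_ntlmv2():
 *      ntlmv2_hash = HMAC-MD5(NT-hash,
 *                             UTF16LE(upper(user)) || UTF16LE(domain))
 *      proof       = HMAC-MD5(ntlmv2_hash,
 *                             server_challenge(8) || client_blob)
 *    The session is accepted when proof equals the 16-byte value in the
 *    client's NtChallengeResponse.
 *
 * 2) The one-round SP800-108 counter-mode KDF run by generate_key():
 *      key = truncate(HMAC-SHA256(session_key,
 *                     i(=1) || label || 0x00 || context || L), key_len)
 *    with L = key length in bits (128, or 256 for the AES-256 ciphers);
 *    label_len includes the terminating NUL, matching the iov_len
 *    values in the derivation tables above.
 */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdint.h>
#include <string.h>

/* Returns 0 when the client's 16-byte NTLMv2 proof is valid. */
static int ntlmv2_verify(const uint8_t ntlmv2_hash[16],
			 const uint8_t challenge[8],
			 const uint8_t *blob, size_t blob_len,
			 const uint8_t client_proof[16])
{
	uint8_t buf[8 + 512];		/* challenge || blob */
	uint8_t proof[16];
	unsigned int len = sizeof(proof);

	if (blob_len > sizeof(buf) - 8)
		return -1;
	memcpy(buf, challenge, 8);
	memcpy(buf + 8, blob, blob_len);
	if (!HMAC(EVP_md5(), ntlmv2_hash, 16, buf, 8 + blob_len, proof, &len))
		return -1;
	return memcmp(proof, client_proof, 16) ? -1 : 0;
}

/* Derive a signing/encryption key from the 16-byte session key. */
static int smb3_kdf(const uint8_t sess_key[16],
		    const void *label, size_t label_len,	/* incl. NUL */
		    const void *context, size_t context_len,
		    uint8_t *key, size_t key_len)		/* 16 or 32 */
{
	uint8_t fixed[4 + 64 + 1 + 64 + 4];
	uint8_t prf[EVP_MAX_MD_SIZE];
	unsigned int prf_len = 0;
	uint32_t L = (uint32_t)key_len * 8;
	size_t off = 0;

	if (label_len > 64 || context_len > 64 || key_len > 32)
		return -1;
	fixed[off++] = 0; fixed[off++] = 0;	/* i = 1, big-endian */
	fixed[off++] = 0; fixed[off++] = 1;
	memcpy(fixed + off, label, label_len);
	off += label_len;
	fixed[off++] = 0;			/* separator */
	memcpy(fixed + off, context, context_len);
	off += context_len;
	fixed[off++] = (uint8_t)(L >> 24);	/* L, big-endian, in bits */
	fixed[off++] = (uint8_t)(L >> 16);
	fixed[off++] = (uint8_t)(L >> 8);
	fixed[off++] = (uint8_t)L;
	if (!HMAC(EVP_sha256(), sess_key, 16, fixed, off, prf, &prf_len))
		return -1;
	memcpy(key, prf, key_len);	/* truncate the 32-byte PRF output */
	return 0;
}

/*
 * Usage mirroring ksmbd_gen_smb30_signingkey() above:
 *   smb3_kdf(sess_key, "SMB2AESCMAC", 12, "SmbSign", 8, key, 16);
 */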
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include "glob.h" #include "nterr.h" #include "smb_common.h" #include "smbstatus.h" #include "mgmt/user_session.h" #include "connection.h" static int check_smb2_hdr(struct smb2_hdr *hdr) { /* * Make sure that this really is an SMB, that it is a response. */ if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) return 1; return 0; } /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order */ static const __le16 smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ cpu_to_le16(36), /* SMB2_SESSION_SETUP */ cpu_to_le16(25), /* SMB2_LOGOFF */ cpu_to_le16(4), /* SMB2_TREE_CONNECT */ cpu_to_le16(9), /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4), /* SMB2_CREATE */ cpu_to_le16(57), /* SMB2_CLOSE */ cpu_to_le16(24), /* SMB2_FLUSH */ cpu_to_le16(24), /* SMB2_READ */ cpu_to_le16(49), /* SMB2_WRITE */ cpu_to_le16(49), /* SMB2_LOCK */ cpu_to_le16(48), /* SMB2_IOCTL */ cpu_to_le16(57), /* SMB2_CANCEL */ cpu_to_le16(4), /* SMB2_ECHO */ cpu_to_le16(4), /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(33), /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(32), /* SMB2_QUERY_INFO */ cpu_to_le16(41), /* SMB2_SET_INFO */ cpu_to_le16(33), /* use 44 for lease break */ /* SMB2_OPLOCK_BREAK */ cpu_to_le16(36) }; /* * The size of the variable area depends on the offset and length fields * located in different fields for various SMB2 requests. SMB2 requests * with no variable length info, show an offset of zero for the offset field. */ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ true, /* SMB2_SESSION_SETUP */ true, /* SMB2_LOGOFF */ false, /* SMB2_TREE_CONNECT */ true, /* SMB2_TREE_DISCONNECT */ false, /* SMB2_CREATE */ true, /* SMB2_CLOSE */ false, /* SMB2_FLUSH */ false, /* SMB2_READ */ true, /* SMB2_WRITE */ true, /* SMB2_LOCK */ true, /* SMB2_IOCTL */ true, /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */ /* SMB2_ECHO */ false, /* SMB2_QUERY_DIRECTORY */ true, /* SMB2_CHANGE_NOTIFY */ false, /* SMB2_QUERY_INFO */ true, /* SMB2_SET_INFO */ true, /* SMB2_OPLOCK_BREAK */ false }; /* * Set length of the data area and the offset to arguments. * if they are invalid, return error. */ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, struct smb2_hdr *hdr) { int ret = 0; *off = 0; *len = 0; /* * Following commands have data areas so we have to get the location * of the data buffer offset and data buffer length for the particular * command. 
*/ switch (hdr->Command) { case SMB2_SESSION_SETUP: *off = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferOffset); *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength); break; case SMB2_TREE_CONNECT: *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset); *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength); break; case SMB2_CREATE: { if (((struct smb2_create_req *)hdr)->CreateContextsLength) { *off = le32_to_cpu(((struct smb2_create_req *) hdr)->CreateContextsOffset); *len = le32_to_cpu(((struct smb2_create_req *) hdr)->CreateContextsLength); break; } *off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset); *len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength); break; } case SMB2_QUERY_INFO: *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset); *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength); break; case SMB2_SET_INFO: *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset); *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength); break; case SMB2_READ: *off = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoOffset); *len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength); break; case SMB2_WRITE: if (((struct smb2_write_req *)hdr)->DataOffset || ((struct smb2_write_req *)hdr)->Length) { *off = max_t(unsigned int, le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset), offsetof(struct smb2_write_req, Buffer)); *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length); break; } *off = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoOffset); *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength); break; case SMB2_QUERY_DIRECTORY: *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset); *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength); break; case SMB2_LOCK: { unsigned short lock_count; lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount); if (lock_count > 0) { *off = offsetof(struct smb2_lock_req, locks); *len = sizeof(struct smb2_lock_element) * lock_count; } break; } case SMB2_IOCTL: *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset); *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount); break; default: ksmbd_debug(SMB, "no length check for command\n"); break; } if (*off > 4096) { ksmbd_debug(SMB, "offset %d too large\n", *off); ret = -EINVAL; } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) { ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n", MAX_STREAM_PROT_LEN, (u64)*off + *len); ret = -EINVAL; } return ret; } /* * Calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message. */ static int smb2_calc_size(void *buf, unsigned int *len) { struct smb2_pdu *pdu = (struct smb2_pdu *)buf; struct smb2_hdr *hdr = &pdu->hdr; unsigned int offset; /* the offset from the beginning of SMB to data area */ unsigned int data_length; /* the length of the variable length data area */ int ret; /* Structure Size has already been checked to make sure it is 64 */ *len = le16_to_cpu(hdr->StructureSize); /* * StructureSize2, ie length of fixed parameter area has already * been checked to make sure it is the correct length. 
*/ *len += le16_to_cpu(pdu->StructureSize2); /* * StructureSize2 of smb2_lock pdu is set to 48, indicating * the size of smb2 lock request with single smb2_lock_element * regardless of number of locks. Subtract single * smb2_lock_element for correct buffer size check. */ if (hdr->Command == SMB2_LOCK) *len -= sizeof(struct smb2_lock_element); if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false) goto calc_size_exit; ret = smb2_get_data_area_len(&offset, &data_length, hdr); if (ret) return ret; ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length, offset); if (data_length > 0) { /* * Check to make sure that data area begins after fixed area, * Note that last byte of the fixed area is part of data area * for some commands, typically those with odd StructureSize, * so we must add one to the calculation. */ if (offset + 1 < *len) { ksmbd_debug(SMB, "data area offset %d overlaps SMB2 header %u\n", offset + 1, *len); return -EINVAL; } *len = offset + data_length; } calc_size_exit: ksmbd_debug(SMB, "SMB2 len %u\n", *len); return 0; } static inline int smb2_query_info_req_len(struct smb2_query_info_req *h) { return le32_to_cpu(h->InputBufferLength) + le32_to_cpu(h->OutputBufferLength); } static inline int smb2_set_info_req_len(struct smb2_set_info_req *h) { return le32_to_cpu(h->BufferLength); } static inline int smb2_read_req_len(struct smb2_read_req *h) { return le32_to_cpu(h->Length); } static inline int smb2_write_req_len(struct smb2_write_req *h) { return le32_to_cpu(h->Length); } static inline int smb2_query_dir_req_len(struct smb2_query_directory_req *h) { return le32_to_cpu(h->OutputBufferLength); } static inline int smb2_ioctl_req_len(struct smb2_ioctl_req *h) { return le32_to_cpu(h->InputCount) + le32_to_cpu(h->OutputCount); } static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h) { return le32_to_cpu(h->MaxInputResponse) + le32_to_cpu(h->MaxOutputResponse); } static int smb2_validate_credit_charge(struct ksmbd_conn *conn, struct smb2_hdr *hdr) { unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len; unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge); void *__hdr = hdr; int ret = 0; switch (hdr->Command) { case SMB2_QUERY_INFO: req_len = smb2_query_info_req_len(__hdr); break; case SMB2_SET_INFO: req_len = smb2_set_info_req_len(__hdr); break; case SMB2_READ: req_len = smb2_read_req_len(__hdr); break; case SMB2_WRITE: req_len = smb2_write_req_len(__hdr); break; case SMB2_QUERY_DIRECTORY: req_len = smb2_query_dir_req_len(__hdr); break; case SMB2_IOCTL: req_len = smb2_ioctl_req_len(__hdr); expect_resp_len = smb2_ioctl_resp_len(__hdr); break; case SMB2_CANCEL: return 0; default: req_len = 1; break; } credit_charge = max_t(unsigned short, credit_charge, 1); max_len = max_t(unsigned int, req_len, expect_resp_len); calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE); if (credit_charge < calc_credit_num) { ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n", credit_charge, calc_credit_num); return 1; } else if (credit_charge > conn->vals->max_credits) { ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge); return 1; } spin_lock(&conn->credits_lock); if (credit_charge > conn->total_credits) { ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n", credit_charge, conn->total_credits); ret = 1; } if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) { ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n", 
credit_charge, conn->outstanding_credits); ret = 1; } else conn->outstanding_credits += credit_charge; spin_unlock(&conn->credits_lock); return ret; } int ksmbd_smb2_check_message(struct ksmbd_work *work) { struct smb2_pdu *pdu = ksmbd_req_buf_next(work); struct smb2_hdr *hdr = &pdu->hdr; int command; __u32 clc_len; /* calculated length */ __u32 len = get_rfc1002_len(work->request_buf); __u32 req_struct_size, next_cmd = le32_to_cpu(hdr->NextCommand); if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) { pr_err("next command(%u) offset exceeds smb msg size\n", next_cmd); return 1; } if (next_cmd > 0) len = next_cmd; else if (work->next_smb2_rcv_hdr_off) len -= work->next_smb2_rcv_hdr_off; if (check_smb2_hdr(hdr)) return 1; if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { ksmbd_debug(SMB, "Illegal structure size %u\n", le16_to_cpu(hdr->StructureSize)); return 1; } command = le16_to_cpu(hdr->Command); if (command >= NUMBER_OF_SMB2_COMMANDS) { ksmbd_debug(SMB, "Illegal SMB2 command %d\n", command); return 1; } if (smb2_req_struct_sizes[command] != pdu->StructureSize2) { if (!(command == SMB2_OPLOCK_BREAK_HE && (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 || le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) { /* special case for SMB2.1 lease break message */ ksmbd_debug(SMB, "Illegal request size %u for command %d\n", le16_to_cpu(pdu->StructureSize2), command); return 1; } } req_struct_size = le16_to_cpu(pdu->StructureSize2) + __SMB2_HEADER_STRUCTURE_SIZE; if (command == SMB2_LOCK_HE) req_struct_size -= sizeof(struct smb2_lock_element); if (req_struct_size > len + 1) return 1; if (smb2_calc_size(hdr, &clc_len)) return 1; if (len != clc_len) { /* client can return one byte more due to implied bcc[0] */ if (clc_len == len + 1) goto validate_credit; /* * Some windows servers (win2016) will pad also the final * PDU in a compound to 8 bytes. */ if (ALIGN(clc_len, 8) == len) goto validate_credit; /* * SMB2 NEGOTIATE request will be validated when message * handling proceeds. */ if (command == SMB2_NEGOTIATE_HE) goto validate_credit; /* * Allow a message that padded to 8byte boundary. * Linux 4.19.217 with smb 3.0.2 are sometimes * sending messages where the cls_len is exactly * 8 bytes less than len. */ if (clc_len < len && (len - clc_len) <= 8) goto validate_credit; pr_err_ratelimited( "cli req too short, len %d not %d. cmd:%d mid:%llu\n", len, clc_len, command, le64_to_cpu(hdr->MessageId)); return 1; } validate_credit: if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) && smb2_validate_credit_charge(work->conn, hdr)) { work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); return 1; } return 0; } int smb2_negotiate_request(struct ksmbd_work *work) { return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE); }
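/*
 * Worked example (standalone userspace C, not kernel code) of the
 * credit-charge rule enforced by smb2_validate_credit_charge() above:
 * a request must carry at least
 *   ceil(max(request payload, expected response) / SMB2_MAX_BUFFER_SIZE)
 * credits, assuming SMB2_MAX_BUFFER_SIZE = 65536 (64 KiB), the value
 * ksmbd uses. The client's declared CreditCharge is read as
 * max(charge, 1) before the comparison.
 */
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536U

static unsigned int required_credits(unsigned int req_len,
				     unsigned int expect_resp_len)
{
	unsigned int max_len =
		req_len > expect_resp_len ? req_len : expect_resp_len;

	/* DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE) */
	return (max_len + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
}

int main(void)
{
	printf("%u\n", required_credits(1024 * 1024, 0));	/* 16: a 1 MiB read */
	printf("%u\n", required_credits(8192, 0));		/* 1:  an 8 KiB read */
	return 0;
}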
linux-master
fs/smb/server/smb2misc.c
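/*
 * Minimal sketch (hypothetical helper, not a ksmbd API) of the total
 * length rule that smb2misc.c's smb2_calc_size() applies: expected PDU
 * length = 64-byte header StructureSize + the command's fixed
 * StructureSize2, extended to the end of the variable data area when
 * one is present. Because the last byte of the fixed area can double
 * as the first byte of the data area (commands with odd
 * StructureSize2), a data offset is only rejected when it begins more
 * than one byte inside the fixed region.
 */
static int smb2_expected_len(unsigned int structure_size2,
			     unsigned int data_off, unsigned int data_len,
			     unsigned int *out_len)
{
	unsigned int len = 64 + structure_size2;	/* header + fixed area */

	if (data_len) {
		if (data_off + 1 < len)
			return -1;	/* data area overlaps the fixed area */
		len = data_off + data_len;
	}
	*out_len = len;
	return 0;
}

/*
 * Example: SMB2_WRITE has StructureSize2 = 49, so a write whose Buffer
 * starts at offset 112 with Length 4096 yields an expected length of
 * 4208, which ksmbd_smb2_check_message() then compares, modulo the
 * padding allowances above, against the RFC1002 length of the PDU.
 */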
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/moduleparam.h> #include "glob.h" #include "oplock.h" #include "smb_common.h" #include "smbstatus.h" #include "connection.h" #include "mgmt/user_session.h" #include "mgmt/share_config.h" #include "mgmt/tree_connect.h" static LIST_HEAD(lease_table_list); static DEFINE_RWLOCK(lease_list_lock); /** * alloc_opinfo() - allocate a new opinfo object for oplock info * @work: smb work * @id: fid of open file * @Tid: tree id of connection * * Return: allocated opinfo object on success, otherwise NULL */ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work, u64 id, __u16 Tid) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; struct oplock_info *opinfo; opinfo = kzalloc(sizeof(struct oplock_info), GFP_KERNEL); if (!opinfo) return NULL; opinfo->sess = sess; opinfo->conn = conn; opinfo->level = SMB2_OPLOCK_LEVEL_NONE; opinfo->op_state = OPLOCK_STATE_NONE; opinfo->pending_break = 0; opinfo->fid = id; opinfo->Tid = Tid; INIT_LIST_HEAD(&opinfo->op_entry); INIT_LIST_HEAD(&opinfo->interim_list); init_waitqueue_head(&opinfo->oplock_q); init_waitqueue_head(&opinfo->oplock_brk); atomic_set(&opinfo->refcount, 1); atomic_set(&opinfo->breaking_cnt, 0); return opinfo; } static void lease_add_list(struct oplock_info *opinfo) { struct lease_table *lb = opinfo->o_lease->l_lb; spin_lock(&lb->lb_lock); list_add_rcu(&opinfo->lease_entry, &lb->lease_list); spin_unlock(&lb->lb_lock); } static void lease_del_list(struct oplock_info *opinfo) { struct lease_table *lb = opinfo->o_lease->l_lb; if (!lb) return; spin_lock(&lb->lb_lock); if (list_empty(&opinfo->lease_entry)) { spin_unlock(&lb->lb_lock); return; } list_del_init(&opinfo->lease_entry); opinfo->o_lease->l_lb = NULL; spin_unlock(&lb->lb_lock); } static void lb_add(struct lease_table *lb) { write_lock(&lease_list_lock); list_add(&lb->l_entry, &lease_table_list); write_unlock(&lease_list_lock); } static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx) { struct lease *lease; lease = kmalloc(sizeof(struct lease), GFP_KERNEL); if (!lease) return -ENOMEM; memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE); lease->state = lctx->req_state; lease->new_state = 0; lease->flags = lctx->flags; lease->duration = lctx->duration; memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE); lease->version = lctx->version; lease->epoch = 0; INIT_LIST_HEAD(&opinfo->lease_entry); opinfo->o_lease = lease; return 0; } static void free_lease(struct oplock_info *opinfo) { struct lease *lease; lease = opinfo->o_lease; kfree(lease); } static void free_opinfo(struct oplock_info *opinfo) { if (opinfo->is_lease) free_lease(opinfo); kfree(opinfo); } static inline void opinfo_free_rcu(struct rcu_head *rcu_head) { struct oplock_info *opinfo; opinfo = container_of(rcu_head, struct oplock_info, rcu_head); free_opinfo(opinfo); } struct oplock_info *opinfo_get(struct ksmbd_file *fp) { struct oplock_info *opinfo; rcu_read_lock(); opinfo = rcu_dereference(fp->f_opinfo); if (opinfo && !atomic_inc_not_zero(&opinfo->refcount)) opinfo = NULL; rcu_read_unlock(); return opinfo; } static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) { struct oplock_info *opinfo; if (list_empty(&ci->m_op_list)) return NULL; rcu_read_lock(); opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info, op_entry); if (opinfo) { if 
(!atomic_inc_not_zero(&opinfo->refcount)) opinfo = NULL; else { atomic_inc(&opinfo->conn->r_count); if (ksmbd_conn_releasing(opinfo->conn)) { atomic_dec(&opinfo->conn->r_count); atomic_dec(&opinfo->refcount); opinfo = NULL; } } } rcu_read_unlock(); return opinfo; } static void opinfo_conn_put(struct oplock_info *opinfo) { struct ksmbd_conn *conn; if (!opinfo) return; conn = opinfo->conn; /* * Checking waitqueue to dropping pending requests on * disconnection. waitqueue_active is safe because it * uses atomic operation for condition. */ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) wake_up(&conn->r_count_q); opinfo_put(opinfo); } void opinfo_put(struct oplock_info *opinfo) { if (!atomic_dec_and_test(&opinfo->refcount)) return; call_rcu(&opinfo->rcu_head, opinfo_free_rcu); } static void opinfo_add(struct oplock_info *opinfo) { struct ksmbd_inode *ci = opinfo->o_fp->f_ci; write_lock(&ci->m_lock); list_add_rcu(&opinfo->op_entry, &ci->m_op_list); write_unlock(&ci->m_lock); } static void opinfo_del(struct oplock_info *opinfo) { struct ksmbd_inode *ci = opinfo->o_fp->f_ci; if (opinfo->is_lease) { write_lock(&lease_list_lock); lease_del_list(opinfo); write_unlock(&lease_list_lock); } write_lock(&ci->m_lock); list_del_rcu(&opinfo->op_entry); write_unlock(&ci->m_lock); } static unsigned long opinfo_count(struct ksmbd_file *fp) { if (ksmbd_stream_fd(fp)) return atomic_read(&fp->f_ci->sop_count); else return atomic_read(&fp->f_ci->op_count); } static void opinfo_count_inc(struct ksmbd_file *fp) { if (ksmbd_stream_fd(fp)) return atomic_inc(&fp->f_ci->sop_count); else return atomic_inc(&fp->f_ci->op_count); } static void opinfo_count_dec(struct ksmbd_file *fp) { if (ksmbd_stream_fd(fp)) return atomic_dec(&fp->f_ci->sop_count); else return atomic_dec(&fp->f_ci->op_count); } /** * opinfo_write_to_read() - convert a write oplock to read oplock * @opinfo: current oplock info * * Return: 0 on success, otherwise -EINVAL */ int opinfo_write_to_read(struct oplock_info *opinfo) { struct lease *lease = opinfo->o_lease; if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH || opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) { pr_err("bad oplock(0x%x)\n", opinfo->level); if (opinfo->is_lease) pr_err("lease state(0x%x)\n", lease->state); return -EINVAL; } opinfo->level = SMB2_OPLOCK_LEVEL_II; if (opinfo->is_lease) lease->state = lease->new_state; return 0; } /** * opinfo_read_handle_to_read() - convert a read/handle oplock to read oplock * @opinfo: current oplock info * * Return: 0 on success, otherwise -EINVAL */ int opinfo_read_handle_to_read(struct oplock_info *opinfo) { struct lease *lease = opinfo->o_lease; lease->state = lease->new_state; opinfo->level = SMB2_OPLOCK_LEVEL_II; return 0; } /** * opinfo_write_to_none() - convert a write oplock to none * @opinfo: current oplock info * * Return: 0 on success, otherwise -EINVAL */ int opinfo_write_to_none(struct oplock_info *opinfo) { struct lease *lease = opinfo->o_lease; if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH || opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) { pr_err("bad oplock(0x%x)\n", opinfo->level); if (opinfo->is_lease) pr_err("lease state(0x%x)\n", lease->state); return -EINVAL; } opinfo->level = SMB2_OPLOCK_LEVEL_NONE; if (opinfo->is_lease) lease->state = lease->new_state; return 0; } /** * opinfo_read_to_none() - convert a write read to none * @opinfo: current oplock info * * Return: 0 on success, otherwise -EINVAL */ int opinfo_read_to_none(struct oplock_info *opinfo) { struct lease *lease = opinfo->o_lease; if 
(opinfo->level != SMB2_OPLOCK_LEVEL_II) { pr_err("bad oplock(0x%x)\n", opinfo->level); if (opinfo->is_lease) pr_err("lease state(0x%x)\n", lease->state); return -EINVAL; } opinfo->level = SMB2_OPLOCK_LEVEL_NONE; if (opinfo->is_lease) lease->state = lease->new_state; return 0; } /** * lease_read_to_write() - upgrade lease state from read to write * @opinfo: current lease info * * Return: 0 on success, otherwise -EINVAL */ int lease_read_to_write(struct oplock_info *opinfo) { struct lease *lease = opinfo->o_lease; if (!(lease->state & SMB2_LEASE_READ_CACHING_LE)) { ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state); return -EINVAL; } lease->new_state = SMB2_LEASE_NONE_LE; lease->state |= SMB2_LEASE_WRITE_CACHING_LE; if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE) opinfo->level = SMB2_OPLOCK_LEVEL_BATCH; else opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE; return 0; } /** * lease_none_upgrade() - upgrade lease state from none * @opinfo: current lease info * @new_state: new lease state * * Return: 0 on success, otherwise -EINVAL */ static int lease_none_upgrade(struct oplock_info *opinfo, __le32 new_state) { struct lease *lease = opinfo->o_lease; if (!(lease->state == SMB2_LEASE_NONE_LE)) { ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state); return -EINVAL; } lease->new_state = SMB2_LEASE_NONE_LE; lease->state = new_state; if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE) if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) opinfo->level = SMB2_OPLOCK_LEVEL_BATCH; else opinfo->level = SMB2_OPLOCK_LEVEL_II; else if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE; else if (lease->state & SMB2_LEASE_READ_CACHING_LE) opinfo->level = SMB2_OPLOCK_LEVEL_II; return 0; } /** * close_id_del_oplock() - release oplock object at file close time * @fp: ksmbd file pointer */ void close_id_del_oplock(struct ksmbd_file *fp) { struct oplock_info *opinfo; if (S_ISDIR(file_inode(fp->filp)->i_mode)) return; opinfo = opinfo_get(fp); if (!opinfo) return; opinfo_del(opinfo); rcu_assign_pointer(fp->f_opinfo, NULL); if (opinfo->op_state == OPLOCK_ACK_WAIT) { opinfo->op_state = OPLOCK_CLOSING; wake_up_interruptible_all(&opinfo->oplock_q); if (opinfo->is_lease) { atomic_set(&opinfo->breaking_cnt, 0); wake_up_interruptible_all(&opinfo->oplock_brk); } } opinfo_count_dec(fp); atomic_dec(&opinfo->refcount); opinfo_put(opinfo); } /** * grant_write_oplock() - grant exclusive/batch oplock or write lease * @opinfo_new: new oplock info object * @req_oplock: request oplock * @lctx: lease context information * * Return: 0 */ static void grant_write_oplock(struct oplock_info *opinfo_new, int req_oplock, struct lease_ctx_info *lctx) { struct lease *lease = opinfo_new->o_lease; if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH) opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH; else opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE; if (lctx) { lease->state = lctx->req_state; memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE); } } /** * grant_read_oplock() - grant level2 oplock or read lease * @opinfo_new: new oplock info object * @lctx: lease context information * * Return: 0 */ static void grant_read_oplock(struct oplock_info *opinfo_new, struct lease_ctx_info *lctx) { struct lease *lease = opinfo_new->o_lease; opinfo_new->level = SMB2_OPLOCK_LEVEL_II; if (lctx) { lease->state = SMB2_LEASE_READ_CACHING_LE; if (lctx->req_state & SMB2_LEASE_HANDLE_CACHING_LE) lease->state |= SMB2_LEASE_HANDLE_CACHING_LE; memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE); } } /** * 
grant_none_oplock() - grant none oplock or none lease * @opinfo_new: new oplock info object * @lctx: lease context information * * Return: 0 */ static void grant_none_oplock(struct oplock_info *opinfo_new, struct lease_ctx_info *lctx) { struct lease *lease = opinfo_new->o_lease; opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE; if (lctx) { lease->state = 0; memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE); } } static inline int compare_guid_key(struct oplock_info *opinfo, const char *guid1, const char *key1) { const char *guid2, *key2; guid2 = opinfo->conn->ClientGUID; key2 = opinfo->o_lease->lease_key; if (!memcmp(guid1, guid2, SMB2_CLIENT_GUID_SIZE) && !memcmp(key1, key2, SMB2_LEASE_KEY_SIZE)) return 1; return 0; } /** * same_client_has_lease() - check whether current lease request is * from lease owner of file * @ci: master file pointer * @client_guid: Client GUID * @lctx: lease context information * * Return: oplock(lease) object on success, otherwise NULL */ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci, char *client_guid, struct lease_ctx_info *lctx) { int ret; struct lease *lease; struct oplock_info *opinfo; struct oplock_info *m_opinfo = NULL; if (!lctx) return NULL; /* * Compare lease key and client_guid to know request from same owner * of same client */ read_lock(&ci->m_lock); list_for_each_entry(opinfo, &ci->m_op_list, op_entry) { if (!opinfo->is_lease) continue; read_unlock(&ci->m_lock); lease = opinfo->o_lease; ret = compare_guid_key(opinfo, client_guid, lctx->lease_key); if (ret) { m_opinfo = opinfo; /* skip upgrading lease about breaking lease */ if (atomic_read(&opinfo->breaking_cnt)) { read_lock(&ci->m_lock); continue; } /* upgrading lease */ if ((atomic_read(&ci->op_count) + atomic_read(&ci->sop_count)) == 1) { if (lease->state == (lctx->req_state & lease->state)) { lease->state |= lctx->req_state; if (lctx->req_state & SMB2_LEASE_WRITE_CACHING_LE) lease_read_to_write(opinfo); } } else if ((atomic_read(&ci->op_count) + atomic_read(&ci->sop_count)) > 1) { if (lctx->req_state == (SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) lease->state = lctx->req_state; } if (lctx->req_state && lease->state == SMB2_LEASE_NONE_LE) lease_none_upgrade(opinfo, lctx->req_state); } read_lock(&ci->m_lock); } read_unlock(&ci->m_lock); return m_opinfo; } static void wait_for_break_ack(struct oplock_info *opinfo) { int rc = 0; rc = wait_event_interruptible_timeout(opinfo->oplock_q, opinfo->op_state == OPLOCK_STATE_NONE || opinfo->op_state == OPLOCK_CLOSING, OPLOCK_WAIT_TIME); /* is this a timeout ? */ if (!rc) { if (opinfo->is_lease) opinfo->o_lease->state = SMB2_LEASE_NONE_LE; opinfo->level = SMB2_OPLOCK_LEVEL_NONE; opinfo->op_state = OPLOCK_STATE_NONE; } } static void wake_up_oplock_break(struct oplock_info *opinfo) { clear_bit_unlock(0, &opinfo->pending_break); /* memory barrier is needed for wake_up_bit() */ smp_mb__after_atomic(); wake_up_bit(&opinfo->pending_break, 0); } static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level) { while (test_and_set_bit(0, &opinfo->pending_break)) { wait_on_bit(&opinfo->pending_break, 0, TASK_UNINTERRUPTIBLE); /* Not immediately break to none. 
*/ opinfo->open_trunc = 0; if (opinfo->op_state == OPLOCK_CLOSING) return -ENOENT; else if (!opinfo->is_lease && opinfo->level <= req_op_level) return 1; } if (!opinfo->is_lease && opinfo->level <= req_op_level) { wake_up_oplock_break(opinfo); return 1; } return 0; } /** * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn * to client * @wk: smb work object * * There are two ways this function can be called. 1- while file open we break * from exclusive/batch lock to levelII oplock and 2- while file write/truncate * we break from levelII oplock no oplock. * work->request_buf contains oplock_info. */ static void __smb2_oplock_break_noti(struct work_struct *wk) { struct smb2_oplock_break *rsp = NULL; struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); struct oplock_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; struct ksmbd_file *fp; fp = ksmbd_lookup_durable_fd(br_info->fid); if (!fp) goto out; if (allocate_interim_rsp_buf(work)) { pr_err("smb2_allocate_rsp_buf failed! "); ksmbd_fd_put(work, fp); goto out; } rsp_hdr = smb2_get_msg(work->response_buf); memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2); rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER; rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE; rsp_hdr->CreditRequest = cpu_to_le16(0); rsp_hdr->Command = SMB2_OPLOCK_BREAK; rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR); rsp_hdr->NextCommand = 0; rsp_hdr->MessageId = cpu_to_le64(-1); rsp_hdr->Id.SyncId.ProcessId = 0; rsp_hdr->Id.SyncId.TreeId = 0; rsp_hdr->SessionId = 0; memset(rsp_hdr->Signature, 0, 16); rsp = smb2_get_msg(work->response_buf); rsp->StructureSize = cpu_to_le16(24); if (!br_info->open_trunc && (br_info->level == SMB2_OPLOCK_LEVEL_BATCH || br_info->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) rsp->OplockLevel = SMB2_OPLOCK_LEVEL_II; else rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE; rsp->Reserved = 0; rsp->Reserved2 = 0; rsp->PersistentFid = fp->persistent_id; rsp->VolatileFid = fp->volatile_id; ksmbd_fd_put(work, fp); if (ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_oplock_break))) goto out; ksmbd_debug(OPLOCK, "sending oplock break v_id %llu p_id = %llu lock level = %d\n", rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel); ksmbd_conn_write(work); out: ksmbd_free_work_struct(work); } /** * smb2_oplock_break_noti() - send smb2 exclusive/batch to level2 oplock * break command from server to client * @opinfo: oplock info object * * Return: 0 on success, otherwise error */ static int smb2_oplock_break_noti(struct oplock_info *opinfo) { struct ksmbd_conn *conn = opinfo->conn; struct oplock_break_info *br_info; int ret = 0; struct ksmbd_work *work = ksmbd_alloc_work_struct(); if (!work) return -ENOMEM; br_info = kmalloc(sizeof(struct oplock_break_info), GFP_KERNEL); if (!br_info) { ksmbd_free_work_struct(work); return -ENOMEM; } br_info->level = opinfo->level; br_info->fid = opinfo->fid; br_info->open_trunc = opinfo->open_trunc; work->request_buf = (char *)br_info; work->conn = conn; work->sess = opinfo->sess; if (opinfo->op_state == OPLOCK_ACK_WAIT) { INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); } else { __smb2_oplock_break_noti(&work->work); if (opinfo->level == SMB2_OPLOCK_LEVEL_II) opinfo->level = SMB2_OPLOCK_LEVEL_NONE; } return ret; } /** * __smb2_lease_break_noti() - send lease break command from server * to client * @wk: smb work object */ static void __smb2_lease_break_noti(struct work_struct *wk) { struct smb2_lease_break *rsp = NULL; struct ksmbd_work *work = 
container_of(wk, struct ksmbd_work, work); struct lease_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; if (allocate_interim_rsp_buf(work)) { ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! "); goto out; } rsp_hdr = smb2_get_msg(work->response_buf); memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2); rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER; rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE; rsp_hdr->CreditRequest = cpu_to_le16(0); rsp_hdr->Command = SMB2_OPLOCK_BREAK; rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR); rsp_hdr->NextCommand = 0; rsp_hdr->MessageId = cpu_to_le64(-1); rsp_hdr->Id.SyncId.ProcessId = 0; rsp_hdr->Id.SyncId.TreeId = 0; rsp_hdr->SessionId = 0; memset(rsp_hdr->Signature, 0, 16); rsp = smb2_get_msg(work->response_buf); rsp->StructureSize = cpu_to_le16(44); rsp->Epoch = br_info->epoch; rsp->Flags = 0; if (br_info->curr_state & (SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) rsp->Flags = SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED; memcpy(rsp->LeaseKey, br_info->lease_key, SMB2_LEASE_KEY_SIZE); rsp->CurrentLeaseState = br_info->curr_state; rsp->NewLeaseState = br_info->new_state; rsp->BreakReason = 0; rsp->AccessMaskHint = 0; rsp->ShareMaskHint = 0; if (ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_lease_break))) goto out; ksmbd_conn_write(work); out: ksmbd_free_work_struct(work); } /** * smb2_lease_break_noti() - break lease when a new client request * write lease * @opinfo: conains lease state information * * Return: 0 on success, otherwise error */ static int smb2_lease_break_noti(struct oplock_info *opinfo) { struct ksmbd_conn *conn = opinfo->conn; struct list_head *tmp, *t; struct ksmbd_work *work; struct lease_break_info *br_info; struct lease *lease = opinfo->o_lease; work = ksmbd_alloc_work_struct(); if (!work) return -ENOMEM; br_info = kmalloc(sizeof(struct lease_break_info), GFP_KERNEL); if (!br_info) { ksmbd_free_work_struct(work); return -ENOMEM; } br_info->curr_state = lease->state; br_info->new_state = lease->new_state; if (lease->version == 2) br_info->epoch = cpu_to_le16(++lease->epoch); else br_info->epoch = 0; memcpy(br_info->lease_key, lease->lease_key, SMB2_LEASE_KEY_SIZE); work->request_buf = (char *)br_info; work->conn = conn; work->sess = opinfo->sess; if (opinfo->op_state == OPLOCK_ACK_WAIT) { list_for_each_safe(tmp, t, &opinfo->interim_list) { struct ksmbd_work *in_work; in_work = list_entry(tmp, struct ksmbd_work, interim_entry); setup_async_work(in_work, NULL, NULL); smb2_send_interim_resp(in_work, STATUS_PENDING); list_del(&in_work->interim_entry); } INIT_WORK(&work->work, __smb2_lease_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); } else { __smb2_lease_break_noti(&work->work); if (opinfo->o_lease->new_state == SMB2_LEASE_NONE_LE) { opinfo->level = SMB2_OPLOCK_LEVEL_NONE; opinfo->o_lease->state = SMB2_LEASE_NONE_LE; } } return 0; } static void wait_lease_breaking(struct oplock_info *opinfo) { if (!opinfo->is_lease) return; wake_up_interruptible_all(&opinfo->oplock_brk); if (atomic_read(&opinfo->breaking_cnt)) { int ret = 0; ret = wait_event_interruptible_timeout(opinfo->oplock_brk, atomic_read(&opinfo->breaking_cnt) == 0, HZ); if (!ret) atomic_set(&opinfo->breaking_cnt, 0); } } static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) { int err = 0; /* Need to break exclusive/batch oplock, write lease or overwrite_if */ ksmbd_debug(OPLOCK, "request to send oplock(level : 0x%x) break notification\n", brk_opinfo->level); if (brk_opinfo->is_lease) { struct lease *lease = 
brk_opinfo->o_lease; atomic_inc(&brk_opinfo->breaking_cnt); err = oplock_break_pending(brk_opinfo, req_op_level); if (err) return err < 0 ? err : 0; if (brk_opinfo->open_trunc) { /* * Create overwrite break trigger the lease break to * none. */ lease->new_state = SMB2_LEASE_NONE_LE; } else { if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) { if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE) lease->new_state = SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE; else lease->new_state = SMB2_LEASE_READ_CACHING_LE; } else { if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE) lease->new_state = SMB2_LEASE_READ_CACHING_LE; else lease->new_state = SMB2_LEASE_NONE_LE; } } if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) brk_opinfo->op_state = OPLOCK_ACK_WAIT; else atomic_dec(&brk_opinfo->breaking_cnt); } else { err = oplock_break_pending(brk_opinfo, req_op_level); if (err) return err < 0 ? err : 0; if (brk_opinfo->level == SMB2_OPLOCK_LEVEL_BATCH || brk_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) brk_opinfo->op_state = OPLOCK_ACK_WAIT; } if (brk_opinfo->is_lease) err = smb2_lease_break_noti(brk_opinfo); else err = smb2_oplock_break_noti(brk_opinfo); ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level); if (brk_opinfo->op_state == OPLOCK_CLOSING) err = -ENOENT; wake_up_oplock_break(brk_opinfo); wait_lease_breaking(brk_opinfo); return err; } void destroy_lease_table(struct ksmbd_conn *conn) { struct lease_table *lb, *lbtmp; struct oplock_info *opinfo; write_lock(&lease_list_lock); if (list_empty(&lease_table_list)) { write_unlock(&lease_list_lock); return; } list_for_each_entry_safe(lb, lbtmp, &lease_table_list, l_entry) { if (conn && memcmp(lb->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) continue; again: rcu_read_lock(); list_for_each_entry_rcu(opinfo, &lb->lease_list, lease_entry) { rcu_read_unlock(); lease_del_list(opinfo); goto again; } rcu_read_unlock(); list_del(&lb->l_entry); kfree(lb); } write_unlock(&lease_list_lock); } int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci, struct lease_ctx_info *lctx) { struct oplock_info *opinfo; int err = 0; struct lease_table *lb; if (!lctx) return err; read_lock(&lease_list_lock); if (list_empty(&lease_table_list)) { read_unlock(&lease_list_lock); return 0; } list_for_each_entry(lb, &lease_table_list, l_entry) { if (!memcmp(lb->client_guid, sess->ClientGUID, SMB2_CLIENT_GUID_SIZE)) goto found; } read_unlock(&lease_list_lock); return 0; found: rcu_read_lock(); list_for_each_entry_rcu(opinfo, &lb->lease_list, lease_entry) { if (!atomic_inc_not_zero(&opinfo->refcount)) continue; rcu_read_unlock(); if (opinfo->o_fp->f_ci == ci) goto op_next; err = compare_guid_key(opinfo, sess->ClientGUID, lctx->lease_key); if (err) { err = -EINVAL; ksmbd_debug(OPLOCK, "found same lease key is already used in other files\n"); opinfo_put(opinfo); goto out; } op_next: opinfo_put(opinfo); rcu_read_lock(); } rcu_read_unlock(); out: read_unlock(&lease_list_lock); return err; } static void copy_lease(struct oplock_info *op1, struct oplock_info *op2) { struct lease *lease1 = op1->o_lease; struct lease *lease2 = op2->o_lease; op2->level = op1->level; lease2->state = lease1->state; memcpy(lease2->lease_key, lease1->lease_key, SMB2_LEASE_KEY_SIZE); lease2->duration = lease1->duration; lease2->flags = lease1->flags; } static int add_lease_global_list(struct oplock_info *opinfo) { struct lease_table *lb; read_lock(&lease_list_lock); list_for_each_entry(lb, &lease_table_list, l_entry) { if 
(!memcmp(lb->client_guid, opinfo->conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { opinfo->o_lease->l_lb = lb; lease_add_list(opinfo); read_unlock(&lease_list_lock); return 0; } } read_unlock(&lease_list_lock); lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL); if (!lb) return -ENOMEM; memcpy(lb->client_guid, opinfo->conn->ClientGUID, SMB2_CLIENT_GUID_SIZE); INIT_LIST_HEAD(&lb->lease_list); spin_lock_init(&lb->lb_lock); opinfo->o_lease->l_lb = lb; lease_add_list(opinfo); lb_add(lb); return 0; } static void set_oplock_level(struct oplock_info *opinfo, int level, struct lease_ctx_info *lctx) { switch (level) { case SMB2_OPLOCK_LEVEL_BATCH: case SMB2_OPLOCK_LEVEL_EXCLUSIVE: grant_write_oplock(opinfo, level, lctx); break; case SMB2_OPLOCK_LEVEL_II: grant_read_oplock(opinfo, lctx); break; default: grant_none_oplock(opinfo, lctx); break; } } /** * smb_grant_oplock() - handle oplock/lease request on file open * @work: smb work * @req_op_level: oplock level * @pid: id of open file * @fp: ksmbd file pointer * @tid: Tree id of connection * @lctx: lease context information on file open * @share_ret: share mode * * Return: 0 on success, otherwise error */ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, struct ksmbd_file *fp, __u16 tid, struct lease_ctx_info *lctx, int share_ret) { struct ksmbd_session *sess = work->sess; int err = 0; struct oplock_info *opinfo = NULL, *prev_opinfo = NULL; struct ksmbd_inode *ci = fp->f_ci; bool prev_op_has_lease; __le32 prev_op_state = 0; /* not support directory lease */ if (S_ISDIR(file_inode(fp->filp)->i_mode)) return 0; opinfo = alloc_opinfo(work, pid, tid); if (!opinfo) return -ENOMEM; if (lctx) { err = alloc_lease(opinfo, lctx); if (err) goto err_out; opinfo->is_lease = 1; } /* ci does not have any oplock */ if (!opinfo_count(fp)) goto set_lev; /* grant none-oplock if second open is trunc */ if (fp->attrib_only && fp->cdoption != FILE_OVERWRITE_IF_LE && fp->cdoption != FILE_OVERWRITE_LE && fp->cdoption != FILE_SUPERSEDE_LE) { req_op_level = SMB2_OPLOCK_LEVEL_NONE; goto set_lev; } if (lctx) { struct oplock_info *m_opinfo; /* is lease already granted ? 
*/ m_opinfo = same_client_has_lease(ci, sess->ClientGUID, lctx); if (m_opinfo) { copy_lease(m_opinfo, opinfo); if (atomic_read(&m_opinfo->breaking_cnt)) opinfo->o_lease->flags = SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE; goto out; } } prev_opinfo = opinfo_get_list(ci); if (!prev_opinfo || (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) { opinfo_conn_put(prev_opinfo); goto set_lev; } prev_op_has_lease = prev_opinfo->is_lease; if (prev_op_has_lease) prev_op_state = prev_opinfo->o_lease->state; if (share_ret < 0 && prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { err = share_ret; opinfo_conn_put(prev_opinfo); goto err_out; } if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { opinfo_conn_put(prev_opinfo); goto op_break_not_needed; } list_add(&work->interim_entry, &prev_opinfo->interim_list); err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II); opinfo_conn_put(prev_opinfo); if (err == -ENOENT) goto set_lev; /* -ENOENT: the previous oplock was already freed by a close */ else if (err < 0) goto err_out; op_break_not_needed: if (share_ret < 0) { err = share_ret; goto err_out; } if (req_op_level != SMB2_OPLOCK_LEVEL_NONE) req_op_level = SMB2_OPLOCK_LEVEL_II; /* grant a fixed (reduced) oplock level when leases and oplocks are stacked on the same file */ if (prev_op_has_lease && !lctx) if (prev_op_state & SMB2_LEASE_HANDLE_CACHING_LE) req_op_level = SMB2_OPLOCK_LEVEL_NONE; if (!prev_op_has_lease && lctx) { req_op_level = SMB2_OPLOCK_LEVEL_II; lctx->req_state = SMB2_LEASE_READ_CACHING_LE; } set_lev: set_oplock_level(opinfo, req_op_level, lctx); out: rcu_assign_pointer(fp->f_opinfo, opinfo); opinfo->o_fp = fp; opinfo_count_inc(fp); opinfo_add(opinfo); if (opinfo->is_lease) { err = add_lease_global_list(opinfo); if (err) goto err_out; } return 0; err_out: free_opinfo(opinfo); return err; } /** * smb_break_all_write_oplock() - break batch/exclusive oplock to level2 * @work: smb work * @fp: ksmbd file pointer * @is_trunc: truncate on open */ static void smb_break_all_write_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, int is_trunc) { struct oplock_info *brk_opinfo; brk_opinfo = opinfo_get_list(fp->f_ci); if (!brk_opinfo) return; if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { opinfo_conn_put(brk_opinfo); return; } brk_opinfo->open_trunc = is_trunc; list_add(&work->interim_entry, &brk_opinfo->interim_list); oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II); opinfo_conn_put(brk_opinfo); } /** * smb_break_all_levII_oplock() - send level2 oplock or read lease break command * from server to client * @work: smb work * @fp: ksmbd file pointer * @is_trunc: truncate on open */ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, int is_trunc) { struct oplock_info *op, *brk_op; struct ksmbd_inode *ci; struct ksmbd_conn *conn = work->conn; if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS)) return; ci = fp->f_ci; op = opinfo_get(fp); rcu_read_lock(); list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) { if (!atomic_inc_not_zero(&brk_op->refcount)) continue; atomic_inc(&brk_op->conn->r_count); if (ksmbd_conn_releasing(brk_op->conn)) { atomic_dec(&brk_op->conn->r_count); continue; } rcu_read_unlock(); if (brk_op->is_lease && (brk_op->o_lease->state & (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)))) { ksmbd_debug(OPLOCK, "unexpected lease state(0x%x)\n", brk_op->o_lease->state); goto next; } else if (brk_op->level != SMB2_OPLOCK_LEVEL_II) { ksmbd_debug(OPLOCK,
"unexpected oplock(0x%x)\n", brk_op->level); goto next; } /* skip oplocks that are being broken to none */ if (brk_op->is_lease && brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE && atomic_read(&brk_op->breaking_cnt)) goto next; if (op && op->is_lease && brk_op->is_lease && !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID, SMB2_CLIENT_GUID_SIZE) && !memcmp(op->o_lease->lease_key, brk_op->o_lease->lease_key, SMB2_LEASE_KEY_SIZE)) goto next; brk_op->open_trunc = is_trunc; oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE); next: opinfo_conn_put(brk_op); rcu_read_lock(); } rcu_read_unlock(); if (op) opinfo_put(op); } /** * smb_break_all_oplock() - break both batch/exclusive and level2 oplock * @work: smb work * @fp: ksmbd file pointer */ void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp) { if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS)) return; smb_break_all_write_oplock(work, fp, 1); smb_break_all_levII_oplock(work, fp, 1); } /** * smb2_map_lease_to_oplock() - map lease state to corresponding oplock type * @lease_state: lease type * * Return: 0 if no mapping, otherwise corresponding oplock type */ __u8 smb2_map_lease_to_oplock(__le32 lease_state) { if (lease_state == (SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_WRITE_CACHING_LE)) { return SMB2_OPLOCK_LEVEL_BATCH; } else if (lease_state != SMB2_LEASE_NONE_LE && lease_state & SMB2_LEASE_WRITE_CACHING_LE) { if (!(lease_state & SMB2_LEASE_HANDLE_CACHING_LE)) return SMB2_OPLOCK_LEVEL_EXCLUSIVE; } else if (lease_state & SMB2_LEASE_READ_CACHING_LE) { return SMB2_OPLOCK_LEVEL_II; } return 0; } /** * create_lease_buf() - create lease context for open cmd response * @rbuf: buffer to create lease context response * @lease: lease state information to encode into the response */ void create_lease_buf(u8 *rbuf, struct lease *lease) { if (lease->version == 2) { struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf; memset(buf, 0, sizeof(struct create_lease_v2)); memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseFlags = lease->flags; buf->lcontext.LeaseState = lease->state; memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key, SMB2_LEASE_KEY_SIZE); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_lease_v2, lcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_lease_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Name[0] = 'R'; buf->Name[1] = 'q'; buf->Name[2] = 'L'; buf->Name[3] = 's'; } else { struct create_lease *buf = (struct create_lease *)rbuf; memset(buf, 0, sizeof(struct create_lease)); memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseFlags = lease->flags; buf->lcontext.LeaseState = lease->state; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_lease, lcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_lease, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Name[0] = 'R'; buf->Name[1] = 'q'; buf->Name[2] = 'L'; buf->Name[3] = 's'; } } /** * parse_lease_state() - parse lease context contained in file open request * @open_req: buffer containing smb2 file open(create) request * * Return: allocated lease context object on success, NULL if no lease * context is present or allocation fails */ struct lease_ctx_info *parse_lease_state(void *open_req) { struct create_context *cc; struct smb2_create_req *req
= (struct smb2_create_req *)open_req; struct lease_ctx_info *lreq; cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4); if (IS_ERR_OR_NULL(cc)) return NULL; lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL); if (!lreq) return NULL; if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { struct create_lease_v2 *lc = (struct create_lease_v2 *)cc; memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; lreq->duration = lc->lcontext.LeaseDuration; memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey, SMB2_LEASE_KEY_SIZE); lreq->version = 2; } else { struct create_lease *lc = (struct create_lease *)cc; memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; lreq->duration = lc->lcontext.LeaseDuration; lreq->version = 1; } return lreq; } /** * smb2_find_context_vals() - find a particular context info in open request * @open_req: buffer containing smb2 file open(create) request * @tag: context name to search for * @tag_len: the length of tag * * Return: pointer to the requested context, NULL if the @tag context is * not found, or an error pointer if a create context is malformed. */ struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len) { struct create_context *cc; unsigned int next = 0; char *name; struct smb2_create_req *req = (struct smb2_create_req *)open_req; unsigned int remain_len, name_off, name_len, value_off, value_len, cc_len; /* * CreateContextsOffset and CreateContextsLength are guaranteed to * be valid because of ksmbd_smb2_check_message(). */ cc = (struct create_context *)((char *)req + le32_to_cpu(req->CreateContextsOffset)); remain_len = le32_to_cpu(req->CreateContextsLength); do { cc = (struct create_context *)((char *)cc + next); if (remain_len < offsetof(struct create_context, Buffer)) return ERR_PTR(-EINVAL); next = le32_to_cpu(cc->Next); name_off = le16_to_cpu(cc->NameOffset); name_len = le16_to_cpu(cc->NameLength); value_off = le16_to_cpu(cc->DataOffset); value_len = le32_to_cpu(cc->DataLength); cc_len = next ? next : remain_len; if ((next & 0x7) != 0 || next > remain_len || name_off != offsetof(struct create_context, Buffer) || name_len < 4 || name_off + name_len > cc_len || (value_off & 0x7) != 0 || (value_len && value_off < name_off + (name_len < 8 ?
8 : name_len)) || ((u64)value_off + value_len > cc_len)) return ERR_PTR(-EINVAL); name = (char *)cc + name_off; if (name_len == tag_len && !memcmp(name, tag, name_len)) return cc; remain_len -= next; } while (next != 0); return NULL; } /** * create_durable_rsp_buf() - create durable handle context * @cc: buffer to create durable context response */ void create_durable_rsp_buf(char *cc) { struct create_durable_rsp *buf; buf = (struct create_durable_rsp *)cc; memset(buf, 0, sizeof(struct create_durable_rsp)); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable_rsp, Data)); buf->ccontext.DataLength = cpu_to_le32(8); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable_rsp, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_RESPONSE is "DHnQ" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'Q'; } /** * create_durable_v2_rsp_buf() - create durable handle v2 context * @cc: buffer to create durable context response * @fp: ksmbd file pointer */ void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp) { struct create_durable_v2_rsp *buf; buf = (struct create_durable_v2_rsp *)cc; memset(buf, 0, sizeof(struct create_durable_rsp)); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable_rsp, Data)); buf->ccontext.DataLength = cpu_to_le32(8); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable_rsp, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_RESPONSE_V2 is "DH2Q" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'Q'; buf->Timeout = cpu_to_le32(fp->durable_timeout); } /** * create_mxac_rsp_buf() - create query maximal access context * @cc: buffer to create maximal access context response * @maximal_access: maximal access */ void create_mxac_rsp_buf(char *cc, int maximal_access) { struct create_mxac_rsp *buf; buf = (struct create_mxac_rsp *)cc; memset(buf, 0, sizeof(struct create_mxac_rsp)); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_mxac_rsp, QueryStatus)); buf->ccontext.DataLength = cpu_to_le32(8); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_mxac_rsp, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_QUERY_MAXIMAL_ACCESS_RESPONSE is "MxAc" */ buf->Name[0] = 'M'; buf->Name[1] = 'x'; buf->Name[2] = 'A'; buf->Name[3] = 'c'; buf->QueryStatus = STATUS_SUCCESS; buf->MaximalAccess = cpu_to_le32(maximal_access); } void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id) { struct create_disk_id_rsp *buf; buf = (struct create_disk_id_rsp *)cc; memset(buf, 0, sizeof(struct create_disk_id_rsp)); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_disk_id_rsp, DiskFileId)); buf->ccontext.DataLength = cpu_to_le32(32); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_mxac_rsp, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_QUERY_ON_DISK_ID_RESPONSE is "QFid" */ buf->Name[0] = 'Q'; buf->Name[1] = 'F'; buf->Name[2] = 'i'; buf->Name[3] = 'd'; buf->DiskFileId = cpu_to_le64(file_id); buf->VolumeId = cpu_to_le64(vol_id); } /** * create_posix_rsp_buf() - create posix extension context * @cc: buffer to create posix on posix response * @fp: ksmbd file pointer */ void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp) { struct create_posix_rsp *buf; struct inode *inode = file_inode(fp->filp); struct mnt_idmap *idmap = file_mnt_idmap(fp->filp); vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); vfsgid_t vfsgid = 
i_gid_into_vfsgid(idmap, inode); buf = (struct create_posix_rsp *)cc; memset(buf, 0, sizeof(struct create_posix_rsp)); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_posix_rsp, nlink)); /* * DataLength = nlink(4) + reparse_tag(4) + mode(4) + * domain sid(28) + unix group sid(16). */ buf->ccontext.DataLength = cpu_to_le32(56); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_posix_rsp, Name)); buf->ccontext.NameLength = cpu_to_le16(POSIX_CTXT_DATA_LEN); /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ buf->Name[0] = 0x93; buf->Name[1] = 0xAD; buf->Name[2] = 0x25; buf->Name[3] = 0x50; buf->Name[4] = 0x9C; buf->Name[5] = 0xB4; buf->Name[6] = 0x11; buf->Name[7] = 0xE7; buf->Name[8] = 0xB4; buf->Name[9] = 0x23; buf->Name[10] = 0x83; buf->Name[11] = 0xDE; buf->Name[12] = 0x96; buf->Name[13] = 0x8B; buf->Name[14] = 0xCD; buf->Name[15] = 0x7C; buf->nlink = cpu_to_le32(inode->i_nlink); buf->reparse_tag = cpu_to_le32(fp->volatile_id); buf->mode = cpu_to_le32(inode->i_mode & 0777); /* * SidBuffer(44) contain two sids(Domain sid(28), UNIX group sid(16)). * Domain sid(28) = revision(1) + num_subauth(1) + authority(6) + * sub_auth(4 * 4(num_subauth)) + RID(4). * UNIX group id(16) = revision(1) + num_subauth(1) + authority(6) + * sub_auth(4 * 1(num_subauth)) + RID(4). */ id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)), SIDOWNER, (struct smb_sid *)&buf->SidBuffer[0]); id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)), SIDUNIX_GROUP, (struct smb_sid *)&buf->SidBuffer[28]); } /* * Find lease object(opinfo) for given lease key/fid from lease * break/file close path. */ /** * lookup_lease_in_table() - find a matching lease info object * @conn: connection instance * @lease_key: lease key to be searched for * * Return: opinfo if found matching opinfo, otherwise NULL */ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn, char *lease_key) { struct oplock_info *opinfo = NULL, *ret_op = NULL; struct lease_table *lt; int ret; read_lock(&lease_list_lock); list_for_each_entry(lt, &lease_table_list, l_entry) { if (!memcmp(lt->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) goto found; } read_unlock(&lease_list_lock); return NULL; found: rcu_read_lock(); list_for_each_entry_rcu(opinfo, &lt->lease_list, lease_entry) { if (!atomic_inc_not_zero(&opinfo->refcount)) continue; rcu_read_unlock(); if (!opinfo->op_state || opinfo->op_state == OPLOCK_CLOSING) goto op_next; if (!(opinfo->o_lease->state & (SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_WRITE_CACHING_LE))) goto op_next; ret = compare_guid_key(opinfo, conn->ClientGUID, lease_key); if (ret) { ksmbd_debug(OPLOCK, "found opinfo\n"); ret_op = opinfo; goto out; } op_next: opinfo_put(opinfo); rcu_read_lock(); } rcu_read_unlock(); out: read_unlock(&lease_list_lock); return ret_op; }
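/* * Summary of the lease downgrade implemented in oplock_break() above * (descriptive only, mirroring the new_state computation): without * open_trunc, W|H -> R|H, W -> R and H -> R, while any other state * breaks to none; a truncating open always breaks straight to none. */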
linux-master
fs/smb/server/oplock.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/slab.h> #include "glob.h" #include "auth.h" #include "connection.h" #include "smb_common.h" #include "server.h" static struct smb_version_values smb21_server_values = { .version_string = SMB21_VERSION_STRING, .protocol_id = SMB21_PROT_ID, .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU, .max_read_size = SMB21_DEFAULT_IOSIZE, .max_write_size = SMB21_DEFAULT_IOSIZE, .max_trans_size = SMB21_DEFAULT_IOSIZE, .max_credits = SMB2_MAX_CREDITS, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .create_lease_size = sizeof(struct create_lease), .create_durable_size = sizeof(struct create_durable_rsp), .create_mxac_size = sizeof(struct create_mxac_rsp), .create_disk_id_size = sizeof(struct create_disk_id_rsp), .create_posix_size = sizeof(struct create_posix_rsp), }; static struct smb_version_values smb30_server_values = { .version_string = SMB30_VERSION_STRING, .protocol_id = SMB30_PROT_ID, .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU, .max_read_size = SMB3_DEFAULT_IOSIZE, .max_write_size = SMB3_DEFAULT_IOSIZE, .max_trans_size = SMB3_DEFAULT_TRANS_SIZE, .max_credits = SMB2_MAX_CREDITS, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .create_lease_size = sizeof(struct create_lease_v2), .create_durable_size = sizeof(struct create_durable_rsp), .create_durable_v2_size = sizeof(struct create_durable_v2_rsp), .create_mxac_size = sizeof(struct create_mxac_rsp), .create_disk_id_size = sizeof(struct create_disk_id_rsp), .create_posix_size = sizeof(struct create_posix_rsp), }; static struct smb_version_values smb302_server_values = { .version_string = SMB302_VERSION_STRING, .protocol_id = SMB302_PROT_ID, .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU, .max_read_size = SMB3_DEFAULT_IOSIZE, .max_write_size = SMB3_DEFAULT_IOSIZE, .max_trans_size = SMB3_DEFAULT_TRANS_SIZE, .max_credits = SMB2_MAX_CREDITS, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .create_lease_size = sizeof(struct create_lease_v2), .create_durable_size = sizeof(struct create_durable_rsp), .create_durable_v2_size = sizeof(struct create_durable_v2_rsp), .create_mxac_size = sizeof(struct create_mxac_rsp), .create_disk_id_size = sizeof(struct create_disk_id_rsp), .create_posix_size = sizeof(struct create_posix_rsp), }; static struct smb_version_values smb311_server_values = { .version_string = SMB311_VERSION_STRING, .protocol_id = SMB311_PROT_ID, .capabilities = SMB2_GLOBAL_CAP_LARGE_MTU, .max_read_size = 
SMB3_DEFAULT_IOSIZE, .max_write_size = SMB3_DEFAULT_IOSIZE, .max_trans_size = SMB3_DEFAULT_TRANS_SIZE, .max_credits = SMB2_MAX_CREDITS, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .create_lease_size = sizeof(struct create_lease_v2), .create_durable_size = sizeof(struct create_durable_rsp), .create_durable_v2_size = sizeof(struct create_durable_v2_rsp), .create_mxac_size = sizeof(struct create_mxac_rsp), .create_disk_id_size = sizeof(struct create_disk_id_rsp), .create_posix_size = sizeof(struct create_posix_rsp), }; static struct smb_version_ops smb2_0_server_ops = { .get_cmd_val = get_smb2_cmd_val, .init_rsp_hdr = init_smb2_rsp_hdr, .set_rsp_status = set_smb2_rsp_status, .allocate_rsp_buf = smb2_allocate_rsp_buf, .set_rsp_credits = smb2_set_rsp_credits, .check_user_session = smb2_check_user_session, .get_ksmbd_tcon = smb2_get_ksmbd_tcon, .is_sign_req = smb2_is_sign_req, .check_sign_req = smb2_check_sign_req, .set_sign_rsp = smb2_set_sign_rsp }; static struct smb_version_ops smb3_0_server_ops = { .get_cmd_val = get_smb2_cmd_val, .init_rsp_hdr = init_smb2_rsp_hdr, .set_rsp_status = set_smb2_rsp_status, .allocate_rsp_buf = smb2_allocate_rsp_buf, .set_rsp_credits = smb2_set_rsp_credits, .check_user_session = smb2_check_user_session, .get_ksmbd_tcon = smb2_get_ksmbd_tcon, .is_sign_req = smb2_is_sign_req, .check_sign_req = smb3_check_sign_req, .set_sign_rsp = smb3_set_sign_rsp, .generate_signingkey = ksmbd_gen_smb30_signingkey, .generate_encryptionkey = ksmbd_gen_smb30_encryptionkey, .is_transform_hdr = smb3_is_transform_hdr, .decrypt_req = smb3_decrypt_req, .encrypt_resp = smb3_encrypt_resp }; static struct smb_version_ops smb3_11_server_ops = { .get_cmd_val = get_smb2_cmd_val, .init_rsp_hdr = init_smb2_rsp_hdr, .set_rsp_status = set_smb2_rsp_status, .allocate_rsp_buf = smb2_allocate_rsp_buf, .set_rsp_credits = smb2_set_rsp_credits, .check_user_session = smb2_check_user_session, .get_ksmbd_tcon = smb2_get_ksmbd_tcon, .is_sign_req = smb2_is_sign_req, .check_sign_req = smb3_check_sign_req, .set_sign_rsp = smb3_set_sign_rsp, .generate_signingkey = ksmbd_gen_smb311_signingkey, .generate_encryptionkey = ksmbd_gen_smb311_encryptionkey, .is_transform_hdr = smb3_is_transform_hdr, .decrypt_req = smb3_decrypt_req, .encrypt_resp = smb3_encrypt_resp }; static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = { [SMB2_NEGOTIATE_HE] = { .proc = smb2_negotiate_request, }, [SMB2_SESSION_SETUP_HE] = { .proc = smb2_sess_setup, }, [SMB2_TREE_CONNECT_HE] = { .proc = smb2_tree_connect,}, [SMB2_TREE_DISCONNECT_HE] = { .proc = smb2_tree_disconnect,}, [SMB2_LOGOFF_HE] = { .proc = smb2_session_logoff,}, [SMB2_CREATE_HE] = { .proc = smb2_open}, [SMB2_QUERY_INFO_HE] = { .proc = smb2_query_info}, [SMB2_QUERY_DIRECTORY_HE] = { .proc = smb2_query_dir}, [SMB2_CLOSE_HE] = { .proc = smb2_close}, [SMB2_ECHO_HE] = { .proc = smb2_echo}, [SMB2_SET_INFO_HE] = { .proc = smb2_set_info}, [SMB2_READ_HE] = { .proc = smb2_read}, [SMB2_WRITE_HE] = { .proc = smb2_write}, [SMB2_FLUSH_HE] = { .proc = smb2_flush}, [SMB2_CANCEL_HE] = { .proc = smb2_cancel}, [SMB2_LOCK_HE] = { .proc = smb2_lock}, [SMB2_IOCTL_HE] = { .proc = smb2_ioctl}, [SMB2_OPLOCK_BREAK_HE] = { .proc = smb2_oplock_break}, 
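/* Opcodes without an initializer here stay zero-filled, so their ->proc is NULL; the dispatcher is expected to reject such commands as unsupported. */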
[SMB2_CHANGE_NOTIFY_HE] = { .proc = smb2_notify}, }; /** * init_smb2_1_server() - initialize a smb server connection with smb2.1 * command dispatcher * @conn: connection instance */ void init_smb2_1_server(struct ksmbd_conn *conn) { conn->vals = &smb21_server_values; conn->ops = &smb2_0_server_ops; conn->cmds = smb2_0_server_cmds; conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds); conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES) conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING; } /** * init_smb3_0_server() - initialize a smb server connection with smb3.0 * command dispatcher * @conn: connection instance */ void init_smb3_0_server(struct ksmbd_conn *conn) { conn->vals = &smb30_server_values; conn->ops = &smb3_0_server_ops; conn->cmds = smb2_0_server_cmds; conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds); conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES) conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION && conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION) conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL; } /** * init_smb3_02_server() - initialize a smb server connection with smb3.02 * command dispatcher * @conn: connection instance */ void init_smb3_02_server(struct ksmbd_conn *conn) { conn->vals = &smb302_server_values; conn->ops = &smb3_0_server_ops; conn->cmds = smb2_0_server_cmds; conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds); conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES) conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION || (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) && conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)) conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL; } /** * init_smb3_11_server() - initialize a smb server connection with smb3.11 * command dispatcher * @conn: connection instance */ int init_smb3_11_server(struct ksmbd_conn *conn) { conn->vals = &smb311_server_values; conn->ops = &smb3_11_server_ops; conn->cmds = smb2_0_server_cmds; conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds); conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES) conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION || (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) && conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)) conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL; INIT_LIST_HEAD(&conn->preauth_sess_table); return 0; } void init_smb2_max_read_size(unsigned int sz) { sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); smb21_server_values.max_read_size = sz; smb30_server_values.max_read_size = sz; smb302_server_values.max_read_size = sz; smb311_server_values.max_read_size = sz; } void init_smb2_max_write_size(unsigned int sz) { sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); smb21_server_values.max_write_size = sz; smb30_server_values.max_write_size = sz; smb302_server_values.max_write_size = sz; 
smb311_server_values.max_write_size = sz; } void init_smb2_max_trans_size(unsigned int sz) { sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE); smb21_server_values.max_trans_size = sz; smb30_server_values.max_trans_size = sz; smb302_server_values.max_trans_size = sz; smb311_server_values.max_trans_size = sz; } void init_smb2_max_credits(unsigned int sz) { smb21_server_values.max_credits = sz; smb30_server_values.max_credits = sz; smb302_server_values.max_credits = sz; smb311_server_values.max_credits = sz; }
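/* * Note on the init_smb2_max_* helpers above: read/write/trans sizes are * clamped into [SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE] before being applied * to every dialect table, so values below the minimum are raised and * values above the maximum are capped; max_credits is applied without * clamping. */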
linux-master
fs/smb/server/smb2ops.c
// SPDX-License-Identifier: LGPL-2.1+ /* * Copyright (C) International Business Machines Corp., 2007,2008 * Author(s): Steve French ([email protected]) * Copyright (C) 2020 Samsung Electronics Co., Ltd. * Author(s): Namjae Jeon <[email protected]> */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/mnt_idmapping.h> #include "smbacl.h" #include "smb_common.h" #include "server.h" #include "misc.h" #include "mgmt/share_config.h" static const struct smb_sid domain = {1, 4, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(21), cpu_to_le32(1), cpu_to_le32(2), cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* security id for the creator owner */ static const struct smb_sid creator_owner = { 1, 1, {0, 0, 0, 0, 0, 3}, {0} }; /* security id for the creator group */ static const struct smb_sid creator_group = { 1, 1, {0, 0, 0, 0, 0, 3}, {cpu_to_le32(1)} }; /* security id for everyone/world system group */ static const struct smb_sid sid_everyone = { 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; /* security id for Authenticated Users system group */ static const struct smb_sid sid_authusers = { 1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} }; /* S-1-22-1 Unmapped Unix users */ static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-22-2 Unmapped Unix groups */ static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ /* S-1-5-88 MS NFS and Apple style UID/GID/mode */ /* S-1-5-88-1 Unix uid */ static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-2 Unix gid */ static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-3 Unix mode */ static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* * Compare two SIDs (roughly equivalent to a UUID for a user or group): * returns zero if they are the same and non-zero if they do not match.
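* The return value is signed: besides an equality test it also reports * which SID sorts higher (revision first, then the authority bytes, * then each sub-authority in turn); SIDs that agree on the shorter * sub-authority prefix compare as equal.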
*/ int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid) { int i; int num_subauth, num_sat, num_saw; if (!ctsid || !cwsid) return 1; /* compare the revision */ if (ctsid->revision != cwsid->revision) { if (ctsid->revision > cwsid->revision) return 1; else return -1; } /* compare all of the six auth values */ for (i = 0; i < NUM_AUTHS; ++i) { if (ctsid->authority[i] != cwsid->authority[i]) { if (ctsid->authority[i] > cwsid->authority[i]) return 1; else return -1; } } /* compare all of the subauth values if any */ num_sat = ctsid->num_subauth; num_saw = cwsid->num_subauth; num_subauth = min(num_sat, num_saw); if (num_subauth) { for (i = 0; i < num_subauth; ++i) { if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { if (le32_to_cpu(ctsid->sub_auth[i]) > le32_to_cpu(cwsid->sub_auth[i])) return 1; else return -1; } } } return 0; /* sids compare/match */ } static void smb_copy_sid(struct smb_sid *dst, const struct smb_sid *src) { int i; dst->revision = src->revision; dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES); for (i = 0; i < NUM_AUTHS; ++i) dst->authority[i] = src->authority[i]; for (i = 0; i < dst->num_subauth; ++i) dst->sub_auth[i] = src->sub_auth[i]; } /* * Change posix mode to reflect permissions. * pmode is the existing mode (we only want to overwrite part of this); * the bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700, * 00070 or 00007. */ static umode_t access_flags_to_mode(struct smb_fattr *fattr, __le32 ace_flags, int type) { __u32 flags = le32_to_cpu(ace_flags); umode_t mode = 0; if (flags & GENERIC_ALL) { mode = 0777; ksmbd_debug(SMB, "all perms\n"); return mode; } if ((flags & GENERIC_READ) || (flags & FILE_READ_RIGHTS)) mode = 0444; if ((flags & GENERIC_WRITE) || (flags & FILE_WRITE_RIGHTS)) { mode |= 0222; if (S_ISDIR(fattr->cf_mode)) mode |= 0111; } if ((flags & GENERIC_EXECUTE) || (flags & FILE_EXEC_RIGHTS)) mode |= 0111; if (type == ACCESS_DENIED_ACE_TYPE || type == ACCESS_DENIED_OBJECT_ACE_TYPE) mode = ~mode; ksmbd_debug(SMB, "access flags 0x%x mode now %04o\n", flags, mode); return mode; } /* * Generate access flags to reflect permissions; mode is the existing mode. * This function is called for every ACE in the DACL whose SID matches * either the owner, the group or everyone.
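* For example, a mode of 0640 maps to read and write rights when called * with S_IRWXU, to read rights only with S_IRWXG, and to no rights with * S_IRWXO (callers such as fill_ace_for_sid() then substitute * SET_MINIMUM_RIGHTS for an empty mask).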
*/ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, __u32 *pace_flags) { /* reset access mask */ *pace_flags = 0x0; /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */ mode &= bits_to_use; /* * Check for R/W/X (UGO): we do not know whose bits these are, but all * bits other than RWX for user, group or other have been cleared * according to bits_to_use. */ if (mode & 0444) *pace_flags |= SET_FILE_READ_RIGHTS; if (mode & 0222) *pace_flags |= FILE_WRITE_RIGHTS; if (mode & 0111) *pace_flags |= SET_FILE_EXEC_RIGHTS; ksmbd_debug(SMB, "mode: %o, access flags now 0x%x\n", mode, *pace_flags); } static __u16 fill_ace_for_sid(struct smb_ace *pntace, const struct smb_sid *psid, int type, int flags, umode_t mode, umode_t bits) { int i; __u16 size = 0; __u32 access_req = 0; pntace->type = type; pntace->flags = flags; mode_to_access_flags(mode, bits, &access_req); if (!access_req) access_req = SET_MINIMUM_RIGHTS; pntace->access_req = cpu_to_le32(access_req); pntace->sid.revision = psid->revision; pntace->sid.num_subauth = psid->num_subauth; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = psid->authority[i]; for (i = 0; i < psid->num_subauth; i++) pntace->sid.sub_auth[i] = psid->sub_auth[i]; size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4); pntace->size = cpu_to_le16(size); return size; } void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid) { switch (sidtype) { case SIDOWNER: smb_copy_sid(ssid, &server_conf.domain_sid); break; case SIDUNIX_USER: smb_copy_sid(ssid, &sid_unix_users); break; case SIDUNIX_GROUP: smb_copy_sid(ssid, &sid_unix_groups); break; case SIDCREATOR_OWNER: smb_copy_sid(ssid, &creator_owner); return; case SIDCREATOR_GROUP: smb_copy_sid(ssid, &creator_group); return; case SIDNFS_USER: smb_copy_sid(ssid, &sid_unix_NFS_users); break; case SIDNFS_GROUP: smb_copy_sid(ssid, &sid_unix_NFS_groups); break; case SIDNFS_MODE: smb_copy_sid(ssid, &sid_unix_NFS_mode); break; default: return; } /* RID */ ssid->sub_auth[ssid->num_subauth] = cpu_to_le32(cid); ssid->num_subauth++; } static int sid_to_id(struct mnt_idmap *idmap, struct smb_sid *psid, uint sidtype, struct smb_fattr *fattr) { int rc = -EINVAL; /* * If we have too many subauthorities, then something is really wrong. * Just return an error.
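* (SID_MAX_SUB_AUTHORITIES is the upper bound of the SID format - 15 * sub-authorities - so a larger count can only come from a corrupt or * malicious descriptor.)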
*/ if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) { pr_err("%s: %u subauthorities is too many!\n", __func__, psid->num_subauth); return -EIO; } if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]); uid = KUIDT_INIT(id); uid = from_vfsuid(idmap, &init_user_ns, VFSUIDT_INIT(uid)); if (uid_valid(uid)) { fattr->cf_uid = uid; rc = 0; } } else { kgid_t gid; gid_t id; id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]); gid = KGIDT_INIT(id); gid = from_vfsgid(idmap, &init_user_ns, VFSGIDT_INIT(gid)); if (gid_valid(gid)) { fattr->cf_gid = gid; rc = 0; } } return rc; } void posix_state_to_acl(struct posix_acl_state *state, struct posix_acl_entry *pace) { int i; pace->e_tag = ACL_USER_OBJ; pace->e_perm = state->owner.allow; for (i = 0; i < state->users->n; i++) { pace++; pace->e_tag = ACL_USER; pace->e_uid = state->users->aces[i].uid; pace->e_perm = state->users->aces[i].perms.allow; } pace++; pace->e_tag = ACL_GROUP_OBJ; pace->e_perm = state->group.allow; for (i = 0; i < state->groups->n; i++) { pace++; pace->e_tag = ACL_GROUP; pace->e_gid = state->groups->aces[i].gid; pace->e_perm = state->groups->aces[i].perms.allow; } if (state->users->n || state->groups->n) { pace++; pace->e_tag = ACL_MASK; pace->e_perm = state->mask.allow; } pace++; pace->e_tag = ACL_OTHER; pace->e_perm = state->other.allow; } int init_acl_state(struct posix_acl_state *state, int cnt) { int alloc; memset(state, 0, sizeof(struct posix_acl_state)); /* * In the worst case, each individual acl could be for a distinct * named user or group, but we don't know which, so we allocate * enough space for either: */ alloc = sizeof(struct posix_ace_state_array) + cnt * sizeof(struct posix_user_ace_state); state->users = kzalloc(alloc, GFP_KERNEL); if (!state->users) return -ENOMEM; state->groups = kzalloc(alloc, GFP_KERNEL); if (!state->groups) { kfree(state->users); return -ENOMEM; } return 0; } void free_acl_state(struct posix_acl_state *state) { kfree(state->users); kfree(state->groups); } static void parse_dacl(struct mnt_idmap *idmap, struct smb_acl *pdacl, char *end_of_acl, struct smb_sid *pownersid, struct smb_sid *pgrpsid, struct smb_fattr *fattr) { int i, ret; int num_aces = 0; unsigned int acl_size; char *acl_base; struct smb_ace **ppace; struct posix_acl_entry *cf_pace, *cf_pdace; struct posix_acl_state acl_state, default_acl_state; umode_t mode = 0, acl_mode; bool owner_found = false, group_found = false, others_found = false; if (!pdacl) return; /* validate that we do not go past end of acl */ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { pr_err("ACL too small to parse DACL\n"); return; } ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), le32_to_cpu(pdacl->num_aces)); acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); num_aces = le32_to_cpu(pdacl->num_aces); if (num_aces <= 0) return; if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) return; ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL); if (!ppace) return; ret = init_acl_state(&acl_state, num_aces); if (ret) { kfree(ppace); return; } ret = init_acl_state(&default_acl_state, num_aces); if (ret) { free_acl_state(&acl_state); kfree(ppace); return; } /* * reset rwx permissions for user/group/other. * Also, if num_aces is 0 i.e.
DACL has no ACEs, * user/group/other have no permissions */ for (i = 0; i < num_aces; ++i) { if (end_of_acl - acl_base < acl_size) break; ppace[i] = (struct smb_ace *)(acl_base + acl_size); acl_base = (char *)ppace[i]; acl_size = offsetof(struct smb_ace, sid) + offsetof(struct smb_sid, sub_auth); if (end_of_acl - acl_base < acl_size || ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || (end_of_acl - acl_base < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || (le16_to_cpu(ppace[i]->size) < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth)) break; acl_size = le16_to_cpu(ppace[i]->size); ppace[i]->access_req = smb_map_generic_desired_access(ppace[i]->access_req); if (!(compare_sids(&ppace[i]->sid, &sid_unix_NFS_mode))) { fattr->cf_mode = le32_to_cpu(ppace[i]->sid.sub_auth[2]); break; } else if (!compare_sids(&ppace[i]->sid, pownersid)) { acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req, ppace[i]->type); acl_mode &= 0700; if (!owner_found) { mode &= ~(0700); mode |= acl_mode; } owner_found = true; } else if (!compare_sids(&ppace[i]->sid, pgrpsid) || ppace[i]->sid.sub_auth[ppace[i]->sid.num_subauth - 1] == DOMAIN_USER_RID_LE) { acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req, ppace[i]->type); acl_mode &= 0070; if (!group_found) { mode &= ~(0070); mode |= acl_mode; } group_found = true; } else if (!compare_sids(&ppace[i]->sid, &sid_everyone)) { acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req, ppace[i]->type); acl_mode &= 0007; if (!others_found) { mode &= ~(0007); mode |= acl_mode; } others_found = true; } else if (!compare_sids(&ppace[i]->sid, &creator_owner)) { continue; } else if (!compare_sids(&ppace[i]->sid, &creator_group)) { continue; } else if (!compare_sids(&ppace[i]->sid, &sid_authusers)) { continue; } else { struct smb_fattr temp_fattr; acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req, ppace[i]->type); temp_fattr.cf_uid = INVALID_UID; ret = sid_to_id(idmap, &ppace[i]->sid, SIDOWNER, &temp_fattr); if (ret || uid_eq(temp_fattr.cf_uid, INVALID_UID)) { pr_err("%s: Error %d mapping Owner SID to uid\n", __func__, ret); continue; } acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004; acl_state.users->aces[acl_state.users->n].uid = temp_fattr.cf_uid; acl_state.users->aces[acl_state.users->n++].perms.allow = ((acl_mode & 0700) >> 6) | 0004; default_acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004; default_acl_state.users->aces[default_acl_state.users->n].uid = temp_fattr.cf_uid; default_acl_state.users->aces[default_acl_state.users->n++].perms.allow = ((acl_mode & 0700) >> 6) | 0004; } } kfree(ppace); if (owner_found) { /* The owner must be set to at least read-only. 
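* That is what the | 0004 in the assignments below achieves: the * owner's allow bits always include read, even when the mapped mode * bits alone would grant nothing.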
*/ acl_state.owner.allow = ((mode & 0700) >> 6) | 0004; acl_state.users->aces[acl_state.users->n].uid = fattr->cf_uid; acl_state.users->aces[acl_state.users->n++].perms.allow = ((mode & 0700) >> 6) | 0004; default_acl_state.owner.allow = ((mode & 0700) >> 6) | 0004; default_acl_state.users->aces[default_acl_state.users->n].uid = fattr->cf_uid; default_acl_state.users->aces[default_acl_state.users->n++].perms.allow = ((mode & 0700) >> 6) | 0004; } if (group_found) { acl_state.group.allow = (mode & 0070) >> 3; acl_state.groups->aces[acl_state.groups->n].gid = fattr->cf_gid; acl_state.groups->aces[acl_state.groups->n++].perms.allow = (mode & 0070) >> 3; default_acl_state.group.allow = (mode & 0070) >> 3; default_acl_state.groups->aces[default_acl_state.groups->n].gid = fattr->cf_gid; default_acl_state.groups->aces[default_acl_state.groups->n++].perms.allow = (mode & 0070) >> 3; } if (others_found) { fattr->cf_mode &= ~(0007); fattr->cf_mode |= mode & 0007; acl_state.other.allow = mode & 0007; default_acl_state.other.allow = mode & 0007; } if (acl_state.users->n || acl_state.groups->n) { acl_state.mask.allow = 0x07; if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) { fattr->cf_acls = posix_acl_alloc(acl_state.users->n + acl_state.groups->n + 4, GFP_KERNEL); if (fattr->cf_acls) { cf_pace = fattr->cf_acls->a_entries; posix_state_to_acl(&acl_state, cf_pace); } } } if (default_acl_state.users->n || default_acl_state.groups->n) { default_acl_state.mask.allow = 0x07; if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) { fattr->cf_dacls = posix_acl_alloc(default_acl_state.users->n + default_acl_state.groups->n + 4, GFP_KERNEL); if (fattr->cf_dacls) { cf_pdace = fattr->cf_dacls->a_entries; posix_state_to_acl(&default_acl_state, cf_pdace); } } } free_acl_state(&acl_state); free_acl_state(&default_acl_state); } static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap, struct smb_ace *pndace, struct smb_fattr *fattr, u32 *num_aces, u16 *size, u32 nt_aces_num) { struct posix_acl_entry *pace; struct smb_sid *sid; struct smb_ace *ntace; int i, j; if (!fattr->cf_acls) goto posix_default_acl; pace = fattr->cf_acls->a_entries; for (i = 0; i < fattr->cf_acls->a_count; i++, pace++) { int flags = 0; sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!sid) break; if (pace->e_tag == ACL_USER) { uid_t uid; unsigned int sid_type = SIDOWNER; uid = posix_acl_uid_translate(idmap, pace); if (!uid) sid_type = SIDUNIX_USER; id_to_sid(uid, sid_type, sid); } else if (pace->e_tag == ACL_GROUP) { gid_t gid; gid = posix_acl_gid_translate(idmap, pace); id_to_sid(gid, SIDUNIX_GROUP, sid); } else if (pace->e_tag == ACL_OTHER && !nt_aces_num) { smb_copy_sid(sid, &sid_everyone); } else { kfree(sid); continue; } ntace = pndace; for (j = 0; j < nt_aces_num; j++) { if (ntace->sid.sub_auth[ntace->sid.num_subauth - 1] == sid->sub_auth[sid->num_subauth - 1]) goto pass_same_sid; ntace = (struct smb_ace *)((char *)ntace + le16_to_cpu(ntace->size)); } if (S_ISDIR(fattr->cf_mode) && pace->e_tag == ACL_OTHER) flags = 0x03; ntace = (struct smb_ace *)((char *)pndace + *size); *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, flags, pace->e_perm, 0777); (*num_aces)++; if (pace->e_tag == ACL_USER) ntace->access_req |= FILE_DELETE_LE | FILE_DELETE_CHILD_LE; if (S_ISDIR(fattr->cf_mode) && (pace->e_tag == ACL_USER || pace->e_tag == ACL_GROUP)) { ntace = (struct smb_ace *)((char *)pndace + *size); *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, 0x03, pace->e_perm, 0777); (*num_aces)++; if (pace->e_tag == ACL_USER) ntace->access_req |= FILE_DELETE_LE | 
FILE_DELETE_CHILD_LE; } pass_same_sid: kfree(sid); } if (nt_aces_num) return; posix_default_acl: if (!fattr->cf_dacls) return; pace = fattr->cf_dacls->a_entries; for (i = 0; i < fattr->cf_dacls->a_count; i++, pace++) { sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!sid) break; if (pace->e_tag == ACL_USER) { uid_t uid; uid = posix_acl_uid_translate(idmap, pace); id_to_sid(uid, SIDCREATOR_OWNER, sid); } else if (pace->e_tag == ACL_GROUP) { gid_t gid; gid = posix_acl_gid_translate(idmap, pace); id_to_sid(gid, SIDCREATOR_GROUP, sid); } else { kfree(sid); continue; } ntace = (struct smb_ace *)((char *)pndace + *size); *size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, 0x0b, pace->e_perm, 0777); (*num_aces)++; if (pace->e_tag == ACL_USER) ntace->access_req |= FILE_DELETE_LE | FILE_DELETE_CHILD_LE; kfree(sid); } } static void set_ntacl_dacl(struct mnt_idmap *idmap, struct smb_acl *pndacl, struct smb_acl *nt_dacl, unsigned int aces_size, const struct smb_sid *pownersid, const struct smb_sid *pgrpsid, struct smb_fattr *fattr) { struct smb_ace *ntace, *pndace; int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0; unsigned short size = 0; int i; pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl)); if (nt_num_aces) { ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl)); for (i = 0; i < nt_num_aces; i++) { unsigned short nt_ace_size; if (offsetof(struct smb_ace, access_req) > aces_size) break; nt_ace_size = le16_to_cpu(ntace->size); if (nt_ace_size > aces_size) break; memcpy((char *)pndace + size, ntace, nt_ace_size); size += nt_ace_size; aces_size -= nt_ace_size; ntace = (struct smb_ace *)((char *)ntace + nt_ace_size); num_aces++; } } set_posix_acl_entries_dacl(idmap, pndace, fattr, &num_aces, &size, nt_num_aces); pndacl->num_aces = cpu_to_le32(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } static void set_mode_dacl(struct mnt_idmap *idmap, struct smb_acl *pndacl, struct smb_fattr *fattr) { struct smb_ace *pace, *pndace; u32 num_aces = 0; u16 size = 0, ace_size = 0; uid_t uid; const struct smb_sid *sid; pace = pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl)); if (fattr->cf_acls) { set_posix_acl_entries_dacl(idmap, pndace, fattr, &num_aces, &size, num_aces); goto out; } /* owner RID */ uid = from_kuid(&init_user_ns, fattr->cf_uid); if (uid) sid = &server_conf.domain_sid; else sid = &sid_unix_users; ace_size = fill_ace_for_sid(pace, sid, ACCESS_ALLOWED, 0, fattr->cf_mode, 0700); pace->sid.sub_auth[pace->sid.num_subauth++] = cpu_to_le32(uid); pace->size = cpu_to_le16(ace_size + 4); size += le16_to_cpu(pace->size); pace = (struct smb_ace *)((char *)pndace + size); /* Group RID */ ace_size = fill_ace_for_sid(pace, &sid_unix_groups, ACCESS_ALLOWED, 0, fattr->cf_mode, 0070); pace->sid.sub_auth[pace->sid.num_subauth++] = cpu_to_le32(from_kgid(&init_user_ns, fattr->cf_gid)); pace->size = cpu_to_le16(ace_size + 4); size += le16_to_cpu(pace->size); pace = (struct smb_ace *)((char *)pndace + size); num_aces = 3; if (S_ISDIR(fattr->cf_mode)) { pace = (struct smb_ace *)((char *)pndace + size); /* creator owner */ size += fill_ace_for_sid(pace, &creator_owner, ACCESS_ALLOWED, 0x0b, fattr->cf_mode, 0700); pace = (struct smb_ace *)((char *)pndace + size); /* creator group */ size += fill_ace_for_sid(pace, &creator_group, ACCESS_ALLOWED, 0x0b, fattr->cf_mode, 0070); pace = (struct smb_ace *)((char *)pndace + size); num_aces = 5; } /* other */ size += fill_ace_for_sid(pace, &sid_everyone, ACCESS_ALLOWED, 0, 
fattr->cf_mode, 0007); out: pndacl->num_aces = cpu_to_le32(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } static int parse_sid(struct smb_sid *psid, char *end_of_acl) { /* * validate that we do not go past end of ACL - sid must be at least 8 * bytes long (assuming no sub-auths - e.g. the null SID) */ if (end_of_acl < (char *)psid + 8) { pr_err("ACL too small to parse SID %p\n", psid); return -EINVAL; } return 0; } /* Convert CIFS ACL to POSIX form */ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, int acl_len, struct smb_fattr *fattr) { int rc = 0; struct smb_sid *owner_sid_ptr, *group_sid_ptr; struct smb_acl *dacl_ptr; /* no need for SACL ptr */ char *end_of_acl = ((char *)pntsd) + acl_len; __u32 dacloffset; int pntsd_type; if (!pntsd) return -EIO; if (acl_len < sizeof(struct smb_ntsd)) return -EINVAL; owner_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); group_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); dacloffset = le32_to_cpu(pntsd->dacloffset); dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); ksmbd_debug(SMB, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n", pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), le32_to_cpu(pntsd->gsidoffset), le32_to_cpu(pntsd->sacloffset), dacloffset); pntsd_type = le16_to_cpu(pntsd->type); if (!(pntsd_type & DACL_PRESENT)) { ksmbd_debug(SMB, "DACL_PRESENT is not set in the descriptor type\n"); return rc; } pntsd->type = cpu_to_le16(DACL_PRESENT); if (pntsd->osidoffset) { rc = parse_sid(owner_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d parsing Owner SID\n", __func__, rc); return rc; } rc = sid_to_id(idmap, owner_sid_ptr, SIDOWNER, fattr); if (rc) { pr_err("%s: Error %d mapping Owner SID to uid\n", __func__, rc); owner_sid_ptr = NULL; } } if (pntsd->gsidoffset) { rc = parse_sid(group_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d parsing Group SID\n", __func__, rc); return rc; } rc = sid_to_id(idmap, group_sid_ptr, SIDUNIX_GROUP, fattr); if (rc) { pr_err("%s: Error %d mapping Group SID to gid\n", __func__, rc); group_sid_ptr = NULL; } } if ((pntsd_type & (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ)) == (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ)) pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED); if (pntsd_type & DACL_PROTECTED) pntsd->type |= cpu_to_le16(DACL_PROTECTED); if (dacloffset) { parse_dacl(idmap, dacl_ptr, end_of_acl, owner_sid_ptr, group_sid_ptr, fattr); } return 0; } /* Convert permission bits from mode to equivalent CIFS ACL */ int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info, __u32 *secdesclen, struct smb_fattr *fattr) { int rc = 0; __u32 offset; struct smb_sid *owner_sid_ptr, *group_sid_ptr; struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr; struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */ uid_t uid; gid_t gid; unsigned int sid_type = SIDOWNER; nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!nowner_sid_ptr) return -ENOMEM; uid = from_kuid(&init_user_ns, fattr->cf_uid); if (!uid) sid_type = SIDUNIX_USER; id_to_sid(uid, sid_type, nowner_sid_ptr); ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!ngroup_sid_ptr) { kfree(nowner_sid_ptr); return -ENOMEM; } gid = from_kgid(&init_user_ns, fattr->cf_gid); id_to_sid(gid, SIDUNIX_GROUP, ngroup_sid_ptr); offset = sizeof(struct smb_ntsd); pntsd->sacloffset = 0; pntsd->revision = cpu_to_le16(1);
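/* * The descriptor is built in self-relative form: each SID and the DACL * are appended at the running 'offset' and located through the * corresponding offset field written into the header below. */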
pntsd->type = cpu_to_le16(SELF_RELATIVE); if (ppntsd) pntsd->type |= ppntsd->type; if (addition_info & OWNER_SECINFO) { pntsd->osidoffset = cpu_to_le32(offset); owner_sid_ptr = (struct smb_sid *)((char *)pntsd + offset); smb_copy_sid(owner_sid_ptr, nowner_sid_ptr); offset += 1 + 1 + 6 + (nowner_sid_ptr->num_subauth * 4); } if (addition_info & GROUP_SECINFO) { pntsd->gsidoffset = cpu_to_le32(offset); group_sid_ptr = (struct smb_sid *)((char *)pntsd + offset); smb_copy_sid(group_sid_ptr, ngroup_sid_ptr); offset += 1 + 1 + 6 + (ngroup_sid_ptr->num_subauth * 4); } if (addition_info & DACL_SECINFO) { pntsd->type |= cpu_to_le16(DACL_PRESENT); dacl_ptr = (struct smb_acl *)((char *)pntsd + offset); dacl_ptr->revision = cpu_to_le16(2); dacl_ptr->size = cpu_to_le16(sizeof(struct smb_acl)); dacl_ptr->num_aces = 0; if (!ppntsd) { set_mode_dacl(idmap, dacl_ptr, fattr); } else { struct smb_acl *ppdacl_ptr; unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset); int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset; if (!dacl_offset || (dacl_offset + sizeof(struct smb_acl) > ppntsd_size)) goto out; ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset); ppdacl_size = le16_to_cpu(ppdacl_ptr->size); if (ppdacl_size > ntacl_size || ppdacl_size < sizeof(struct smb_acl)) goto out; set_ntacl_dacl(idmap, dacl_ptr, ppdacl_ptr, ntacl_size - sizeof(struct smb_acl), nowner_sid_ptr, ngroup_sid_ptr, fattr); } pntsd->dacloffset = cpu_to_le32(offset); offset += le16_to_cpu(dacl_ptr->size); } out: kfree(nowner_sid_ptr); kfree(ngroup_sid_ptr); *secdesclen = offset; return rc; } static void smb_set_ace(struct smb_ace *ace, const struct smb_sid *sid, u8 type, u8 flags, __le32 access_req) { ace->type = type; ace->flags = flags; ace->access_req = access_req; smb_copy_sid(&ace->sid, sid); ace->size = cpu_to_le16(1 + 1 + 2 + 4 + 1 + 1 + 6 + (sid->num_subauth * 4)); } int smb_inherit_dacl(struct ksmbd_conn *conn, const struct path *path, unsigned int uid, unsigned int gid) { const struct smb_sid *psid, *creator = NULL; struct smb_ace *parent_aces, *aces; struct smb_acl *parent_pdacl; struct smb_ntsd *parent_pntsd = NULL; struct smb_sid owner_sid, group_sid; struct dentry *parent = path->dentry->d_parent; struct mnt_idmap *idmap = mnt_idmap(path->mnt); int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size; int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size; char *aces_base; bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode); pntsd_size = ksmbd_vfs_get_sd_xattr(conn, idmap, parent, &parent_pntsd); if (pntsd_size <= 0) return -ENOENT; dacloffset = le32_to_cpu(parent_pntsd->dacloffset); if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) { rc = -EINVAL; goto free_parent_pntsd; } parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset); acl_len = pntsd_size - dacloffset; num_aces = le32_to_cpu(parent_pdacl->num_aces); pntsd_type = le16_to_cpu(parent_pntsd->type); pdacl_size = le16_to_cpu(parent_pdacl->size); if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) { rc = -EINVAL; goto free_parent_pntsd; } aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL); if (!aces_base) { rc = -ENOMEM; goto free_parent_pntsd; } aces = (struct smb_ace *)aces_base; parent_aces = (struct smb_ace *)((char *)parent_pdacl + sizeof(struct smb_acl)); aces_size = acl_len - sizeof(struct smb_acl); if (pntsd_type & DACL_AUTO_INHERITED) inherited_flags = INHERITED_ACE; for (i = 0; i < num_aces; i++) { int pace_size; if (offsetof(struct 
smb_ace, access_req) > aces_size) break; pace_size = le16_to_cpu(parent_aces->size); if (pace_size > aces_size) break; aces_size -= pace_size; flags = parent_aces->flags; if (!smb_inherit_flags(flags, is_dir)) goto pass; if (is_dir) { flags &= ~(INHERIT_ONLY_ACE | INHERITED_ACE); if (!(flags & CONTAINER_INHERIT_ACE)) flags |= INHERIT_ONLY_ACE; if (flags & NO_PROPAGATE_INHERIT_ACE) flags = 0; } else { flags = 0; } if (!compare_sids(&creator_owner, &parent_aces->sid)) { creator = &creator_owner; id_to_sid(uid, SIDOWNER, &owner_sid); psid = &owner_sid; } else if (!compare_sids(&creator_group, &parent_aces->sid)) { creator = &creator_group; id_to_sid(gid, SIDUNIX_GROUP, &group_sid); psid = &group_sid; } else { creator = NULL; psid = &parent_aces->sid; } if (is_dir && creator && flags & CONTAINER_INHERIT_ACE) { smb_set_ace(aces, psid, parent_aces->type, inherited_flags, parent_aces->access_req); nt_size += le16_to_cpu(aces->size); ace_cnt++; aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size)); flags |= INHERIT_ONLY_ACE; psid = creator; } else if (is_dir && !(parent_aces->flags & NO_PROPAGATE_INHERIT_ACE)) { psid = &parent_aces->sid; } smb_set_ace(aces, psid, parent_aces->type, flags | inherited_flags, parent_aces->access_req); nt_size += le16_to_cpu(aces->size); aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size)); ace_cnt++; pass: parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size); } if (nt_size > 0) { struct smb_ntsd *pntsd; struct smb_acl *pdacl; struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL; int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size; if (parent_pntsd->osidoffset) { powner_sid = (struct smb_sid *)((char *)parent_pntsd + le32_to_cpu(parent_pntsd->osidoffset)); powner_sid_size = 1 + 1 + 6 + (powner_sid->num_subauth * 4); } if (parent_pntsd->gsidoffset) { pgroup_sid = (struct smb_sid *)((char *)parent_pntsd + le32_to_cpu(parent_pntsd->gsidoffset)); pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4); } pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size + pgroup_sid_size + sizeof(struct smb_acl) + nt_size, GFP_KERNEL); if (!pntsd) { rc = -ENOMEM; goto free_aces_base; } pntsd->revision = cpu_to_le16(1); pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PRESENT); if (le16_to_cpu(parent_pntsd->type) & DACL_AUTO_INHERITED) pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED); pntsd_size = sizeof(struct smb_ntsd); pntsd->osidoffset = parent_pntsd->osidoffset; pntsd->gsidoffset = parent_pntsd->gsidoffset; pntsd->dacloffset = parent_pntsd->dacloffset; if (pntsd->osidoffset) { struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); memcpy(owner_sid, powner_sid, powner_sid_size); pntsd_size += powner_sid_size; } if (pntsd->gsidoffset) { struct smb_sid *group_sid = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); memcpy(group_sid, pgroup_sid, pgroup_sid_size); pntsd_size += pgroup_sid_size; } if (pntsd->dacloffset) { struct smb_ace *pace; pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); pdacl->revision = cpu_to_le16(2); pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size); pdacl->num_aces = cpu_to_le32(ace_cnt); pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); memcpy(pace, aces_base, nt_size); pntsd_size += sizeof(struct smb_acl) + nt_size; } ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size); kfree(pntsd); } free_aces_base: kfree(aces_base); free_parent_pntsd: kfree(parent_pntsd); return rc; } bool 
smb_inherit_flags(int flags, bool is_dir) { if (!is_dir) return (flags & OBJECT_INHERIT_ACE) != 0; if (flags & OBJECT_INHERIT_ACE && !(flags & NO_PROPAGATE_INHERIT_ACE)) return true; if (flags & CONTAINER_INHERIT_ACE) return true; return false; } int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, __le32 *pdaccess, int uid) { struct mnt_idmap *idmap = mnt_idmap(path->mnt); struct smb_ntsd *pntsd = NULL; struct smb_acl *pdacl; struct posix_acl *posix_acls; int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset; struct smb_sid sid; int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE); struct smb_ace *ace; int i, found = 0; unsigned int access_bits = 0; struct smb_ace *others_ace = NULL; struct posix_acl_entry *pa_entry; unsigned int sid_type = SIDOWNER; unsigned short ace_size; ksmbd_debug(SMB, "check permission using windows acl\n"); pntsd_size = ksmbd_vfs_get_sd_xattr(conn, idmap, path->dentry, &pntsd); if (pntsd_size <= 0 || !pntsd) goto err_out; dacl_offset = le32_to_cpu(pntsd->dacloffset); if (!dacl_offset || (dacl_offset + sizeof(struct smb_acl) > pntsd_size)) goto err_out; pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); acl_size = pntsd_size - dacl_offset; pdacl_size = le16_to_cpu(pdacl->size); if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl)) goto err_out; if (!pdacl->num_aces) { if (!(pdacl_size - sizeof(struct smb_acl)) && *pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) { rc = -EACCES; goto err_out; } goto err_out; } if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) { granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES | DELETE; ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); if (ace_size > aces_size) break; aces_size -= ace_size; granted |= le32_to_cpu(ace->access_req); ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size)); } if (!pdacl->num_aces) granted = GENERIC_ALL_FLAGS; } if (!uid) sid_type = SIDUNIX_USER; id_to_sid(uid, sid_type, &sid); ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); if (ace_size > aces_size) break; aces_size -= ace_size; if (!compare_sids(&sid, &ace->sid) || !compare_sids(&sid_unix_NFS_mode, &ace->sid)) { found = 1; break; } if (!compare_sids(&sid_everyone, &ace->sid)) others_ace = ace; ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size)); } if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) { granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES | DELETE; granted |= le32_to_cpu(ace->access_req); if (!pdacl->num_aces) granted = GENERIC_ALL_FLAGS; } if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) { posix_acls = get_inode_acl(d_inode(path->dentry), ACL_TYPE_ACCESS); if (!IS_ERR_OR_NULL(posix_acls) && !found) { unsigned int id = -1; pa_entry = posix_acls->a_entries; for (i = 0; i < posix_acls->a_count; i++, pa_entry++) { if (pa_entry->e_tag == ACL_USER) id = posix_acl_uid_translate(idmap, pa_entry); else if (pa_entry->e_tag == ACL_GROUP) id = posix_acl_gid_translate(idmap, pa_entry); else continue; if (id == uid) { mode_to_access_flags(pa_entry->e_perm, 0777, &access_bits); if (!access_bits) access_bits = SET_MINIMUM_RIGHTS; 
posix_acl_release(posix_acls); goto check_access_bits; } } } if (!IS_ERR_OR_NULL(posix_acls)) posix_acl_release(posix_acls); } if (!found) { if (others_ace) { ace = others_ace; } else { ksmbd_debug(SMB, "Can't find corresponding sid\n"); rc = -EACCES; goto err_out; } } switch (ace->type) { case ACCESS_ALLOWED_ACE_TYPE: access_bits = le32_to_cpu(ace->access_req); break; case ACCESS_DENIED_ACE_TYPE: case ACCESS_DENIED_CALLBACK_ACE_TYPE: access_bits = le32_to_cpu(~ace->access_req); break; } check_access_bits: if (granted & ~(access_bits | FILE_READ_ATTRIBUTES | READ_CONTROL | WRITE_DAC | DELETE)) { ksmbd_debug(SMB, "Access denied with winACL, granted : %x, access_req : %x\n", granted, le32_to_cpu(ace->access_req)); rc = -EACCES; goto err_out; } *pdaccess = cpu_to_le32(granted); err_out: kfree(pntsd); return rc; } int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon, const struct path *path, struct smb_ntsd *pntsd, int ntsd_len, bool type_check) { int rc; struct smb_fattr fattr = {{0}}; struct inode *inode = d_inode(path->dentry); struct mnt_idmap *idmap = mnt_idmap(path->mnt); struct iattr newattrs; fattr.cf_uid = INVALID_UID; fattr.cf_gid = INVALID_GID; fattr.cf_mode = inode->i_mode; rc = parse_sec_desc(idmap, pntsd, ntsd_len, &fattr); if (rc) goto out; newattrs.ia_valid = ATTR_CTIME; if (!uid_eq(fattr.cf_uid, INVALID_UID)) { newattrs.ia_valid |= ATTR_UID; newattrs.ia_uid = fattr.cf_uid; } if (!gid_eq(fattr.cf_gid, INVALID_GID)) { newattrs.ia_valid |= ATTR_GID; newattrs.ia_gid = fattr.cf_gid; } newattrs.ia_valid |= ATTR_MODE; newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777); ksmbd_vfs_remove_acl_xattrs(idmap, path); /* Update posix acls */ if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) { rc = set_posix_acl(idmap, path->dentry, ACL_TYPE_ACCESS, fattr.cf_acls); if (rc < 0) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n", rc); if (S_ISDIR(inode->i_mode) && fattr.cf_dacls) { rc = set_posix_acl(idmap, path->dentry, ACL_TYPE_DEFAULT, fattr.cf_dacls); if (rc) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n", rc); } } inode_lock(inode); rc = notify_change(idmap, path->dentry, &newattrs, NULL); inode_unlock(inode); if (rc) goto out; /* Check it only calling from SD BUFFER context */ if (type_check && !(le16_to_cpu(pntsd->type) & DACL_PRESENT)) goto out; if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) { /* Update WinACL in xattr */ ksmbd_vfs_remove_sd_xattrs(idmap, path); ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len); } out: posix_acl_release(fattr.cf_acls); posix_acl_release(fattr.cf_dacls); return rc; } void ksmbd_init_domain(u32 *sub_auth) { int i; memcpy(&server_conf.domain_sid, &domain, sizeof(struct smb_sid)); for (i = 0; i < 3; ++i) server_conf.domain_sid.sub_auth[i + 1] = cpu_to_le32(sub_auth[i]); }
linux-master
fs/smb/server/smbacl.c
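The ACE walks above (smb_inherit_dacl, smb_check_perm_dacl) use a two-step bounds check: first that the fixed part of the ACE fits in the remaining buffer, then that the ACE's self-declared size is sane before advancing by it. The recurring SID size arithmetic 1 + 1 + 6 + num_subauth * 4 counts the revision byte, the sub-authority count byte, the 6-byte authority, and 4 bytes per sub-authority. Below is a minimal user-space sketch of the walk, using a simplified stand-in header rather than the kernel's struct smb_ace:

/*
 * Minimal user-space sketch of the DACL walk above: check that the fixed
 * ACE header fits before trusting ace->size, then check ace->size itself
 * before advancing.  Simplified stand-ins, not the kernel's structs.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ace_hdr {                /* fixed prefix of an ACE on the wire */
	uint8_t  type;
	uint8_t  flags;
	uint16_t size;          /* total ACE size, little endian */
};

static int walk_dacl(const uint8_t *p, size_t aces_size, unsigned int num_aces)
{
	unsigned int i;

	for (i = 0; i < num_aces; i++) {
		struct ace_hdr hdr;
		uint16_t ace_size;

		if (sizeof(hdr) > aces_size)    /* header must fit first */
			return -1;
		memcpy(&hdr, p, sizeof(hdr));
		ace_size = hdr.size;            /* assumes a LE host */

		/* declared size must fit and cover at least the header */
		if (ace_size > aces_size || ace_size < sizeof(hdr))
			return -1;

		printf("ace %u: type=%u flags=0x%x size=%u\n",
		       i, hdr.type, hdr.flags, ace_size);
		aces_size -= ace_size;
		p += ace_size;
	}
	return 0;
}

int main(void)
{
	/* one well-formed 8-byte ACE; the trailing 4 bytes stay unused */
	uint8_t buf[12] = { 0x00, 0x03, 0x08, 0x00, 1, 2, 3, 4, 0, 0, 0, 0 };

	return walk_dacl(buf, sizeof(buf), 1) ? 1 : 0;
}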
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include "glob.h" #include "oplock.h" #include "misc.h" #include <linux/sched/signal.h> #include <linux/workqueue.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/moduleparam.h> #include "server.h" #include "smb_common.h" #include "smbstatus.h" #include "connection.h" #include "transport_ipc.h" #include "mgmt/user_session.h" #include "crypto_ctx.h" #include "auth.h" int ksmbd_debug_types; struct ksmbd_server_config server_conf; enum SERVER_CTRL_TYPE { SERVER_CTRL_TYPE_INIT, SERVER_CTRL_TYPE_RESET, }; struct server_ctrl_struct { int type; struct work_struct ctrl_work; }; static DEFINE_MUTEX(ctrl_lock); static int ___server_conf_set(int idx, char *val) { if (idx >= ARRAY_SIZE(server_conf.conf)) return -EINVAL; if (!val || val[0] == 0x00) return -EINVAL; kfree(server_conf.conf[idx]); server_conf.conf[idx] = kstrdup(val, GFP_KERNEL); if (!server_conf.conf[idx]) return -ENOMEM; return 0; } int ksmbd_set_netbios_name(char *v) { return ___server_conf_set(SERVER_CONF_NETBIOS_NAME, v); } int ksmbd_set_server_string(char *v) { return ___server_conf_set(SERVER_CONF_SERVER_STRING, v); } int ksmbd_set_work_group(char *v) { return ___server_conf_set(SERVER_CONF_WORK_GROUP, v); } char *ksmbd_netbios_name(void) { return server_conf.conf[SERVER_CONF_NETBIOS_NAME]; } char *ksmbd_server_string(void) { return server_conf.conf[SERVER_CONF_SERVER_STRING]; } char *ksmbd_work_group(void) { return server_conf.conf[SERVER_CONF_WORK_GROUP]; } /** * check_conn_state() - check state of server thread connection * @work: smb work containing server thread information * * Return: 0 on valid connection, otherwise 1 to reconnect */ static inline int check_conn_state(struct ksmbd_work *work) { struct smb_hdr *rsp_hdr; if (ksmbd_conn_exiting(work->conn) || ksmbd_conn_need_reconnect(work->conn)) { rsp_hdr = work->response_buf; rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED; return 1; } return 0; } #define SERVER_HANDLER_CONTINUE 0 #define SERVER_HANDLER_ABORT 1 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, u16 *cmd) { struct smb_version_cmds *cmds; u16 command; int ret; if (check_conn_state(work)) return SERVER_HANDLER_CONTINUE; if (ksmbd_verify_smb_message(work)) return SERVER_HANDLER_ABORT; command = conn->ops->get_cmd_val(work); *cmd = command; andx_again: if (command >= conn->max_cmds) { conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); return SERVER_HANDLER_CONTINUE; } cmds = &conn->cmds[command]; if (!cmds->proc) { ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command); conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED); return SERVER_HANDLER_CONTINUE; } if (work->sess && conn->ops->is_sign_req(work, command)) { ret = conn->ops->check_sign_req(work); if (!ret) { conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED); return SERVER_HANDLER_CONTINUE; } } ret = cmds->proc(work); if (ret < 0) ksmbd_debug(CONN, "Failed to process %u [%d]\n", command, ret); /* AndX commands - chained request can return positive values */ else if (ret > 0) { command = ret; *cmd = command; goto andx_again; } if (work->send_no_response) return SERVER_HANDLER_ABORT; return SERVER_HANDLER_CONTINUE; } static void __handle_ksmbd_work(struct ksmbd_work *work, struct ksmbd_conn *conn) { u16 command = 0; int rc; bool is_chained = false; if (conn->ops->allocate_rsp_buf(work)) return; if (conn->ops->is_transform_hdr 
&& conn->ops->is_transform_hdr(work->request_buf)) { rc = conn->ops->decrypt_req(work); if (rc < 0) { conn->ops->set_rsp_status(work, STATUS_DATA_ERROR); goto send; } work->encrypted = true; } rc = conn->ops->init_rsp_hdr(work); if (rc) { /* either uid or tid is not correct */ conn->ops->set_rsp_status(work, STATUS_INVALID_HANDLE); goto send; } do { if (conn->ops->check_user_session) { rc = conn->ops->check_user_session(work); if (rc < 0) { if (rc == -EINVAL) conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); else conn->ops->set_rsp_status(work, STATUS_USER_SESSION_DELETED); goto send; } else if (rc > 0) { rc = conn->ops->get_ksmbd_tcon(work); if (rc < 0) { if (rc == -EINVAL) conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); else conn->ops->set_rsp_status(work, STATUS_NETWORK_NAME_DELETED); goto send; } } } rc = __process_request(work, conn, &command); if (rc == SERVER_HANDLER_ABORT) break; /* * Call smb2_set_rsp_credits() function to set number of credits * granted in hdr of smb2 response. */ if (conn->ops->set_rsp_credits) { spin_lock(&conn->credits_lock); rc = conn->ops->set_rsp_credits(work); spin_unlock(&conn->credits_lock); if (rc < 0) { conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); goto send; } } is_chained = is_chained_smb2_message(work); if (work->sess && (work->sess->sign || smb3_11_final_sess_setup_resp(work) || conn->ops->is_sign_req(work, command))) conn->ops->set_sign_rsp(work); } while (is_chained == true); send: smb3_preauth_hash_rsp(work); if (work->sess && work->sess->enc && work->encrypted && conn->ops->encrypt_resp) { rc = conn->ops->encrypt_resp(work); if (rc < 0) conn->ops->set_rsp_status(work, STATUS_DATA_ERROR); } ksmbd_conn_write(work); } /** * handle_ksmbd_work() - process pending smb work requests * @wk: smb work containing request command buffer * * called by kworker threads to process remaining smb work requests */ static void handle_ksmbd_work(struct work_struct *wk) { struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); struct ksmbd_conn *conn = work->conn; atomic64_inc(&conn->stats.request_served); __handle_ksmbd_work(work, conn); ksmbd_conn_try_dequeue_request(work); ksmbd_free_work_struct(work); /* * Check the waitqueue to drop pending requests on * disconnection. waitqueue_active is safe because it * uses an atomic operation for the condition. */ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) wake_up(&conn->r_count_q); } /** * queue_ksmbd_work() - queue a smb request to worker thread queue * for processing smb command and sending response * @conn: connection instance * * read remaining data from socket, create and submit work. 
*/ static int queue_ksmbd_work(struct ksmbd_conn *conn) { struct ksmbd_work *work; int err; work = ksmbd_alloc_work_struct(); if (!work) { pr_err("allocation for work failed\n"); return -ENOMEM; } work->conn = conn; work->request_buf = conn->request_buf; conn->request_buf = NULL; err = ksmbd_init_smb_server(work); if (err) { ksmbd_free_work_struct(work); return 0; } ksmbd_conn_enqueue_request(work); atomic_inc(&conn->r_count); /* update activity on connection */ conn->last_active = jiffies; INIT_WORK(&work->work, handle_ksmbd_work); ksmbd_queue_work(work); return 0; } static int ksmbd_server_process_request(struct ksmbd_conn *conn) { return queue_ksmbd_work(conn); } static int ksmbd_server_terminate_conn(struct ksmbd_conn *conn) { ksmbd_sessions_deregister(conn); destroy_lease_table(conn); return 0; } static void ksmbd_server_tcp_callbacks_init(void) { struct ksmbd_conn_ops ops; ops.process_fn = ksmbd_server_process_request; ops.terminate_fn = ksmbd_server_terminate_conn; ksmbd_conn_init_server_callbacks(&ops); } static void server_conf_free(void) { int i; for (i = 0; i < ARRAY_SIZE(server_conf.conf); i++) { kfree(server_conf.conf[i]); server_conf.conf[i] = NULL; } } static int server_conf_init(void) { WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP); server_conf.enforced_signing = 0; server_conf.min_protocol = ksmbd_min_protocol(); server_conf.max_protocol = ksmbd_max_protocol(); server_conf.auth_mechs = KSMBD_AUTH_NTLMSSP; #ifdef CONFIG_SMB_SERVER_KERBEROS5 server_conf.auth_mechs |= KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5; #endif return 0; } static void server_ctrl_handle_init(struct server_ctrl_struct *ctrl) { int ret; ret = ksmbd_conn_transport_init(); if (ret) { server_queue_ctrl_reset_work(); return; } WRITE_ONCE(server_conf.state, SERVER_STATE_RUNNING); } static void server_ctrl_handle_reset(struct server_ctrl_struct *ctrl) { ksmbd_ipc_soft_reset(); ksmbd_conn_transport_destroy(); server_conf_free(); server_conf_init(); WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP); } static void server_ctrl_handle_work(struct work_struct *work) { struct server_ctrl_struct *ctrl; ctrl = container_of(work, struct server_ctrl_struct, ctrl_work); mutex_lock(&ctrl_lock); switch (ctrl->type) { case SERVER_CTRL_TYPE_INIT: server_ctrl_handle_init(ctrl); break; case SERVER_CTRL_TYPE_RESET: server_ctrl_handle_reset(ctrl); break; default: pr_err("Unknown server work type: %d\n", ctrl->type); } mutex_unlock(&ctrl_lock); kfree(ctrl); module_put(THIS_MODULE); } static int __queue_ctrl_work(int type) { struct server_ctrl_struct *ctrl; ctrl = kmalloc(sizeof(struct server_ctrl_struct), GFP_KERNEL); if (!ctrl) return -ENOMEM; __module_get(THIS_MODULE); ctrl->type = type; INIT_WORK(&ctrl->ctrl_work, server_ctrl_handle_work); queue_work(system_long_wq, &ctrl->ctrl_work); return 0; } int server_queue_ctrl_init_work(void) { return __queue_ctrl_work(SERVER_CTRL_TYPE_INIT); } int server_queue_ctrl_reset_work(void) { return __queue_ctrl_work(SERVER_CTRL_TYPE_RESET); } static ssize_t stats_show(const struct class *class, const struct class_attribute *attr, char *buf) { /* * Inc this each time you change stats output format, * so user space will know what to do. 
*/ static int stats_version = 2; static const char * const state[] = { "startup", "running", "reset", "shutdown" }; return sysfs_emit(buf, "%d %s %d %lu\n", stats_version, state[server_conf.state], server_conf.tcp_port, server_conf.ipc_last_active / HZ); } static ssize_t kill_server_store(const struct class *class, const struct class_attribute *attr, const char *buf, size_t len) { if (!sysfs_streq(buf, "hard")) return len; pr_info("kill command received\n"); mutex_lock(&ctrl_lock); WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING); __module_get(THIS_MODULE); server_ctrl_handle_reset(NULL); module_put(THIS_MODULE); mutex_unlock(&ctrl_lock); return len; } static const char * const debug_type_strings[] = {"smb", "auth", "vfs", "oplock", "ipc", "conn", "rdma"}; static ssize_t debug_show(const struct class *class, const struct class_attribute *attr, char *buf) { ssize_t sz = 0; int i, pos = 0; for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) { if ((ksmbd_debug_types >> i) & 1) { pos = sysfs_emit_at(buf, sz, "[%s] ", debug_type_strings[i]); } else { pos = sysfs_emit_at(buf, sz, "%s ", debug_type_strings[i]); } sz += pos; } sz += sysfs_emit_at(buf, sz, "\n"); return sz; } static ssize_t debug_store(const struct class *class, const struct class_attribute *attr, const char *buf, size_t len) { int i; for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) { if (sysfs_streq(buf, "all")) { if (ksmbd_debug_types == KSMBD_DEBUG_ALL) ksmbd_debug_types = 0; else ksmbd_debug_types = KSMBD_DEBUG_ALL; break; } if (sysfs_streq(buf, debug_type_strings[i])) { if (ksmbd_debug_types & (1 << i)) ksmbd_debug_types &= ~(1 << i); else ksmbd_debug_types |= (1 << i); break; } } return len; } static CLASS_ATTR_RO(stats); static CLASS_ATTR_WO(kill_server); static CLASS_ATTR_RW(debug); static struct attribute *ksmbd_control_class_attrs[] = { &class_attr_stats.attr, &class_attr_kill_server.attr, &class_attr_debug.attr, NULL, }; ATTRIBUTE_GROUPS(ksmbd_control_class); static struct class ksmbd_control_class = { .name = "ksmbd-control", .class_groups = ksmbd_control_class_groups, }; static int ksmbd_server_shutdown(void) { WRITE_ONCE(server_conf.state, SERVER_STATE_SHUTTING_DOWN); class_unregister(&ksmbd_control_class); ksmbd_workqueue_destroy(); ksmbd_ipc_release(); ksmbd_conn_transport_destroy(); ksmbd_crypto_destroy(); ksmbd_free_global_file_table(); destroy_lease_table(NULL); ksmbd_work_pool_destroy(); ksmbd_exit_file_cache(); server_conf_free(); return 0; } static int __init ksmbd_server_init(void) { int ret; ret = class_register(&ksmbd_control_class); if (ret) { pr_err("Unable to register ksmbd-control class\n"); return ret; } ksmbd_server_tcp_callbacks_init(); ret = server_conf_init(); if (ret) goto err_unregister; ret = ksmbd_work_pool_init(); if (ret) goto err_unregister; ret = ksmbd_init_file_cache(); if (ret) goto err_destroy_work_pools; ret = ksmbd_ipc_init(); if (ret) goto err_exit_file_cache; ret = ksmbd_init_global_file_table(); if (ret) goto err_ipc_release; ret = ksmbd_inode_hash_init(); if (ret) goto err_destroy_file_table; ret = ksmbd_crypto_create(); if (ret) goto err_release_inode_hash; ret = ksmbd_workqueue_init(); if (ret) goto err_crypto_destroy; return 0; err_crypto_destroy: ksmbd_crypto_destroy(); err_release_inode_hash: ksmbd_release_inode_hash(); err_destroy_file_table: ksmbd_free_global_file_table(); err_ipc_release: ksmbd_ipc_release(); err_exit_file_cache: ksmbd_exit_file_cache(); err_destroy_work_pools: ksmbd_work_pool_destroy(); err_unregister: class_unregister(&ksmbd_control_class); 
return ret; } /** * ksmbd_server_exit() - shutdown forker thread and free memory at module exit */ static void __exit ksmbd_server_exit(void) { ksmbd_server_shutdown(); rcu_barrier(); ksmbd_release_inode_hash(); } MODULE_AUTHOR("Namjae Jeon <[email protected]>"); MODULE_VERSION(KSMBD_VERSION); MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: ecb"); MODULE_SOFTDEP("pre: hmac"); MODULE_SOFTDEP("pre: md5"); MODULE_SOFTDEP("pre: nls"); MODULE_SOFTDEP("pre: aes"); MODULE_SOFTDEP("pre: cmac"); MODULE_SOFTDEP("pre: sha256"); MODULE_SOFTDEP("pre: sha512"); MODULE_SOFTDEP("pre: aead2"); MODULE_SOFTDEP("pre: ccm"); MODULE_SOFTDEP("pre: gcm"); MODULE_SOFTDEP("pre: crc32"); module_init(ksmbd_server_init) module_exit(ksmbd_server_exit)
linux-master
fs/smb/server/server.c
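debug_store()/debug_show() above implement a small toggle protocol over the ksmbd_debug_types bitmask: writing a type name flips that one bit, and writing "all" flips between all bits set and none. A standalone user-space sketch of the same logic; the type names mirror debug_type_strings above, but the ALL mask and everything else are simplified stand-ins:

#include <stdio.h>
#include <string.h>

static const char * const types[] = {
	"smb", "auth", "vfs", "oplock", "ipc", "conn", "rdma"
};
#define NTYPES (sizeof(types) / sizeof(types[0]))
#define DEBUG_ALL ((1u << NTYPES) - 1)

static unsigned int debug_mask;

static void debug_store(const char *buf)
{
	unsigned int i;

	for (i = 0; i < NTYPES; i++) {
		if (!strcmp(buf, "all")) {
			/* "all" toggles between everything and nothing */
			debug_mask = (debug_mask == DEBUG_ALL) ? 0 : DEBUG_ALL;
			break;
		}
		if (!strcmp(buf, types[i])) {
			debug_mask ^= 1u << i;  /* flip just this one type */
			break;
		}
	}
}

static void debug_show(void)
{
	unsigned int i;

	for (i = 0; i < NTYPES; i++) {
		if ((debug_mask >> i) & 1)
			printf("[%s] ", types[i]);
		else
			printf("%s ", types[i]);
	}
	printf("\n");
}

int main(void)
{
	debug_store("smb");
	debug_store("conn");
	debug_show();   /* [smb] auth vfs oplock ipc [conn] rdma */
	debug_store("all");
	debug_show();   /* every type bracketed */
	return 0;
}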
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2019 Samsung Electronics Co., Ltd. */ #include <linux/list.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "server.h" #include "connection.h" #include "ksmbd_work.h" #include "mgmt/ksmbd_ida.h" static struct kmem_cache *work_cache; static struct workqueue_struct *ksmbd_wq; struct ksmbd_work *ksmbd_alloc_work_struct(void) { struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL); if (work) { work->compound_fid = KSMBD_NO_FID; work->compound_pfid = KSMBD_NO_FID; INIT_LIST_HEAD(&work->request_entry); INIT_LIST_HEAD(&work->async_request_entry); INIT_LIST_HEAD(&work->fp_entry); INIT_LIST_HEAD(&work->interim_entry); INIT_LIST_HEAD(&work->aux_read_list); work->iov_alloc_cnt = 4; work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), GFP_KERNEL); if (!work->iov) { kmem_cache_free(work_cache, work); work = NULL; } } return work; } void ksmbd_free_work_struct(struct ksmbd_work *work) { struct aux_read *ar, *tmp; WARN_ON(work->saved_cred != NULL); kvfree(work->response_buf); list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) { kvfree(ar->buf); list_del(&ar->entry); kfree(ar); } kfree(work->tr_buf); kvfree(work->request_buf); kfree(work->iov); if (work->async_id) ksmbd_release_id(&work->conn->async_ida, work->async_id); kmem_cache_free(work_cache, work); } void ksmbd_work_pool_destroy(void) { kmem_cache_destroy(work_cache); } int ksmbd_work_pool_init(void) { work_cache = kmem_cache_create("ksmbd_work_cache", sizeof(struct ksmbd_work), 0, SLAB_HWCACHE_ALIGN, NULL); if (!work_cache) return -ENOMEM; return 0; } int ksmbd_workqueue_init(void) { ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0); if (!ksmbd_wq) return -ENOMEM; return 0; } void ksmbd_workqueue_destroy(void) { destroy_workqueue(ksmbd_wq); ksmbd_wq = NULL; } bool ksmbd_queue_work(struct ksmbd_work *work) { return queue_work(ksmbd_wq, &work->work); } static int ksmbd_realloc_iov_pin(struct ksmbd_work *work, void *ib, unsigned int ib_len) { if (work->iov_alloc_cnt <= work->iov_cnt) { struct kvec *new; work->iov_alloc_cnt += 4; new = krealloc(work->iov, sizeof(struct kvec) * work->iov_alloc_cnt, GFP_KERNEL | __GFP_ZERO); if (!new) return -ENOMEM; work->iov = new; } work->iov[++work->iov_idx].iov_base = ib; work->iov[work->iov_idx].iov_len = ib_len; work->iov_cnt++; return 0; } static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len, void *aux_buf, unsigned int aux_size) { /* Plus rfc_length size on first iov */ if (!work->iov_idx) { work->iov[work->iov_idx].iov_base = work->response_buf; *(__be32 *)work->iov[0].iov_base = 0; work->iov[work->iov_idx].iov_len = 4; work->iov_cnt++; } ksmbd_realloc_iov_pin(work, ib, len); inc_rfc1001_len(work->iov[0].iov_base, len); if (aux_size) { struct aux_read *ar; ksmbd_realloc_iov_pin(work, aux_buf, aux_size); inc_rfc1001_len(work->iov[0].iov_base, aux_size); ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL); if (!ar) return -ENOMEM; ar->buf = aux_buf; list_add(&ar->entry, &work->aux_read_list); } return 0; } int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len) { return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0); } int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len, void *aux_buf, unsigned int aux_size) { return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size); } int allocate_interim_rsp_buf(struct ksmbd_work *work) { work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL); if (!work->response_buf) return -ENOMEM; 
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE; return 0; }
linux-master
fs/smb/server/ksmbd_work.c
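__ksmbd_iov_pin_rsp() above reserves iov[0] for the 4-byte RFC1002 length field on the first pin, grows the vector array in steps of four like iov_alloc_cnt, and bumps the frame length in place for every pinned buffer. A simplified user-space sketch of that bookkeeping; struct rsp and pin() are illustrative names, only the pattern is taken from the code above:

#include <arpa/inet.h>          /* htonl/ntohl */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kvec { void *iov_base; size_t iov_len; };

struct rsp {
	uint8_t hdr[4];         /* RFC1002 length, big endian */
	struct kvec *iov;
	unsigned int cnt, alloc;
};

static int pin(struct rsp *r, void *buf, uint32_t len)
{
	uint32_t total;

	if (r->cnt == 0) {      /* first pin: install the length header */
		memset(r->hdr, 0, sizeof(r->hdr));
		r->iov[0].iov_base = r->hdr;
		r->iov[0].iov_len = sizeof(r->hdr);
		r->cnt = 1;
	}
	if (r->cnt == r->alloc) {       /* grow by 4, as the code above does */
		struct kvec *n = realloc(r->iov, (r->alloc + 4) * sizeof(*n));

		if (!n)
			return -1;
		r->iov = n;
		r->alloc += 4;
	}
	r->iov[r->cnt].iov_base = buf;
	r->iov[r->cnt].iov_len = len;
	r->cnt++;

	/* the inc_rfc1001_len() step: add this fragment to the frame length */
	memcpy(&total, r->hdr, sizeof(total));
	total = htonl(ntohl(total) + len);
	memcpy(r->hdr, &total, sizeof(total));
	return 0;
}

int main(void)
{
	struct rsp r = { .iov = calloc(4, sizeof(struct kvec)), .alloc = 4 };
	char a[64], b[100];
	uint32_t total;

	if (!r.iov || pin(&r, a, sizeof(a)) || pin(&r, b, sizeof(b)))
		return 1;
	memcpy(&total, r.hdr, sizeof(total));
	printf("frame length %u over %u vectors\n", ntohl(total), r.cnt);
	free(r.iov);
	return 0;
}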
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/mutex.h> #include <linux/freezer.h> #include <linux/module.h> #include "server.h" #include "smb_common.h" #include "mgmt/ksmbd_ida.h" #include "connection.h" #include "transport_tcp.h" #include "transport_rdma.h" static DEFINE_MUTEX(init_lock); static struct ksmbd_conn_ops default_conn_ops; LIST_HEAD(conn_list); DECLARE_RWSEM(conn_list_lock); /** * ksmbd_conn_free() - free resources of the connection instance * * @conn: connection instance to be cleand up * * During the thread termination, the corresponding conn instance * resources(sock/memory) are released and finally the conn object is freed. */ void ksmbd_conn_free(struct ksmbd_conn *conn) { down_write(&conn_list_lock); list_del(&conn->conns_list); up_write(&conn_list_lock); xa_destroy(&conn->sessions); kvfree(conn->request_buf); kfree(conn->preauth_info); kfree(conn); } /** * ksmbd_conn_alloc() - initialize a new connection instance * * Return: ksmbd_conn struct on success, otherwise NULL */ struct ksmbd_conn *ksmbd_conn_alloc(void) { struct ksmbd_conn *conn; conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL); if (!conn) return NULL; conn->need_neg = true; ksmbd_conn_set_new(conn); conn->local_nls = load_nls("utf8"); if (!conn->local_nls) conn->local_nls = load_nls_default(); if (IS_ENABLED(CONFIG_UNICODE)) conn->um = utf8_load(UNICODE_AGE(12, 1, 0)); else conn->um = ERR_PTR(-EOPNOTSUPP); if (IS_ERR(conn->um)) conn->um = NULL; atomic_set(&conn->req_running, 0); atomic_set(&conn->r_count, 0); conn->total_credits = 1; conn->outstanding_credits = 0; init_waitqueue_head(&conn->req_running_q); init_waitqueue_head(&conn->r_count_q); INIT_LIST_HEAD(&conn->conns_list); INIT_LIST_HEAD(&conn->requests); INIT_LIST_HEAD(&conn->async_requests); spin_lock_init(&conn->request_lock); spin_lock_init(&conn->credits_lock); ida_init(&conn->async_ida); xa_init(&conn->sessions); spin_lock_init(&conn->llist_lock); INIT_LIST_HEAD(&conn->lock_list); down_write(&conn_list_lock); list_add(&conn->conns_list, &conn_list); up_write(&conn_list_lock); return conn; } bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c) { struct ksmbd_conn *t; bool ret = false; down_read(&conn_list_lock); list_for_each_entry(t, &conn_list, conns_list) { if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE)) continue; ret = true; break; } up_read(&conn_list_lock); return ret; } void ksmbd_conn_enqueue_request(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct list_head *requests_queue = NULL; if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) requests_queue = &conn->requests; if (requests_queue) { atomic_inc(&conn->req_running); spin_lock(&conn->request_lock); list_add_tail(&work->request_entry, requests_queue); spin_unlock(&conn->request_lock); } } void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; if (list_empty(&work->request_entry) && list_empty(&work->async_request_entry)) return; atomic_dec(&conn->req_running); spin_lock(&conn->request_lock); list_del_init(&work->request_entry); spin_unlock(&conn->request_lock); if (work->asynchronous) release_async_work(work); wake_up_all(&conn->req_running_q); } void ksmbd_conn_lock(struct ksmbd_conn *conn) { mutex_lock(&conn->srv_mutex); } void ksmbd_conn_unlock(struct ksmbd_conn *conn) { mutex_unlock(&conn->srv_mutex); } void ksmbd_all_conn_set_status(u64 sess_id, u32 status) { struct 
ksmbd_conn *conn; down_read(&conn_list_lock); list_for_each_entry(conn, &conn_list, conns_list) { if (conn->binding || xa_load(&conn->sessions, sess_id)) WRITE_ONCE(conn->status, status); } up_read(&conn_list_lock); } void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id) { struct ksmbd_conn *bind_conn; wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2); down_read(&conn_list_lock); list_for_each_entry(bind_conn, &conn_list, conns_list) { if (bind_conn == conn) continue; if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) && !ksmbd_conn_releasing(bind_conn) && atomic_read(&bind_conn->req_running)) { wait_event(bind_conn->req_running_q, atomic_read(&bind_conn->req_running) == 0); } } up_read(&conn_list_lock); } int ksmbd_conn_write(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; int sent; if (!work->response_buf) { pr_err("NULL response header\n"); return -EINVAL; } if (work->send_no_response) return 0; ksmbd_conn_lock(conn); sent = conn->transport->ops->writev(conn->transport, work->iov, work->iov_cnt, get_rfc1002_len(work->iov[0].iov_base) + 4, work->need_invalidate_rkey, work->remote_key); ksmbd_conn_unlock(conn); if (sent < 0) { pr_err("Failed to send message: %d\n", sent); return sent; } return 0; } int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf, unsigned int buflen, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len) { int ret = -EINVAL; if (conn->transport->ops->rdma_read) ret = conn->transport->ops->rdma_read(conn->transport, buf, buflen, desc, desc_len); return ret; } int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf, unsigned int buflen, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len) { int ret = -EINVAL; if (conn->transport->ops->rdma_write) ret = conn->transport->ops->rdma_write(conn->transport, buf, buflen, desc, desc_len); return ret; } bool ksmbd_conn_alive(struct ksmbd_conn *conn) { if (!ksmbd_server_running()) return false; if (ksmbd_conn_exiting(conn)) return false; if (kthread_should_stop()) return false; if (atomic_read(&conn->stats.open_files_count) > 0) return true; /* * Stop current session if the time that get last request from client * is bigger than deadtime user configured and opening file count is * zero. 
*/ if (server_conf.deadtime > 0 && time_after(jiffies, conn->last_active + server_conf.deadtime)) { ksmbd_debug(CONN, "No response from client in %lu minutes\n", server_conf.deadtime / SMB_ECHO_INTERVAL); return false; } return true; } #define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr)) #define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4) /** * ksmbd_conn_handler_loop() - session thread to listen on new smb requests * @p: connection instance * * One thread each per connection * * Return: 0 on success */ int ksmbd_conn_handler_loop(void *p) { struct ksmbd_conn *conn = (struct ksmbd_conn *)p; struct ksmbd_transport *t = conn->transport; unsigned int pdu_size, max_allowed_pdu_size; char hdr_buf[4] = {0,}; int size; mutex_init(&conn->srv_mutex); __module_get(THIS_MODULE); if (t->ops->prepare && t->ops->prepare(t)) goto out; conn->last_active = jiffies; while (ksmbd_conn_alive(conn)) { if (try_to_freeze()) continue; kvfree(conn->request_buf); conn->request_buf = NULL; size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1); if (size != sizeof(hdr_buf)) break; pdu_size = get_rfc1002_len(hdr_buf); ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size); if (ksmbd_conn_good(conn)) max_allowed_pdu_size = SMB3_MAX_MSGSIZE + conn->vals->max_write_size; else max_allowed_pdu_size = SMB3_MAX_MSGSIZE; if (pdu_size > max_allowed_pdu_size) { pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n", pdu_size, max_allowed_pdu_size, READ_ONCE(conn->status)); break; } /* * Check maximum pdu size(0x00FFFFFF). */ if (pdu_size > MAX_STREAM_PROT_LEN) break; if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE) break; /* 4 for rfc1002 length field */ /* 1 for implied bcc[0] */ size = pdu_size + 4 + 1; conn->request_buf = kvmalloc(size, GFP_KERNEL); if (!conn->request_buf) break; memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf)); /* * We already read 4 bytes to find out PDU size, now * read in PDU */ size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2); if (size < 0) { pr_err("sock_read failed: %d\n", size); break; } if (size != pdu_size) { pr_err("PDU error. 
Read: %d, Expected: %d\n", size, pdu_size); continue; } if (!ksmbd_smb_request(conn)) break; if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId == SMB2_PROTO_NUMBER) { if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE) break; } if (!default_conn_ops.process_fn) { pr_err("No connection request callback\n"); break; } if (default_conn_ops.process_fn(conn)) { pr_err("Cannot handle request\n"); break; } } out: ksmbd_conn_set_releasing(conn); /* Wait till all reference dropped to the Server object*/ wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0); if (IS_ENABLED(CONFIG_UNICODE)) utf8_unload(conn->um); unload_nls(conn->local_nls); if (default_conn_ops.terminate_fn) default_conn_ops.terminate_fn(conn); t->ops->disconnect(t); module_put(THIS_MODULE); return 0; } void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops) { default_conn_ops.process_fn = ops->process_fn; default_conn_ops.terminate_fn = ops->terminate_fn; } int ksmbd_conn_transport_init(void) { int ret; mutex_lock(&init_lock); ret = ksmbd_tcp_init(); if (ret) { pr_err("Failed to init TCP subsystem: %d\n", ret); goto out; } ret = ksmbd_rdma_init(); if (ret) { pr_err("Failed to init RDMA subsystem: %d\n", ret); goto out; } out: mutex_unlock(&init_lock); return ret; } static void stop_sessions(void) { struct ksmbd_conn *conn; struct ksmbd_transport *t; again: down_read(&conn_list_lock); list_for_each_entry(conn, &conn_list, conns_list) { struct task_struct *task; t = conn->transport; task = t->handler; if (task) ksmbd_debug(CONN, "Stop session handler %s/%d\n", task->comm, task_pid_nr(task)); ksmbd_conn_set_exiting(conn); if (t->ops->shutdown) { up_read(&conn_list_lock); t->ops->shutdown(t); down_read(&conn_list_lock); } } up_read(&conn_list_lock); if (!list_empty(&conn_list)) { schedule_timeout_interruptible(HZ / 10); /* 100ms */ goto again; } } void ksmbd_conn_transport_destroy(void) { mutex_lock(&init_lock); ksmbd_tcp_destroy(); ksmbd_rdma_destroy(); stop_sessions(); mutex_unlock(&init_lock); }
linux-master
fs/smb/server/connection.c
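ksmbd_conn_handler_loop() above frames the byte stream by reading a 4-byte RFC1002 header, taking the PDU size from its low 24 bits, and bounds-checking it before allocating and reading the body. A user-space sketch of that framing; the reader is a stub, and MAX_STREAM_PROT_LEN is the one constant carried over from the code above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STREAM_PROT_LEN 0x00FFFFFFu         /* 24-bit RFC1002 limit */

typedef int (*read_fn)(void *ctx, void *buf, unsigned int len);

static int read_one_pdu(read_fn rd, void *ctx, uint8_t **out,
			unsigned int *outlen)
{
	uint8_t hdr[4];
	unsigned int pdu_size;
	uint8_t *buf;

	if (rd(ctx, hdr, sizeof(hdr)) != (int)sizeof(hdr))
		return -1;

	/* get_rfc1002_len(): the low 24 bits, big endian */
	pdu_size = (hdr[1] << 16) | (hdr[2] << 8) | hdr[3];
	if (pdu_size > MAX_STREAM_PROT_LEN)
		return -1;

	buf = malloc(pdu_size + 4);     /* keep the header in front */
	if (!buf)
		return -1;
	memcpy(buf, hdr, 4);
	if (rd(ctx, buf + 4, pdu_size) != (int)pdu_size) {
		free(buf);
		return -1;
	}
	*out = buf;
	*outlen = pdu_size + 4;
	return 0;
}

/* stub stream: serves bytes from a memory buffer */
static int from_mem(void *ctx, void *buf, unsigned int len)
{
	uint8_t **p = ctx;

	memcpy(buf, *p, len);
	*p += len;
	return (int)len;
}

int main(void)
{
	uint8_t stream[] = { 0, 0, 0, 5, 'h', 'e', 'l', 'l', 'o' };
	uint8_t *cur = stream, *pdu;
	unsigned int n;

	if (!read_one_pdu(from_mem, &cur, &pdu, &n)) {
		printf("got a %u byte frame\n", n);
		free(pdu);
	}
	return 0;
}

The kernel loop additionally reserves one extra byte for the implied bcc[0] and applies a dialect-dependent cap (SMB3_MAX_MSGSIZE, plus max_write_size once the connection is in a good state) before this hard 24-bit limit.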
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/freezer.h> #include "smb_common.h" #include "server.h" #include "auth.h" #include "connection.h" #include "transport_tcp.h" #define IFACE_STATE_DOWN BIT(0) #define IFACE_STATE_CONFIGURED BIT(1) static atomic_t active_num_conn; struct interface { struct task_struct *ksmbd_kthread; struct socket *ksmbd_socket; struct list_head entry; char *name; struct mutex sock_release_lock; int state; }; static LIST_HEAD(iface_list); static int bind_additional_ifaces; struct tcp_transport { struct ksmbd_transport transport; struct socket *sock; struct kvec *iov; unsigned int nr_iov; }; static struct ksmbd_transport_ops ksmbd_tcp_transport_ops; static void tcp_stop_kthread(struct task_struct *kthread); static struct interface *alloc_iface(char *ifname); #define KSMBD_TRANS(t) (&(t)->transport) #define TCP_TRANS(t) ((struct tcp_transport *)container_of(t, \ struct tcp_transport, transport)) static inline void ksmbd_tcp_nodelay(struct socket *sock) { tcp_sock_set_nodelay(sock->sk); } static inline void ksmbd_tcp_reuseaddr(struct socket *sock) { sock_set_reuseaddr(sock->sk); } static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs) { lock_sock(sock->sk); if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1) sock->sk->sk_rcvtimeo = secs * HZ; else sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; release_sock(sock->sk); } static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs) { sock_set_sndtimeo(sock->sk, secs); } static struct tcp_transport *alloc_transport(struct socket *client_sk) { struct tcp_transport *t; struct ksmbd_conn *conn; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return NULL; t->sock = client_sk; conn = ksmbd_conn_alloc(); if (!conn) { kfree(t); return NULL; } conn->transport = KSMBD_TRANS(t); KSMBD_TRANS(t)->conn = conn; KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops; return t; } static void free_transport(struct tcp_transport *t) { kernel_sock_shutdown(t->sock, SHUT_RDWR); sock_release(t->sock); t->sock = NULL; ksmbd_conn_free(KSMBD_TRANS(t)->conn); kfree(t->iov); kfree(t); } /** * kvec_array_init() - initialize a IO vector segment * @new: IO vector to be initialized * @iov: base IO vector * @nr_segs: number of segments in base iov * @bytes: total iovec length so far for read * * Return: Number of IO segments */ static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs, size_t bytes) { size_t base = 0; while (bytes || !iov->iov_len) { int copy = min(bytes, iov->iov_len); bytes -= copy; base += copy; if (iov->iov_len == base) { iov++; nr_segs--; base = 0; } } memcpy(new, iov, sizeof(*iov) * nr_segs); new->iov_base += base; new->iov_len -= base; return nr_segs; } /** * get_conn_iovec() - get connection iovec for reading from socket * @t: TCP transport instance * @nr_segs: number of segments in iov * * Return: return existing or newly allocate iovec */ static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs) { struct kvec *new_iov; if (t->iov && nr_segs <= t->nr_iov) return t->iov; /* not big enough -- allocate a new one and release the old */ new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL); if (new_iov) { kfree(t->iov); t->iov = new_iov; t->nr_iov = nr_segs; } return new_iov; } static unsigned short ksmbd_tcp_get_port(const struct sockaddr *sa) { switch (sa->sa_family) { case AF_INET: return ntohs(((struct sockaddr_in 
*)sa)->sin_port); case AF_INET6: return ntohs(((struct sockaddr_in6 *)sa)->sin6_port); } return 0; } /** * ksmbd_tcp_new_connection() - create a new tcp session on mount * @client_sk: socket associated with new connection * * whenever a new connection is requested, create a conn thread * (session thread) to handle new incoming smb requests from the connection * * Return: 0 on success, otherwise error */ static int ksmbd_tcp_new_connection(struct socket *client_sk) { struct sockaddr *csin; int rc = 0; struct tcp_transport *t; t = alloc_transport(client_sk); if (!t) { sock_release(client_sk); return -ENOMEM; } csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn); if (kernel_getpeername(client_sk, csin) < 0) { pr_err("client ip resolution failed\n"); rc = -EINVAL; goto out_error; } KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop, KSMBD_TRANS(t)->conn, "ksmbd:%u", ksmbd_tcp_get_port(csin)); if (IS_ERR(KSMBD_TRANS(t)->handler)) { pr_err("cannot start conn thread\n"); rc = PTR_ERR(KSMBD_TRANS(t)->handler); free_transport(t); } return rc; out_error: free_transport(t); return rc; } /** * ksmbd_kthread_fn() - listen to new SMB connections and callback server * @p: arguments to forker thread * * Return: 0 on success, error number otherwise */ static int ksmbd_kthread_fn(void *p) { struct socket *client_sk = NULL; struct interface *iface = (struct interface *)p; int ret; while (!kthread_should_stop()) { mutex_lock(&iface->sock_release_lock); if (!iface->ksmbd_socket) { mutex_unlock(&iface->sock_release_lock); break; } ret = kernel_accept(iface->ksmbd_socket, &client_sk, SOCK_NONBLOCK); mutex_unlock(&iface->sock_release_lock); if (ret) { if (ret == -EAGAIN) /* check for new connections every 100 msecs */ schedule_timeout_interruptible(HZ / 10); continue; } if (server_conf.max_connections && atomic_inc_return(&active_num_conn) >= server_conf.max_connections) { pr_info_ratelimited("Limit the maximum number of connections(%u)\n", atomic_read(&active_num_conn)); atomic_dec(&active_num_conn); sock_release(client_sk); continue; } ksmbd_debug(CONN, "connect success: accepted new connection\n"); client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT; client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT; ksmbd_tcp_new_connection(client_sk); } ksmbd_debug(CONN, "releasing socket\n"); return 0; } /** * ksmbd_tcp_run_kthread() - start forker thread * @iface: pointer to struct interface * * start forker thread(ksmbd/0) at module init time to listen * on port 445 for new SMB connection requests. 
It creates per connection * server threads(ksmbd/x) * * Return: 0 on success or error number */ static int ksmbd_tcp_run_kthread(struct interface *iface) { int rc; struct task_struct *kthread; kthread = kthread_run(ksmbd_kthread_fn, (void *)iface, "ksmbd-%s", iface->name); if (IS_ERR(kthread)) { rc = PTR_ERR(kthread); return rc; } iface->ksmbd_kthread = kthread; return 0; } /** * ksmbd_tcp_readv() - read data from socket in given iovec * @t: TCP transport instance * @iov_orig: base IO vector * @nr_segs: number of segments in base iov * @to_read: number of bytes to read from socket * @max_retries: maximum retry count * * Return: on success return number of bytes read from socket, * otherwise return error number */ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig, unsigned int nr_segs, unsigned int to_read, int max_retries) { int length = 0; int total_read; unsigned int segs; struct msghdr ksmbd_msg; struct kvec *iov; struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn; iov = get_conn_iovec(t, nr_segs); if (!iov) return -ENOMEM; ksmbd_msg.msg_control = NULL; ksmbd_msg.msg_controllen = 0; for (total_read = 0; to_read; total_read += length, to_read -= length) { try_to_freeze(); if (!ksmbd_conn_alive(conn)) { total_read = -ESHUTDOWN; break; } segs = kvec_array_init(iov, iov_orig, nr_segs, total_read); length = kernel_recvmsg(t->sock, &ksmbd_msg, iov, segs, to_read, 0); if (length == -EINTR) { total_read = -ESHUTDOWN; break; } else if (ksmbd_conn_need_reconnect(conn)) { total_read = -EAGAIN; break; } else if (length == -ERESTARTSYS || length == -EAGAIN) { /* * If max_retries is negative, Allow unlimited * retries to keep connection with inactive sessions. */ if (max_retries == 0) { total_read = length; break; } else if (max_retries > 0) { max_retries--; } usleep_range(1000, 2000); length = 0; continue; } else if (length <= 0) { total_read = length; break; } } return total_read; } /** * ksmbd_tcp_read() - read data from socket in given buffer * @t: TCP transport instance * @buf: buffer to store read data from socket * @to_read: number of bytes to read from socket * * Return: on success return number of bytes read from socket, * otherwise return error number */ static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read, int max_retries) { struct kvec iov; iov.iov_base = buf; iov.iov_len = to_read; return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries); } static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov, int nvecs, int size, bool need_invalidate, unsigned int remote_key) { struct msghdr smb_msg = {.msg_flags = MSG_NOSIGNAL}; return kernel_sendmsg(TCP_TRANS(t)->sock, &smb_msg, iov, nvecs, size); } static void ksmbd_tcp_disconnect(struct ksmbd_transport *t) { free_transport(TCP_TRANS(t)); if (server_conf.max_connections) atomic_dec(&active_num_conn); } static void tcp_destroy_socket(struct socket *ksmbd_socket) { int ret; if (!ksmbd_socket) return; /* set zero to timeout */ ksmbd_tcp_rcv_timeout(ksmbd_socket, 0); ksmbd_tcp_snd_timeout(ksmbd_socket, 0); ret = kernel_sock_shutdown(ksmbd_socket, SHUT_RDWR); if (ret) pr_err("Failed to shutdown socket: %d\n", ret); sock_release(ksmbd_socket); } /** * create_socket - create socket for ksmbd/0 * * Return: 0 on success, error number otherwise */ static int create_socket(struct interface *iface) { int ret; struct sockaddr_in6 sin6; struct sockaddr_in sin; struct socket *ksmbd_socket; bool ipv4 = false; ret = sock_create(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket); if 
(ret) { if (ret != -EAFNOSUPPORT) pr_err("Can't create socket for ipv6, fallback to ipv4: %d\n", ret); ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket); if (ret) { pr_err("Can't create socket for ipv4: %d\n", ret); goto out_clear; } sin.sin_family = PF_INET; sin.sin_addr.s_addr = htonl(INADDR_ANY); sin.sin_port = htons(server_conf.tcp_port); ipv4 = true; } else { sin6.sin6_family = PF_INET6; sin6.sin6_addr = in6addr_any; sin6.sin6_port = htons(server_conf.tcp_port); } ksmbd_tcp_nodelay(ksmbd_socket); ksmbd_tcp_reuseaddr(ksmbd_socket); ret = sock_setsockopt(ksmbd_socket, SOL_SOCKET, SO_BINDTODEVICE, KERNEL_SOCKPTR(iface->name), strlen(iface->name)); if (ret != -ENODEV && ret < 0) { pr_err("Failed to set SO_BINDTODEVICE: %d\n", ret); goto out_error; } if (ipv4) ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin, sizeof(sin)); else ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin6, sizeof(sin6)); if (ret) { pr_err("Failed to bind socket: %d\n", ret); goto out_error; } ksmbd_socket->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT; ksmbd_socket->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT; ret = kernel_listen(ksmbd_socket, KSMBD_SOCKET_BACKLOG); if (ret) { pr_err("Port listen() error: %d\n", ret); goto out_error; } iface->ksmbd_socket = ksmbd_socket; ret = ksmbd_tcp_run_kthread(iface); if (ret) { pr_err("Can't start ksmbd main kthread: %d\n", ret); goto out_error; } iface->state = IFACE_STATE_CONFIGURED; return 0; out_error: tcp_destroy_socket(ksmbd_socket); out_clear: iface->ksmbd_socket = NULL; return ret; } static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct interface *iface; int ret, found = 0; switch (event) { case NETDEV_UP: if (netif_is_bridge_port(netdev)) return NOTIFY_OK; list_for_each_entry(iface, &iface_list, entry) { if (!strcmp(iface->name, netdev->name)) { found = 1; if (iface->state != IFACE_STATE_DOWN) break; ret = create_socket(iface); if (ret) return NOTIFY_OK; break; } } if (!found && bind_additional_ifaces) { iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL)); if (!iface) return NOTIFY_OK; ret = create_socket(iface); if (ret) break; } break; case NETDEV_DOWN: list_for_each_entry(iface, &iface_list, entry) { if (!strcmp(iface->name, netdev->name) && iface->state == IFACE_STATE_CONFIGURED) { tcp_stop_kthread(iface->ksmbd_kthread); iface->ksmbd_kthread = NULL; mutex_lock(&iface->sock_release_lock); tcp_destroy_socket(iface->ksmbd_socket); iface->ksmbd_socket = NULL; mutex_unlock(&iface->sock_release_lock); iface->state = IFACE_STATE_DOWN; break; } } break; } return NOTIFY_DONE; } static struct notifier_block ksmbd_netdev_notifier = { .notifier_call = ksmbd_netdev_event, }; int ksmbd_tcp_init(void) { register_netdevice_notifier(&ksmbd_netdev_notifier); return 0; } static void tcp_stop_kthread(struct task_struct *kthread) { int ret; if (!kthread) return; ret = kthread_stop(kthread); if (ret) pr_err("failed to stop forker thread\n"); } void ksmbd_tcp_destroy(void) { struct interface *iface, *tmp; unregister_netdevice_notifier(&ksmbd_netdev_notifier); list_for_each_entry_safe(iface, tmp, &iface_list, entry) { list_del(&iface->entry); kfree(iface->name); kfree(iface); } } static struct interface *alloc_iface(char *ifname) { struct interface *iface; if (!ifname) return NULL; iface = kzalloc(sizeof(struct interface), GFP_KERNEL); if (!iface) { kfree(ifname); return NULL; } iface->name = ifname; iface->state = IFACE_STATE_DOWN; list_add(&iface->entry, 
&iface_list); mutex_init(&iface->sock_release_lock); return iface; } int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) { int sz = 0; if (!ifc_list_sz) { struct net_device *netdev; rtnl_lock(); for_each_netdev(&init_net, netdev) { if (netif_is_bridge_port(netdev)) continue; if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) return -ENOMEM; } rtnl_unlock(); bind_additional_ifaces = 1; return 0; } while (ifc_list_sz > 0) { if (!alloc_iface(kstrdup(ifc_list, GFP_KERNEL))) return -ENOMEM; sz = strlen(ifc_list); if (!sz) break; ifc_list += sz + 1; ifc_list_sz -= (sz + 1); } bind_additional_ifaces = 0; return 0; } static struct ksmbd_transport_ops ksmbd_tcp_transport_ops = { .read = ksmbd_tcp_read, .writev = ksmbd_tcp_writev, .disconnect = ksmbd_tcp_disconnect, };
linux-master
fs/smb/server/transport_tcp.c
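kvec_array_init() above re-bases an iovec array after a partial read: segments that were fully consumed are skipped, and the first surviving segment is trimmed by the consumed offset. The same arithmetic in standalone user-space form, with a small demonstration of a 15-byte partial read across two segments:

#include <stdio.h>
#include <string.h>

struct kvec { char *iov_base; size_t iov_len; };

static unsigned int kvec_rebase(struct kvec *new, const struct kvec *iov,
				unsigned int nr_segs, size_t bytes)
{
	size_t base = 0;

	/* skip whole segments that were fully consumed (and empty ones) */
	while (bytes || !iov->iov_len) {
		size_t copy = bytes < iov->iov_len ? bytes : iov->iov_len;

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			nr_segs--;
			base = 0;
		}
	}

	memcpy(new, iov, sizeof(*iov) * nr_segs);
	new->iov_base += base;      /* trim the partially consumed segment */
	new->iov_len -= base;
	return nr_segs;
}

int main(void)
{
	char a[10], b[20];
	struct kvec orig[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct kvec cur[2];
	unsigned int segs = kvec_rebase(cur, orig, 2, 15);  /* 15 bytes read */

	printf("%u segs left, first has %zu bytes at offset %td\n",
	       segs, cur[0].iov_len, cur[0].iov_base - b);
	return 0;
}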
// SPDX-License-Identifier: GPL-2.0-or-later /* * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich * * Copyright (c) 2000 RP Internet (www.rpi.net.au). */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/oid_registry.h> #include "glob.h" #include "asn1.h" #include "connection.h" #include "auth.h" #include "ksmbd_spnego_negtokeninit.asn1.h" #include "ksmbd_spnego_negtokentarg.asn1.h" #define NTLMSSP_OID_LEN 10 static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x02, 0x0a }; int ksmbd_decode_negTokenInit(unsigned char *security_blob, int length, struct ksmbd_conn *conn) { return asn1_ber_decoder(&ksmbd_spnego_negtokeninit_decoder, conn, security_blob, length); } int ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length, struct ksmbd_conn *conn) { return asn1_ber_decoder(&ksmbd_spnego_negtokentarg_decoder, conn, security_blob, length); } static int compute_asn_hdr_len_bytes(int len) { if (len > 0xFFFFFF) return 4; else if (len > 0xFFFF) return 3; else if (len > 0xFF) return 2; else if (len > 0x7F) return 1; else return 0; } static void encode_asn_tag(char *buf, unsigned int *ofs, char tag, char seq, int length) { int i; int index = *ofs; char hdr_len = compute_asn_hdr_len_bytes(length); int len = length + 2 + hdr_len; /* insert tag */ buf[index++] = tag; if (!hdr_len) { buf[index++] = len; } else { buf[index++] = 0x80 | hdr_len; for (i = hdr_len - 1; i >= 0; i--) buf[index++] = (len >> (i * 8)) & 0xFF; } /* insert seq */ len = len - (index - *ofs); buf[index++] = seq; if (!hdr_len) { buf[index++] = len; } else { buf[index++] = 0x80 | hdr_len; for (i = hdr_len - 1; i >= 0; i--) buf[index++] = (len >> (i * 8)) & 0xFF; } *ofs += (index - *ofs); } int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen, char *ntlm_blob, int ntlm_blob_len) { char *buf; unsigned int ofs = 0; int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1; int oid_len = 4 + compute_asn_hdr_len_bytes(NTLMSSP_OID_LEN) * 2 + NTLMSSP_OID_LEN; int ntlmssp_len = 4 + compute_asn_hdr_len_bytes(ntlm_blob_len) * 2 + ntlm_blob_len; int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len + oid_len + ntlmssp_len) * 2 + neg_result_len + oid_len + ntlmssp_len; buf = kmalloc(total_len, GFP_KERNEL); if (!buf) return -ENOMEM; /* insert main gss header */ encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len + oid_len + ntlmssp_len); /* insert neg result */ encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1); buf[ofs++] = 1; /* insert oid */ encode_asn_tag(buf, &ofs, 0xa1, 0x06, NTLMSSP_OID_LEN); memcpy(buf + ofs, NTLMSSP_OID_STR, NTLMSSP_OID_LEN); ofs += NTLMSSP_OID_LEN; /* insert response token - ntlmssp blob */ encode_asn_tag(buf, &ofs, 0xa2, 0x04, ntlm_blob_len); memcpy(buf + ofs, ntlm_blob, ntlm_blob_len); ofs += ntlm_blob_len; *pbuffer = buf; *buflen = total_len; return 0; } int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, int neg_result) { char *buf; unsigned int ofs = 0; int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1; int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 + neg_result_len; buf = kmalloc(total_len, GFP_KERNEL); if (!buf) return -ENOMEM; /* insert main gss header */ encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len); /* insert neg result */ encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1); if (neg_result) 
buf[ofs++] = 2; else buf[ofs++] = 0; *pbuffer = buf; *buflen = total_len; return 0; } int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { enum OID oid; oid = look_up_OID(value, vlen); if (oid != OID_spnego) { char buf[50]; sprint_oid(value, vlen, buf, sizeof(buf)); ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf); return -EBADMSG; } return 0; } int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct ksmbd_conn *conn = context; enum OID oid; int mech_type; oid = look_up_OID(value, vlen); if (oid == OID_ntlmssp) { mech_type = KSMBD_AUTH_NTLMSSP; } else if (oid == OID_mskrb5) { mech_type = KSMBD_AUTH_MSKRB5; } else if (oid == OID_krb5) { mech_type = KSMBD_AUTH_KRB5; } else if (oid == OID_krb5u2u) { mech_type = KSMBD_AUTH_KRB5U2U; } else { char buf[50]; sprint_oid(value, vlen, buf, sizeof(buf)); ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf); return -EBADMSG; } conn->auth_mechs |= mech_type; if (conn->preferred_auth_mech == 0) conn->preferred_auth_mech = mech_type; return 0; } static int ksmbd_neg_token_alloc(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct ksmbd_conn *conn = context; conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL); if (!conn->mechToken) return -ENOMEM; return 0; } int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen); } int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen); }
linux-master
fs/smb/server/asn1.c
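encode_asn_tag() above emits BER/DER lengths in both forms: short form (a single byte) for lengths up to 0x7F, and long form (0x80 | n followed by n big-endian length bytes), with compute_asn_hdr_len_bytes() choosing n. A standalone sketch of just the length encoding; asn_len_bytes and encode_len are illustrative names:

#include <stdio.h>

static int asn_len_bytes(unsigned int len)  /* compute_asn_hdr_len_bytes */
{
	if (len > 0xFFFFFF) return 4;
	if (len > 0xFFFF)   return 3;
	if (len > 0xFF)     return 2;
	if (len > 0x7F)     return 1;
	return 0;
}

static unsigned int encode_len(unsigned char *buf, unsigned int len)
{
	int n = asn_len_bytes(len), i;
	unsigned int idx = 0;

	if (!n) {
		buf[idx++] = (unsigned char)len;    /* short form */
	} else {
		buf[idx++] = 0x80 | n;              /* long form marker */
		for (i = n - 1; i >= 0; i--)        /* big-endian bytes */
			buf[idx++] = (len >> (i * 8)) & 0xFF;
	}
	return idx;
}

int main(void)
{
	unsigned int lens[] = { 0x30, 0x80, 0x1234 };   /* 30 / 81 80 / 82 12 34 */
	unsigned char buf[8];
	unsigned int i, j, n;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		n = encode_len(buf, lens[i]);
		printf("len 0x%x ->", lens[i]);
		for (j = 0; j < n; j++)
			printf(" %02x", buf[j]);
		printf("\n");
	}
	return 0;
}

encode_asn_tag() above runs this encoding twice per call, once for the outer tag and once for the inner sequence byte, shrinking the inner length by the bytes already emitted.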
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/inetdevice.h> #include <net/addrconf.h> #include <linux/syscalls.h> #include <linux/namei.h> #include <linux/statfs.h> #include <linux/ethtool.h> #include <linux/falloc.h> #include <linux/mount.h> #include <linux/filelock.h> #include "glob.h" #include "smbfsctl.h" #include "oplock.h" #include "smbacl.h" #include "auth.h" #include "asn1.h" #include "connection.h" #include "transport_ipc.h" #include "transport_rdma.h" #include "vfs.h" #include "vfs_cache.h" #include "misc.h" #include "server.h" #include "smb_common.h" #include "smbstatus.h" #include "ksmbd_work.h" #include "mgmt/user_config.h" #include "mgmt/share_config.h" #include "mgmt/tree_connect.h" #include "mgmt/user_session.h" #include "mgmt/ksmbd_ida.h" #include "ndr.h" static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) { if (work->next_smb2_rcv_hdr_off) { *req = ksmbd_req_buf_next(work); *rsp = ksmbd_resp_buf_next(work); } else { *req = smb2_get_msg(work->request_buf); *rsp = smb2_get_msg(work->response_buf); } } #define WORK_BUFFERS(w, rq, rs) __wbuf((w), (void **)&(rq), (void **)&(rs)) /** * check_session_id() - check for valid session id in smb header * @conn: connection instance * @id: session id from smb header * * Return: 1 if valid session id, otherwise 0 */ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id) { struct ksmbd_session *sess; if (id == 0 || id == -1) return false; sess = ksmbd_session_lookup_all(conn, id); if (sess) return true; pr_err("Invalid user session id: %llu\n", id); return false; } struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn) { return xa_load(&sess->ksmbd_chann_list, (long)conn); } /** * smb2_get_ksmbd_tcon() - get tree connection information using a tree id. * @work: smb work * * Return: 0 if there is a tree connection matched or these are * skipable commands, otherwise error */ int smb2_get_ksmbd_tcon(struct ksmbd_work *work) { struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); unsigned int cmd = le16_to_cpu(req_hdr->Command); unsigned int tree_id; if (cmd == SMB2_TREE_CONNECT_HE || cmd == SMB2_CANCEL_HE || cmd == SMB2_LOGOFF_HE) { ksmbd_debug(SMB, "skip to check tree connect request\n"); return 0; } if (xa_empty(&work->sess->tree_conns)) { ksmbd_debug(SMB, "NO tree connected\n"); return -ENOENT; } tree_id = le32_to_cpu(req_hdr->Id.SyncId.TreeId); /* * If request is not the first in Compound request, * Just validate tree id in header with work->tcon->id. 
*/ if (work->next_smb2_rcv_hdr_off) { if (!work->tcon) { pr_err("The first operation in the compound does not have tcon\n"); return -EINVAL; } if (tree_id != UINT_MAX && work->tcon->id != tree_id) { pr_err("tree id(%u) is different with id(%u) in first operation\n", tree_id, work->tcon->id); return -EINVAL; } return 1; } work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id); if (!work->tcon) { pr_err("Invalid tid %d\n", tree_id); return -ENOENT; } return 1; } /** * smb2_set_err_rsp() - set error response code on smb response * @work: smb work containing response buffer */ void smb2_set_err_rsp(struct ksmbd_work *work) { struct smb2_err_rsp *err_rsp; if (work->next_smb2_rcv_hdr_off) err_rsp = ksmbd_resp_buf_next(work); else err_rsp = smb2_get_msg(work->response_buf); if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) { int err; err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE; err_rsp->ErrorContextCount = 0; err_rsp->Reserved = 0; err_rsp->ByteCount = 0; err_rsp->ErrorData[0] = 0; err = ksmbd_iov_pin_rsp(work, (void *)err_rsp, __SMB2_HEADER_STRUCTURE_SIZE + SMB2_ERROR_STRUCTURE_SIZE2); if (err) work->send_no_response = 1; } } /** * is_smb2_neg_cmd() - is it smb2 negotiation command * @work: smb work containing smb header * * Return: true if smb2 negotiation command, otherwise false */ bool is_smb2_neg_cmd(struct ksmbd_work *work) { struct smb2_hdr *hdr = smb2_get_msg(work->request_buf); /* is it SMB2 header ? */ if (hdr->ProtocolId != SMB2_PROTO_NUMBER) return false; /* make sure it is request not response message */ if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) return false; if (hdr->Command != SMB2_NEGOTIATE) return false; return true; } /** * is_smb2_rsp() - is it smb2 response * @work: smb work containing smb response buffer * * Return: true if smb2 response, otherwise false */ bool is_smb2_rsp(struct ksmbd_work *work) { struct smb2_hdr *hdr = smb2_get_msg(work->response_buf); /* is it SMB2 header ? */ if (hdr->ProtocolId != SMB2_PROTO_NUMBER) return false; /* make sure it is response not request message */ if (!(hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)) return false; return true; } /** * get_smb2_cmd_val() - get smb command code from smb header * @work: smb work containing smb request buffer * * Return: smb2 request command value */ u16 get_smb2_cmd_val(struct ksmbd_work *work) { struct smb2_hdr *rcv_hdr; if (work->next_smb2_rcv_hdr_off) rcv_hdr = ksmbd_req_buf_next(work); else rcv_hdr = smb2_get_msg(work->request_buf); return le16_to_cpu(rcv_hdr->Command); } /** * set_smb2_rsp_status() - set error response code on smb2 header * @work: smb work containing response buffer * @err: error response code */ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err) { struct smb2_hdr *rsp_hdr; if (work->next_smb2_rcv_hdr_off) rsp_hdr = ksmbd_resp_buf_next(work); else rsp_hdr = smb2_get_msg(work->response_buf); rsp_hdr->Status = err; smb2_set_err_rsp(work); } /** * init_smb2_neg_rsp() - initialize smb2 response for negotiate command * @work: smb work containing smb request buffer * * smb2 negotiate response is sent in reply of smb1 negotiate command for * dialect auto-negotiation. 
 */
int init_smb2_neg_rsp(struct ksmbd_work *work)
{
	struct smb2_hdr *rsp_hdr;
	struct smb2_negotiate_rsp *rsp;
	struct ksmbd_conn *conn = work->conn;
	int err;

	rsp_hdr = smb2_get_msg(work->response_buf);
	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
	rsp_hdr->CreditRequest = cpu_to_le16(2);
	rsp_hdr->Command = SMB2_NEGOTIATE;
	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
	rsp_hdr->NextCommand = 0;
	rsp_hdr->MessageId = 0;
	rsp_hdr->Id.SyncId.ProcessId = 0;
	rsp_hdr->Id.SyncId.TreeId = 0;
	rsp_hdr->SessionId = 0;
	memset(rsp_hdr->Signature, 0, 16);

	rsp = smb2_get_msg(work->response_buf);

	WARN_ON(ksmbd_conn_good(conn));

	rsp->StructureSize = cpu_to_le16(65);
	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
	rsp->DialectRevision = cpu_to_le16(conn->dialect);
	/* Not setting the connection GUID in rsp->ServerGUID, as it is
	 * not used by the client for identifying the connection.
	 */
	rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
	/* Default Max Message Size till SMB2.0, 64K */
	rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
	rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
	rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);

	rsp->SystemTime = cpu_to_le64(ksmbd_systime());
	rsp->ServerStartTime = 0;

	rsp->SecurityBufferOffset = cpu_to_le16(128);
	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
				  le16_to_cpu(rsp->SecurityBufferOffset));
	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
	err = ksmbd_iov_pin_rsp(work, rsp,
				sizeof(struct smb2_negotiate_rsp) + AUTH_GSS_LENGTH);
	if (err)
		return err;
	conn->use_spnego = true;
	ksmbd_conn_set_need_negotiate(conn);
	return 0;
}

/**
 * smb2_set_rsp_credits() - set number of credits in response buffer
 * @work: smb work containing smb response buffer
 */
int smb2_set_rsp_credits(struct ksmbd_work *work)
{
	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
	struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
	struct ksmbd_conn *conn = work->conn;
	unsigned short credits_requested, aux_max;
	unsigned short credit_charge, credits_granted = 0;

	if (work->send_no_response)
		return 0;

	hdr->CreditCharge = req_hdr->CreditCharge;

	if (conn->total_credits > conn->vals->max_credits) {
		hdr->CreditRequest = 0;
		pr_err("Total credits overflow: %d\n", conn->total_credits);
		return -EINVAL;
	}

	credit_charge = max_t(unsigned short,
			      le16_to_cpu(req_hdr->CreditCharge), 1);
	if (credit_charge > conn->total_credits) {
		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
			    credit_charge, conn->total_credits);
		return -EINVAL;
	}

	conn->total_credits -= credit_charge;
	conn->outstanding_credits -= credit_charge;
	credits_requested = max_t(unsigned short,
				  le16_to_cpu(req_hdr->CreditRequest), 1);

	/* According to the smb2.credits smbtorture test, Windows Server
	 * 2016 or later grants up to 8192 credits at once.
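	 * As a worked example of the arithmetic below (illustrative
	 * numbers only): with max_credits 8192 and total_credits currently
	 * 512, a request asking for 1024 credits is granted
	 * min(1024, 8192 - 512) = 1024, raising total_credits to 1536.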
	 *
	 * TODO: Need to adjust the CreditRequest value according to the
	 * current cpu load.
	 */
	if (hdr->Command == SMB2_NEGOTIATE)
		aux_max = 1;
	else
		aux_max = conn->vals->max_credits - conn->total_credits;
	credits_granted = min_t(unsigned short, credits_requested, aux_max);

	conn->total_credits += credits_granted;
	work->credits_granted += credits_granted;

	if (!req_hdr->NextCommand) {
		/* Update CreditRequest in last request */
		hdr->CreditRequest = cpu_to_le16(work->credits_granted);
	}
	ksmbd_debug(SMB,
		    "credits: requested[%d] granted[%d] total_granted[%d]\n",
		    credits_requested, credits_granted,
		    conn->total_credits);
	return 0;
}

/**
 * init_chained_smb2_rsp() - initialize smb2 chained response
 * @work: smb work containing smb response buffer
 */
static void init_chained_smb2_rsp(struct ksmbd_work *work)
{
	struct smb2_hdr *req = ksmbd_req_buf_next(work);
	struct smb2_hdr *rsp = ksmbd_resp_buf_next(work);
	struct smb2_hdr *rsp_hdr;
	struct smb2_hdr *rcv_hdr;
	int next_hdr_offset = 0;
	int len, new_len;

	/* Len of this response = updated RFC len - offset of previous cmd
	 * in the compound rsp
	 */

	/* Storing the current local FID which may be needed by subsequent
	 * command in the compound request
	 */
	if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
		work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
		work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
		work->compound_sid = le64_to_cpu(rsp->SessionId);
	}

	len = get_rfc1002_len(work->response_buf) - work->next_smb2_rsp_hdr_off;
	next_hdr_offset = le32_to_cpu(req->NextCommand);
	new_len = ALIGN(len, 8);

	work->iov[work->iov_idx].iov_len += (new_len - len);
	inc_rfc1001_len(work->response_buf, new_len - len);
	rsp->NextCommand = cpu_to_le32(new_len);

	work->next_smb2_rcv_hdr_off += next_hdr_offset;
	work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
	work->next_smb2_rsp_hdr_off += new_len;
	ksmbd_debug(SMB,
		    "Compound req new_len = %d rcv off = %d rsp off = %d\n",
		    new_len, work->next_smb2_rcv_hdr_off,
		    work->next_smb2_rsp_hdr_off);

	rsp_hdr = ksmbd_resp_buf_next(work);
	rcv_hdr = ksmbd_req_buf_next(work);

	if (!(rcv_hdr->Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
		ksmbd_debug(SMB, "related flag should be set\n");
		work->compound_fid = KSMBD_NO_FID;
		work->compound_pfid = KSMBD_NO_FID;
	}
	memset((char *)rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
	rsp_hdr->Command = rcv_hdr->Command;

	/*
	 * Message is response. We don't grant oplock yet.
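	 * SMB2_FLAGS_SERVER_TO_REDIR marks the frame as a response, and
	 * SMB2_FLAGS_RELATED_OPERATIONS mirrors the chaining flag from the
	 * request so the client can line this header up with its compound.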
*/ rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR | SMB2_FLAGS_RELATED_OPERATIONS); rsp_hdr->NextCommand = 0; rsp_hdr->MessageId = rcv_hdr->MessageId; rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId; rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId; rsp_hdr->SessionId = rcv_hdr->SessionId; memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16); } /** * is_chained_smb2_message() - check for chained command * @work: smb work containing smb request buffer * * Return: true if chained request, otherwise false */ bool is_chained_smb2_message(struct ksmbd_work *work) { struct smb2_hdr *hdr = smb2_get_msg(work->request_buf); unsigned int len, next_cmd; if (hdr->ProtocolId != SMB2_PROTO_NUMBER) return false; hdr = ksmbd_req_buf_next(work); next_cmd = le32_to_cpu(hdr->NextCommand); if (next_cmd > 0) { if ((u64)work->next_smb2_rcv_hdr_off + next_cmd + __SMB2_HEADER_STRUCTURE_SIZE > get_rfc1002_len(work->request_buf)) { pr_err("next command(%u) offset exceeds smb msg size\n", next_cmd); return false; } if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE > work->response_sz) { pr_err("next response offset exceeds response buffer size\n"); return false; } ksmbd_debug(SMB, "got SMB2 chained command\n"); init_chained_smb2_rsp(work); return true; } else if (work->next_smb2_rcv_hdr_off) { /* * This is last request in chained command, * align response to 8 byte */ len = ALIGN(get_rfc1002_len(work->response_buf), 8); len = len - get_rfc1002_len(work->response_buf); if (len) { ksmbd_debug(SMB, "padding len %u\n", len); work->iov[work->iov_idx].iov_len += len; inc_rfc1001_len(work->response_buf, len); } work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off; } return false; } /** * init_smb2_rsp_hdr() - initialize smb2 response * @work: smb work containing smb request buffer * * Return: 0 */ int init_smb2_rsp_hdr(struct ksmbd_work *work) { struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf); struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf); memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2); rsp_hdr->ProtocolId = rcv_hdr->ProtocolId; rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE; rsp_hdr->Command = rcv_hdr->Command; /* * Message is response. We don't grant oplock yet. 
*/ rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR); rsp_hdr->NextCommand = 0; rsp_hdr->MessageId = rcv_hdr->MessageId; rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId; rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId; rsp_hdr->SessionId = rcv_hdr->SessionId; memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16); return 0; } /** * smb2_allocate_rsp_buf() - allocate smb2 response buffer * @work: smb work containing smb request buffer * * Return: 0 on success, otherwise -ENOMEM */ int smb2_allocate_rsp_buf(struct ksmbd_work *work) { struct smb2_hdr *hdr = smb2_get_msg(work->request_buf); size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE; size_t large_sz = small_sz + work->conn->vals->max_trans_size; size_t sz = small_sz; int cmd = le16_to_cpu(hdr->Command); if (cmd == SMB2_IOCTL_HE || cmd == SMB2_QUERY_DIRECTORY_HE) sz = large_sz; if (cmd == SMB2_QUERY_INFO_HE) { struct smb2_query_info_req *req; req = smb2_get_msg(work->request_buf); if ((req->InfoType == SMB2_O_INFO_FILE && (req->FileInfoClass == FILE_FULL_EA_INFORMATION || req->FileInfoClass == FILE_ALL_INFORMATION)) || req->InfoType == SMB2_O_INFO_SECURITY) sz = large_sz; } /* allocate large response buf for chained commands */ if (le32_to_cpu(hdr->NextCommand) > 0) sz = large_sz; work->response_buf = kvzalloc(sz, GFP_KERNEL); if (!work->response_buf) return -ENOMEM; work->response_sz = sz; return 0; } /** * smb2_check_user_session() - check for valid session for a user * @work: smb work containing smb request buffer * * Return: 0 on success, otherwise error */ int smb2_check_user_session(struct ksmbd_work *work) { struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); struct ksmbd_conn *conn = work->conn; unsigned int cmd = le16_to_cpu(req_hdr->Command); unsigned long long sess_id; /* * SMB2_ECHO, SMB2_NEGOTIATE, SMB2_SESSION_SETUP command do not * require a session id, so no need to validate user session's for * these commands. */ if (cmd == SMB2_ECHO_HE || cmd == SMB2_NEGOTIATE_HE || cmd == SMB2_SESSION_SETUP_HE) return 0; if (!ksmbd_conn_good(conn)) return -EIO; sess_id = le64_to_cpu(req_hdr->SessionId); /* * If request is not the first in Compound request, * Just validate session id in header with work->sess->id. 
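 * As with the tree id case, a related compound operation carries the
 * wildcard SessionId 0xFFFFFFFFFFFFFFFF, so the ULLONG_MAX comparison
 * below deliberately lets that value through.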
*/ if (work->next_smb2_rcv_hdr_off) { if (!work->sess) { pr_err("The first operation in the compound does not have sess\n"); return -EINVAL; } if (sess_id != ULLONG_MAX && work->sess->id != sess_id) { pr_err("session id(%llu) is different with the first operation(%lld)\n", sess_id, work->sess->id); return -EINVAL; } return 1; } /* Check for validity of user session */ work->sess = ksmbd_session_lookup_all(conn, sess_id); if (work->sess) return 1; ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id); return -ENOENT; } static void destroy_previous_session(struct ksmbd_conn *conn, struct ksmbd_user *user, u64 id) { struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id); struct ksmbd_user *prev_user; struct channel *chann; long index; if (!prev_sess) return; prev_user = prev_sess->user; if (!prev_user || strcmp(user->name, prev_user->name) || user->passkey_sz != prev_user->passkey_sz || memcmp(user->passkey, prev_user->passkey, user->passkey_sz)) return; prev_sess->state = SMB2_SESSION_EXPIRED; xa_for_each(&prev_sess->ksmbd_chann_list, index, chann) ksmbd_conn_set_exiting(chann->conn); } /** * smb2_get_name() - get filename string from on the wire smb format * @src: source buffer * @maxlen: maxlen of source string * @local_nls: nls_table pointer * * Return: matching converted filename on success, otherwise error ptr */ static char * smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls) { char *name; name = smb_strndup_from_utf16(src, maxlen, 1, local_nls); if (IS_ERR(name)) { pr_err("failed to get name %ld\n", PTR_ERR(name)); return name; } ksmbd_conv_path_to_unix(name); ksmbd_strip_last_slash(name); return name; } int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg) { struct smb2_hdr *rsp_hdr; struct ksmbd_conn *conn = work->conn; int id; rsp_hdr = ksmbd_resp_buf_next(work); rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND; id = ksmbd_acquire_async_msg_id(&conn->async_ida); if (id < 0) { pr_err("Failed to alloc async message id\n"); return id; } work->asynchronous = true; work->async_id = id; rsp_hdr->Id.AsyncId = cpu_to_le64(id); ksmbd_debug(SMB, "Send interim Response to inform async request id : %d\n", work->async_id); work->cancel_fn = fn; work->cancel_argv = arg; if (list_empty(&work->async_request_entry)) { spin_lock(&conn->request_lock); list_add_tail(&work->async_request_entry, &conn->async_requests); spin_unlock(&conn->request_lock); } return 0; } void release_async_work(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; spin_lock(&conn->request_lock); list_del_init(&work->async_request_entry); spin_unlock(&conn->request_lock); work->asynchronous = 0; work->cancel_fn = NULL; kfree(work->cancel_argv); work->cancel_argv = NULL; if (work->async_id) { ksmbd_release_id(&conn->async_ida, work->async_id); work->async_id = 0; } } void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status) { struct smb2_hdr *rsp_hdr; struct ksmbd_work *in_work = ksmbd_alloc_work_struct(); if (allocate_interim_rsp_buf(in_work)) { pr_err("smb_allocate_rsp_buf failed!\n"); ksmbd_free_work_struct(in_work); return; } in_work->conn = work->conn; memcpy(smb2_get_msg(in_work->response_buf), ksmbd_resp_buf_next(work), __SMB2_HEADER_STRUCTURE_SIZE); rsp_hdr = smb2_get_msg(in_work->response_buf); smb2_set_err_rsp(in_work); rsp_hdr->Status = status; ksmbd_conn_write(in_work); ksmbd_free_work_struct(in_work); } static __le32 smb2_get_reparse_tag_special_file(umode_t mode) { if (S_ISDIR(mode) || S_ISREG(mode)) return 0; if 
(S_ISLNK(mode)) return IO_REPARSE_TAG_LX_SYMLINK_LE; else if (S_ISFIFO(mode)) return IO_REPARSE_TAG_LX_FIFO_LE; else if (S_ISSOCK(mode)) return IO_REPARSE_TAG_AF_UNIX_LE; else if (S_ISCHR(mode)) return IO_REPARSE_TAG_LX_CHR_LE; else if (S_ISBLK(mode)) return IO_REPARSE_TAG_LX_BLK_LE; return 0; } /** * smb2_get_dos_mode() - get file mode in dos format from unix mode * @stat: kstat containing file mode * @attribute: attribute flags * * Return: converted dos mode */ static int smb2_get_dos_mode(struct kstat *stat, int attribute) { int attr = 0; if (S_ISDIR(stat->mode)) { attr = FILE_ATTRIBUTE_DIRECTORY | (attribute & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM)); } else { attr = (attribute & 0x00005137) | FILE_ATTRIBUTE_ARCHIVE; attr &= ~(FILE_ATTRIBUTE_DIRECTORY); if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps & FILE_SUPPORTS_SPARSE_FILES)) attr |= FILE_ATTRIBUTE_SPARSE_FILE; if (smb2_get_reparse_tag_special_file(stat->mode)) attr |= FILE_ATTRIBUTE_REPARSE_POINT; } return attr; } static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt, __le16 hash_id) { pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(38); pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); pneg_ctxt->Reserved = cpu_to_le32(0); pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); pneg_ctxt->HashAlgorithms = hash_id; } static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt, __le16 cipher_type) { pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(4); pneg_ctxt->Reserved = cpu_to_le32(0); pneg_ctxt->CipherCount = cpu_to_le16(1); pneg_ctxt->Ciphers[0] = cipher_type; } static void build_sign_cap_ctxt(struct smb2_signing_capabilities *pneg_ctxt, __le16 sign_algo) { pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16((sizeof(struct smb2_signing_capabilities) + 2) - sizeof(struct smb2_neg_context)); pneg_ctxt->Reserved = cpu_to_le32(0); pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(1); pneg_ctxt->SigningAlgorithms[0] = sign_algo; } static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE; pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN); /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ pneg_ctxt->Name[0] = 0x93; pneg_ctxt->Name[1] = 0xAD; pneg_ctxt->Name[2] = 0x25; pneg_ctxt->Name[3] = 0x50; pneg_ctxt->Name[4] = 0x9C; pneg_ctxt->Name[5] = 0xB4; pneg_ctxt->Name[6] = 0x11; pneg_ctxt->Name[7] = 0xE7; pneg_ctxt->Name[8] = 0xB4; pneg_ctxt->Name[9] = 0x23; pneg_ctxt->Name[10] = 0x83; pneg_ctxt->Name[11] = 0xDE; pneg_ctxt->Name[12] = 0x96; pneg_ctxt->Name[13] = 0x8B; pneg_ctxt->Name[14] = 0xCD; pneg_ctxt->Name[15] = 0x7C; } static unsigned int assemble_neg_contexts(struct ksmbd_conn *conn, struct smb2_negotiate_rsp *rsp) { char * const pneg_ctxt = (char *)rsp + le32_to_cpu(rsp->NegotiateContextOffset); int neg_ctxt_cnt = 1; int ctxt_size; ksmbd_debug(SMB, "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n"); build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt, conn->preauth_info->Preauth_HashId); ctxt_size = sizeof(struct smb2_preauth_neg_context); if (conn->cipher_type) { /* Round to 8 byte boundary */ ctxt_size = round_up(ctxt_size, 8); ksmbd_debug(SMB, "assemble SMB2_ENCRYPTION_CAPABILITIES context\n"); build_encrypt_ctxt((struct smb2_encryption_neg_context *) (pneg_ctxt + ctxt_size), 
conn->cipher_type); neg_ctxt_cnt++; ctxt_size += sizeof(struct smb2_encryption_neg_context) + 2; } /* compression context not yet supported */ WARN_ON(conn->compress_algorithm != SMB3_COMPRESS_NONE); if (conn->posix_ext_supported) { ctxt_size = round_up(ctxt_size, 8); ksmbd_debug(SMB, "assemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n"); build_posix_ctxt((struct smb2_posix_neg_context *) (pneg_ctxt + ctxt_size)); neg_ctxt_cnt++; ctxt_size += sizeof(struct smb2_posix_neg_context); } if (conn->signing_negotiated) { ctxt_size = round_up(ctxt_size, 8); ksmbd_debug(SMB, "assemble SMB2_SIGNING_CAPABILITIES context\n"); build_sign_cap_ctxt((struct smb2_signing_capabilities *) (pneg_ctxt + ctxt_size), conn->signing_algorithm); neg_ctxt_cnt++; ctxt_size += sizeof(struct smb2_signing_capabilities) + 2; } rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt); return ctxt_size + AUTH_GSS_PADDING; } static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn, struct smb2_preauth_neg_context *pneg_ctxt, int ctxt_len) { /* * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt, * which may not be present. Only check for used HashAlgorithms[1]. */ if (ctxt_len < sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN) return STATUS_INVALID_PARAMETER; if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP; conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512; return STATUS_SUCCESS; } static void decode_encrypt_ctxt(struct ksmbd_conn *conn, struct smb2_encryption_neg_context *pneg_ctxt, int ctxt_len) { int cph_cnt; int i, cphs_size; if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) { pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n"); return; } conn->cipher_type = 0; cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount); cphs_size = cph_cnt * sizeof(__le16); if (sizeof(struct smb2_encryption_neg_context) + cphs_size > ctxt_len) { pr_err("Invalid cipher count(%d)\n", cph_cnt); return; } if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) return; for (i = 0; i < cph_cnt; i++) { if (pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_GCM || pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_CCM || pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_CCM || pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_GCM) { ksmbd_debug(SMB, "Cipher ID = 0x%x\n", pneg_ctxt->Ciphers[i]); conn->cipher_type = pneg_ctxt->Ciphers[i]; break; } } } /** * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption * @conn: smb connection * * Return: true if connection should be encrypted, else false */ bool smb3_encryption_negotiated(struct ksmbd_conn *conn) { if (!conn->ops->generate_encryptionkey) return false; /* * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag. * SMB 3.1.1 uses the cipher_type field. 
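 * For instance, a 3.1.1 client that offered AES-128-GCM in its
 * SMB2_ENCRYPTION_CAPABILITIES context leaves conn->cipher_type set by
 * decode_encrypt_ctxt(), so this returns true even though the
 * SMB2_GLOBAL_CAP_ENCRYPTION bit is not advertised for that dialect.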
*/ return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) || conn->cipher_type; } static void decode_compress_ctxt(struct ksmbd_conn *conn, struct smb2_compression_capabilities_context *pneg_ctxt) { conn->compress_algorithm = SMB3_COMPRESS_NONE; } static void decode_sign_cap_ctxt(struct ksmbd_conn *conn, struct smb2_signing_capabilities *pneg_ctxt, int ctxt_len) { int sign_algo_cnt; int i, sign_alos_size; if (sizeof(struct smb2_signing_capabilities) > ctxt_len) { pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n"); return; } conn->signing_negotiated = false; sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount); sign_alos_size = sign_algo_cnt * sizeof(__le16); if (sizeof(struct smb2_signing_capabilities) + sign_alos_size > ctxt_len) { pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt); return; } for (i = 0; i < sign_algo_cnt; i++) { if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256_LE || pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC_LE) { ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n", pneg_ctxt->SigningAlgorithms[i]); conn->signing_negotiated = true; conn->signing_algorithm = pneg_ctxt->SigningAlgorithms[i]; break; } } } static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, struct smb2_negotiate_req *req, unsigned int len_of_smb) { /* +4 is to account for the RFC1001 len field */ struct smb2_neg_context *pctx = (struct smb2_neg_context *)req; int i = 0, len_of_ctxts; unsigned int offset = le32_to_cpu(req->NegotiateContextOffset); unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount); __le32 status = STATUS_INVALID_PARAMETER; ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt); if (len_of_smb <= offset) { ksmbd_debug(SMB, "Invalid response: negotiate context offset\n"); return status; } len_of_ctxts = len_of_smb - offset; while (i++ < neg_ctxt_cnt) { int clen, ctxt_len; if (len_of_ctxts < (int)sizeof(struct smb2_neg_context)) break; pctx = (struct smb2_neg_context *)((char *)pctx + offset); clen = le16_to_cpu(pctx->DataLength); ctxt_len = clen + sizeof(struct smb2_neg_context); if (ctxt_len > len_of_ctxts) break; if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n"); if (conn->preauth_info->Preauth_HashId) break; status = decode_preauth_ctxt(conn, (struct smb2_preauth_neg_context *)pctx, ctxt_len); if (status != STATUS_SUCCESS) break; } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_ENCRYPTION_CAPABILITIES context\n"); if (conn->cipher_type) break; decode_encrypt_ctxt(conn, (struct smb2_encryption_neg_context *)pctx, ctxt_len); } else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_COMPRESSION_CAPABILITIES context\n"); if (conn->compress_algorithm) break; decode_compress_ctxt(conn, (struct smb2_compression_capabilities_context *)pctx); } else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) { ksmbd_debug(SMB, "deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n"); } else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) { ksmbd_debug(SMB, "deassemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n"); conn->posix_ext_supported = true; } else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_SIGNING_CAPABILITIES context\n"); decode_sign_cap_ctxt(conn, (struct smb2_signing_capabilities *)pctx, ctxt_len); } /* offsets must be 8 byte aligned */ offset = (ctxt_len + 
7) & ~0x7; len_of_ctxts -= offset; } return status; } /** * smb2_handle_negotiate() - handler for smb2 negotiate command * @work: smb work containing smb request buffer * * Return: 0 */ int smb2_handle_negotiate(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf); struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf); int rc = 0; unsigned int smb2_buf_len, smb2_neg_size, neg_ctxt_len = 0; __le32 status; ksmbd_debug(SMB, "Received negotiate request\n"); conn->need_neg = false; if (ksmbd_conn_good(conn)) { pr_err("conn->tcp_status is already in CifsGood State\n"); work->send_no_response = 1; return rc; } smb2_buf_len = get_rfc1002_len(work->request_buf); smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); if (smb2_neg_size > smb2_buf_len) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } if (req->DialectCount == 0) { pr_err("malformed packet\n"); rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } if (conn->dialect == SMB311_PROT_ID) { unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset); if (smb2_buf_len < nego_ctxt_off) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } if (smb2_neg_size > nego_ctxt_off) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) > nego_ctxt_off) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } } else { if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) > smb2_buf_len) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } } conn->cli_cap = le32_to_cpu(req->Capabilities); switch (conn->dialect) { case SMB311_PROT_ID: conn->preauth_info = kzalloc(sizeof(struct preauth_integrity_info), GFP_KERNEL); if (!conn->preauth_info) { rc = -ENOMEM; rsp->hdr.Status = STATUS_INVALID_PARAMETER; goto err_out; } status = deassemble_neg_contexts(conn, req, get_rfc1002_len(work->request_buf)); if (status != STATUS_SUCCESS) { pr_err("deassemble_neg_contexts error(0x%x)\n", status); rsp->hdr.Status = status; rc = -EINVAL; kfree(conn->preauth_info); conn->preauth_info = NULL; goto err_out; } rc = init_smb3_11_server(conn); if (rc < 0) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; kfree(conn->preauth_info); conn->preauth_info = NULL; goto err_out; } ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, conn->preauth_info->Preauth_HashValue); rsp->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); neg_ctxt_len = assemble_neg_contexts(conn, rsp); break; case SMB302_PROT_ID: init_smb3_02_server(conn); break; case SMB30_PROT_ID: init_smb3_0_server(conn); break; case SMB21_PROT_ID: init_smb2_1_server(conn); break; case SMB2X_PROT_ID: case BAD_PROT_ID: default: ksmbd_debug(SMB, "Server dialect :0x%x not supported\n", conn->dialect); rsp->hdr.Status = STATUS_NOT_SUPPORTED; rc = -EINVAL; goto err_out; } rsp->Capabilities = cpu_to_le32(conn->vals->capabilities); /* For stats */ conn->connection_type = conn->dialect; rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size); rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size); rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size); memcpy(conn->ClientGUID, req->ClientGUID, SMB2_CLIENT_GUID_SIZE); conn->cli_sec_mode = le16_to_cpu(req->SecurityMode); rsp->StructureSize = cpu_to_le16(65); rsp->DialectRevision = cpu_to_le16(conn->dialect); /* Not setting conn guid 
rsp->ServerGUID, as it * not used by client for identifying server */ memset(rsp->ServerGUID, 0, SMB2_CLIENT_GUID_SIZE); rsp->SystemTime = cpu_to_le64(ksmbd_systime()); rsp->ServerStartTime = 0; ksmbd_debug(SMB, "negotiate context offset %d, count %d\n", le32_to_cpu(rsp->NegotiateContextOffset), le16_to_cpu(rsp->NegotiateContextCount)); rsp->SecurityBufferOffset = cpu_to_le16(128); rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH); ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) + le16_to_cpu(rsp->SecurityBufferOffset)); rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE; conn->use_spnego = true; if ((server_conf.signing == KSMBD_CONFIG_OPT_AUTO || server_conf.signing == KSMBD_CONFIG_OPT_DISABLED) && req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED_LE) conn->sign = true; else if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) { server_conf.enforced_signing = true; rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE; conn->sign = true; } conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode); ksmbd_conn_set_need_negotiate(conn); err_out: if (rc) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; if (!rc) rc = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_negotiate_rsp) + AUTH_GSS_LENGTH + neg_ctxt_len); if (rc < 0) smb2_set_err_rsp(work); return rc; } static int alloc_preauth_hash(struct ksmbd_session *sess, struct ksmbd_conn *conn) { if (sess->Preauth_HashValue) return 0; sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue, PREAUTH_HASHVALUE_SIZE, GFP_KERNEL); if (!sess->Preauth_HashValue) return -ENOMEM; return 0; } static int generate_preauth_hash(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; u8 *preauth_hash; if (conn->dialect != SMB311_PROT_ID) return 0; if (conn->binding) { struct preauth_session *preauth_sess; preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id); if (!preauth_sess) { preauth_sess = ksmbd_preauth_session_alloc(conn, sess->id); if (!preauth_sess) return -ENOMEM; } preauth_hash = preauth_sess->Preauth_HashValue; } else { if (!sess->Preauth_HashValue) if (alloc_preauth_hash(sess, conn)) return -ENOMEM; preauth_hash = sess->Preauth_HashValue; } ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, preauth_hash); return 0; } static int decode_negotiation_token(struct ksmbd_conn *conn, struct negotiate_message *negblob, size_t sz) { if (!conn->use_spnego) return -EINVAL; if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) { if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) { conn->auth_mechs |= KSMBD_AUTH_NTLMSSP; conn->preferred_auth_mech = KSMBD_AUTH_NTLMSSP; conn->use_spnego = false; } } return 0; } static int ntlm_negotiate(struct ksmbd_work *work, struct negotiate_message *negblob, size_t negblob_len, struct smb2_sess_setup_rsp *rsp) { struct challenge_message *chgblob; unsigned char *spnego_blob = NULL; u16 spnego_blob_len; char *neg_blob; int sz, rc; ksmbd_debug(SMB, "negotiate phase\n"); rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->conn); if (rc) return rc; sz = le16_to_cpu(rsp->SecurityBufferOffset); chgblob = (struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz); memset(chgblob, 0, sizeof(struct challenge_message)); if (!work->conn->use_spnego) { sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn); if (sz < 0) return -ENOMEM; rsp->SecurityBufferLength = cpu_to_le16(sz); return 0; } sz = sizeof(struct challenge_message); sz += (strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6; neg_blob = kzalloc(sz, GFP_KERNEL); 
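	/*
	 * The size computed above looks like a worst-case bound rather
	 * than an exact length: the challenge message itself plus room for
	 * up to six NTLMSSP target-info AV pairs, each with a 4-byte
	 * header and the NetBIOS name expanded to UTF-16 (the
	 * "* 2 + 1 + 4" term per entry).
	 */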
if (!neg_blob) return -ENOMEM; chgblob = (struct challenge_message *)neg_blob; sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn); if (sz < 0) { rc = -ENOMEM; goto out; } rc = build_spnego_ntlmssp_neg_blob(&spnego_blob, &spnego_blob_len, neg_blob, sz); if (rc) { rc = -ENOMEM; goto out; } sz = le16_to_cpu(rsp->SecurityBufferOffset); memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len); rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len); out: kfree(spnego_blob); kfree(neg_blob); return rc; } static struct authenticate_message *user_authblob(struct ksmbd_conn *conn, struct smb2_sess_setup_req *req) { int sz; if (conn->use_spnego && conn->mechToken) return (struct authenticate_message *)conn->mechToken; sz = le16_to_cpu(req->SecurityBufferOffset); return (struct authenticate_message *)((char *)&req->hdr.ProtocolId + sz); } static struct ksmbd_user *session_user(struct ksmbd_conn *conn, struct smb2_sess_setup_req *req) { struct authenticate_message *authblob; struct ksmbd_user *user; char *name; unsigned int name_off, name_len, secbuf_len; secbuf_len = le16_to_cpu(req->SecurityBufferLength); if (secbuf_len < sizeof(struct authenticate_message)) { ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len); return NULL; } authblob = user_authblob(conn, req); name_off = le32_to_cpu(authblob->UserName.BufferOffset); name_len = le16_to_cpu(authblob->UserName.Length); if (secbuf_len < (u64)name_off + name_len) return NULL; name = smb_strndup_from_utf16((const char *)authblob + name_off, name_len, true, conn->local_nls); if (IS_ERR(name)) { pr_err("cannot allocate memory\n"); return NULL; } ksmbd_debug(SMB, "session setup request for user %s\n", name); user = ksmbd_login_user(name); kfree(name); return user; } static int ntlm_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; struct channel *chann = NULL; struct ksmbd_user *user; u64 prev_id; int sz, rc; ksmbd_debug(SMB, "authenticate phase\n"); if (conn->use_spnego) { unsigned char *spnego_blob; u16 spnego_blob_len; rc = build_spnego_ntlmssp_auth_blob(&spnego_blob, &spnego_blob_len, 0); if (rc) return -ENOMEM; sz = le16_to_cpu(rsp->SecurityBufferOffset); memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len); rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len); kfree(spnego_blob); } user = session_user(conn, req); if (!user) { ksmbd_debug(SMB, "Unknown user name or an error\n"); return -EPERM; } /* Check for previous session */ prev_id = le64_to_cpu(req->PreviousSessionId); if (prev_id && prev_id != sess->id) destroy_previous_session(conn, user, prev_id); if (sess->state == SMB2_SESSION_VALID) { /* * Reuse session if anonymous try to connect * on reauthetication. 
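 * A non-anonymous re-authentication, by contrast, must come from the
 * same user as before; the ksmbd_compare_user() check below fails the
 * attempt with -EPERM otherwise.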
*/ if (conn->binding == false && ksmbd_anonymous_user(user)) { ksmbd_free_user(user); return 0; } if (!ksmbd_compare_user(sess->user, user)) { ksmbd_free_user(user); return -EPERM; } ksmbd_free_user(user); } else { sess->user = user; } if (conn->binding == false && user_guest(sess->user)) { rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE; } else { struct authenticate_message *authblob; authblob = user_authblob(conn, req); sz = le16_to_cpu(req->SecurityBufferLength); rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess); if (rc) { set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD); ksmbd_debug(SMB, "authentication failed\n"); return -EPERM; } } /* * If session state is SMB2_SESSION_VALID, We can assume * that it is reauthentication. And the user/password * has been verified, so return it here. */ if (sess->state == SMB2_SESSION_VALID) { if (conn->binding) goto binding_session; return 0; } if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE && (conn->sign || server_conf.enforced_signing)) || (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED)) sess->sign = true; if (smb3_encryption_negotiated(conn) && !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) { rc = conn->ops->generate_encryptionkey(conn, sess); if (rc) { ksmbd_debug(SMB, "SMB3 encryption key generation failed\n"); return -EINVAL; } sess->enc = true; if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION) rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE; /* * signing is disable if encryption is enable * on this session */ sess->sign = false; } binding_session: if (conn->dialect >= SMB30_PROT_ID) { chann = lookup_chann_list(sess, conn); if (!chann) { chann = kmalloc(sizeof(struct channel), GFP_KERNEL); if (!chann) return -ENOMEM; chann->conn = conn; xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL); } } if (conn->ops->generate_signingkey) { rc = conn->ops->generate_signingkey(sess, conn); if (rc) { ksmbd_debug(SMB, "SMB3 signing key generation failed\n"); return -EINVAL; } } if (!ksmbd_conn_lookup_dialect(conn)) { pr_err("fail to verify the dialect\n"); return -ENOENT; } return 0; } #ifdef CONFIG_SMB_SERVER_KERBEROS5 static int krb5_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; char *in_blob, *out_blob; struct channel *chann = NULL; u64 prev_sess_id; int in_len, out_len; int retval; in_blob = (char *)&req->hdr.ProtocolId + le16_to_cpu(req->SecurityBufferOffset); in_len = le16_to_cpu(req->SecurityBufferLength); out_blob = (char *)&rsp->hdr.ProtocolId + le16_to_cpu(rsp->SecurityBufferOffset); out_len = work->response_sz - (le16_to_cpu(rsp->SecurityBufferOffset) + 4); /* Check previous session */ prev_sess_id = le64_to_cpu(req->PreviousSessionId); if (prev_sess_id && prev_sess_id != sess->id) destroy_previous_session(conn, sess->user, prev_sess_id); if (sess->state == SMB2_SESSION_VALID) ksmbd_free_user(sess->user); retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, out_blob, &out_len); if (retval) { ksmbd_debug(SMB, "krb5 authentication failed\n"); return -EINVAL; } rsp->SecurityBufferLength = cpu_to_le16(out_len); if ((conn->sign || server_conf.enforced_signing) || (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED)) sess->sign = true; if (smb3_encryption_negotiated(conn)) { retval = conn->ops->generate_encryptionkey(conn, sess); if (retval) { ksmbd_debug(SMB, "SMB3 encryption key generation failed\n"); return -EINVAL; } sess->enc = true; if 
(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION) rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE; sess->sign = false; } if (conn->dialect >= SMB30_PROT_ID) { chann = lookup_chann_list(sess, conn); if (!chann) { chann = kmalloc(sizeof(struct channel), GFP_KERNEL); if (!chann) return -ENOMEM; chann->conn = conn; xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL); } } if (conn->ops->generate_signingkey) { retval = conn->ops->generate_signingkey(sess, conn); if (retval) { ksmbd_debug(SMB, "SMB3 signing key generation failed\n"); return -EINVAL; } } if (!ksmbd_conn_lookup_dialect(conn)) { pr_err("fail to verify the dialect\n"); return -ENOENT; } return 0; } #else static int krb5_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp) { return -EOPNOTSUPP; } #endif int smb2_sess_setup(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_sess_setup_req *req; struct smb2_sess_setup_rsp *rsp; struct ksmbd_session *sess; struct negotiate_message *negblob; unsigned int negblob_len, negblob_off; int rc = 0; ksmbd_debug(SMB, "Received request for session setup\n"); WORK_BUFFERS(work, req, rsp); rsp->StructureSize = cpu_to_le16(9); rsp->SessionFlags = 0; rsp->SecurityBufferOffset = cpu_to_le16(72); rsp->SecurityBufferLength = 0; ksmbd_conn_lock(conn); if (!req->hdr.SessionId) { sess = ksmbd_smb2_session_create(); if (!sess) { rc = -ENOMEM; goto out_err; } rsp->hdr.SessionId = cpu_to_le64(sess->id); rc = ksmbd_session_register(conn, sess); if (rc) goto out_err; } else if (conn->dialect >= SMB30_PROT_ID && (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) && req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) { u64 sess_id = le64_to_cpu(req->hdr.SessionId); sess = ksmbd_session_lookup_slowpath(sess_id); if (!sess) { rc = -ENOENT; goto out_err; } if (conn->dialect != sess->dialect) { rc = -EINVAL; goto out_err; } if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) { rc = -EINVAL; goto out_err; } if (strncmp(conn->ClientGUID, sess->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { rc = -ENOENT; goto out_err; } if (sess->state == SMB2_SESSION_IN_PROGRESS) { rc = -EACCES; goto out_err; } if (sess->state == SMB2_SESSION_EXPIRED) { rc = -EFAULT; goto out_err; } if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; sess = NULL; goto out_err; } if (ksmbd_session_lookup(conn, sess_id)) { rc = -EACCES; goto out_err; } if (user_guest(sess->user)) { rc = -EOPNOTSUPP; goto out_err; } conn->binding = true; } else if ((conn->dialect < SMB30_PROT_ID || server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) && (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) { sess = NULL; rc = -EACCES; goto out_err; } else { sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId)); if (!sess) { rc = -ENOENT; goto out_err; } if (sess->state == SMB2_SESSION_EXPIRED) { rc = -EFAULT; goto out_err; } if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; sess = NULL; goto out_err; } } work->sess = sess; negblob_off = le16_to_cpu(req->SecurityBufferOffset); negblob_len = le16_to_cpu(req->SecurityBufferLength); if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) || negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) { rc = -EINVAL; goto out_err; } negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + negblob_off); if (decode_negotiation_token(conn, negblob, negblob_len) == 0) { if (conn->mechToken) negblob = (struct negotiate_message *)conn->mechToken; } if (server_conf.auth_mechs & conn->auth_mechs) { rc = 
generate_preauth_hash(work); if (rc) goto out_err; if (conn->preferred_auth_mech & (KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) { rc = krb5_authenticate(work, req, rsp); if (rc) { rc = -EINVAL; goto out_err; } if (!ksmbd_conn_need_reconnect(conn)) { ksmbd_conn_set_good(conn); sess->state = SMB2_SESSION_VALID; } kfree(sess->Preauth_HashValue); sess->Preauth_HashValue = NULL; } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) { if (negblob->MessageType == NtLmNegotiate) { rc = ntlm_negotiate(work, negblob, negblob_len, rsp); if (rc) goto out_err; rsp->hdr.Status = STATUS_MORE_PROCESSING_REQUIRED; } else if (negblob->MessageType == NtLmAuthenticate) { rc = ntlm_authenticate(work, req, rsp); if (rc) goto out_err; if (!ksmbd_conn_need_reconnect(conn)) { ksmbd_conn_set_good(conn); sess->state = SMB2_SESSION_VALID; } if (conn->binding) { struct preauth_session *preauth_sess; preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id); if (preauth_sess) { list_del(&preauth_sess->preauth_entry); kfree(preauth_sess); } } kfree(sess->Preauth_HashValue); sess->Preauth_HashValue = NULL; } else { pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n", le32_to_cpu(negblob->MessageType)); rc = -EINVAL; } } else { /* TODO: need one more negotiation */ pr_err("Not support the preferred authentication\n"); rc = -EINVAL; } } else { pr_err("Not support authentication\n"); rc = -EINVAL; } out_err: if (rc == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (rc == -ENOENT) rsp->hdr.Status = STATUS_USER_SESSION_DELETED; else if (rc == -EACCES) rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED; else if (rc == -EFAULT) rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED; else if (rc == -ENOMEM) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; else if (rc == -EOPNOTSUPP) rsp->hdr.Status = STATUS_NOT_SUPPORTED; else if (rc) rsp->hdr.Status = STATUS_LOGON_FAILURE; if (conn->use_spnego && conn->mechToken) { kfree(conn->mechToken); conn->mechToken = NULL; } if (rc < 0) { /* * SecurityBufferOffset should be set to zero * in session setup error response. */ rsp->SecurityBufferOffset = 0; if (sess) { bool try_delay = false; /* * To avoid dictionary attacks (repeated session setups rapidly sent) to * connect to server, ksmbd make a delay of a 5 seconds on session setup * failure to make it harder to send enough random connection requests * to break into a server. 
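 * The delay is only applied to users already carrying the
 * KSMBD_USER_FLAG_DELAY_SESSION flag, and the connection is switched to
 * the need-reconnect state around the ssleep(5) so nothing else is
 * serviced on it in the meantime.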
*/ if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION) try_delay = true; sess->last_active = jiffies; sess->state = SMB2_SESSION_EXPIRED; if (try_delay) { ksmbd_conn_set_need_reconnect(conn); ssleep(5); ksmbd_conn_set_need_negotiate(conn); } } smb2_set_err_rsp(work); } else { unsigned int iov_len; if (rsp->SecurityBufferLength) iov_len = offsetof(struct smb2_sess_setup_rsp, Buffer) + le16_to_cpu(rsp->SecurityBufferLength); else iov_len = sizeof(struct smb2_sess_setup_rsp); rc = ksmbd_iov_pin_rsp(work, rsp, iov_len); if (rc) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; } ksmbd_conn_unlock(conn); return rc; } /** * smb2_tree_connect() - handler for smb2 tree connect command * @work: smb work containing smb request buffer * * Return: 0 on success, otherwise error */ int smb2_tree_connect(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_tree_connect_req *req; struct smb2_tree_connect_rsp *rsp; struct ksmbd_session *sess = work->sess; char *treename = NULL, *name = NULL; struct ksmbd_tree_conn_status status; struct ksmbd_share_config *share; int rc = -EINVAL; WORK_BUFFERS(work, req, rsp); treename = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->PathLength), true, conn->local_nls); if (IS_ERR(treename)) { pr_err("treename is NULL\n"); status.ret = KSMBD_TREE_CONN_STATUS_ERROR; goto out_err1; } name = ksmbd_extract_sharename(conn->um, treename); if (IS_ERR(name)) { status.ret = KSMBD_TREE_CONN_STATUS_ERROR; goto out_err1; } ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n", name, treename); status = ksmbd_tree_conn_connect(conn, sess, name); if (status.ret == KSMBD_TREE_CONN_STATUS_OK) rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id); else goto out_err1; share = status.tree_conn->share_conf; if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) { ksmbd_debug(SMB, "IPC share path request\n"); rsp->ShareType = SMB2_SHARE_TYPE_PIPE; rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE | FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE | FILE_DELETE_LE | FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE | FILE_SYNCHRONIZE_LE; } else { rsp->ShareType = SMB2_SHARE_TYPE_DISK; rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE | FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE; if (test_tree_conn_flag(status.tree_conn, KSMBD_TREE_CONN_FLAG_WRITABLE)) { rsp->MaximalAccess |= FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE | FILE_WRITE_EA_LE | FILE_DELETE_LE | FILE_WRITE_ATTRIBUTES_LE | FILE_DELETE_CHILD_LE | FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE | FILE_SYNCHRONIZE_LE; } } status.tree_conn->maximal_access = le32_to_cpu(rsp->MaximalAccess); if (conn->posix_ext_supported) status.tree_conn->posix_extensions = true; rsp->StructureSize = cpu_to_le16(16); out_err1: rsp->Capabilities = 0; rsp->Reserved = 0; /* default manual caching */ rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING; rc = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_tree_connect_rsp)); if (rc) status.ret = KSMBD_TREE_CONN_STATUS_NOMEM; if (!IS_ERR(treename)) kfree(treename); if (!IS_ERR(name)) kfree(name); switch (status.ret) { case KSMBD_TREE_CONN_STATUS_OK: rsp->hdr.Status = STATUS_SUCCESS; rc = 0; break; case -ESTALE: case -ENOENT: case KSMBD_TREE_CONN_STATUS_NO_SHARE: rsp->hdr.Status = STATUS_BAD_NETWORK_NAME; break; case -ENOMEM: case KSMBD_TREE_CONN_STATUS_NOMEM: rsp->hdr.Status = STATUS_NO_MEMORY; break; case KSMBD_TREE_CONN_STATUS_ERROR: case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS: case 
KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
		rsp->hdr.Status = STATUS_ACCESS_DENIED;
		break;
	case -EINVAL:
		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
		break;
	default:
		rsp->hdr.Status = STATUS_ACCESS_DENIED;
	}

	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
		smb2_set_err_rsp(work);

	return rc;
}

/**
 * smb2_create_open_flags() - convert smb open flags to unix open flags
 * @file_present: is file already present
 * @access: file access flags
 * @disposition: file disposition flags
 * @may_flags: set with MAY_ flags
 *
 * Return: file open flags
 */
static int smb2_create_open_flags(bool file_present, __le32 access,
				  __le32 disposition, int *may_flags)
{
	int oflags = O_NONBLOCK | O_LARGEFILE;

	if (access & FILE_READ_DESIRED_ACCESS_LE &&
	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
		oflags |= O_RDWR;
		*may_flags = MAY_OPEN | MAY_READ | MAY_WRITE;
	} else if (access & FILE_WRITE_DESIRE_ACCESS_LE) {
		oflags |= O_WRONLY;
		*may_flags = MAY_OPEN | MAY_WRITE;
	} else {
		oflags |= O_RDONLY;
		*may_flags = MAY_OPEN | MAY_READ;
	}

	if (access == FILE_READ_ATTRIBUTES_LE)
		oflags |= O_PATH;

	if (file_present) {
		switch (disposition & FILE_CREATE_MASK_LE) {
		case FILE_OPEN_LE:
		case FILE_CREATE_LE:
			break;
		case FILE_SUPERSEDE_LE:
		case FILE_OVERWRITE_LE:
		case FILE_OVERWRITE_IF_LE:
			oflags |= O_TRUNC;
			break;
		default:
			break;
		}
	} else {
		switch (disposition & FILE_CREATE_MASK_LE) {
		case FILE_SUPERSEDE_LE:
		case FILE_CREATE_LE:
		case FILE_OPEN_IF_LE:
		case FILE_OVERWRITE_IF_LE:
			oflags |= O_CREAT;
			break;
		case FILE_OPEN_LE:
		case FILE_OVERWRITE_LE:
			oflags &= ~O_CREAT;
			break;
		default:
			break;
		}
	}

	return oflags;
}

/**
 * smb2_tree_disconnect() - handler for smb2 tree disconnect request
 * @work: smb work containing request buffer
 *
 * Return: 0
 */
int smb2_tree_disconnect(struct ksmbd_work *work)
{
	struct smb2_tree_disconnect_rsp *rsp;
	struct smb2_tree_disconnect_req *req;
	struct ksmbd_session *sess = work->sess;
	struct ksmbd_tree_connect *tcon = work->tcon;
	int err;

	WORK_BUFFERS(work, req, rsp);

	ksmbd_debug(SMB, "request\n");

	rsp->StructureSize = cpu_to_le16(4);
	err = ksmbd_iov_pin_rsp(work, rsp,
				sizeof(struct smb2_tree_disconnect_rsp));
	if (err) {
		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
		smb2_set_err_rsp(work);
		return err;
	}

	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
		smb2_set_err_rsp(work);
		return -ENOENT;
	}

	ksmbd_close_tree_conn_fds(work);
	ksmbd_tree_conn_disconnect(sess, tcon);
	work->tcon = NULL;
	return 0;
}

/**
 * smb2_session_logoff() - handler for session log off request
 * @work: smb work containing request buffer
 *
 * Return: 0
 */
int smb2_session_logoff(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct smb2_logoff_req *req;
	struct smb2_logoff_rsp *rsp;
	struct ksmbd_session *sess;
	u64 sess_id;
	int err;

	WORK_BUFFERS(work, req, rsp);

	ksmbd_debug(SMB, "request\n");

	sess_id = le64_to_cpu(req->hdr.SessionId);

	rsp->StructureSize = cpu_to_le16(4);
	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
	if (err) {
		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
		smb2_set_err_rsp(work);
		return err;
	}

	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
	ksmbd_close_session_fds(work);
	ksmbd_conn_wait_idle(conn, sess_id);

	/*
	 * Re-lookup the session to check whether it was deleted
	 * while waiting for the request to complete.
	 */
	sess = ksmbd_session_lookup_all(conn, sess_id);
	if (ksmbd_tree_conn_session_logoff(sess)) {
		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
		rsp->hdr.Status =
STATUS_NETWORK_NAME_DELETED; smb2_set_err_rsp(work); return -ENOENT; } ksmbd_destroy_file_table(&sess->file_table); sess->state = SMB2_SESSION_EXPIRED; ksmbd_free_user(sess->user); sess->user = NULL; ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE); return 0; } /** * create_smb2_pipe() - create IPC pipe * @work: smb work containing request buffer * * Return: 0 on success, otherwise error */ static noinline int create_smb2_pipe(struct ksmbd_work *work) { struct smb2_create_rsp *rsp; struct smb2_create_req *req; int id; int err; char *name; WORK_BUFFERS(work, req, rsp); name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength), 1, work->conn->local_nls); if (IS_ERR(name)) { rsp->hdr.Status = STATUS_NO_MEMORY; err = PTR_ERR(name); goto out; } id = ksmbd_session_rpc_open(work->sess, name); if (id < 0) { pr_err("Unable to open RPC pipe: %d\n", id); err = id; goto out; } rsp->hdr.Status = STATUS_SUCCESS; rsp->StructureSize = cpu_to_le16(89); rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE; rsp->Flags = 0; rsp->CreateAction = cpu_to_le32(FILE_OPENED); rsp->CreationTime = cpu_to_le64(0); rsp->LastAccessTime = cpu_to_le64(0); rsp->ChangeTime = cpu_to_le64(0); rsp->AllocationSize = cpu_to_le64(0); rsp->EndofFile = cpu_to_le64(0); rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE; rsp->Reserved2 = 0; rsp->VolatileFileId = id; rsp->PersistentFileId = 0; rsp->CreateContextsOffset = 0; rsp->CreateContextsLength = 0; err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_create_rsp, Buffer)); if (err) goto out; kfree(name); return 0; out: switch (err) { case -EINVAL: rsp->hdr.Status = STATUS_INVALID_PARAMETER; break; case -ENOSPC: case -ENOMEM: rsp->hdr.Status = STATUS_NO_MEMORY; break; } if (!IS_ERR(name)) kfree(name); smb2_set_err_rsp(work); return err; } /** * smb2_set_ea() - handler for setting extended attributes using set * info command * @eabuf: set info command buffer * @buf_len: set info command buffer length * @path: dentry path for get ea * * Return: 0 on success, otherwise error */ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, const struct path *path) { struct mnt_idmap *idmap = mnt_idmap(path->mnt); char *attr_name = NULL, *value; int rc = 0; unsigned int next = 0; if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) return -EINVAL; attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL); if (!attr_name) return -ENOMEM; do { if (!eabuf->EaNameLength) goto next; ksmbd_debug(SMB, "name : <%s>, name_len : %u, value_len : %u, next : %u\n", eabuf->name, eabuf->EaNameLength, le16_to_cpu(eabuf->EaValueLength), le32_to_cpu(eabuf->NextEntryOffset)); if (eabuf->EaNameLength > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) { rc = -EINVAL; break; } memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); memcpy(&attr_name[XATTR_USER_PREFIX_LEN], eabuf->name, eabuf->EaNameLength); attr_name[XATTR_USER_PREFIX_LEN + eabuf->EaNameLength] = '\0'; value = (char *)&eabuf->name + eabuf->EaNameLength + 1; if (!eabuf->EaValueLength) { rc = ksmbd_vfs_casexattr_len(idmap, path->dentry, attr_name, XATTR_USER_PREFIX_LEN + eabuf->EaNameLength); /* delete the EA only when it exits */ if (rc > 0) { rc = ksmbd_vfs_remove_xattr(idmap, path, attr_name); if (rc < 0) { ksmbd_debug(SMB, "remove xattr failed(%d)\n", rc); break; } } /* if the EA doesn't exist, just do nothing. 
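 * Returning 0 here keeps EA deletion idempotent: a SET_INFO that
 * removes an already-absent EA still succeeds from the client's
 * point of view.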
*/ rc = 0; } else { rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value, le16_to_cpu(eabuf->EaValueLength), 0); if (rc < 0) { ksmbd_debug(SMB, "ksmbd_vfs_setxattr is failed(%d)\n", rc); break; } } next: next = le32_to_cpu(eabuf->NextEntryOffset); if (next == 0 || buf_len < next) break; buf_len -= next; eabuf = (struct smb2_ea_info *)((char *)eabuf + next); if (buf_len < sizeof(struct smb2_ea_info)) { rc = -EINVAL; break; } if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) { rc = -EINVAL; break; } } while (next != 0); kfree(attr_name); return rc; } static noinline int smb2_set_stream_name_xattr(const struct path *path, struct ksmbd_file *fp, char *stream_name, int s_type) { struct mnt_idmap *idmap = mnt_idmap(path->mnt); size_t xattr_stream_size; char *xattr_stream_name; int rc; rc = ksmbd_vfs_xattr_stream_name(stream_name, &xattr_stream_name, &xattr_stream_size, s_type); if (rc) return rc; fp->stream.name = xattr_stream_name; fp->stream.size = xattr_stream_size; /* Check if there is stream prefix in xattr space */ rc = ksmbd_vfs_casexattr_len(idmap, path->dentry, xattr_stream_name, xattr_stream_size); if (rc >= 0) return 0; if (fp->cdoption == FILE_OPEN_LE) { ksmbd_debug(SMB, "XATTR stream name lookup failed: %d\n", rc); return -EBADF; } rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0); if (rc < 0) pr_err("Failed to store XATTR stream name :%d\n", rc); return 0; } static int smb2_remove_smb_xattrs(const struct path *path) { struct mnt_idmap *idmap = mnt_idmap(path->mnt); char *name, *xattr_list = NULL; ssize_t xattr_list_len; int err = 0; xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; } else if (!xattr_list_len) { ksmbd_debug(SMB, "empty xattr in the file\n"); goto out; } for (name = xattr_list; name - xattr_list < xattr_list_len; name += strlen(name) + 1) { ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name)); if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) && !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN)) { err = ksmbd_vfs_remove_xattr(idmap, path, name); if (err) ksmbd_debug(SMB, "remove xattr failed : %s\n", name); } } out: kvfree(xattr_list); return err; } static int smb2_create_truncate(const struct path *path) { int rc = vfs_truncate(path, 0); if (rc) { pr_err("vfs_truncate failed, rc %d\n", rc); return rc; } rc = smb2_remove_smb_xattrs(path); if (rc == -EOPNOTSUPP) rc = 0; if (rc) ksmbd_debug(SMB, "ksmbd_truncate_stream_name_xattr failed, rc %d\n", rc); return rc; } static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *path, struct ksmbd_file *fp) { struct xattr_dos_attrib da = {0}; int rc; if (!test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) return; da.version = 4; da.attr = le32_to_cpu(fp->f_ci->m_fattr); da.itime = da.create_time = fp->create_time; da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME | XATTR_DOSINFO_ITIME; rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da); if (rc) ksmbd_debug(SMB, "failed to store file attribute into xattr\n"); } static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon, const struct path *path, struct ksmbd_file *fp) { struct xattr_dos_attrib da; int rc; fp->f_ci->m_fattr &= ~(FILE_ATTRIBUTE_HIDDEN_LE | FILE_ATTRIBUTE_SYSTEM_LE); /* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */ if (!test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) return; rc = 
ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path->mnt), path->dentry, &da); if (rc > 0) { fp->f_ci->m_fattr = cpu_to_le32(da.attr); fp->create_time = da.create_time; fp->itime = da.itime; } } static int smb2_creat(struct ksmbd_work *work, struct path *parent_path, struct path *path, char *name, int open_flags, umode_t posix_mode, bool is_dir) { struct ksmbd_tree_connect *tcon = work->tcon; struct ksmbd_share_config *share = tcon->share_conf; umode_t mode; int rc; if (!(open_flags & O_CREAT)) return -EBADF; ksmbd_debug(SMB, "file does not exist, so creating\n"); if (is_dir == true) { ksmbd_debug(SMB, "creating directory\n"); mode = share_config_directory_mode(share, posix_mode); rc = ksmbd_vfs_mkdir(work, name, mode); if (rc) return rc; } else { ksmbd_debug(SMB, "creating regular file\n"); mode = share_config_create_mode(share, posix_mode); rc = ksmbd_vfs_create(work, name, mode); if (rc) return rc; } rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0); if (rc) { pr_err("cannot get linux path (%s), err = %d\n", name, rc); return rc; } return 0; } static int smb2_create_sd_buffer(struct ksmbd_work *work, struct smb2_create_req *req, const struct path *path) { struct create_context *context; struct create_sd_buf_req *sd_buf; if (!req->CreateContextsOffset) return -ENOENT; /* Parse SD BUFFER create contexts */ context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4); if (!context) return -ENOENT; else if (IS_ERR(context)) return PTR_ERR(context); ksmbd_debug(SMB, "Set ACLs using SMB2_CREATE_SD_BUFFER context\n"); sd_buf = (struct create_sd_buf_req *)context; if (le16_to_cpu(context->DataOffset) + le32_to_cpu(context->DataLength) < sizeof(struct create_sd_buf_req)) return -EINVAL; return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd, le32_to_cpu(sd_buf->ccontext.DataLength), true); } static void ksmbd_acls_fattr(struct smb_fattr *fattr, struct mnt_idmap *idmap, struct inode *inode) { vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode); fattr->cf_uid = vfsuid_into_kuid(vfsuid); fattr->cf_gid = vfsgid_into_kgid(vfsgid); fattr->cf_mode = inode->i_mode; fattr->cf_acls = NULL; fattr->cf_dacls = NULL; if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) { fattr->cf_acls = get_inode_acl(inode, ACL_TYPE_ACCESS); if (S_ISDIR(inode->i_mode)) fattr->cf_dacls = get_inode_acl(inode, ACL_TYPE_DEFAULT); } } /** * smb2_open() - handler for smb file open request * @work: smb work containing request buffer * * Return: 0 on success, otherwise error */ int smb2_open(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; struct ksmbd_tree_connect *tcon = work->tcon; struct smb2_create_req *req; struct smb2_create_rsp *rsp; struct path path, parent_path; struct ksmbd_share_config *share = tcon->share_conf; struct ksmbd_file *fp = NULL; struct file *filp = NULL; struct mnt_idmap *idmap = NULL; struct kstat stat; struct create_context *context; struct lease_ctx_info *lc = NULL; struct create_ea_buf_req *ea_buf = NULL; struct oplock_info *opinfo; __le32 *next_ptr = NULL; int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0; int rc = 0; int contxt_cnt = 0, query_disk_id = 0; int maximal_access_ctxt = 0, posix_ctxt = 0; int s_type = 0; int next_off = 0; char *name = NULL; char *stream_name = NULL; bool file_present = false, created = false, already_permitted = false; int share_ret, need_truncate = 0; u64 time; umode_t posix_mode = 0; __le32 daccess, maximal_access = 0; int iov_len = 0; 
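	/*
	 * Overview of the create handling below (a summary drawn from the
	 * code itself, not normative): validate the chained-command flags,
	 * divert IPC shares to create_smb2_pipe(), decode and vet the
	 * UTF-16 name (stream suffixes, vetoed names), parse the create
	 * contexts (EA buffer, maximal access, timewarp, POSIX), look up
	 * or create the target, apply DACL and inode permission checks,
	 * open the file and grant an oplock or lease, and finally build
	 * the create response with any requested contexts appended to
	 * rsp->Buffer.
	 */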
WORK_BUFFERS(work, req, rsp); if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off && (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)) { ksmbd_debug(SMB, "invalid flag in chained command\n"); rsp->hdr.Status = STATUS_INVALID_PARAMETER; smb2_set_err_rsp(work); return -EINVAL; } if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) { ksmbd_debug(SMB, "IPC pipe create request\n"); return create_smb2_pipe(work); } if (req->NameLength) { if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) && *(char *)req->Buffer == '\\') { pr_err("not allow directory name included leading slash\n"); rc = -EINVAL; goto err_out1; } name = smb2_get_name(req->Buffer, le16_to_cpu(req->NameLength), work->conn->local_nls); if (IS_ERR(name)) { rc = PTR_ERR(name); if (rc != -ENOMEM) rc = -ENOENT; name = NULL; goto err_out1; } ksmbd_debug(SMB, "converted name = %s\n", name); if (strchr(name, ':')) { if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_STREAMS)) { rc = -EBADF; goto err_out1; } rc = parse_stream_name(name, &stream_name, &s_type); if (rc < 0) goto err_out1; } rc = ksmbd_validate_filename(name); if (rc < 0) goto err_out1; if (ksmbd_share_veto_filename(share, name)) { rc = -ENOENT; ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n", name); goto err_out1; } } else { name = kstrdup("", GFP_KERNEL); if (!name) { rc = -ENOMEM; goto err_out1; } } req_op_level = req->RequestedOplockLevel; if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) lc = parse_lease_state(req); if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) { pr_err("Invalid impersonationlevel : 0x%x\n", le32_to_cpu(req->ImpersonationLevel)); rc = -EIO; rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL; goto err_out1; } if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) { pr_err("Invalid create options : 0x%x\n", le32_to_cpu(req->CreateOptions)); rc = -EINVAL; goto err_out1; } else { if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE && req->CreateOptions & FILE_RANDOM_ACCESS_LE) req->CreateOptions = ~(FILE_SEQUENTIAL_ONLY_LE); if (req->CreateOptions & (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION | FILE_RESERVE_OPFILTER_LE)) { rc = -EOPNOTSUPP; goto err_out1; } if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) { if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) { rc = -EINVAL; goto err_out1; } else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) { req->CreateOptions = ~(FILE_NO_COMPRESSION_LE); } } } if (le32_to_cpu(req->CreateDisposition) > le32_to_cpu(FILE_OVERWRITE_IF_LE)) { pr_err("Invalid create disposition : 0x%x\n", le32_to_cpu(req->CreateDisposition)); rc = -EINVAL; goto err_out1; } if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) { pr_err("Invalid desired access : 0x%x\n", le32_to_cpu(req->DesiredAccess)); rc = -EACCES; goto err_out1; } if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) { pr_err("Invalid file attribute : 0x%x\n", le32_to_cpu(req->FileAttributes)); rc = -EINVAL; goto err_out1; } if (req->CreateContextsOffset) { /* Parse non-durable handle create contexts */ context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4); if (IS_ERR(context)) { rc = PTR_ERR(context); goto err_out1; } else if (context) { ea_buf = (struct create_ea_buf_req *)context; if (le16_to_cpu(context->DataOffset) + le32_to_cpu(context->DataLength) < sizeof(struct create_ea_buf_req)) { rc = -EINVAL; goto err_out1; } if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) { rsp->hdr.Status = STATUS_ACCESS_DENIED; rc = -EACCES; goto err_out1; } } context = 
smb2_find_context_vals(req, SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4); if (IS_ERR(context)) { rc = PTR_ERR(context); goto err_out1; } else if (context) { ksmbd_debug(SMB, "get query maximal access context\n"); maximal_access_ctxt = 1; } context = smb2_find_context_vals(req, SMB2_CREATE_TIMEWARP_REQUEST, 4); if (IS_ERR(context)) { rc = PTR_ERR(context); goto err_out1; } else if (context) { ksmbd_debug(SMB, "get timewarp context\n"); rc = -EBADF; goto err_out1; } if (tcon->posix_extensions) { context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16); if (IS_ERR(context)) { rc = PTR_ERR(context); goto err_out1; } else if (context) { struct create_posix *posix = (struct create_posix *)context; if (le16_to_cpu(context->DataOffset) + le32_to_cpu(context->DataLength) < sizeof(struct create_posix) - 4) { rc = -EINVAL; goto err_out1; } ksmbd_debug(SMB, "get posix context\n"); posix_mode = le32_to_cpu(posix->Mode); posix_ctxt = 1; } } } if (ksmbd_override_fsids(work)) { rc = -ENOMEM; goto err_out1; } rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, &parent_path, &path, 1); if (!rc) { file_present = true; if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) { /* * If delete-on-close is requested for a file that already * exists and the disposition would also (re)create it, * return access denied. */ if (req->CreateDisposition == FILE_OVERWRITE_IF_LE || req->CreateDisposition == FILE_OPEN_IF_LE) { rc = -EACCES; goto err_out; } if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); rc = -EACCES; goto err_out; } } else if (d_is_symlink(path.dentry)) { rc = -EACCES; goto err_out; } idmap = mnt_idmap(path.mnt); } else { if (rc != -ENOENT) goto err_out; ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n", name, rc); rc = 0; } if (stream_name) { if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) { if (s_type == DATA_STREAM) { rc = -EIO; rsp->hdr.Status = STATUS_NOT_A_DIRECTORY; } } else { if (file_present && S_ISDIR(d_inode(path.dentry)->i_mode) && s_type == DATA_STREAM) { rc = -EIO; rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY; } } if (req->CreateOptions & FILE_DIRECTORY_FILE_LE && req->FileAttributes & FILE_ATTRIBUTE_NORMAL_LE) { rsp->hdr.Status = STATUS_NOT_A_DIRECTORY; rc = -EIO; } if (rc < 0) goto err_out; } if (file_present && req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE && S_ISDIR(d_inode(path.dentry)->i_mode) && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) { ksmbd_debug(SMB, "open() argument is a directory: %s, %x\n", name, req->CreateOptions); rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY; rc = -EIO; goto err_out; } if (file_present && (req->CreateOptions & FILE_DIRECTORY_FILE_LE) && !(req->CreateDisposition == FILE_CREATE_LE) && !S_ISDIR(d_inode(path.dentry)->i_mode)) { rsp->hdr.Status = STATUS_NOT_A_DIRECTORY; rc = -EIO; goto err_out; } if (!stream_name && file_present && req->CreateDisposition == FILE_CREATE_LE) { rc = -EEXIST; goto err_out; } daccess = smb_map_generic_desired_access(req->DesiredAccess); if (file_present && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) { rc = smb_check_perm_dacl(conn, &path, &daccess, sess->user->uid); if (rc) goto err_out; } if (daccess & FILE_MAXIMAL_ACCESS_LE) { if (!file_present) { daccess = cpu_to_le32(GENERIC_ALL_FLAGS); } else { ksmbd_vfs_query_maximal_access(idmap, path.dentry, &daccess); already_permitted = true; } maximal_access = daccess; } open_flags = smb2_create_open_flags(file_present, daccess, req->CreateDisposition, &may_flags); if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) 
{ if (open_flags & O_CREAT) { ksmbd_debug(SMB, "User does not have write permission\n"); rc = -EACCES; goto err_out; } } /*create file if not present */ if (!file_present) { rc = smb2_creat(work, &parent_path, &path, name, open_flags, posix_mode, req->CreateOptions & FILE_DIRECTORY_FILE_LE); if (rc) { if (rc == -ENOENT) { rc = -EIO; rsp->hdr.Status = STATUS_OBJECT_PATH_NOT_FOUND; } goto err_out; } created = true; idmap = mnt_idmap(path.mnt); if (ea_buf) { if (le32_to_cpu(ea_buf->ccontext.DataLength) < sizeof(struct smb2_ea_info)) { rc = -EINVAL; goto err_out; } rc = smb2_set_ea(&ea_buf->ea, le32_to_cpu(ea_buf->ccontext.DataLength), &path); if (rc == -EOPNOTSUPP) rc = 0; else if (rc) goto err_out; } } else if (!already_permitted) { /* FILE_READ_ATTRIBUTE is allowed without inode_permission, * because execute(search) permission on a parent directory, * is already granted. */ if (daccess & ~(FILE_READ_ATTRIBUTES_LE | FILE_READ_CONTROL_LE)) { rc = inode_permission(idmap, d_inode(path.dentry), may_flags); if (rc) goto err_out; if ((daccess & FILE_DELETE_LE) || (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) { rc = inode_permission(idmap, d_inode(path.dentry->d_parent), MAY_EXEC | MAY_WRITE); if (rc) goto err_out; } } } rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent)); if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) { rc = -EBUSY; goto err_out; } rc = 0; filp = dentry_open(&path, open_flags, current_cred()); if (IS_ERR(filp)) { rc = PTR_ERR(filp); pr_err("dentry open for dir failed, rc %d\n", rc); goto err_out; } if (file_present) { if (!(open_flags & O_TRUNC)) file_info = FILE_OPENED; else file_info = FILE_OVERWRITTEN; if ((req->CreateDisposition & FILE_CREATE_MASK_LE) == FILE_SUPERSEDE_LE) file_info = FILE_SUPERSEDED; } else if (open_flags & O_CREAT) { file_info = FILE_CREATED; } ksmbd_vfs_set_fadvise(filp, req->CreateOptions); /* Obtain Volatile-ID */ fp = ksmbd_open_fd(work, filp); if (IS_ERR(fp)) { fput(filp); rc = PTR_ERR(fp); fp = NULL; goto err_out; } /* Get Persistent-ID */ ksmbd_open_durable_fd(fp); if (!has_file_id(fp->persistent_id)) { rc = -ENOMEM; goto err_out; } fp->cdoption = req->CreateDisposition; fp->daccess = daccess; fp->saccess = req->ShareAccess; fp->coption = req->CreateOptions; /* Set default windows and posix acls if creating new file */ if (created) { int posix_acl_rc; struct inode *inode = d_inode(path.dentry); posix_acl_rc = ksmbd_vfs_inherit_posix_acl(idmap, &path, d_inode(path.dentry->d_parent)); if (posix_acl_rc) ksmbd_debug(SMB, "inherit posix acl failed : %d\n", posix_acl_rc); if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) { rc = smb_inherit_dacl(conn, &path, sess->user->uid, sess->user->gid); } if (rc) { rc = smb2_create_sd_buffer(work, req, &path); if (rc) { if (posix_acl_rc) ksmbd_vfs_set_init_posix_acl(idmap, &path); if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) { struct smb_fattr fattr; struct smb_ntsd *pntsd; int pntsd_size, ace_num = 0; ksmbd_acls_fattr(&fattr, idmap, inode); if (fattr.cf_acls) ace_num = fattr.cf_acls->a_count; if (fattr.cf_dacls) ace_num += fattr.cf_dacls->a_count; pntsd = kmalloc(sizeof(struct smb_ntsd) + sizeof(struct smb_sid) * 3 + sizeof(struct smb_acl) + sizeof(struct smb_ace) * ace_num * 2, GFP_KERNEL); if (!pntsd) { posix_acl_release(fattr.cf_acls); posix_acl_release(fattr.cf_dacls); goto err_out; } rc = build_sec_desc(idmap, pntsd, NULL, 0, OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO, &pntsd_size, &fattr); posix_acl_release(fattr.cf_acls); 
posix_acl_release(fattr.cf_dacls); if (rc) { kfree(pntsd); goto err_out; } rc = ksmbd_vfs_set_sd_xattr(conn, idmap, &path, pntsd, pntsd_size); kfree(pntsd); if (rc) pr_err("failed to store ntacl in xattr : %d\n", rc); } } } rc = 0; } if (stream_name) { rc = smb2_set_stream_name_xattr(&path, fp, stream_name, s_type); if (rc) goto err_out; file_info = FILE_CREATED; } fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE | FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE)); if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC && !fp->attrib_only && !stream_name) { smb_break_all_oplock(work, fp); need_truncate = 1; } /* fp should be searchable through ksmbd_inode.m_fp_list * after daccess, saccess, attrib_only, and stream are * initialized. */ write_lock(&fp->f_ci->m_lock); list_add(&fp->node, &fp->f_ci->m_fp_list); write_unlock(&fp->f_ci->m_lock); /* Check delete pending among previous fp before oplock break */ if (ksmbd_inode_pending_delete(fp)) { rc = -EBUSY; goto err_out; } share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp); if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) || (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) { if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) { rc = share_ret; goto err_out; } } else { if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { req_op_level = smb2_map_lease_to_oplock(lc->req_state); ksmbd_debug(SMB, "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n", name, req_op_level, lc->req_state); rc = find_same_lease_key(sess, fp->f_ci, lc); if (rc) goto err_out; } else if (open_flags == O_RDONLY && (req_op_level == SMB2_OPLOCK_LEVEL_BATCH || req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) req_op_level = SMB2_OPLOCK_LEVEL_II; rc = smb_grant_oplock(work, req_op_level, fp->persistent_id, fp, le32_to_cpu(req->hdr.Id.SyncId.TreeId), lc, share_ret); if (rc < 0) goto err_out; } if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) ksmbd_fd_set_delete_on_close(fp, file_info); if (need_truncate) { rc = smb2_create_truncate(&path); if (rc) goto err_out; } if (req->CreateContextsOffset) { struct create_alloc_size_req *az_req; az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req, SMB2_CREATE_ALLOCATION_SIZE, 4); if (IS_ERR(az_req)) { rc = PTR_ERR(az_req); goto err_out; } else if (az_req) { loff_t alloc_size; int err; if (le16_to_cpu(az_req->ccontext.DataOffset) + le32_to_cpu(az_req->ccontext.DataLength) < sizeof(struct create_alloc_size_req)) { rc = -EINVAL; goto err_out; } alloc_size = le64_to_cpu(az_req->AllocationSize); ksmbd_debug(SMB, "request smb2 create allocate size : %llu\n", alloc_size); smb_break_all_levII_oplock(work, fp, 1); err = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0, alloc_size); if (err < 0) ksmbd_debug(SMB, "vfs_fallocate is failed : %d\n", err); } context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4); if (IS_ERR(context)) { rc = PTR_ERR(context); goto err_out; } else if (context) { ksmbd_debug(SMB, "get query on disk id context\n"); query_disk_id = 1; } } rc = ksmbd_vfs_getattr(&path, &stat); if (rc) goto err_out; if (stat.result_mask & STATX_BTIME) fp->create_time = ksmbd_UnixTimeToNT(stat.btime); else fp->create_time = ksmbd_UnixTimeToNT(stat.ctime); if (req->FileAttributes || fp->f_ci->m_fattr == 0) fp->f_ci->m_fattr = cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes))); if (!created) smb2_update_xattrs(tcon, &path, fp); else smb2_new_xattrs(tcon, &path, fp); 
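	/*
	 * From here on the create response is assembled: the fixed
	 * smb2_create_rsp fields first, then any create contexts packed
	 * back to back in rsp->Buffer.  Each context's Next field carries
	 * the byte offset of the following context (0 for the last one),
	 * so with, say, a lease plus a maximal-access context the layout
	 * is:
	 *
	 *   Buffer + 0                  lease context, Next = create_lease_size
	 *   Buffer + create_lease_size  mxac context,  Next = 0
	 *
	 * iov_len accumulates the total length so that ksmbd_iov_pin_rsp()
	 * pins exactly the bytes that were written.
	 */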
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE); rsp->StructureSize = cpu_to_le16(89); rcu_read_lock(); opinfo = rcu_dereference(fp->f_opinfo); rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0; rcu_read_unlock(); rsp->Flags = 0; rsp->CreateAction = cpu_to_le32(file_info); rsp->CreationTime = cpu_to_le64(fp->create_time); time = ksmbd_UnixTimeToNT(stat.atime); rsp->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.mtime); rsp->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.ctime); rsp->ChangeTime = cpu_to_le64(time); rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.blocks << 9); rsp->EndofFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); rsp->FileAttributes = fp->f_ci->m_fattr; rsp->Reserved2 = 0; rsp->PersistentFileId = fp->persistent_id; rsp->VolatileFileId = fp->volatile_id; rsp->CreateContextsOffset = 0; rsp->CreateContextsLength = 0; iov_len = offsetof(struct smb2_create_rsp, Buffer); /* If lease is request send lease context response */ if (opinfo && opinfo->is_lease) { struct create_context *lease_ccontext; ksmbd_debug(SMB, "lease granted on(%s) lease state 0x%x\n", name, opinfo->o_lease->state); rsp->OplockLevel = SMB2_OPLOCK_LEVEL_LEASE; lease_ccontext = (struct create_context *)rsp->Buffer; contxt_cnt++; create_lease_buf(rsp->Buffer, opinfo->o_lease); le32_add_cpu(&rsp->CreateContextsLength, conn->vals->create_lease_size); iov_len += conn->vals->create_lease_size; next_ptr = &lease_ccontext->Next; next_off = conn->vals->create_lease_size; } if (maximal_access_ctxt) { struct create_context *mxac_ccontext; if (maximal_access == 0) ksmbd_vfs_query_maximal_access(idmap, path.dentry, &maximal_access); mxac_ccontext = (struct create_context *)(rsp->Buffer + le32_to_cpu(rsp->CreateContextsLength)); contxt_cnt++; create_mxac_rsp_buf(rsp->Buffer + le32_to_cpu(rsp->CreateContextsLength), le32_to_cpu(maximal_access)); le32_add_cpu(&rsp->CreateContextsLength, conn->vals->create_mxac_size); iov_len += conn->vals->create_mxac_size; if (next_ptr) *next_ptr = cpu_to_le32(next_off); next_ptr = &mxac_ccontext->Next; next_off = conn->vals->create_mxac_size; } if (query_disk_id) { struct create_context *disk_id_ccontext; disk_id_ccontext = (struct create_context *)(rsp->Buffer + le32_to_cpu(rsp->CreateContextsLength)); contxt_cnt++; create_disk_id_rsp_buf(rsp->Buffer + le32_to_cpu(rsp->CreateContextsLength), stat.ino, tcon->id); le32_add_cpu(&rsp->CreateContextsLength, conn->vals->create_disk_id_size); iov_len += conn->vals->create_disk_id_size; if (next_ptr) *next_ptr = cpu_to_le32(next_off); next_ptr = &disk_id_ccontext->Next; next_off = conn->vals->create_disk_id_size; } if (posix_ctxt) { contxt_cnt++; create_posix_rsp_buf(rsp->Buffer + le32_to_cpu(rsp->CreateContextsLength), fp); le32_add_cpu(&rsp->CreateContextsLength, conn->vals->create_posix_size); iov_len += conn->vals->create_posix_size; if (next_ptr) *next_ptr = cpu_to_le32(next_off); } if (contxt_cnt > 0) { rsp->CreateContextsOffset = cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer)); } err_out: if (file_present || created) { inode_unlock(d_inode(parent_path.dentry)); path_put(&path); path_put(&parent_path); } ksmbd_revert_fsids(work); err_out1: if (!rc) rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len); if (rc) { if (rc == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (rc == -EOPNOTSUPP) rsp->hdr.Status = STATUS_NOT_SUPPORTED; else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (rc == 
-ENOENT) rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID; else if (rc == -EPERM) rsp->hdr.Status = STATUS_SHARING_VIOLATION; else if (rc == -EBUSY) rsp->hdr.Status = STATUS_DELETE_PENDING; else if (rc == -EBADF) rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND; else if (rc == -ENOEXEC) rsp->hdr.Status = STATUS_DUPLICATE_OBJECTID; else if (rc == -ENXIO) rsp->hdr.Status = STATUS_NO_SUCH_DEVICE; else if (rc == -EEXIST) rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION; else if (rc == -EMFILE) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; if (!rsp->hdr.Status) rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; if (fp) ksmbd_fd_put(work, fp); smb2_set_err_rsp(work); ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status); } kfree(name); kfree(lc); return 0; } static int readdir_info_level_struct_sz(int info_level) { switch (info_level) { case FILE_FULL_DIRECTORY_INFORMATION: return sizeof(struct file_full_directory_info); case FILE_BOTH_DIRECTORY_INFORMATION: return sizeof(struct file_both_directory_info); case FILE_DIRECTORY_INFORMATION: return sizeof(struct file_directory_info); case FILE_NAMES_INFORMATION: return sizeof(struct file_names_info); case FILEID_FULL_DIRECTORY_INFORMATION: return sizeof(struct file_id_full_dir_info); case FILEID_BOTH_DIRECTORY_INFORMATION: return sizeof(struct file_id_both_directory_info); case SMB_FIND_FILE_POSIX_INFO: return sizeof(struct smb2_posix_info); default: return -EOPNOTSUPP; } } static int dentry_name(struct ksmbd_dir_info *d_info, int info_level) { switch (info_level) { case FILE_FULL_DIRECTORY_INFORMATION: { struct file_full_directory_info *ffdinfo; ffdinfo = (struct file_full_directory_info *)d_info->rptr; d_info->rptr += le32_to_cpu(ffdinfo->NextEntryOffset); d_info->name = ffdinfo->FileName; d_info->name_len = le32_to_cpu(ffdinfo->FileNameLength); return 0; } case FILE_BOTH_DIRECTORY_INFORMATION: { struct file_both_directory_info *fbdinfo; fbdinfo = (struct file_both_directory_info *)d_info->rptr; d_info->rptr += le32_to_cpu(fbdinfo->NextEntryOffset); d_info->name = fbdinfo->FileName; d_info->name_len = le32_to_cpu(fbdinfo->FileNameLength); return 0; } case FILE_DIRECTORY_INFORMATION: { struct file_directory_info *fdinfo; fdinfo = (struct file_directory_info *)d_info->rptr; d_info->rptr += le32_to_cpu(fdinfo->NextEntryOffset); d_info->name = fdinfo->FileName; d_info->name_len = le32_to_cpu(fdinfo->FileNameLength); return 0; } case FILE_NAMES_INFORMATION: { struct file_names_info *fninfo; fninfo = (struct file_names_info *)d_info->rptr; d_info->rptr += le32_to_cpu(fninfo->NextEntryOffset); d_info->name = fninfo->FileName; d_info->name_len = le32_to_cpu(fninfo->FileNameLength); return 0; } case FILEID_FULL_DIRECTORY_INFORMATION: { struct file_id_full_dir_info *dinfo; dinfo = (struct file_id_full_dir_info *)d_info->rptr; d_info->rptr += le32_to_cpu(dinfo->NextEntryOffset); d_info->name = dinfo->FileName; d_info->name_len = le32_to_cpu(dinfo->FileNameLength); return 0; } case FILEID_BOTH_DIRECTORY_INFORMATION: { struct file_id_both_directory_info *fibdinfo; fibdinfo = (struct file_id_both_directory_info *)d_info->rptr; d_info->rptr += le32_to_cpu(fibdinfo->NextEntryOffset); d_info->name = fibdinfo->FileName; d_info->name_len = le32_to_cpu(fibdinfo->FileNameLength); return 0; } case SMB_FIND_FILE_POSIX_INFO: { struct smb2_posix_info *posix_info; posix_info = (struct smb2_posix_info *)d_info->rptr; d_info->rptr += le32_to_cpu(posix_info->NextEntryOffset); d_info->name = posix_info->name; d_info->name_len = le32_to_cpu(posix_info->name_len); return 0; } default: 
return -EINVAL; } } /** * smb2_populate_readdir_entry() - encode directory entry in smb2 response * buffer * @conn: connection instance * @info_level: smb information level * @d_info: structure included variables for query dir * @ksmbd_kstat: ksmbd wrapper of dirent stat information * * if directory has many entries, find first can't read it fully. * find next might be called multiple times to read remaining dir entries * * Return: 0 on success, otherwise error */ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, struct ksmbd_dir_info *d_info, struct ksmbd_kstat *ksmbd_kstat) { int next_entry_offset = 0; char *conv_name; int conv_len; void *kstat; int struct_sz, rc = 0; conv_name = ksmbd_convert_dir_info_name(d_info, conn->local_nls, &conv_len); if (!conv_name) return -ENOMEM; /* Somehow the name has only terminating NULL bytes */ if (conv_len < 0) { rc = -EINVAL; goto free_conv_name; } struct_sz = readdir_info_level_struct_sz(info_level) + conv_len; next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT); d_info->last_entry_off_align = next_entry_offset - struct_sz; if (next_entry_offset > d_info->out_buf_len) { d_info->out_buf_len = 0; rc = -ENOSPC; goto free_conv_name; } kstat = d_info->wptr; if (info_level != FILE_NAMES_INFORMATION) kstat = ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat); switch (info_level) { case FILE_FULL_DIRECTORY_INFORMATION: { struct file_full_directory_info *ffdinfo; ffdinfo = (struct file_full_directory_info *)kstat; ffdinfo->FileNameLength = cpu_to_le32(conv_len); ffdinfo->EaSize = smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); if (ffdinfo->EaSize) ffdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; if (d_info->hide_dot_file && d_info->name[0] == '.') ffdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(ffdinfo->FileName, conv_name, conv_len); ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_BOTH_DIRECTORY_INFORMATION: { struct file_both_directory_info *fbdinfo; fbdinfo = (struct file_both_directory_info *)kstat; fbdinfo->FileNameLength = cpu_to_le32(conv_len); fbdinfo->EaSize = smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); if (fbdinfo->EaSize) fbdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; fbdinfo->ShortNameLength = 0; fbdinfo->Reserved = 0; if (d_info->hide_dot_file && d_info->name[0] == '.') fbdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(fbdinfo->FileName, conv_name, conv_len); fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_DIRECTORY_INFORMATION: { struct file_directory_info *fdinfo; fdinfo = (struct file_directory_info *)kstat; fdinfo->FileNameLength = cpu_to_le32(conv_len); if (d_info->hide_dot_file && d_info->name[0] == '.') fdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(fdinfo->FileName, conv_name, conv_len); fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_NAMES_INFORMATION: { struct file_names_info *fninfo; fninfo = (struct file_names_info *)kstat; fninfo->FileNameLength = cpu_to_le32(conv_len); memcpy(fninfo->FileName, conv_name, conv_len); fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILEID_FULL_DIRECTORY_INFORMATION: { struct file_id_full_dir_info *dinfo; dinfo = (struct file_id_full_dir_info *)kstat; dinfo->FileNameLength = cpu_to_le32(conv_len); dinfo->EaSize = smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); if (dinfo->EaSize) dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; 
dinfo->Reserved = 0; dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); if (d_info->hide_dot_file && d_info->name[0] == '.') dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(dinfo->FileName, conv_name, conv_len); dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILEID_BOTH_DIRECTORY_INFORMATION: { struct file_id_both_directory_info *fibdinfo; fibdinfo = (struct file_id_both_directory_info *)kstat; fibdinfo->FileNameLength = cpu_to_le32(conv_len); fibdinfo->EaSize = smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); if (fibdinfo->EaSize) fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); fibdinfo->ShortNameLength = 0; fibdinfo->Reserved = 0; fibdinfo->Reserved2 = cpu_to_le16(0); if (d_info->hide_dot_file && d_info->name[0] == '.') fibdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(fibdinfo->FileName, conv_name, conv_len); fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case SMB_FIND_FILE_POSIX_INFO: { struct smb2_posix_info *posix_info; u64 time; posix_info = (struct smb2_posix_info *)kstat; posix_info->Ignored = 0; posix_info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time); time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime); posix_info->ChangeTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->atime); posix_info->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->mtime); posix_info->LastWriteTime = cpu_to_le64(time); posix_info->EndOfFile = cpu_to_le64(ksmbd_kstat->kstat->size); posix_info->AllocationSize = cpu_to_le64(ksmbd_kstat->kstat->blocks << 9); posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev); posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink); posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777); posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino); posix_info->DosAttributes = S_ISDIR(ksmbd_kstat->kstat->mode) ? FILE_ATTRIBUTE_DIRECTORY_LE : FILE_ATTRIBUTE_ARCHIVE_LE; if (d_info->hide_dot_file && d_info->name[0] == '.') posix_info->DosAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; /* * SidBuffer(32) contain two sids(Domain sid(16), UNIX group sid(16)). * UNIX sid(16) = revision(1) + num_subauth(1) + authority(6) + * sub_auth(4 * 1(num_subauth)) + RID(4). 
*/ id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid), SIDUNIX_USER, (struct smb_sid *)&posix_info->SidBuffer[0]); id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid), SIDUNIX_GROUP, (struct smb_sid *)&posix_info->SidBuffer[16]); memcpy(posix_info->name, conv_name, conv_len); posix_info->name_len = cpu_to_le32(conv_len); posix_info->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } } /* switch (info_level) */ d_info->last_entry_offset = d_info->data_count; d_info->data_count += next_entry_offset; d_info->out_buf_len -= next_entry_offset; d_info->wptr += next_entry_offset; ksmbd_debug(SMB, "info_level : %d, buf_len :%d, next_offset : %d, data_count : %d\n", info_level, d_info->out_buf_len, next_entry_offset, d_info->data_count); free_conv_name: kfree(conv_name); return rc; } struct smb2_query_dir_private { struct ksmbd_work *work; char *search_pattern; struct ksmbd_file *dir_fp; struct ksmbd_dir_info *d_info; int info_level; }; static void lock_dir(struct ksmbd_file *dir_fp) { struct dentry *dir = dir_fp->filp->f_path.dentry; inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); } static void unlock_dir(struct ksmbd_file *dir_fp) { struct dentry *dir = dir_fp->filp->f_path.dentry; inode_unlock(d_inode(dir)); } static int process_query_dir_entries(struct smb2_query_dir_private *priv) { struct mnt_idmap *idmap = file_mnt_idmap(priv->dir_fp->filp); struct kstat kstat; struct ksmbd_kstat ksmbd_kstat; int rc; int i; for (i = 0; i < priv->d_info->num_entry; i++) { struct dentry *dent; if (dentry_name(priv->d_info, priv->info_level)) return -EINVAL; lock_dir(priv->dir_fp); dent = lookup_one(idmap, priv->d_info->name, priv->dir_fp->filp->f_path.dentry, priv->d_info->name_len); unlock_dir(priv->dir_fp); if (IS_ERR(dent)) { ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n", priv->d_info->name, PTR_ERR(dent)); continue; } if (unlikely(d_is_negative(dent))) { dput(dent); ksmbd_debug(SMB, "Negative dentry `%s'\n", priv->d_info->name); continue; } ksmbd_kstat.kstat = &kstat; if (priv->info_level != FILE_NAMES_INFORMATION) ksmbd_vfs_fill_dentry_attrs(priv->work, idmap, dent, &ksmbd_kstat); rc = smb2_populate_readdir_entry(priv->work->conn, priv->info_level, priv->d_info, &ksmbd_kstat); dput(dent); if (rc) return rc; } return 0; } static int reserve_populate_dentry(struct ksmbd_dir_info *d_info, int info_level) { int struct_sz; int conv_len; int next_entry_offset; struct_sz = readdir_info_level_struct_sz(info_level); if (struct_sz == -EOPNOTSUPP) return -EOPNOTSUPP; conv_len = (d_info->name_len + 1) * 2; next_entry_offset = ALIGN(struct_sz + conv_len, KSMBD_DIR_INFO_ALIGNMENT); if (next_entry_offset > d_info->out_buf_len) { d_info->out_buf_len = 0; return -ENOSPC; } switch (info_level) { case FILE_FULL_DIRECTORY_INFORMATION: { struct file_full_directory_info *ffdinfo; ffdinfo = (struct file_full_directory_info *)d_info->wptr; memcpy(ffdinfo->FileName, d_info->name, d_info->name_len); ffdinfo->FileName[d_info->name_len] = 0x00; ffdinfo->FileNameLength = cpu_to_le32(d_info->name_len); ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_BOTH_DIRECTORY_INFORMATION: { struct file_both_directory_info *fbdinfo; fbdinfo = (struct file_both_directory_info *)d_info->wptr; memcpy(fbdinfo->FileName, d_info->name, d_info->name_len); fbdinfo->FileName[d_info->name_len] = 0x00; fbdinfo->FileNameLength = cpu_to_le32(d_info->name_len); fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_DIRECTORY_INFORMATION: { struct file_directory_info 
*fdinfo; fdinfo = (struct file_directory_info *)d_info->wptr; memcpy(fdinfo->FileName, d_info->name, d_info->name_len); fdinfo->FileName[d_info->name_len] = 0x00; fdinfo->FileNameLength = cpu_to_le32(d_info->name_len); fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILE_NAMES_INFORMATION: { struct file_names_info *fninfo; fninfo = (struct file_names_info *)d_info->wptr; memcpy(fninfo->FileName, d_info->name, d_info->name_len); fninfo->FileName[d_info->name_len] = 0x00; fninfo->FileNameLength = cpu_to_le32(d_info->name_len); fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILEID_FULL_DIRECTORY_INFORMATION: { struct file_id_full_dir_info *dinfo; dinfo = (struct file_id_full_dir_info *)d_info->wptr; memcpy(dinfo->FileName, d_info->name, d_info->name_len); dinfo->FileName[d_info->name_len] = 0x00; dinfo->FileNameLength = cpu_to_le32(d_info->name_len); dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case FILEID_BOTH_DIRECTORY_INFORMATION: { struct file_id_both_directory_info *fibdinfo; fibdinfo = (struct file_id_both_directory_info *)d_info->wptr; memcpy(fibdinfo->FileName, d_info->name, d_info->name_len); fibdinfo->FileName[d_info->name_len] = 0x00; fibdinfo->FileNameLength = cpu_to_le32(d_info->name_len); fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } case SMB_FIND_FILE_POSIX_INFO: { struct smb2_posix_info *posix_info; posix_info = (struct smb2_posix_info *)d_info->wptr; memcpy(posix_info->name, d_info->name, d_info->name_len); posix_info->name[d_info->name_len] = 0x00; posix_info->name_len = cpu_to_le32(d_info->name_len); posix_info->NextEntryOffset = cpu_to_le32(next_entry_offset); break; } } /* switch (info_level) */ d_info->num_entry++; d_info->out_buf_len -= next_entry_offset; d_info->wptr += next_entry_offset; return 0; } static bool __query_dir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct ksmbd_readdir_data *buf; struct smb2_query_dir_private *priv; struct ksmbd_dir_info *d_info; int rc; buf = container_of(ctx, struct ksmbd_readdir_data, ctx); priv = buf->private; d_info = priv->d_info; /* dot and dotdot entries are already reserved */ if (!strcmp(".", name) || !strcmp("..", name)) return true; if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name)) return true; if (!match_pattern(name, namlen, priv->search_pattern)) return true; d_info->name = name; d_info->name_len = namlen; rc = reserve_populate_dentry(d_info, priv->info_level); if (rc) return false; if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY) d_info->out_buf_len = 0; return true; } static int verify_info_level(int info_level) { switch (info_level) { case FILE_FULL_DIRECTORY_INFORMATION: case FILE_BOTH_DIRECTORY_INFORMATION: case FILE_DIRECTORY_INFORMATION: case FILE_NAMES_INFORMATION: case FILEID_FULL_DIRECTORY_INFORMATION: case FILEID_BOTH_DIRECTORY_INFORMATION: case SMB_FIND_FILE_POSIX_INFO: break; default: return -EOPNOTSUPP; } return 0; } static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len) { int free_len; free_len = (int)(work->response_sz - (get_rfc1002_len(work->response_buf) + 4)) - hdr2_len; return free_len; } static int smb2_calc_max_out_buf_len(struct ksmbd_work *work, unsigned short hdr2_len, unsigned int out_buf_len) { int free_len; if (out_buf_len > work->conn->vals->max_trans_size) return -EINVAL; free_len = smb2_resp_buf_len(work, hdr2_len); if (free_len < 0) return -EINVAL; return min_t(int, out_buf_len, free_len); } int 
smb2_query_dir(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_query_directory_req *req; struct smb2_query_directory_rsp *rsp; struct ksmbd_share_config *share = work->tcon->share_conf; struct ksmbd_file *dir_fp = NULL; struct ksmbd_dir_info d_info; int rc = 0; char *srch_ptr = NULL; unsigned char srch_flag; int buffer_sz; struct smb2_query_dir_private query_dir_private = {NULL, }; WORK_BUFFERS(work, req, rsp); if (ksmbd_override_fsids(work)) { rsp->hdr.Status = STATUS_NO_MEMORY; smb2_set_err_rsp(work); return -ENOMEM; } rc = verify_info_level(req->FileInformationClass); if (rc) { rc = -EFAULT; goto err_out2; } dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!dir_fp) { rc = -EBADF; goto err_out2; } if (!(dir_fp->daccess & FILE_LIST_DIRECTORY_LE) || inode_permission(file_mnt_idmap(dir_fp->filp), file_inode(dir_fp->filp), MAY_READ | MAY_EXEC)) { pr_err("no right to enumerate directory (%pD)\n", dir_fp->filp); rc = -EACCES; goto err_out2; } if (!S_ISDIR(file_inode(dir_fp->filp)->i_mode)) { pr_err("can't do query dir for a file\n"); rc = -EINVAL; goto err_out2; } srch_flag = req->Flags; srch_ptr = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->FileNameLength), 1, conn->local_nls); if (IS_ERR(srch_ptr)) { ksmbd_debug(SMB, "Search Pattern not found\n"); rc = -EINVAL; goto err_out2; } else { ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr); } if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) { ksmbd_debug(SMB, "Restart directory scan\n"); generic_file_llseek(dir_fp->filp, 0, SEEK_SET); } memset(&d_info, 0, sizeof(struct ksmbd_dir_info)); d_info.wptr = (char *)rsp->Buffer; d_info.rptr = (char *)rsp->Buffer; d_info.out_buf_len = smb2_calc_max_out_buf_len(work, 8, le32_to_cpu(req->OutputBufferLength)); if (d_info.out_buf_len < 0) { rc = -EINVAL; goto err_out; } d_info.flags = srch_flag; /* * reserve dot and dotdot entries in head of buffer * in first response */ rc = ksmbd_populate_dot_dotdot_entries(work, req->FileInformationClass, dir_fp, &d_info, srch_ptr, smb2_populate_readdir_entry); if (rc == -ENOSPC) rc = 0; else if (rc) goto err_out; if (test_share_config_flag(share, KSMBD_SHARE_FLAG_HIDE_DOT_FILES)) d_info.hide_dot_file = true; buffer_sz = d_info.out_buf_len; d_info.rptr = d_info.wptr; query_dir_private.work = work; query_dir_private.search_pattern = srch_ptr; query_dir_private.dir_fp = dir_fp; query_dir_private.d_info = &d_info; query_dir_private.info_level = req->FileInformationClass; dir_fp->readdir_data.private = &query_dir_private; set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir); rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx); /* * req->OutputBufferLength is too small to contain even one entry. * In this case, it immediately returns OutputBufferLength 0 to client. 
*/ if (!d_info.out_buf_len && !d_info.num_entry) goto no_buf_len; if (rc > 0 || rc == -ENOSPC) rc = 0; else if (rc) goto err_out; d_info.wptr = d_info.rptr; d_info.out_buf_len = buffer_sz; rc = process_query_dir_entries(&query_dir_private); if (rc) goto err_out; if (!d_info.data_count && d_info.out_buf_len >= 0) { if (srch_flag & SMB2_RETURN_SINGLE_ENTRY && !is_asterisk(srch_ptr)) { rsp->hdr.Status = STATUS_NO_SUCH_FILE; } else { dir_fp->dot_dotdot[0] = dir_fp->dot_dotdot[1] = 0; rsp->hdr.Status = STATUS_NO_MORE_FILES; } rsp->StructureSize = cpu_to_le16(9); rsp->OutputBufferOffset = cpu_to_le16(0); rsp->OutputBufferLength = cpu_to_le32(0); rsp->Buffer[0] = 0; rc = ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_query_directory_rsp)); if (rc) goto err_out; } else { no_buf_len: ((struct file_directory_info *) ((char *)rsp->Buffer + d_info.last_entry_offset)) ->NextEntryOffset = 0; if (d_info.data_count >= d_info.last_entry_off_align) d_info.data_count -= d_info.last_entry_off_align; rsp->StructureSize = cpu_to_le16(9); rsp->OutputBufferOffset = cpu_to_le16(72); rsp->OutputBufferLength = cpu_to_le32(d_info.data_count); rc = ksmbd_iov_pin_rsp(work, (void *)rsp, offsetof(struct smb2_query_directory_rsp, Buffer) + d_info.data_count); if (rc) goto err_out; } kfree(srch_ptr); ksmbd_fd_put(work, dir_fp); ksmbd_revert_fsids(work); return 0; err_out: pr_err("error while processing smb2 query dir rc = %d\n", rc); kfree(srch_ptr); err_out2: if (rc == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (rc == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (rc == -ENOENT) rsp->hdr.Status = STATUS_NO_SUCH_FILE; else if (rc == -EBADF) rsp->hdr.Status = STATUS_FILE_CLOSED; else if (rc == -ENOMEM) rsp->hdr.Status = STATUS_NO_MEMORY; else if (rc == -EFAULT) rsp->hdr.Status = STATUS_INVALID_INFO_CLASS; else if (rc == -EIO) rsp->hdr.Status = STATUS_FILE_CORRUPT_ERROR; if (!rsp->hdr.Status) rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; smb2_set_err_rsp(work); ksmbd_fd_put(work, dir_fp); ksmbd_revert_fsids(work); return 0; } /** * buffer_check_err() - helper function to check buffer errors * @reqOutputBufferLength: max buffer length expected in command response * @rsp: query info response buffer contains output buffer length * @rsp_org: base response buffer pointer in case of chained response * * Return: 0 on success, otherwise error */ static int buffer_check_err(int reqOutputBufferLength, struct smb2_query_info_rsp *rsp, void *rsp_org) { if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) { pr_err("Invalid Buffer Size Requested\n"); rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH; *(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr)); return -EINVAL; } return 0; } static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp, void *rsp_org) { struct smb2_file_standard_info *sinfo; sinfo = (struct smb2_file_standard_info *)rsp->Buffer; sinfo->AllocationSize = cpu_to_le64(4096); sinfo->EndOfFile = cpu_to_le64(0); sinfo->NumberOfLinks = cpu_to_le32(1); sinfo->DeletePending = 1; sinfo->Directory = 0; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_standard_info)); } static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num, void *rsp_org) { struct smb2_file_internal_info *file_info; file_info = (struct smb2_file_internal_info *)rsp->Buffer; /* any unique number */ file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63)); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_internal_info)); } static int 
smb2_get_info_file_pipe(struct ksmbd_session *sess, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp, void *rsp_org) { u64 id; int rc; /* * Windows can sometimes send a query file info request on a * pipe without opening it, checking error condition here */ id = req->VolatileFileId; if (!ksmbd_session_rpc_method(sess, id)) return -ENOENT; ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n", req->FileInfoClass, req->VolatileFileId); switch (req->FileInfoClass) { case FILE_STANDARD_INFORMATION: get_standard_info_pipe(rsp, rsp_org); rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), rsp, rsp_org); break; case FILE_INTERNAL_INFORMATION: get_internal_info_pipe(rsp, id, rsp_org); rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), rsp, rsp_org); break; default: ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n", req->FileInfoClass); rc = -EOPNOTSUPP; } return rc; } /** * smb2_get_ea() - handler for smb2 get extended attribute command * @work: smb work containing query info command buffer * @fp: ksmbd_file pointer * @req: get extended attribute request * @rsp: response buffer pointer * @rsp_org: base response buffer pointer in case of chained response * * Return: 0 on success, otherwise error */ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp, void *rsp_org) { struct smb2_ea_info *eainfo, *prev_eainfo; char *name, *ptr, *xattr_list = NULL, *buf; int rc, name_len, value_len, xattr_list_len, idx; ssize_t buf_free_len, alignment_bytes, next_offset, rsp_data_cnt = 0; struct smb2_ea_info_req *ea_req = NULL; const struct path *path; struct mnt_idmap *idmap = file_mnt_idmap(fp->filp); if (!(fp->daccess & FILE_READ_EA_LE)) { pr_err("Not permitted to read ext attr : 0x%x\n", fp->daccess); return -EACCES; } path = &fp->filp->f_path; /* single EA entry is requested with given user.* name */ if (req->InputBufferLength) { if (le32_to_cpu(req->InputBufferLength) < sizeof(struct smb2_ea_info_req)) return -EINVAL; ea_req = (struct smb2_ea_info_req *)req->Buffer; } else { /* need to send all EAs, if no specific EA is requested */ if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY) ksmbd_debug(SMB, "All EAs are requested but need to send single EA entry in rsp flags 0x%x\n", le32_to_cpu(req->Flags)); } buf_free_len = smb2_calc_max_out_buf_len(work, 8, le32_to_cpu(req->OutputBufferLength)); if (buf_free_len < 0) return -EINVAL; rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (rc < 0) { rsp->hdr.Status = STATUS_INVALID_HANDLE; goto out; } else if (!rc) { /* there is no EA in the file */ ksmbd_debug(SMB, "no ea data in the file\n"); goto done; } xattr_list_len = rc; ptr = (char *)rsp->Buffer; eainfo = (struct smb2_ea_info *)ptr; prev_eainfo = eainfo; idx = 0; while (idx < xattr_list_len) { name = xattr_list + idx; name_len = strlen(name); ksmbd_debug(SMB, "%s, len %d\n", name, name_len); idx += name_len + 1; /* * CIFS does not support EAs other than the user.* namespace, * still keep the framework generic, to list other attrs * in future. 
*/ if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) continue; if (!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN)) continue; if (req->InputBufferLength && strncmp(&name[XATTR_USER_PREFIX_LEN], ea_req->name, ea_req->EaNameLength)) continue; if (!strncmp(&name[XATTR_USER_PREFIX_LEN], DOS_ATTRIBUTE_PREFIX, DOS_ATTRIBUTE_PREFIX_LEN)) continue; if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) name_len -= XATTR_USER_PREFIX_LEN; ptr = eainfo->name + name_len + 1; buf_free_len -= (offsetof(struct smb2_ea_info, name) + name_len + 1); /* bail out if xattr can't fit in buf_free_len */ value_len = ksmbd_vfs_getxattr(idmap, path->dentry, name, &buf); if (value_len <= 0) { rc = -ENOENT; rsp->hdr.Status = STATUS_INVALID_HANDLE; goto out; } buf_free_len -= value_len; if (buf_free_len < 0) { kfree(buf); break; } memcpy(ptr, buf, value_len); kfree(buf); ptr += value_len; eainfo->Flags = 0; eainfo->EaNameLength = name_len; if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) memcpy(eainfo->name, &name[XATTR_USER_PREFIX_LEN], name_len); else memcpy(eainfo->name, name, name_len); eainfo->name[name_len] = '\0'; eainfo->EaValueLength = cpu_to_le16(value_len); next_offset = offsetof(struct smb2_ea_info, name) + name_len + 1 + value_len; /* align next xattr entry at 4 byte boundary */ alignment_bytes = ((next_offset + 3) & ~3) - next_offset; if (alignment_bytes) { memset(ptr, '\0', alignment_bytes); ptr += alignment_bytes; next_offset += alignment_bytes; buf_free_len -= alignment_bytes; } eainfo->NextEntryOffset = cpu_to_le32(next_offset); prev_eainfo = eainfo; eainfo = (struct smb2_ea_info *)ptr; rsp_data_cnt += next_offset; if (req->InputBufferLength) { ksmbd_debug(SMB, "single entry requested\n"); break; } } /* no more ea entries */ prev_eainfo->NextEntryOffset = 0; done: rc = 0; if (rsp_data_cnt == 0) rsp->hdr.Status = STATUS_NO_EAS_ON_FILE; rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt); out: kvfree(xattr_list); return rc; } static void get_file_access_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_access_info *file_info; file_info = (struct smb2_file_access_info *)rsp->Buffer; file_info->AccessFlags = fp->daccess; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_access_info)); } static int get_file_basic_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_basic_info *basic_info; struct kstat stat; u64 time; if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { pr_err("no right to read the attributes : 0x%x\n", fp->daccess); return -EACCES; } basic_info = (struct smb2_file_basic_info *)rsp->Buffer; generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, file_inode(fp->filp), &stat); basic_info->CreationTime = cpu_to_le64(fp->create_time); time = ksmbd_UnixTimeToNT(stat.atime); basic_info->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.mtime); basic_info->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.ctime); basic_info->ChangeTime = cpu_to_le64(time); basic_info->Attributes = fp->f_ci->m_fattr; basic_info->Pad1 = 0; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_basic_info)); return 0; } static void get_file_standard_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_standard_info *sinfo; unsigned int delete_pending; struct inode *inode; struct kstat stat; inode = file_inode(fp->filp); generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, 
inode, &stat); sinfo = (struct smb2_file_standard_info *)rsp->Buffer; delete_pending = ksmbd_inode_pending_delete(fp); sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9); sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); sinfo->DeletePending = delete_pending; sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_standard_info)); } static void get_file_alignment_info(struct smb2_query_info_rsp *rsp, void *rsp_org) { struct smb2_file_alignment_info *file_info; file_info = (struct smb2_file_alignment_info *)rsp->Buffer; file_info->AlignmentRequirement = 0; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_alignment_info)); } static int get_file_all_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct ksmbd_conn *conn = work->conn; struct smb2_file_all_info *file_info; unsigned int delete_pending; struct inode *inode; struct kstat stat; int conv_len; char *filename; u64 time; if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n", fp->daccess); return -EACCES; } filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path); if (IS_ERR(filename)) return PTR_ERR(filename); inode = file_inode(fp->filp); generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat); ksmbd_debug(SMB, "filename = %s\n", filename); delete_pending = ksmbd_inode_pending_delete(fp); file_info = (struct smb2_file_all_info *)rsp->Buffer; file_info->CreationTime = cpu_to_le64(fp->create_time); time = ksmbd_UnixTimeToNT(stat.atime); file_info->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.mtime); file_info->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.ctime); file_info->ChangeTime = cpu_to_le64(time); file_info->Attributes = fp->f_ci->m_fattr; file_info->Pad1 = 0; file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); file_info->DeletePending = delete_pending; file_info->Directory = S_ISDIR(stat.mode) ? 
1 : 0; file_info->Pad2 = 0; file_info->IndexNumber = cpu_to_le64(stat.ino); file_info->EASize = 0; file_info->AccessFlags = fp->daccess; file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos); file_info->Mode = fp->coption; file_info->AlignmentRequirement = 0; conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename, PATH_MAX, conn->local_nls, 0); conv_len *= 2; file_info->FileNameLength = cpu_to_le32(conv_len); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1); kfree(filename); return 0; } static void get_file_alternate_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct ksmbd_conn *conn = work->conn; struct smb2_file_alt_name_info *file_info; struct dentry *dentry = fp->filp->f_path.dentry; int conv_len; spin_lock(&dentry->d_lock); file_info = (struct smb2_file_alt_name_info *)rsp->Buffer; conv_len = ksmbd_extract_shortname(conn, dentry->d_name.name, file_info->FileName); spin_unlock(&dentry->d_lock); file_info->FileNameLength = cpu_to_le32(conv_len); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len); } static void get_file_stream_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct ksmbd_conn *conn = work->conn; struct smb2_file_stream_info *file_info; char *stream_name, *xattr_list = NULL, *stream_buf; struct kstat stat; const struct path *path = &fp->filp->f_path; ssize_t xattr_list_len; int nbytes = 0, streamlen, stream_name_len, next, idx = 0; int buf_free_len; struct smb2_query_info_req *req = ksmbd_req_buf_next(work); generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, file_inode(fp->filp), &stat); file_info = (struct smb2_file_stream_info *)rsp->Buffer; buf_free_len = smb2_calc_max_out_buf_len(work, 8, le32_to_cpu(req->OutputBufferLength)); if (buf_free_len < 0) goto out; xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; } else if (!xattr_list_len) { ksmbd_debug(SMB, "empty xattr in the file\n"); goto out; } while (idx < xattr_list_len) { stream_name = xattr_list + idx; streamlen = strlen(stream_name); idx += streamlen + 1; ksmbd_debug(SMB, "%s, len %d\n", stream_name, streamlen); if (strncmp(&stream_name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN)) continue; stream_name_len = streamlen - (XATTR_USER_PREFIX_LEN + STREAM_PREFIX_LEN); streamlen = stream_name_len; /* plus : size */ streamlen += 1; stream_buf = kmalloc(streamlen + 1, GFP_KERNEL); if (!stream_buf) break; streamlen = snprintf(stream_buf, streamlen + 1, ":%s", &stream_name[XATTR_NAME_STREAM_LEN]); next = sizeof(struct smb2_file_stream_info) + streamlen * 2; if (next > buf_free_len) { kfree(stream_buf); break; } file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes]; streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, stream_buf, streamlen, conn->local_nls, 0); streamlen *= 2; kfree(stream_buf); file_info->StreamNameLength = cpu_to_le32(streamlen); file_info->StreamSize = cpu_to_le64(stream_name_len); file_info->StreamAllocationSize = cpu_to_le64(stream_name_len); nbytes += next; buf_free_len -= next; file_info->NextEntryOffset = cpu_to_le32(next); } out: if (!S_ISDIR(stat.mode) && buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) { file_info = (struct smb2_file_stream_info *) &rsp->Buffer[nbytes]; streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, "::$DATA", 7, conn->local_nls, 0); streamlen *= 2; 
file_info->StreamNameLength = cpu_to_le32(streamlen); file_info->StreamSize = cpu_to_le64(stat.size); file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9); nbytes += sizeof(struct smb2_file_stream_info) + streamlen; } /* last entry offset should be 0 */ file_info->NextEntryOffset = 0; kvfree(xattr_list); rsp->OutputBufferLength = cpu_to_le32(nbytes); } static void get_file_internal_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_internal_info *file_info; struct kstat stat; generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, file_inode(fp->filp), &stat); file_info = (struct smb2_file_internal_info *)rsp->Buffer; file_info->IndexNumber = cpu_to_le64(stat.ino); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_internal_info)); } static int get_file_network_open_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_ntwrk_info *file_info; struct inode *inode; struct kstat stat; u64 time; if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { pr_err("no right to read the attributes : 0x%x\n", fp->daccess); return -EACCES; } file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer; inode = file_inode(fp->filp); generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat); file_info->CreationTime = cpu_to_le64(fp->create_time); time = ksmbd_UnixTimeToNT(stat.atime); file_info->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.mtime); file_info->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(stat.ctime); file_info->ChangeTime = cpu_to_le64(time); file_info->Attributes = fp->f_ci->m_fattr; file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->Reserved = cpu_to_le32(0); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_ntwrk_info)); return 0; } static void get_file_ea_info(struct smb2_query_info_rsp *rsp, void *rsp_org) { struct smb2_file_ea_info *file_info; file_info = (struct smb2_file_ea_info *)rsp->Buffer; file_info->EASize = 0; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_ea_info)); } static void get_file_position_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_pos_info *file_info; file_info = (struct smb2_file_pos_info *)rsp->Buffer; file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_pos_info)); } static void get_file_mode_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_mode_info *file_info; file_info = (struct smb2_file_mode_info *)rsp->Buffer; file_info->Mode = fp->coption & FILE_MODE_INFO_MASK; rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_mode_info)); } static void get_file_compression_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { struct smb2_file_comp_info *file_info; struct kstat stat; generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, file_inode(fp->filp), &stat); file_info = (struct smb2_file_comp_info *)rsp->Buffer; file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9); file_info->CompressionFormat = COMPRESSION_FORMAT_NONE; file_info->CompressionUnitShift = 0; file_info->ChunkShift = 0; file_info->ClusterShift = 0; memset(&file_info->Reserved[0], 0, 3); rsp->OutputBufferLength = cpu_to_le32(sizeof(struct smb2_file_comp_info)); } static int get_file_attribute_tag_info(struct 
smb2_query_info_rsp *rsp,
					   struct ksmbd_file *fp,
					   void *rsp_org)
{
	struct smb2_file_attr_tag_info *file_info;

	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
		pr_err("no right to read the attributes : 0x%x\n",
		       fp->daccess);
		return -EACCES;
	}

	file_info = (struct smb2_file_attr_tag_info *)rsp->Buffer;
	file_info->FileAttributes = fp->f_ci->m_fattr;
	file_info->ReparseTag = 0;
	rsp->OutputBufferLength =
		cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
	return 0;
}

static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
				 struct ksmbd_file *fp, void *rsp_org)
{
	struct smb311_posix_qinfo *file_info;
	struct inode *inode = file_inode(fp->filp);
	struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
	u64 time;
	int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;

	file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
	file_info->CreationTime = cpu_to_le64(fp->create_time);
	time = ksmbd_UnixTimeToNT(inode->i_atime);
	file_info->LastAccessTime = cpu_to_le64(time);
	time = ksmbd_UnixTimeToNT(inode->i_mtime);
	file_info->LastWriteTime = cpu_to_le64(time);
	time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
	file_info->ChangeTime = cpu_to_le64(time);
	file_info->DosAttributes = fp->f_ci->m_fattr;
	file_info->Inode = cpu_to_le64(inode->i_ino);
	file_info->EndOfFile = cpu_to_le64(inode->i_size);
	file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
	file_info->HardLinks = cpu_to_le32(inode->i_nlink);
	file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
	file_info->DeviceId = cpu_to_le32(inode->i_rdev);

	/*
	 * Sids(32) contain two sids(Domain sid(16), UNIX group sid(16)).
	 * UNIX sid(16) = revision(1) + num_subauth(1) + authority(6) +
	 * sub_auth(4 * 1(num_subauth)) + RID(4).
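	 * Sids[0..15] carries the owner SID (SIDUNIX_USER) and Sids[16..31]
	 * the group SID (SIDUNIX_GROUP), both written by id_to_sid() below.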
 */
	id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)),
		  SIDUNIX_USER, (struct smb_sid *)&file_info->Sids[0]);
	id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)),
		  SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);

	rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
}

static int smb2_get_info_file(struct ksmbd_work *work,
			      struct smb2_query_info_req *req,
			      struct smb2_query_info_rsp *rsp)
{
	struct ksmbd_file *fp;
	int fileinfoclass = 0;
	int rc = 0;
	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;

	if (test_share_config_flag(work->tcon->share_conf,
				   KSMBD_SHARE_FLAG_PIPE)) {
		/* smb2 info file called for pipe */
		return smb2_get_info_file_pipe(work->sess, req, rsp,
					       work->response_buf);
	}

	if (work->next_smb2_rcv_hdr_off) {
		if (!has_file_id(req->VolatileFileId)) {
			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
				    work->compound_fid);
			id = work->compound_fid;
			pid = work->compound_pfid;
		}
	}

	if (!has_file_id(id)) {
		id = req->VolatileFileId;
		pid = req->PersistentFileId;
	}

	fp = ksmbd_lookup_fd_slow(work, id, pid);
	if (!fp)
		return -ENOENT;

	fileinfoclass = req->FileInfoClass;

	switch (fileinfoclass) {
	case FILE_ACCESS_INFORMATION:
		get_file_access_info(rsp, fp, work->response_buf);
		break;
	case FILE_BASIC_INFORMATION:
		rc = get_file_basic_info(rsp, fp, work->response_buf);
		break;
	case FILE_STANDARD_INFORMATION:
		get_file_standard_info(rsp, fp, work->response_buf);
		break;
	case FILE_ALIGNMENT_INFORMATION:
		get_file_alignment_info(rsp, work->response_buf);
		break;
	case FILE_ALL_INFORMATION:
		rc = get_file_all_info(work, rsp, fp, work->response_buf);
		break;
	case FILE_ALTERNATE_NAME_INFORMATION:
		get_file_alternate_info(work, rsp, fp, work->response_buf);
		break;
	case FILE_STREAM_INFORMATION:
		get_file_stream_info(work, rsp, fp, work->response_buf);
		break;
	case FILE_INTERNAL_INFORMATION:
		get_file_internal_info(rsp, fp, work->response_buf);
		break;
	case FILE_NETWORK_OPEN_INFORMATION:
		rc = get_file_network_open_info(rsp, fp, work->response_buf);
		break;
	case FILE_EA_INFORMATION:
		get_file_ea_info(rsp, work->response_buf);
		break;
	case FILE_FULL_EA_INFORMATION:
		rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
		break;
	case FILE_POSITION_INFORMATION:
		get_file_position_info(rsp, fp, work->response_buf);
		break;
	case FILE_MODE_INFORMATION:
		get_file_mode_info(rsp, fp, work->response_buf);
		break;
	case FILE_COMPRESSION_INFORMATION:
		get_file_compression_info(rsp, fp, work->response_buf);
		break;
	case FILE_ATTRIBUTE_TAG_INFORMATION:
		rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		if (!work->tcon->posix_extensions) {
			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
			rc = -EOPNOTSUPP;
		} else {
			find_file_posix_info(rsp, fp, work->response_buf);
		}
		break;
	default:
		ksmbd_debug(SMB, "fileinfoclass %d not supported yet\n",
			    fileinfoclass);
		rc = -EOPNOTSUPP;
	}
	if (!rc)
		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
				      rsp, work->response_buf);
	ksmbd_fd_put(work, fp);
	return rc;
}

static int smb2_get_info_filesystem(struct ksmbd_work *work,
				    struct smb2_query_info_req *req,
				    struct smb2_query_info_rsp *rsp)
{
	struct ksmbd_session *sess = work->sess;
	struct ksmbd_conn *conn = work->conn;
	struct ksmbd_share_config *share = work->tcon->share_conf;
	int fsinfoclass = 0;
	struct kstatfs stfs;
	struct path path;
	int rc = 0, len;

	if (!share->path)
		return -EIO;

	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
	if (rc) {
		pr_err("cannot create vfs path\n");
		return -EIO;
	}

	rc = vfs_statfs(&path, &stfs);
	if (rc) {
		pr_err("cannot do stat of path %s\n", share->path);
		path_put(&path);
		return -EIO;
	}

	fsinfoclass = req->FileInfoClass;

	switch (fsinfoclass) {
	case FS_DEVICE_INFORMATION:
	{
		struct filesystem_device_info *info;

		info = (struct filesystem_device_info *)rsp->Buffer;

		info->DeviceType = cpu_to_le32(stfs.f_type);
		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
		rsp->OutputBufferLength = cpu_to_le32(8);
		break;
	}
	case FS_ATTRIBUTE_INFORMATION:
	{
		struct filesystem_attribute_info *info;
		size_t sz;

		info = (struct filesystem_attribute_info *)rsp->Buffer;
		info->Attributes = cpu_to_le32(FILE_SUPPORTS_OBJECT_IDS |
					       FILE_PERSISTENT_ACLS |
					       FILE_UNICODE_ON_DISK |
					       FILE_CASE_PRESERVED_NAMES |
					       FILE_CASE_SENSITIVE_SEARCH |
					       FILE_SUPPORTS_BLOCK_REFCOUNTING);

		info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);

		if (test_share_config_flag(work->tcon->share_conf,
					   KSMBD_SHARE_FLAG_STREAMS))
			info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);

		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
		len = smbConvertToUTF16((__le16 *)info->FileSystemName,
					"NTFS", PATH_MAX, conn->local_nls, 0);
		len = len * 2;
		info->FileSystemNameLen = cpu_to_le32(len);
		sz = sizeof(struct filesystem_attribute_info) - 2 + len;
		rsp->OutputBufferLength = cpu_to_le32(sz);
		break;
	}
	case FS_VOLUME_INFORMATION:
	{
		struct filesystem_vol_info *info;
		size_t sz;
		unsigned int serial_crc = 0;

		info = (struct filesystem_vol_info *)(rsp->Buffer);
		info->VolumeCreationTime = 0;
		serial_crc = crc32_le(serial_crc, share->name,
				      strlen(share->name));
		serial_crc = crc32_le(serial_crc, share->path,
				      strlen(share->path));
		serial_crc = crc32_le(serial_crc, ksmbd_netbios_name(),
				      strlen(ksmbd_netbios_name()));
		/* Taking dummy value of serial number*/
		info->SerialNumber = cpu_to_le32(serial_crc);
		len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
					share->name, PATH_MAX,
					conn->local_nls, 0);
		len = len * 2;
		info->VolumeLabelSize = cpu_to_le32(len);
		info->Reserved = 0;
		sz = sizeof(struct filesystem_vol_info) - 2 + len;
		rsp->OutputBufferLength = cpu_to_le32(sz);
		break;
	}
	case FS_SIZE_INFORMATION:
	{
		struct filesystem_info *info;

		info = (struct filesystem_info *)(rsp->Buffer);
		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
		info->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
		info->SectorsPerAllocationUnit = cpu_to_le32(1);
		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
		rsp->OutputBufferLength = cpu_to_le32(24);
		break;
	}
	case FS_FULL_SIZE_INFORMATION:
	{
		struct smb2_fs_full_size_info *info;

		info = (struct smb2_fs_full_size_info *)(rsp->Buffer);
		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
		info->CallerAvailableAllocationUnits =
					cpu_to_le64(stfs.f_bavail);
		info->ActualAvailableAllocationUnits =
					cpu_to_le64(stfs.f_bfree);
		info->SectorsPerAllocationUnit = cpu_to_le32(1);
		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
		rsp->OutputBufferLength = cpu_to_le32(32);
		break;
	}
	case FS_OBJECT_ID_INFORMATION:
	{
		struct object_id_info *info;

		info = (struct object_id_info *)(rsp->Buffer);

		if (!user_guest(sess->user))
			memcpy(info->objid, user_passkey(sess->user), 16);
		else
			memset(info->objid, 0, 16);

		info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
		info->extended_info.version = cpu_to_le32(1);
		info->extended_info.release = cpu_to_le32(1);
		info->extended_info.rel_date = 0;
		memcpy(info->extended_info.version_string, "1.1.0",
		       strlen("1.1.0"));
		rsp->OutputBufferLength = cpu_to_le32(64);
		break;
	}
	case FS_SECTOR_SIZE_INFORMATION:
	{
		struct smb3_fs_ss_info *info;
		unsigned int sector_size =
			min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);

		info =
(struct smb3_fs_ss_info *)(rsp->Buffer); info->LogicalBytesPerSector = cpu_to_le32(sector_size); info->PhysicalBytesPerSectorForAtomicity = cpu_to_le32(sector_size); info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size); info->FSEffPhysicalBytesPerSectorForAtomicity = cpu_to_le32(sector_size); info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE | SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE); info->ByteOffsetForSectorAlignment = 0; info->ByteOffsetForPartitionAlignment = 0; rsp->OutputBufferLength = cpu_to_le32(28); break; } case FS_CONTROL_INFORMATION: { /* * TODO : The current implementation is based on * test result with win7(NTFS) server. It's need to * modify this to get valid Quota values * from Linux kernel */ struct smb2_fs_control_info *info; info = (struct smb2_fs_control_info *)(rsp->Buffer); info->FreeSpaceStartFiltering = 0; info->FreeSpaceThreshold = 0; info->FreeSpaceStopFiltering = 0; info->DefaultQuotaThreshold = cpu_to_le64(SMB2_NO_FID); info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID); info->Padding = 0; rsp->OutputBufferLength = cpu_to_le32(48); break; } case FS_POSIX_INFORMATION: { struct filesystem_posix_info *info; if (!work->tcon->posix_extensions) { pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n"); rc = -EOPNOTSUPP; } else { info = (struct filesystem_posix_info *)(rsp->Buffer); info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize); info->BlockSize = cpu_to_le32(stfs.f_bsize); info->TotalBlocks = cpu_to_le64(stfs.f_blocks); info->BlocksAvail = cpu_to_le64(stfs.f_bfree); info->UserBlocksAvail = cpu_to_le64(stfs.f_bavail); info->TotalFileNodes = cpu_to_le64(stfs.f_files); info->FreeFileNodes = cpu_to_le64(stfs.f_ffree); rsp->OutputBufferLength = cpu_to_le32(56); } break; } default: path_put(&path); return -EOPNOTSUPP; } rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), rsp, work->response_buf); path_put(&path); return rc; } static int smb2_get_info_sec(struct ksmbd_work *work, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp) { struct ksmbd_file *fp; struct mnt_idmap *idmap; struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL; struct smb_fattr fattr = {{0}}; struct inode *inode; __u32 secdesclen = 0; unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID; int addition_info = le32_to_cpu(req->AdditionalInformation); int rc = 0, ppntsd_size = 0; if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | PROTECTED_DACL_SECINFO | UNPROTECTED_DACL_SECINFO)) { ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n", addition_info); pntsd->revision = cpu_to_le16(1); pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PROTECTED); pntsd->osidoffset = 0; pntsd->gsidoffset = 0; pntsd->sacloffset = 0; pntsd->dacloffset = 0; secdesclen = sizeof(struct smb_ntsd); rsp->OutputBufferLength = cpu_to_le32(secdesclen); return 0; } if (work->next_smb2_rcv_hdr_off) { if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; pid = work->compound_pfid; } } if (!has_file_id(id)) { id = req->VolatileFileId; pid = req->PersistentFileId; } fp = ksmbd_lookup_fd_slow(work, id, pid); if (!fp) return -ENOENT; idmap = file_mnt_idmap(fp->filp); inode = file_inode(fp->filp); ksmbd_acls_fattr(&fattr, idmap, inode); if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn, idmap, fp->filp->f_path.dentry, &ppntsd); /* Check if sd buffer size exceeds response buffer size */ if 
(smb2_resp_buf_len(work, 8) > ppntsd_size) rc = build_sec_desc(idmap, pntsd, ppntsd, ppntsd_size, addition_info, &secdesclen, &fattr); posix_acl_release(fattr.cf_acls); posix_acl_release(fattr.cf_dacls); kfree(ppntsd); ksmbd_fd_put(work, fp); if (rc) return rc; rsp->OutputBufferLength = cpu_to_le32(secdesclen); return 0; } /** * smb2_query_info() - handler for smb2 query info command * @work: smb work containing query info request buffer * * Return: 0 on success, otherwise error */ int smb2_query_info(struct ksmbd_work *work) { struct smb2_query_info_req *req; struct smb2_query_info_rsp *rsp; int rc = 0; WORK_BUFFERS(work, req, rsp); ksmbd_debug(SMB, "GOT query info request\n"); switch (req->InfoType) { case SMB2_O_INFO_FILE: ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); rc = smb2_get_info_file(work, req, rsp); break; case SMB2_O_INFO_FILESYSTEM: ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n"); rc = smb2_get_info_filesystem(work, req, rsp); break; case SMB2_O_INFO_SECURITY: ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n"); rc = smb2_get_info_sec(work, req, rsp); break; default: ksmbd_debug(SMB, "InfoType %d not supported yet\n", req->InfoType); rc = -EOPNOTSUPP; } if (!rc) { rsp->StructureSize = cpu_to_le16(9); rsp->OutputBufferOffset = cpu_to_le16(72); rc = ksmbd_iov_pin_rsp(work, (void *)rsp, offsetof(struct smb2_query_info_rsp, Buffer) + le32_to_cpu(rsp->OutputBufferLength)); } if (rc < 0) { if (rc == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (rc == -ENOENT) rsp->hdr.Status = STATUS_FILE_CLOSED; else if (rc == -EIO) rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; else if (rc == -ENOMEM) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0) rsp->hdr.Status = STATUS_INVALID_INFO_CLASS; smb2_set_err_rsp(work); ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n", rc); return rc; } return 0; } /** * smb2_close_pipe() - handler for closing IPC pipe * @work: smb work containing close request buffer * * Return: 0 */ static noinline int smb2_close_pipe(struct ksmbd_work *work) { u64 id; struct smb2_close_req *req; struct smb2_close_rsp *rsp; WORK_BUFFERS(work, req, rsp); id = req->VolatileFileId; ksmbd_session_rpc_close(work->sess, id); rsp->StructureSize = cpu_to_le16(60); rsp->Flags = 0; rsp->Reserved = 0; rsp->CreationTime = 0; rsp->LastAccessTime = 0; rsp->LastWriteTime = 0; rsp->ChangeTime = 0; rsp->AllocationSize = 0; rsp->EndOfFile = 0; rsp->Attributes = 0; return ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_close_rsp)); } /** * smb2_close() - handler for smb2 close file command * @work: smb work containing close request buffer * * Return: 0 */ int smb2_close(struct ksmbd_work *work) { u64 volatile_id = KSMBD_NO_FID; u64 sess_id; struct smb2_close_req *req; struct smb2_close_rsp *rsp; struct ksmbd_conn *conn = work->conn; struct ksmbd_file *fp; struct inode *inode; u64 time; int err = 0; WORK_BUFFERS(work, req, rsp); if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) { ksmbd_debug(SMB, "IPC pipe close request\n"); return smb2_close_pipe(work); } sess_id = le64_to_cpu(req->hdr.SessionId); if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS) sess_id = work->compound_sid; work->compound_sid = 0; if (check_session_id(conn, sess_id)) { work->compound_sid = sess_id; } else { rsp->hdr.Status = STATUS_USER_SESSION_DELETED; if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS) rsp->hdr.Status = STATUS_INVALID_PARAMETER; err = -EBADF; goto out; } if (work->next_smb2_rcv_hdr_off && 
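	    /* compound request without an explicit FID: reuse the FID cached from the previous command in the chain */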
!has_file_id(req->VolatileFileId)) { if (!has_file_id(work->compound_fid)) { /* file already closed, return FILE_CLOSED */ ksmbd_debug(SMB, "file already closed\n"); rsp->hdr.Status = STATUS_FILE_CLOSED; err = -EBADF; goto out; } else { ksmbd_debug(SMB, "Compound request set FID = %llu:%llu\n", work->compound_fid, work->compound_pfid); volatile_id = work->compound_fid; /* file closed, stored id is not valid anymore */ work->compound_fid = KSMBD_NO_FID; work->compound_pfid = KSMBD_NO_FID; } } else { volatile_id = req->VolatileFileId; } ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id); rsp->StructureSize = cpu_to_le16(60); rsp->Reserved = 0; if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) { fp = ksmbd_lookup_fd_fast(work, volatile_id); if (!fp) { err = -ENOENT; goto out; } inode = file_inode(fp->filp); rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 : cpu_to_le64(inode->i_blocks << 9); rsp->EndOfFile = cpu_to_le64(inode->i_size); rsp->Attributes = fp->f_ci->m_fattr; rsp->CreationTime = cpu_to_le64(fp->create_time); time = ksmbd_UnixTimeToNT(inode->i_atime); rsp->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(inode->i_mtime); rsp->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(inode_get_ctime(inode)); rsp->ChangeTime = cpu_to_le64(time); ksmbd_fd_put(work, fp); } else { rsp->Flags = 0; rsp->AllocationSize = 0; rsp->EndOfFile = 0; rsp->Attributes = 0; rsp->CreationTime = 0; rsp->LastAccessTime = 0; rsp->LastWriteTime = 0; rsp->ChangeTime = 0; } err = ksmbd_close_fd(work, volatile_id); out: if (!err) err = ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_close_rsp)); if (err) { if (rsp->hdr.Status == 0) rsp->hdr.Status = STATUS_FILE_CLOSED; smb2_set_err_rsp(work); } return err; } /** * smb2_echo() - handler for smb2 echo(ping) command * @work: smb work containing echo request buffer * * Return: 0 */ int smb2_echo(struct ksmbd_work *work) { struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf); if (work->next_smb2_rcv_hdr_off) rsp = ksmbd_resp_buf_next(work); rsp->StructureSize = cpu_to_le16(4); rsp->Reserved = 0; return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_echo_rsp)); } static int smb2_rename(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_rename_info *file_info, struct nls_table *local_nls) { struct ksmbd_share_config *share = fp->tcon->share_conf; char *new_name = NULL; int rc, flags = 0; ksmbd_debug(SMB, "setting FILE_RENAME_INFO\n"); new_name = smb2_get_name(file_info->FileName, le32_to_cpu(file_info->FileNameLength), local_nls); if (IS_ERR(new_name)) return PTR_ERR(new_name); if (strchr(new_name, ':')) { int s_type; char *xattr_stream_name, *stream_name = NULL; size_t xattr_stream_size; int len; rc = parse_stream_name(new_name, &stream_name, &s_type); if (rc < 0) goto out; len = strlen(new_name); if (len > 0 && new_name[len - 1] != '/') { pr_err("not allow base filename in rename\n"); rc = -ESHARE; goto out; } rc = ksmbd_vfs_xattr_stream_name(stream_name, &xattr_stream_name, &xattr_stream_size, s_type); if (rc) goto out; rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp), &fp->filp->f_path, xattr_stream_name, NULL, 0, 0); if (rc < 0) { pr_err("failed to store stream name in xattr: %d\n", rc); rc = -EINVAL; goto out; } goto out; } ksmbd_debug(SMB, "new name %s\n", new_name); if (ksmbd_share_veto_filename(share, new_name)) { rc = -ENOENT; ksmbd_debug(SMB, "Can't rename vetoed file: %s\n", new_name); goto out; } if (!file_info->ReplaceIfExists) flags = 
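		/* without ReplaceIfExists, make the rename fail with -EEXIST instead of replacing an existing target */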
		RENAME_NOREPLACE;

	rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
out:
	kfree(new_name);
	return rc;
}

static int smb2_create_link(struct ksmbd_work *work,
			    struct ksmbd_share_config *share,
			    struct smb2_file_link_info *file_info,
			    unsigned int buf_len, struct file *filp,
			    struct nls_table *local_nls)
{
	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
	struct path path, parent_path;
	bool file_present = false;
	int rc;

	if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
			le32_to_cpu(file_info->FileNameLength))
		return -EINVAL;

	ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathname)
		return -ENOMEM;

	link_name = smb2_get_name(file_info->FileName,
				  le32_to_cpu(file_info->FileNameLength),
				  local_nls);
	if (IS_ERR(link_name) || S_ISDIR(file_inode(filp)->i_mode)) {
		rc = -EINVAL;
		goto out;
	}

	ksmbd_debug(SMB, "link name is %s\n", link_name);
	target_name = file_path(filp, pathname, PATH_MAX);
	if (IS_ERR(target_name)) {
		rc = -EINVAL;
		goto out;
	}

	ksmbd_debug(SMB, "target name is %s\n", target_name);
	rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
					&parent_path, &path, 0);
	if (rc) {
		if (rc != -ENOENT)
			goto out;
	} else
		file_present = true;

	if (file_info->ReplaceIfExists) {
		if (file_present) {
			rc = ksmbd_vfs_remove_file(work, &path);
			if (rc) {
				rc = -EINVAL;
				ksmbd_debug(SMB, "cannot delete %s\n",
					    link_name);
				goto out;
			}
		}
	} else {
		if (file_present) {
			rc = -EEXIST;
			ksmbd_debug(SMB, "link already exists\n");
			goto out;
		}
	}

	rc = ksmbd_vfs_link(work, target_name, link_name);
	if (rc)
		rc = -EINVAL;
out:
	if (file_present) {
		inode_unlock(d_inode(parent_path.dentry));
		path_put(&path);
		path_put(&parent_path);
	}

	if (!IS_ERR(link_name))
		kfree(link_name);
	kfree(pathname);
	return rc;
}

static int set_file_basic_info(struct ksmbd_file *fp,
			       struct smb2_file_basic_info *file_info,
			       struct ksmbd_share_config *share)
{
	struct iattr attrs;
	struct file *filp;
	struct inode *inode;
	struct mnt_idmap *idmap;
	int rc = 0;

	if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
		return -EACCES;

	attrs.ia_valid = 0;
	filp = fp->filp;
	inode = file_inode(filp);
	idmap = file_mnt_idmap(filp);

	if (file_info->CreationTime)
		fp->create_time = le64_to_cpu(file_info->CreationTime);

	if (file_info->LastAccessTime) {
		attrs.ia_atime = ksmbd_NTtimeToUnix(file_info->LastAccessTime);
		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
	}

	attrs.ia_valid |= ATTR_CTIME;
	if (file_info->ChangeTime)
		attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
	else
		attrs.ia_ctime = inode_get_ctime(inode);

	if (file_info->LastWriteTime) {
		attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
	}

	if (file_info->Attributes) {
		if (!S_ISDIR(inode->i_mode) &&
		    file_info->Attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
			pr_err("can't change a file to a directory\n");
			return -EINVAL;
		}

		if (!(S_ISDIR(inode->i_mode) &&
		      file_info->Attributes == FILE_ATTRIBUTE_NORMAL_LE))
			fp->f_ci->m_fattr = file_info->Attributes |
				(fp->f_ci->m_fattr &
				 FILE_ATTRIBUTE_DIRECTORY_LE);
	}

	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
	    (file_info->CreationTime || file_info->Attributes)) {
		struct xattr_dos_attrib da = {0};

		da.version = 4;
		da.itime = fp->itime;
		da.create_time = fp->create_time;
		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
		da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
			XATTR_DOSINFO_ITIME;

		rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
		if (rc)
			ksmbd_debug(SMB,
				    "failed to restore file attribute in EA\n");
		rc = 0;
	}

	if (attrs.ia_valid) {
		struct dentry *dentry = filp->f_path.dentry;
		struct inode *inode = d_inode(dentry);

		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
			return -EACCES;

		inode_lock(inode);
		inode_set_ctime_to_ts(inode, attrs.ia_ctime);
		attrs.ia_valid &= ~ATTR_CTIME;
		rc = notify_change(idmap, dentry, &attrs, NULL);
		inode_unlock(inode);
	}
	return rc;
}

static int set_file_allocation_info(struct ksmbd_work *work,
				    struct ksmbd_file *fp,
				    struct smb2_file_alloc_info *file_alloc_info)
{
	/*
	 * TODO: this currently works correctly only when the
	 * "store dos attributes" option is not set to yes; implement
	 * logic that behaves properly with any smb.conf option.
	 */
	loff_t alloc_blks;
	struct inode *inode;
	int rc;

	if (!(fp->daccess & FILE_WRITE_DATA_LE))
		return -EACCES;

	alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
	inode = file_inode(fp->filp);

	if (alloc_blks > inode->i_blocks) {
		smb_break_all_levII_oplock(work, fp, 1);
		rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
				   alloc_blks * 512);
		if (rc && rc != -EOPNOTSUPP) {
			pr_err("vfs_fallocate is failed : %d\n", rc);
			return rc;
		}
	} else if (alloc_blks < inode->i_blocks) {
		loff_t size;

		/*
		 * The requested allocation size can be smaller than the
		 * current one, in which case allocated blocks have to be
		 * deallocated. Truncate is used to cut them off, but since
		 * truncate also updates the inode size, save the size
		 * beforehand and restore it afterwards.
		 */
		size = i_size_read(inode);
		rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
		if (rc) {
			pr_err("truncate failed!, err %d\n", rc);
			return rc;
		}
		if (size < alloc_blks * 512)
			i_size_write(inode, size);
	}
	return 0;
}

static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
				struct smb2_file_eof_info *file_eof_info)
{
	loff_t newsize;
	struct inode *inode;
	int rc;

	if (!(fp->daccess & FILE_WRITE_DATA_LE))
		return -EACCES;

	newsize = le64_to_cpu(file_eof_info->EndOfFile);
	inode = file_inode(fp->filp);

	/*
	 * If FILE_END_OF_FILE_INFORMATION is used on a shared FAT32
	 * device, truncation can take so long that the Windows client
	 * runs into a network error, because filesystems like FAT32
	 * zero-fill the truncated range.
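	 * The truncate below is therefore skipped when s_magic is
	 * MSDOS_SUPER_MAGIC.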
*/ if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) { ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize); rc = ksmbd_vfs_truncate(work, fp, newsize); if (rc) { ksmbd_debug(SMB, "truncate failed!, err %d\n", rc); if (rc != -EAGAIN) rc = -EBADF; return rc; } } return 0; } static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_rename_info *rename_info, unsigned int buf_len) { if (!(fp->daccess & FILE_DELETE_LE)) { pr_err("no right to delete : 0x%x\n", fp->daccess); return -EACCES; } if (buf_len < (u64)sizeof(struct smb2_file_rename_info) + le32_to_cpu(rename_info->FileNameLength)) return -EINVAL; if (!le32_to_cpu(rename_info->FileNameLength)) return -EINVAL; return smb2_rename(work, fp, rename_info, work->conn->local_nls); } static int set_file_disposition_info(struct ksmbd_file *fp, struct smb2_file_disposition_info *file_info) { struct inode *inode; if (!(fp->daccess & FILE_DELETE_LE)) { pr_err("no right to delete : 0x%x\n", fp->daccess); return -EACCES; } inode = file_inode(fp->filp); if (file_info->DeletePending) { if (S_ISDIR(inode->i_mode) && ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY) return -EBUSY; ksmbd_set_inode_pending_delete(fp); } else { ksmbd_clear_inode_pending_delete(fp); } return 0; } static int set_file_position_info(struct ksmbd_file *fp, struct smb2_file_pos_info *file_info) { loff_t current_byte_offset; unsigned long sector_size; struct inode *inode; inode = file_inode(fp->filp); current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset); sector_size = inode->i_sb->s_blocksize; if (current_byte_offset < 0 || (fp->coption == FILE_NO_INTERMEDIATE_BUFFERING_LE && current_byte_offset & (sector_size - 1))) { pr_err("CurrentByteOffset is not valid : %llu\n", current_byte_offset); return -EINVAL; } fp->filp->f_pos = current_byte_offset; return 0; } static int set_file_mode_info(struct ksmbd_file *fp, struct smb2_file_mode_info *file_info) { __le32 mode; mode = file_info->Mode; if ((mode & ~FILE_MODE_INFO_MASK)) { pr_err("Mode is not valid : 0x%x\n", le32_to_cpu(mode)); return -EINVAL; } /* * TODO : need to implement consideration for * FILE_SYNCHRONOUS_IO_ALERT and FILE_SYNCHRONOUS_IO_NONALERT */ ksmbd_vfs_set_fadvise(fp->filp, mode); fp->coption = mode; return 0; } /** * smb2_set_info_file() - handler for smb2 set info command * @work: smb work containing set info command buffer * @fp: ksmbd_file pointer * @req: request buffer pointer * @share: ksmbd_share_config pointer * * Return: 0 on success, otherwise error * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH */ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_set_info_req *req, struct ksmbd_share_config *share) { unsigned int buf_len = le32_to_cpu(req->BufferLength); switch (req->FileInfoClass) { case FILE_BASIC_INFORMATION: { if (buf_len < sizeof(struct smb2_file_basic_info)) return -EINVAL; return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share); } case FILE_ALLOCATION_INFORMATION: { if (buf_len < sizeof(struct smb2_file_alloc_info)) return -EINVAL; return set_file_allocation_info(work, fp, (struct smb2_file_alloc_info *)req->Buffer); } case FILE_END_OF_FILE_INFORMATION: { if (buf_len < sizeof(struct smb2_file_eof_info)) return -EINVAL; return set_end_of_file_info(work, fp, (struct smb2_file_eof_info *)req->Buffer); } case FILE_RENAME_INFORMATION: { if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); return 
-EACCES; } if (buf_len < sizeof(struct smb2_file_rename_info)) return -EINVAL; return set_rename_info(work, fp, (struct smb2_file_rename_info *)req->Buffer, buf_len); } case FILE_LINK_INFORMATION: { if (buf_len < sizeof(struct smb2_file_link_info)) return -EINVAL; return smb2_create_link(work, work->tcon->share_conf, (struct smb2_file_link_info *)req->Buffer, buf_len, fp->filp, work->conn->local_nls); } case FILE_DISPOSITION_INFORMATION: { if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); return -EACCES; } if (buf_len < sizeof(struct smb2_file_disposition_info)) return -EINVAL; return set_file_disposition_info(fp, (struct smb2_file_disposition_info *)req->Buffer); } case FILE_FULL_EA_INFORMATION: { if (!(fp->daccess & FILE_WRITE_EA_LE)) { pr_err("Not permitted to write ext attr: 0x%x\n", fp->daccess); return -EACCES; } if (buf_len < sizeof(struct smb2_ea_info)) return -EINVAL; return smb2_set_ea((struct smb2_ea_info *)req->Buffer, buf_len, &fp->filp->f_path); } case FILE_POSITION_INFORMATION: { if (buf_len < sizeof(struct smb2_file_pos_info)) return -EINVAL; return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer); } case FILE_MODE_INFORMATION: { if (buf_len < sizeof(struct smb2_file_mode_info)) return -EINVAL; return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer); } } pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass); return -EOPNOTSUPP; } static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info, char *buffer, int buf_len) { struct smb_ntsd *pntsd = (struct smb_ntsd *)buffer; fp->saccess |= FILE_SHARE_DELETE_LE; return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd, buf_len, false); } /** * smb2_set_info() - handler for smb2 set info command handler * @work: smb work containing set info request buffer * * Return: 0 on success, otherwise error */ int smb2_set_info(struct ksmbd_work *work) { struct smb2_set_info_req *req; struct smb2_set_info_rsp *rsp; struct ksmbd_file *fp; int rc = 0; unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID; ksmbd_debug(SMB, "Received set info request\n"); if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); rsp = ksmbd_resp_buf_next(work); if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; pid = work->compound_pfid; } } else { req = smb2_get_msg(work->request_buf); rsp = smb2_get_msg(work->response_buf); } if (!has_file_id(id)) { id = req->VolatileFileId; pid = req->PersistentFileId; } fp = ksmbd_lookup_fd_slow(work, id, pid); if (!fp) { ksmbd_debug(SMB, "Invalid id for close: %u\n", id); rc = -ENOENT; goto err_out; } switch (req->InfoType) { case SMB2_O_INFO_FILE: ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf); break; case SMB2_O_INFO_SECURITY: ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n"); if (ksmbd_override_fsids(work)) { rc = -ENOMEM; goto err_out; } rc = smb2_set_info_sec(fp, le32_to_cpu(req->AdditionalInformation), req->Buffer, le32_to_cpu(req->BufferLength)); ksmbd_revert_fsids(work); break; default: rc = -EOPNOTSUPP; } if (rc < 0) goto err_out; rsp->StructureSize = cpu_to_le16(2); rc = ksmbd_iov_pin_rsp(work, (void *)rsp, sizeof(struct smb2_set_info_rsp)); if (rc) goto err_out; ksmbd_fd_put(work, fp); return 0; err_out: if (rc == -EACCES || rc == -EPERM || rc == -EXDEV) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (rc == -EINVAL) 
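	/* translate the errno from the set-info handlers into the NTSTATUS returned to the client */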
rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (rc == -ESHARE) rsp->hdr.Status = STATUS_SHARING_VIOLATION; else if (rc == -ENOENT) rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID; else if (rc == -EBUSY || rc == -ENOTEMPTY) rsp->hdr.Status = STATUS_DIRECTORY_NOT_EMPTY; else if (rc == -EAGAIN) rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT; else if (rc == -EBADF || rc == -ESTALE) rsp->hdr.Status = STATUS_INVALID_HANDLE; else if (rc == -EEXIST) rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION; else if (rsp->hdr.Status == 0 || rc == -EOPNOTSUPP) rsp->hdr.Status = STATUS_INVALID_INFO_CLASS; smb2_set_err_rsp(work); ksmbd_fd_put(work, fp); ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n", rc); return rc; } /** * smb2_read_pipe() - handler for smb2 read from IPC pipe * @work: smb work containing read IPC pipe command buffer * * Return: 0 on success, otherwise error */ static noinline int smb2_read_pipe(struct ksmbd_work *work) { int nbytes = 0, err; u64 id; struct ksmbd_rpc_command *rpc_resp; struct smb2_read_req *req; struct smb2_read_rsp *rsp; WORK_BUFFERS(work, req, rsp); id = req->VolatileFileId; rpc_resp = ksmbd_rpc_read(work->sess, id); if (rpc_resp) { void *aux_payload_buf; if (rpc_resp->flags != KSMBD_RPC_OK) { err = -EINVAL; goto out; } aux_payload_buf = kvmalloc(rpc_resp->payload_sz, GFP_KERNEL); if (!aux_payload_buf) { err = -ENOMEM; goto out; } memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz); nbytes = rpc_resp->payload_sz; kvfree(rpc_resp); err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, offsetof(struct smb2_read_rsp, Buffer), aux_payload_buf, nbytes); if (err) goto out; } else { err = ksmbd_iov_pin_rsp(work, (void *)rsp, offsetof(struct smb2_read_rsp, Buffer)); if (err) goto out; } rsp->StructureSize = cpu_to_le16(17); rsp->DataOffset = 80; rsp->Reserved = 0; rsp->DataLength = cpu_to_le32(nbytes); rsp->DataRemaining = 0; rsp->Flags = 0; return 0; out: rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; smb2_set_err_rsp(work); kvfree(rpc_resp); return err; } static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work, struct smb2_buffer_desc_v1 *desc, __le32 Channel, __le16 ChannelInfoLength) { unsigned int i, ch_count; if (work->conn->dialect == SMB30_PROT_ID && Channel != SMB2_CHANNEL_RDMA_V1) return -EINVAL; ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc); if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) { for (i = 0; i < ch_count; i++) { pr_info("RDMA r/w request %#x: token %#x, length %#x\n", i, le32_to_cpu(desc[i].token), le32_to_cpu(desc[i].length)); } } if (!ch_count) return -EINVAL; work->need_invalidate_rkey = (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE); if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) work->remote_key = le32_to_cpu(desc->token); return 0; } static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work, struct smb2_read_req *req, void *data_buf, size_t length) { int err; err = ksmbd_conn_rdma_write(work->conn, data_buf, length, (struct smb2_buffer_desc_v1 *) ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset)), le16_to_cpu(req->ReadChannelInfoLength)); if (err) return err; return length; } /** * smb2_read() - handler for smb2 read from file * @work: smb work containing read command buffer * * Return: 0 on success, otherwise error */ int smb2_read(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_read_req *req; struct smb2_read_rsp *rsp; struct ksmbd_file *fp = NULL; loff_t offset; size_t length, mincount; ssize_t nbytes = 0, remain_bytes = 0; int err = 0; bool is_rdma_channel = false; unsigned int 
max_read_size = conn->vals->max_read_size; unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID; void *aux_payload_buf; if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) { ksmbd_debug(SMB, "IPC pipe read request\n"); return smb2_read_pipe(work); } if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); rsp = ksmbd_resp_buf_next(work); if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; pid = work->compound_pfid; } } else { req = smb2_get_msg(work->request_buf); rsp = smb2_get_msg(work->response_buf); } if (!has_file_id(id)) { id = req->VolatileFileId; pid = req->PersistentFileId; } if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE || req->Channel == SMB2_CHANNEL_RDMA_V1) { is_rdma_channel = true; max_read_size = get_smbd_max_read_write_size(); } if (is_rdma_channel == true) { unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset); if (ch_offset < offsetof(struct smb2_read_req, Buffer)) { err = -EINVAL; goto out; } err = smb2_set_remote_key_for_rdma(work, (struct smb2_buffer_desc_v1 *) ((char *)req + ch_offset), req->Channel, req->ReadChannelInfoLength); if (err) goto out; } fp = ksmbd_lookup_fd_slow(work, id, pid); if (!fp) { err = -ENOENT; goto out; } if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_READ_ATTRIBUTES_LE))) { pr_err("Not permitted to read : 0x%x\n", fp->daccess); err = -EACCES; goto out; } offset = le64_to_cpu(req->Offset); length = le32_to_cpu(req->Length); mincount = le32_to_cpu(req->MinimumCount); if (length > max_read_size) { ksmbd_debug(SMB, "limiting read size to max size(%u)\n", max_read_size); err = -EINVAL; goto out; } ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n", fp->filp, offset, length); aux_payload_buf = kvzalloc(length, GFP_KERNEL); if (!aux_payload_buf) { err = -ENOMEM; goto out; } nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf); if (nbytes < 0) { err = nbytes; goto out; } if ((nbytes == 0 && length != 0) || nbytes < mincount) { kvfree(aux_payload_buf); rsp->hdr.Status = STATUS_END_OF_FILE; smb2_set_err_rsp(work); ksmbd_fd_put(work, fp); return 0; } ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n", nbytes, offset, mincount); if (is_rdma_channel == true) { /* write data to the client using rdma channel */ remain_bytes = smb2_read_rdma_channel(work, req, aux_payload_buf, nbytes); kvfree(aux_payload_buf); aux_payload_buf = NULL; nbytes = 0; if (remain_bytes < 0) { err = (int)remain_bytes; goto out; } } rsp->StructureSize = cpu_to_le16(17); rsp->DataOffset = 80; rsp->Reserved = 0; rsp->DataLength = cpu_to_le32(nbytes); rsp->DataRemaining = cpu_to_le32(remain_bytes); rsp->Flags = 0; err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, offsetof(struct smb2_read_rsp, Buffer), aux_payload_buf, nbytes); if (err) goto out; ksmbd_fd_put(work, fp); return 0; out: if (err) { if (err == -EISDIR) rsp->hdr.Status = STATUS_INVALID_DEVICE_REQUEST; else if (err == -EAGAIN) rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT; else if (err == -ENOENT) rsp->hdr.Status = STATUS_FILE_CLOSED; else if (err == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (err == -ESHARE) rsp->hdr.Status = STATUS_SHARING_VIOLATION; else if (err == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else rsp->hdr.Status = STATUS_INVALID_HANDLE; smb2_set_err_rsp(work); } ksmbd_fd_put(work, fp); return err; } /** * smb2_write_pipe() - handler for smb2 write on IPC pipe * @work: smb work containing write IPC pipe command buffer * * Return: 
0 on success, otherwise error */ static noinline int smb2_write_pipe(struct ksmbd_work *work) { struct smb2_write_req *req; struct smb2_write_rsp *rsp; struct ksmbd_rpc_command *rpc_resp; u64 id = 0; int err = 0, ret = 0; char *data_buf; size_t length; WORK_BUFFERS(work, req, rsp); length = le32_to_cpu(req->Length); id = req->VolatileFileId; if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(work->request_buf)) { pr_err("invalid write data offset %u, smb_len %u\n", le16_to_cpu(req->DataOffset), get_rfc1002_len(work->request_buf)); err = -EINVAL; goto out; } data_buf = (char *)(((char *)&req->hdr.ProtocolId) + le16_to_cpu(req->DataOffset)); rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length); if (rpc_resp) { if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) { rsp->hdr.Status = STATUS_NOT_SUPPORTED; kvfree(rpc_resp); smb2_set_err_rsp(work); return -EOPNOTSUPP; } if (rpc_resp->flags != KSMBD_RPC_OK) { rsp->hdr.Status = STATUS_INVALID_HANDLE; smb2_set_err_rsp(work); kvfree(rpc_resp); return ret; } kvfree(rpc_resp); } rsp->StructureSize = cpu_to_le16(17); rsp->DataOffset = 0; rsp->Reserved = 0; rsp->DataLength = cpu_to_le32(length); rsp->DataRemaining = 0; rsp->Reserved2 = 0; err = ksmbd_iov_pin_rsp(work, (void *)rsp, offsetof(struct smb2_write_rsp, Buffer)); out: if (err) { rsp->hdr.Status = STATUS_INVALID_HANDLE; smb2_set_err_rsp(work); } return err; } static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work, struct smb2_write_req *req, struct ksmbd_file *fp, loff_t offset, size_t length, bool sync) { char *data_buf; int ret; ssize_t nbytes; data_buf = kvzalloc(length, GFP_KERNEL); if (!data_buf) return -ENOMEM; ret = ksmbd_conn_rdma_read(work->conn, data_buf, length, (struct smb2_buffer_desc_v1 *) ((char *)req + le16_to_cpu(req->WriteChannelInfoOffset)), le16_to_cpu(req->WriteChannelInfoLength)); if (ret < 0) { kvfree(data_buf); return ret; } ret = ksmbd_vfs_write(work, fp, data_buf, length, &offset, sync, &nbytes); kvfree(data_buf); if (ret < 0) return ret; return nbytes; } /** * smb2_write() - handler for smb2 write from file * @work: smb work containing write command buffer * * Return: 0 on success, otherwise error */ int smb2_write(struct ksmbd_work *work) { struct smb2_write_req *req; struct smb2_write_rsp *rsp; struct ksmbd_file *fp = NULL; loff_t offset; size_t length; ssize_t nbytes; char *data_buf; bool writethrough = false, is_rdma_channel = false; int err = 0; unsigned int max_write_size = work->conn->vals->max_write_size; WORK_BUFFERS(work, req, rsp); if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) { ksmbd_debug(SMB, "IPC pipe write request\n"); return smb2_write_pipe(work); } offset = le64_to_cpu(req->Offset); length = le32_to_cpu(req->Length); if (req->Channel == SMB2_CHANNEL_RDMA_V1 || req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) { is_rdma_channel = true; max_write_size = get_smbd_max_read_write_size(); length = le32_to_cpu(req->RemainingBytes); } if (is_rdma_channel == true) { unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset); if (req->Length != 0 || req->DataOffset != 0 || ch_offset < offsetof(struct smb2_write_req, Buffer)) { err = -EINVAL; goto out; } err = smb2_set_remote_key_for_rdma(work, (struct smb2_buffer_desc_v1 *) ((char *)req + ch_offset), req->Channel, req->WriteChannelInfoLength); if (err) goto out; } if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); err = -EACCES; goto out; } fp = ksmbd_lookup_fd_slow(work, 
req->VolatileFileId, req->PersistentFileId); if (!fp) { err = -ENOENT; goto out; } if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_READ_ATTRIBUTES_LE))) { pr_err("Not permitted to write : 0x%x\n", fp->daccess); err = -EACCES; goto out; } if (length > max_write_size) { ksmbd_debug(SMB, "limiting write size to max size(%u)\n", max_write_size); err = -EINVAL; goto out; } ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags)); if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH) writethrough = true; if (is_rdma_channel == false) { if (le16_to_cpu(req->DataOffset) < offsetof(struct smb2_write_req, Buffer)) { err = -EINVAL; goto out; } data_buf = (char *)(((char *)&req->hdr.ProtocolId) + le16_to_cpu(req->DataOffset)); ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n", fp->filp, offset, length); err = ksmbd_vfs_write(work, fp, data_buf, length, &offset, writethrough, &nbytes); if (err < 0) goto out; } else { /* read data from the client using rdma channel, and * write the data. */ nbytes = smb2_write_rdma_channel(work, req, fp, offset, length, writethrough); if (nbytes < 0) { err = (int)nbytes; goto out; } } rsp->StructureSize = cpu_to_le16(17); rsp->DataOffset = 0; rsp->Reserved = 0; rsp->DataLength = cpu_to_le32(nbytes); rsp->DataRemaining = 0; rsp->Reserved2 = 0; err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_write_rsp, Buffer)); if (err) goto out; ksmbd_fd_put(work, fp); return 0; out: if (err == -EAGAIN) rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT; else if (err == -ENOSPC || err == -EFBIG) rsp->hdr.Status = STATUS_DISK_FULL; else if (err == -ENOENT) rsp->hdr.Status = STATUS_FILE_CLOSED; else if (err == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (err == -ESHARE) rsp->hdr.Status = STATUS_SHARING_VIOLATION; else if (err == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else rsp->hdr.Status = STATUS_INVALID_HANDLE; smb2_set_err_rsp(work); ksmbd_fd_put(work, fp); return err; } /** * smb2_flush() - handler for smb2 flush file - fsync * @work: smb work containing flush command buffer * * Return: 0 on success, otherwise error */ int smb2_flush(struct ksmbd_work *work) { struct smb2_flush_req *req; struct smb2_flush_rsp *rsp; int err; WORK_BUFFERS(work, req, rsp); ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId); err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId); if (err) goto out; rsp->StructureSize = cpu_to_le16(4); rsp->Reserved = 0; return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_flush_rsp)); out: rsp->hdr.Status = STATUS_INVALID_HANDLE; smb2_set_err_rsp(work); return err; } /** * smb2_cancel() - handler for smb2 cancel command * @work: smb work containing cancel command buffer * * Return: 0 on success, otherwise error */ int smb2_cancel(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_hdr *hdr = smb2_get_msg(work->request_buf); struct smb2_hdr *chdr; struct ksmbd_work *iter; struct list_head *command_list; if (work->next_smb2_rcv_hdr_off) hdr = ksmbd_resp_buf_next(work); ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n", hdr->MessageId, hdr->Flags); if (hdr->Flags & SMB2_FLAGS_ASYNC_COMMAND) { command_list = &conn->async_requests; spin_lock(&conn->request_lock); list_for_each_entry(iter, command_list, async_request_entry) { chdr = smb2_get_msg(iter->request_buf); if (iter->async_id != le64_to_cpu(hdr->Id.AsyncId)) continue; ksmbd_debug(SMB, "smb2 with AsyncId %llu cancelled command = 0x%x\n", le64_to_cpu(hdr->Id.AsyncId), le16_to_cpu(chdr->Command)); 
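			/* matching in-flight async request found: mark it cancelled and run its cancel_fn so a blocked worker wakes up */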
iter->state = KSMBD_WORK_CANCELLED; if (iter->cancel_fn) iter->cancel_fn(iter->cancel_argv); break; } spin_unlock(&conn->request_lock); } else { command_list = &conn->requests; spin_lock(&conn->request_lock); list_for_each_entry(iter, command_list, request_entry) { chdr = smb2_get_msg(iter->request_buf); if (chdr->MessageId != hdr->MessageId || iter == work) continue; ksmbd_debug(SMB, "smb2 with mid %llu cancelled command = 0x%x\n", le64_to_cpu(hdr->MessageId), le16_to_cpu(chdr->Command)); iter->state = KSMBD_WORK_CANCELLED; break; } spin_unlock(&conn->request_lock); } /* For SMB2_CANCEL command itself send no response*/ work->send_no_response = 1; return 0; } struct file_lock *smb_flock_init(struct file *f) { struct file_lock *fl; fl = locks_alloc_lock(); if (!fl) goto out; locks_init_lock(fl); fl->fl_owner = f; fl->fl_pid = current->tgid; fl->fl_file = f; fl->fl_flags = FL_POSIX; fl->fl_ops = NULL; fl->fl_lmops = NULL; out: return fl; } static int smb2_set_flock_flags(struct file_lock *flock, int flags) { int cmd = -EINVAL; /* Checking for wrong flag combination during lock request*/ switch (flags) { case SMB2_LOCKFLAG_SHARED: ksmbd_debug(SMB, "received shared request\n"); cmd = F_SETLKW; flock->fl_type = F_RDLCK; flock->fl_flags |= FL_SLEEP; break; case SMB2_LOCKFLAG_EXCLUSIVE: ksmbd_debug(SMB, "received exclusive request\n"); cmd = F_SETLKW; flock->fl_type = F_WRLCK; flock->fl_flags |= FL_SLEEP; break; case SMB2_LOCKFLAG_SHARED | SMB2_LOCKFLAG_FAIL_IMMEDIATELY: ksmbd_debug(SMB, "received shared & fail immediately request\n"); cmd = F_SETLK; flock->fl_type = F_RDLCK; break; case SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_FAIL_IMMEDIATELY: ksmbd_debug(SMB, "received exclusive & fail immediately request\n"); cmd = F_SETLK; flock->fl_type = F_WRLCK; break; case SMB2_LOCKFLAG_UNLOCK: ksmbd_debug(SMB, "received unlock request\n"); flock->fl_type = F_UNLCK; cmd = F_SETLK; break; } return cmd; } static struct ksmbd_lock *smb2_lock_init(struct file_lock *flock, unsigned int cmd, int flags, struct list_head *lock_list) { struct ksmbd_lock *lock; lock = kzalloc(sizeof(struct ksmbd_lock), GFP_KERNEL); if (!lock) return NULL; lock->cmd = cmd; lock->fl = flock; lock->start = flock->fl_start; lock->end = flock->fl_end; lock->flags = flags; if (lock->start == lock->end) lock->zero_len = 1; INIT_LIST_HEAD(&lock->clist); INIT_LIST_HEAD(&lock->flist); INIT_LIST_HEAD(&lock->llist); list_add_tail(&lock->llist, lock_list); return lock; } static void smb2_remove_blocked_lock(void **argv) { struct file_lock *flock = (struct file_lock *)argv[0]; ksmbd_vfs_posix_lock_unblock(flock); wake_up(&flock->fl_wait); } static inline bool lock_defer_pending(struct file_lock *fl) { /* check pending lock waiters */ return waitqueue_active(&fl->fl_wait); } /** * smb2_lock() - handler for smb2 file lock command * @work: smb work containing lock command buffer * * Return: 0 on success, otherwise error */ int smb2_lock(struct ksmbd_work *work) { struct smb2_lock_req *req; struct smb2_lock_rsp *rsp; struct smb2_lock_element *lock_ele; struct ksmbd_file *fp = NULL; struct file_lock *flock = NULL; struct file *filp = NULL; int lock_count; int flags = 0; int cmd = 0; int err = -EIO, i, rc = 0; u64 lock_start, lock_length; struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2; struct ksmbd_conn *conn; int nolock = 0; LIST_HEAD(lock_list); LIST_HEAD(rollback_list); int prior_lock = 0; WORK_BUFFERS(work, req, rsp); ksmbd_debug(SMB, "Received lock request\n"); fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, 
req->PersistentFileId); if (!fp) { ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId); err = -ENOENT; goto out2; } filp = fp->filp; lock_count = le16_to_cpu(req->LockCount); lock_ele = req->locks; ksmbd_debug(SMB, "lock count is %d\n", lock_count); if (!lock_count) { err = -EINVAL; goto out2; } for (i = 0; i < lock_count; i++) { flags = le32_to_cpu(lock_ele[i].Flags); flock = smb_flock_init(filp); if (!flock) goto out; cmd = smb2_set_flock_flags(flock, flags); lock_start = le64_to_cpu(lock_ele[i].Offset); lock_length = le64_to_cpu(lock_ele[i].Length); if (lock_start > U64_MAX - lock_length) { pr_err("Invalid lock range requested\n"); rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE; locks_free_lock(flock); goto out; } if (lock_start > OFFSET_MAX) flock->fl_start = OFFSET_MAX; else flock->fl_start = lock_start; lock_length = le64_to_cpu(lock_ele[i].Length); if (lock_length > OFFSET_MAX - flock->fl_start) lock_length = OFFSET_MAX - flock->fl_start; flock->fl_end = flock->fl_start + lock_length; if (flock->fl_end < flock->fl_start) { ksmbd_debug(SMB, "the end offset(%llx) is smaller than the start offset(%llx)\n", flock->fl_end, flock->fl_start); rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE; locks_free_lock(flock); goto out; } /* Check conflict locks in one request */ list_for_each_entry(cmp_lock, &lock_list, llist) { if (cmp_lock->fl->fl_start <= flock->fl_start && cmp_lock->fl->fl_end >= flock->fl_end) { if (cmp_lock->fl->fl_type != F_UNLCK && flock->fl_type != F_UNLCK) { pr_err("conflict two locks in one request\n"); err = -EINVAL; locks_free_lock(flock); goto out; } } } smb_lock = smb2_lock_init(flock, cmd, flags, &lock_list); if (!smb_lock) { err = -EINVAL; locks_free_lock(flock); goto out; } } list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) { if (smb_lock->cmd < 0) { err = -EINVAL; goto out; } if (!(smb_lock->flags & SMB2_LOCKFLAG_MASK)) { err = -EINVAL; goto out; } if ((prior_lock & (SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_SHARED) && smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) || (prior_lock == SMB2_LOCKFLAG_UNLOCK && !(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK))) { err = -EINVAL; goto out; } prior_lock = smb_lock->flags; if (!(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) && !(smb_lock->flags & SMB2_LOCKFLAG_FAIL_IMMEDIATELY)) goto no_check_cl; nolock = 1; /* check locks in connection list */ down_read(&conn_list_lock); list_for_each_entry(conn, &conn_list, conns_list) { spin_lock(&conn->llist_lock); list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) { if (file_inode(cmp_lock->fl->fl_file) != file_inode(smb_lock->fl->fl_file)) continue; if (smb_lock->fl->fl_type == F_UNLCK) { if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file && cmp_lock->start == smb_lock->start && cmp_lock->end == smb_lock->end && !lock_defer_pending(cmp_lock->fl)) { nolock = 0; list_del(&cmp_lock->flist); list_del(&cmp_lock->clist); spin_unlock(&conn->llist_lock); up_read(&conn_list_lock); locks_free_lock(cmp_lock->fl); kfree(cmp_lock); goto out_check_cl; } continue; } if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file) { if (smb_lock->flags & SMB2_LOCKFLAG_SHARED) continue; } else { if (cmp_lock->flags & SMB2_LOCKFLAG_SHARED) continue; } /* check zero byte lock range */ if (cmp_lock->zero_len && !smb_lock->zero_len && cmp_lock->start > smb_lock->start && cmp_lock->start < smb_lock->end) { spin_unlock(&conn->llist_lock); up_read(&conn_list_lock); pr_err("previous lock conflict with zero byte lock range\n"); goto out; } if (smb_lock->zero_len && !cmp_lock->zero_len && smb_lock->start > 
cmp_lock->start && smb_lock->start < cmp_lock->end) { spin_unlock(&conn->llist_lock); up_read(&conn_list_lock); pr_err("current lock conflict with zero byte lock range\n"); goto out; } if (((cmp_lock->start <= smb_lock->start && cmp_lock->end > smb_lock->start) || (cmp_lock->start < smb_lock->end && cmp_lock->end >= smb_lock->end)) && !cmp_lock->zero_len && !smb_lock->zero_len) { spin_unlock(&conn->llist_lock); up_read(&conn_list_lock); pr_err("Not allow lock operation on exclusive lock range\n"); goto out; } } spin_unlock(&conn->llist_lock); } up_read(&conn_list_lock); out_check_cl: if (smb_lock->fl->fl_type == F_UNLCK && nolock) { pr_err("Try to unlock nolocked range\n"); rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED; goto out; } no_check_cl: if (smb_lock->zero_len) { err = 0; goto skip; } flock = smb_lock->fl; list_del(&smb_lock->llist); retry: rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL); skip: if (flags & SMB2_LOCKFLAG_UNLOCK) { if (!rc) { ksmbd_debug(SMB, "File unlocked\n"); } else if (rc == -ENOENT) { rsp->hdr.Status = STATUS_NOT_LOCKED; goto out; } locks_free_lock(flock); kfree(smb_lock); } else { if (rc == FILE_LOCK_DEFERRED) { void **argv; ksmbd_debug(SMB, "would have to wait for getting lock\n"); spin_lock(&work->conn->llist_lock); list_add_tail(&smb_lock->clist, &work->conn->lock_list); spin_unlock(&work->conn->llist_lock); list_add(&smb_lock->llist, &rollback_list); argv = kmalloc(sizeof(void *), GFP_KERNEL); if (!argv) { err = -ENOMEM; goto out; } argv[0] = flock; rc = setup_async_work(work, smb2_remove_blocked_lock, argv); if (rc) { err = -ENOMEM; goto out; } spin_lock(&fp->f_lock); list_add(&work->fp_entry, &fp->blocked_works); spin_unlock(&fp->f_lock); smb2_send_interim_resp(work, STATUS_PENDING); ksmbd_vfs_posix_lock_wait(flock); spin_lock(&fp->f_lock); list_del(&work->fp_entry); spin_unlock(&fp->f_lock); if (work->state != KSMBD_WORK_ACTIVE) { list_del(&smb_lock->llist); spin_lock(&work->conn->llist_lock); list_del(&smb_lock->clist); spin_unlock(&work->conn->llist_lock); locks_free_lock(flock); if (work->state == KSMBD_WORK_CANCELLED) { rsp->hdr.Status = STATUS_CANCELLED; kfree(smb_lock); smb2_send_interim_resp(work, STATUS_CANCELLED); work->send_no_response = 1; goto out; } rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED; kfree(smb_lock); goto out2; } list_del(&smb_lock->llist); spin_lock(&work->conn->llist_lock); list_del(&smb_lock->clist); spin_unlock(&work->conn->llist_lock); release_async_work(work); goto retry; } else if (!rc) { spin_lock(&work->conn->llist_lock); list_add_tail(&smb_lock->clist, &work->conn->lock_list); list_add_tail(&smb_lock->flist, &fp->lock_list); spin_unlock(&work->conn->llist_lock); list_add(&smb_lock->llist, &rollback_list); ksmbd_debug(SMB, "successful in taking lock\n"); } else { goto out; } } } if (atomic_read(&fp->f_ci->op_count) > 1) smb_break_all_oplock(work, fp); rsp->StructureSize = cpu_to_le16(4); ksmbd_debug(SMB, "successful in taking lock\n"); rsp->hdr.Status = STATUS_SUCCESS; rsp->Reserved = 0; err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lock_rsp)); if (err) goto out; ksmbd_fd_put(work, fp); return 0; out: list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) { locks_free_lock(smb_lock->fl); list_del(&smb_lock->llist); kfree(smb_lock); } list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) { struct file_lock *rlock = NULL; rlock = smb_flock_init(filp); rlock->fl_type = F_UNLCK; rlock->fl_start = smb_lock->start; rlock->fl_end = smb_lock->end; rc = vfs_lock_file(filp, F_SETLK, rlock, NULL); if (rc) 
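			/* rollback is best effort: log the failure and keep releasing the remaining locks */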
pr_err("rollback unlock fail : %d\n", rc); list_del(&smb_lock->llist); spin_lock(&work->conn->llist_lock); if (!list_empty(&smb_lock->flist)) list_del(&smb_lock->flist); list_del(&smb_lock->clist); spin_unlock(&work->conn->llist_lock); locks_free_lock(smb_lock->fl); locks_free_lock(rlock); kfree(smb_lock); } out2: ksmbd_debug(SMB, "failed in taking lock(flags : %x), err : %d\n", flags, err); if (!rsp->hdr.Status) { if (err == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (err == -ENOMEM) rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; else if (err == -ENOENT) rsp->hdr.Status = STATUS_FILE_CLOSED; else rsp->hdr.Status = STATUS_LOCK_NOT_GRANTED; } smb2_set_err_rsp(work); ksmbd_fd_put(work, fp); return err; } static int fsctl_copychunk(struct ksmbd_work *work, struct copychunk_ioctl_req *ci_req, unsigned int cnt_code, unsigned int input_count, unsigned long long volatile_id, unsigned long long persistent_id, struct smb2_ioctl_rsp *rsp) { struct copychunk_ioctl_rsp *ci_rsp; struct ksmbd_file *src_fp = NULL, *dst_fp = NULL; struct srv_copychunk *chunks; unsigned int i, chunk_count, chunk_count_written = 0; unsigned int chunk_size_written = 0; loff_t total_size_written = 0; int ret = 0; ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0]; rsp->VolatileFileId = volatile_id; rsp->PersistentFileId = persistent_id; ci_rsp->ChunksWritten = cpu_to_le32(ksmbd_server_side_copy_max_chunk_count()); ci_rsp->ChunkBytesWritten = cpu_to_le32(ksmbd_server_side_copy_max_chunk_size()); ci_rsp->TotalBytesWritten = cpu_to_le32(ksmbd_server_side_copy_max_total_size()); chunks = (struct srv_copychunk *)&ci_req->Chunks[0]; chunk_count = le32_to_cpu(ci_req->ChunkCount); if (chunk_count == 0) goto out; total_size_written = 0; /* verify the SRV_COPYCHUNK_COPY packet */ if (chunk_count > ksmbd_server_side_copy_max_chunk_count() || input_count < offsetof(struct copychunk_ioctl_req, Chunks) + chunk_count * sizeof(struct srv_copychunk)) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; return -EINVAL; } for (i = 0; i < chunk_count; i++) { if (le32_to_cpu(chunks[i].Length) == 0 || le32_to_cpu(chunks[i].Length) > ksmbd_server_side_copy_max_chunk_size()) break; total_size_written += le32_to_cpu(chunks[i].Length); } if (i < chunk_count || total_size_written > ksmbd_server_side_copy_max_total_size()) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; return -EINVAL; } src_fp = ksmbd_lookup_foreign_fd(work, le64_to_cpu(ci_req->ResumeKey[0])); dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id); ret = -EINVAL; if (!src_fp || src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) { rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND; goto out; } if (!dst_fp) { rsp->hdr.Status = STATUS_FILE_CLOSED; goto out; } /* * FILE_READ_DATA should only be included in * the FSCTL_COPYCHUNK case */ if (cnt_code == FSCTL_COPYCHUNK && !(dst_fp->daccess & (FILE_READ_DATA_LE | FILE_GENERIC_READ_LE))) { rsp->hdr.Status = STATUS_ACCESS_DENIED; goto out; } ret = ksmbd_vfs_copy_file_ranges(work, src_fp, dst_fp, chunks, chunk_count, &chunk_count_written, &chunk_size_written, &total_size_written); if (ret < 0) { if (ret == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; if (ret == -EAGAIN) rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT; else if (ret == -EBADF) rsp->hdr.Status = STATUS_INVALID_HANDLE; else if (ret == -EFBIG || ret == -ENOSPC) rsp->hdr.Status = STATUS_DISK_FULL; else if (ret == -EINVAL) rsp->hdr.Status = STATUS_INVALID_PARAMETER; else if (ret == -EISDIR) rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY; else if (ret == 
-E2BIG) rsp->hdr.Status = STATUS_INVALID_VIEW_SIZE; else rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; } ci_rsp->ChunksWritten = cpu_to_le32(chunk_count_written); ci_rsp->ChunkBytesWritten = cpu_to_le32(chunk_size_written); ci_rsp->TotalBytesWritten = cpu_to_le32(total_size_written); out: ksmbd_fd_put(work, src_fp); ksmbd_fd_put(work, dst_fp); return ret; } static __be32 idev_ipv4_address(struct in_device *idev) { __be32 addr = 0; struct in_ifaddr *ifa; rcu_read_lock(); in_dev_for_each_ifa_rcu(ifa, idev) { if (ifa->ifa_flags & IFA_F_SECONDARY) continue; addr = ifa->ifa_address; break; } rcu_read_unlock(); return addr; } static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn, struct smb2_ioctl_rsp *rsp, unsigned int out_buf_len) { struct network_interface_info_ioctl_rsp *nii_rsp = NULL; int nbytes = 0; struct net_device *netdev; struct sockaddr_storage_rsp *sockaddr_storage; unsigned int flags; unsigned long long speed; rtnl_lock(); for_each_netdev(&init_net, netdev) { bool ipv4_set = false; if (netdev->type == ARPHRD_LOOPBACK) continue; flags = dev_get_flags(netdev); if (!(flags & IFF_RUNNING)) continue; ipv6_retry: if (out_buf_len < nbytes + sizeof(struct network_interface_info_ioctl_rsp)) { rtnl_unlock(); return -ENOSPC; } nii_rsp = (struct network_interface_info_ioctl_rsp *) &rsp->Buffer[nbytes]; nii_rsp->IfIndex = cpu_to_le32(netdev->ifindex); nii_rsp->Capability = 0; if (netdev->real_num_tx_queues > 1) nii_rsp->Capability |= cpu_to_le32(RSS_CAPABLE); if (ksmbd_rdma_capable_netdev(netdev)) nii_rsp->Capability |= cpu_to_le32(RDMA_CAPABLE); nii_rsp->Next = cpu_to_le32(152); nii_rsp->Reserved = 0; if (netdev->ethtool_ops->get_link_ksettings) { struct ethtool_link_ksettings cmd; netdev->ethtool_ops->get_link_ksettings(netdev, &cmd); speed = cmd.base.speed; } else { ksmbd_debug(SMB, "%s %s\n", netdev->name, "speed is unknown, defaulting to 1Gb/sec"); speed = SPEED_1000; } speed *= 1000000; nii_rsp->LinkSpeed = cpu_to_le64(speed); sockaddr_storage = (struct sockaddr_storage_rsp *) nii_rsp->SockAddr_Storage; memset(sockaddr_storage, 0, 128); if (!ipv4_set) { struct in_device *idev; sockaddr_storage->Family = cpu_to_le16(INTERNETWORK); sockaddr_storage->addr4.Port = 0; idev = __in_dev_get_rtnl(netdev); if (!idev) continue; sockaddr_storage->addr4.IPv4address = idev_ipv4_address(idev); nbytes += sizeof(struct network_interface_info_ioctl_rsp); ipv4_set = true; goto ipv6_retry; } else { struct inet6_dev *idev6; struct inet6_ifaddr *ifa; __u8 *ipv6_addr = sockaddr_storage->addr6.IPv6address; sockaddr_storage->Family = cpu_to_le16(INTERNETWORKV6); sockaddr_storage->addr6.Port = 0; sockaddr_storage->addr6.FlowInfo = 0; idev6 = __in6_dev_get(netdev); if (!idev6) continue; list_for_each_entry(ifa, &idev6->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; memcpy(ipv6_addr, ifa->addr.s6_addr, 16); break; } sockaddr_storage->addr6.ScopeId = 0; nbytes += sizeof(struct network_interface_info_ioctl_rsp); } } rtnl_unlock(); /* zero if this is last one */ if (nii_rsp) nii_rsp->Next = 0; rsp->PersistentFileId = SMB2_NO_FID; rsp->VolatileFileId = SMB2_NO_FID; return nbytes; } static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn, struct validate_negotiate_info_req *neg_req, struct validate_negotiate_info_rsp *neg_rsp, unsigned int in_buf_len) { int ret = 0; int dialect; if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) + le16_to_cpu(neg_req->DialectCount) * sizeof(__le16)) return -EINVAL; dialect = 
ksmbd_lookup_dialect_by_id(neg_req->Dialects, neg_req->DialectCount); if (dialect == BAD_PROT_ID || dialect != conn->dialect) { ret = -EINVAL; goto err_out; } if (strncmp(neg_req->Guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { ret = -EINVAL; goto err_out; } if (le16_to_cpu(neg_req->SecurityMode) != conn->cli_sec_mode) { ret = -EINVAL; goto err_out; } if (le32_to_cpu(neg_req->Capabilities) != conn->cli_cap) { ret = -EINVAL; goto err_out; } neg_rsp->Capabilities = cpu_to_le32(conn->vals->capabilities); memset(neg_rsp->Guid, 0, SMB2_CLIENT_GUID_SIZE); neg_rsp->SecurityMode = cpu_to_le16(conn->srv_sec_mode); neg_rsp->Dialect = cpu_to_le16(conn->dialect); err_out: return ret; } static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id, struct file_allocated_range_buffer *qar_req, struct file_allocated_range_buffer *qar_rsp, unsigned int in_count, unsigned int *out_count) { struct ksmbd_file *fp; loff_t start, length; int ret = 0; *out_count = 0; if (in_count == 0) return -EINVAL; start = le64_to_cpu(qar_req->file_offset); length = le64_to_cpu(qar_req->length); if (start < 0 || length < 0) return -EINVAL; fp = ksmbd_lookup_fd_fast(work, id); if (!fp) return -ENOENT; ret = ksmbd_vfs_fqar_lseek(fp, start, length, qar_rsp, in_count, out_count); if (ret && ret != -E2BIG) *out_count = 0; ksmbd_fd_put(work, fp); return ret; } static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id, unsigned int out_buf_len, struct smb2_ioctl_req *req, struct smb2_ioctl_rsp *rsp) { struct ksmbd_rpc_command *rpc_resp; char *data_buf = (char *)&req->Buffer[0]; int nbytes = 0; rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf, le32_to_cpu(req->InputCount)); if (rpc_resp) { if (rpc_resp->flags == KSMBD_RPC_SOME_NOT_MAPPED) { /* * set STATUS_SOME_NOT_MAPPED response * for unknown domain sid. 
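* The RPC payload is still copied into the response below; only the status differs.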
*/ rsp->hdr.Status = STATUS_SOME_NOT_MAPPED; } else if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) { rsp->hdr.Status = STATUS_NOT_SUPPORTED; goto out; } else if (rpc_resp->flags != KSMBD_RPC_OK) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; goto out; } nbytes = rpc_resp->payload_sz; if (rpc_resp->payload_sz > out_buf_len) { rsp->hdr.Status = STATUS_BUFFER_OVERFLOW; nbytes = out_buf_len; } if (!rpc_resp->payload_sz) { rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; goto out; } memcpy((char *)rsp->Buffer, rpc_resp->payload, nbytes); } out: kvfree(rpc_resp); return nbytes; } static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id, struct file_sparse *sparse) { struct ksmbd_file *fp; struct mnt_idmap *idmap; int ret = 0; __le32 old_fattr; fp = ksmbd_lookup_fd_fast(work, id); if (!fp) return -ENOENT; idmap = file_mnt_idmap(fp->filp); old_fattr = fp->f_ci->m_fattr; if (sparse->SetSparse) fp->f_ci->m_fattr |= FILE_ATTRIBUTE_SPARSE_FILE_LE; else fp->f_ci->m_fattr &= ~FILE_ATTRIBUTE_SPARSE_FILE_LE; if (fp->f_ci->m_fattr != old_fattr && test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) { struct xattr_dos_attrib da; ret = ksmbd_vfs_get_dos_attrib_xattr(idmap, fp->filp->f_path.dentry, &da); if (ret <= 0) goto out; da.attr = le32_to_cpu(fp->f_ci->m_fattr); ret = ksmbd_vfs_set_dos_attrib_xattr(idmap, &fp->filp->f_path, &da); if (ret) fp->f_ci->m_fattr = old_fattr; } out: ksmbd_fd_put(work, fp); return ret; } static int fsctl_request_resume_key(struct ksmbd_work *work, struct smb2_ioctl_req *req, struct resume_key_ioctl_rsp *key_rsp) { struct ksmbd_file *fp; fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!fp) return -ENOENT; memset(key_rsp, 0, sizeof(*key_rsp)); key_rsp->ResumeKey[0] = req->VolatileFileId; key_rsp->ResumeKey[1] = req->PersistentFileId; ksmbd_fd_put(work, fp); return 0; } /** * smb2_ioctl() - handler for smb2 ioctl command * @work: smb work containing ioctl command buffer * * Return: 0 on success, otherwise error */ int smb2_ioctl(struct ksmbd_work *work) { struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len; u64 id = KSMBD_NO_FID; struct ksmbd_conn *conn = work->conn; int ret = 0; if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); rsp = ksmbd_resp_buf_next(work); if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; } } else { req = smb2_get_msg(work->request_buf); rsp = smb2_get_msg(work->response_buf); } if (!has_file_id(id)) id = req->VolatileFileId; if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) { rsp->hdr.Status = STATUS_NOT_SUPPORTED; goto out; } cnt_code = le32_to_cpu(req->CtlCode); ret = smb2_calc_max_out_buf_len(work, 48, le32_to_cpu(req->MaxOutputResponse)); if (ret < 0) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; goto out; } out_buf_len = (unsigned int)ret; in_buf_len = le32_to_cpu(req->InputCount); switch (cnt_code) { case FSCTL_DFS_GET_REFERRALS: case FSCTL_DFS_GET_REFERRALS_EX: /* Not support DFS yet */ rsp->hdr.Status = STATUS_FS_DRIVER_REQUIRED; goto out; case FSCTL_CREATE_OR_GET_OBJECT_ID: { struct file_object_buf_type1_ioctl_rsp *obj_buf; nbytes = sizeof(struct file_object_buf_type1_ioctl_rsp); obj_buf = (struct file_object_buf_type1_ioctl_rsp *) &rsp->Buffer[0]; /* * TODO: This is dummy implementation to pass smbtorture * Need to check correct response later */ memset(obj_buf->ObjectId, 0x0, 16); 
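/* the birth volume/object/domain IDs below are zeroed as part of the same dummy reply */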
memset(obj_buf->BirthVolumeId, 0x0, 16); memset(obj_buf->BirthObjectId, 0x0, 16); memset(obj_buf->DomainId, 0x0, 16); break; } case FSCTL_PIPE_TRANSCEIVE: out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len); nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp); break; case FSCTL_VALIDATE_NEGOTIATE_INFO: if (conn->dialect < SMB30_PROT_ID) { ret = -EOPNOTSUPP; goto out; } if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects)) { ret = -EINVAL; goto out; } if (out_buf_len < sizeof(struct validate_negotiate_info_rsp)) { ret = -EINVAL; goto out; } ret = fsctl_validate_negotiate_info(conn, (struct validate_negotiate_info_req *)&req->Buffer[0], (struct validate_negotiate_info_rsp *)&rsp->Buffer[0], in_buf_len); if (ret < 0) goto out; nbytes = sizeof(struct validate_negotiate_info_rsp); rsp->PersistentFileId = SMB2_NO_FID; rsp->VolatileFileId = SMB2_NO_FID; break; case FSCTL_QUERY_NETWORK_INTERFACE_INFO: ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len); if (ret < 0) goto out; nbytes = ret; break; case FSCTL_REQUEST_RESUME_KEY: if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) { ret = -EINVAL; goto out; } ret = fsctl_request_resume_key(work, req, (struct resume_key_ioctl_rsp *)&rsp->Buffer[0]); if (ret < 0) goto out; rsp->PersistentFileId = req->PersistentFileId; rsp->VolatileFileId = req->VolatileFileId; nbytes = sizeof(struct resume_key_ioctl_rsp); break; case FSCTL_COPYCHUNK: case FSCTL_COPYCHUNK_WRITE: if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); ret = -EACCES; goto out; } if (in_buf_len < sizeof(struct copychunk_ioctl_req)) { ret = -EINVAL; goto out; } if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) { ret = -EINVAL; goto out; } nbytes = sizeof(struct copychunk_ioctl_rsp); rsp->VolatileFileId = req->VolatileFileId; rsp->PersistentFileId = req->PersistentFileId; fsctl_copychunk(work, (struct copychunk_ioctl_req *)&req->Buffer[0], le32_to_cpu(req->CtlCode), le32_to_cpu(req->InputCount), req->VolatileFileId, req->PersistentFileId, rsp); break; case FSCTL_SET_SPARSE: if (in_buf_len < sizeof(struct file_sparse)) { ret = -EINVAL; goto out; } ret = fsctl_set_sparse(work, id, (struct file_sparse *)&req->Buffer[0]); if (ret < 0) goto out; break; case FSCTL_SET_ZERO_DATA: { struct file_zero_data_information *zero_data; struct ksmbd_file *fp; loff_t off, len, bfz; if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { ksmbd_debug(SMB, "User does not have write permission\n"); ret = -EACCES; goto out; } if (in_buf_len < sizeof(struct file_zero_data_information)) { ret = -EINVAL; goto out; } zero_data = (struct file_zero_data_information *)&req->Buffer[0]; off = le64_to_cpu(zero_data->FileOffset); bfz = le64_to_cpu(zero_data->BeyondFinalZero); if (off < 0 || bfz < 0 || off > bfz) { ret = -EINVAL; goto out; } len = bfz - off; if (len) { fp = ksmbd_lookup_fd_fast(work, id); if (!fp) { ret = -ENOENT; goto out; } ret = ksmbd_vfs_zero_data(work, fp, off, len); ksmbd_fd_put(work, fp); if (ret < 0) goto out; } break; } case FSCTL_QUERY_ALLOCATED_RANGES: if (in_buf_len < sizeof(struct file_allocated_range_buffer)) { ret = -EINVAL; goto out; } ret = fsctl_query_allocated_ranges(work, id, (struct file_allocated_range_buffer *)&req->Buffer[0], (struct file_allocated_range_buffer *)&rsp->Buffer[0], out_buf_len / sizeof(struct file_allocated_range_buffer), &nbytes); if (ret == -E2BIG) { rsp->hdr.Status = STATUS_BUFFER_OVERFLOW; } else if (ret < 0) { nbytes = 0; 
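/* hard failure: no range data is returned to the client */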
goto out; } nbytes *= sizeof(struct file_allocated_range_buffer); break; case FSCTL_GET_REPARSE_POINT: { struct reparse_data_buffer *reparse_ptr; struct ksmbd_file *fp; reparse_ptr = (struct reparse_data_buffer *)&rsp->Buffer[0]; fp = ksmbd_lookup_fd_fast(work, id); if (!fp) { pr_err("not found fp!!\n"); ret = -ENOENT; goto out; } reparse_ptr->ReparseTag = smb2_get_reparse_tag_special_file(file_inode(fp->filp)->i_mode); reparse_ptr->ReparseDataLength = 0; ksmbd_fd_put(work, fp); nbytes = sizeof(struct reparse_data_buffer); break; } case FSCTL_DUPLICATE_EXTENTS_TO_FILE: { struct ksmbd_file *fp_in, *fp_out = NULL; struct duplicate_extents_to_file *dup_ext; loff_t src_off, dst_off, length, cloned; if (in_buf_len < sizeof(struct duplicate_extents_to_file)) { ret = -EINVAL; goto out; } dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0]; fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle, dup_ext->PersistentFileHandle); if (!fp_in) { pr_err("not found file handle in duplicate extent to file\n"); ret = -ENOENT; goto out; } fp_out = ksmbd_lookup_fd_fast(work, id); if (!fp_out) { pr_err("not found fp\n"); ret = -ENOENT; goto dup_ext_out; } src_off = le64_to_cpu(dup_ext->SourceFileOffset); dst_off = le64_to_cpu(dup_ext->TargetFileOffset); length = le64_to_cpu(dup_ext->ByteCount); /* * XXX: It is not clear if FSCTL_DUPLICATE_EXTENTS_TO_FILE * should fall back to vfs_copy_file_range(). This could be * beneficial when re-exporting nfs/smb mount, but note that * this can result in partial copy that returns an error status. * If/when FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX is implemented, * fall back to vfs_copy_file_range(), should be avoided when * the flag DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC is set. */ cloned = vfs_clone_file_range(fp_in->filp, src_off, fp_out->filp, dst_off, length, 0); if (cloned == -EXDEV || cloned == -EOPNOTSUPP) { ret = -EOPNOTSUPP; goto dup_ext_out; } else if (cloned != length) { cloned = vfs_copy_file_range(fp_in->filp, src_off, fp_out->filp, dst_off, length, 0); if (cloned != length) { if (cloned < 0) ret = cloned; else ret = -EINVAL; } } dup_ext_out: ksmbd_fd_put(work, fp_in); ksmbd_fd_put(work, fp_out); if (ret < 0) goto out; break; } default: ksmbd_debug(SMB, "not implemented yet ioctl command 0x%x\n", cnt_code); ret = -EOPNOTSUPP; goto out; } rsp->CtlCode = cpu_to_le32(cnt_code); rsp->InputCount = cpu_to_le32(0); rsp->InputOffset = cpu_to_le32(112); rsp->OutputOffset = cpu_to_le32(112); rsp->OutputCount = cpu_to_le32(nbytes); rsp->StructureSize = cpu_to_le16(49); rsp->Reserved = cpu_to_le16(0); rsp->Flags = cpu_to_le32(0); rsp->Reserved2 = cpu_to_le32(0); ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_ioctl_rsp) + nbytes); if (!ret) return ret; out: if (ret == -EACCES) rsp->hdr.Status = STATUS_ACCESS_DENIED; else if (ret == -ENOENT) rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND; else if (ret == -EOPNOTSUPP) rsp->hdr.Status = STATUS_NOT_SUPPORTED; else if (ret == -ENOSPC) rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL; else if (ret < 0 || rsp->hdr.Status == 0) rsp->hdr.Status = STATUS_INVALID_PARAMETER; smb2_set_err_rsp(work); return 0; } /** * smb20_oplock_break_ack() - handler for smb2.0 oplock break command * @work: smb work containing oplock break command buffer * * Return: 0 */ static void smb20_oplock_break_ack(struct ksmbd_work *work) { struct smb2_oplock_break *req; struct smb2_oplock_break *rsp; struct ksmbd_file *fp; struct oplock_info *opinfo = NULL; __le32 err = 0; int ret = 0; u64 volatile_id, persistent_id; char req_oplevel = 0, 
rsp_oplevel = 0; unsigned int oplock_change_type; WORK_BUFFERS(work, req, rsp); volatile_id = req->VolatileFid; persistent_id = req->PersistentFid; req_oplevel = req->OplockLevel; ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n", volatile_id, persistent_id, req_oplevel); fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id); if (!fp) { rsp->hdr.Status = STATUS_FILE_CLOSED; smb2_set_err_rsp(work); return; } opinfo = opinfo_get(fp); if (!opinfo) { pr_err("unexpected null oplock_info\n"); rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL; smb2_set_err_rsp(work); ksmbd_fd_put(work, fp); return; } if (opinfo->level == SMB2_OPLOCK_LEVEL_NONE) { rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL; goto err_out; } if (opinfo->op_state == OPLOCK_STATE_NONE) { ksmbd_debug(SMB, "unexpected oplock state 0x%x\n", opinfo->op_state); rsp->hdr.Status = STATUS_UNSUCCESSFUL; goto err_out; } if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE || opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) && (req_oplevel != SMB2_OPLOCK_LEVEL_II && req_oplevel != SMB2_OPLOCK_LEVEL_NONE)) { err = STATUS_INVALID_OPLOCK_PROTOCOL; oplock_change_type = OPLOCK_WRITE_TO_NONE; } else if (opinfo->level == SMB2_OPLOCK_LEVEL_II && req_oplevel != SMB2_OPLOCK_LEVEL_NONE) { err = STATUS_INVALID_OPLOCK_PROTOCOL; oplock_change_type = OPLOCK_READ_TO_NONE; } else if (req_oplevel == SMB2_OPLOCK_LEVEL_II || req_oplevel == SMB2_OPLOCK_LEVEL_NONE) { err = STATUS_INVALID_DEVICE_STATE; if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE || opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) && req_oplevel == SMB2_OPLOCK_LEVEL_II) { oplock_change_type = OPLOCK_WRITE_TO_READ; } else if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE || opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) && req_oplevel == SMB2_OPLOCK_LEVEL_NONE) { oplock_change_type = OPLOCK_WRITE_TO_NONE; } else if (opinfo->level == SMB2_OPLOCK_LEVEL_II && req_oplevel == SMB2_OPLOCK_LEVEL_NONE) { oplock_change_type = OPLOCK_READ_TO_NONE; } else { oplock_change_type = 0; } } else { oplock_change_type = 0; } switch (oplock_change_type) { case OPLOCK_WRITE_TO_READ: ret = opinfo_write_to_read(opinfo); rsp_oplevel = SMB2_OPLOCK_LEVEL_II; break; case OPLOCK_WRITE_TO_NONE: ret = opinfo_write_to_none(opinfo); rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE; break; case OPLOCK_READ_TO_NONE: ret = opinfo_read_to_none(opinfo); rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE; break; default: pr_err("unknown oplock change 0x%x -> 0x%x\n", opinfo->level, req_oplevel); } if (ret < 0) { rsp->hdr.Status = err; goto err_out; } /* finish the state transition and wake waiters before dropping our references */ opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); opinfo_put(opinfo); ksmbd_fd_put(work, fp); rsp->StructureSize = cpu_to_le16(24); rsp->OplockLevel = rsp_oplevel; rsp->Reserved = 0; rsp->Reserved2 = 0; rsp->VolatileFid = volatile_id; rsp->PersistentFid = persistent_id; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break)); if (!ret) return; /* references were already dropped above; do not fall through to err_out */ smb2_set_err_rsp(work); return; err_out: opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); opinfo_put(opinfo); ksmbd_fd_put(work, fp); smb2_set_err_rsp(work); } static int check_lease_state(struct lease *lease, __le32 req_state) { if ((lease->new_state == (SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) && !(req_state & SMB2_LEASE_WRITE_CACHING_LE)) { lease->new_state = req_state; return 0; } if (lease->new_state == req_state) return 0; return 1; } /** * smb21_lease_break_ack() - handler for smb2.1 lease break command * @work: smb work containing lease break command buffer * * Return: 0 */ static void
smb21_lease_break_ack(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_lease_ack *req; struct smb2_lease_ack *rsp; struct oplock_info *opinfo; __le32 err = 0; int ret = 0; unsigned int lease_change_type; __le32 lease_state; struct lease *lease; WORK_BUFFERS(work, req, rsp); ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n", le32_to_cpu(req->LeaseState)); opinfo = lookup_lease_in_table(conn, req->LeaseKey); if (!opinfo) { ksmbd_debug(OPLOCK, "file not opened\n"); smb2_set_err_rsp(work); rsp->hdr.Status = STATUS_UNSUCCESSFUL; return; } lease = opinfo->o_lease; if (opinfo->op_state == OPLOCK_STATE_NONE) { pr_err("unexpected lease break state 0x%x\n", opinfo->op_state); rsp->hdr.Status = STATUS_UNSUCCESSFUL; goto err_out; } if (check_lease_state(lease, req->LeaseState)) { rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED; ksmbd_debug(OPLOCK, "req lease state: 0x%x, expected state: 0x%x\n", req->LeaseState, lease->new_state); goto err_out; } if (!atomic_read(&opinfo->breaking_cnt)) { rsp->hdr.Status = STATUS_UNSUCCESSFUL; goto err_out; } /* check for bad lease state */ if (req->LeaseState & (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE))) { err = STATUS_INVALID_OPLOCK_PROTOCOL; if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) lease_change_type = OPLOCK_WRITE_TO_NONE; else lease_change_type = OPLOCK_READ_TO_NONE; ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n", le32_to_cpu(lease->state), le32_to_cpu(req->LeaseState)); } else if (lease->state == SMB2_LEASE_READ_CACHING_LE && req->LeaseState != SMB2_LEASE_NONE_LE) { err = STATUS_INVALID_OPLOCK_PROTOCOL; lease_change_type = OPLOCK_READ_TO_NONE; ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n", le32_to_cpu(lease->state), le32_to_cpu(req->LeaseState)); } else { /* valid lease state changes */ err = STATUS_INVALID_DEVICE_STATE; if (req->LeaseState == SMB2_LEASE_NONE_LE) { if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) lease_change_type = OPLOCK_WRITE_TO_NONE; else lease_change_type = OPLOCK_READ_TO_NONE; } else if (req->LeaseState & SMB2_LEASE_READ_CACHING_LE) { if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) lease_change_type = OPLOCK_WRITE_TO_READ; else lease_change_type = OPLOCK_READ_HANDLE_TO_READ; } else { lease_change_type = 0; } } switch (lease_change_type) { case OPLOCK_WRITE_TO_READ: ret = opinfo_write_to_read(opinfo); break; case OPLOCK_READ_HANDLE_TO_READ: ret = opinfo_read_handle_to_read(opinfo); break; case OPLOCK_WRITE_TO_NONE: ret = opinfo_write_to_none(opinfo); break; case OPLOCK_READ_TO_NONE: ret = opinfo_read_to_none(opinfo); break; default: ksmbd_debug(OPLOCK, "unknown lease change 0x%x -> 0x%x\n", le32_to_cpu(lease->state), le32_to_cpu(req->LeaseState)); } lease_state = lease->state; opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); atomic_dec(&opinfo->breaking_cnt); wake_up_interruptible_all(&opinfo->oplock_brk); opinfo_put(opinfo); /* our reference is gone; err_out would touch and put opinfo again */ if (ret < 0) { rsp->hdr.Status = err; smb2_set_err_rsp(work); return; } rsp->StructureSize = cpu_to_le16(36); rsp->Reserved = 0; rsp->Flags = 0; memcpy(rsp->LeaseKey, req->LeaseKey, 16); rsp->LeaseState = lease_state; rsp->LeaseDuration = 0; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack)); if (!ret) return; smb2_set_err_rsp(work); return; err_out: opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); atomic_dec(&opinfo->breaking_cnt); wake_up_interruptible_all(&opinfo->oplock_brk); opinfo_put(opinfo); smb2_set_err_rsp(work); } /** * smb2_oplock_break() - dispatcher for smb2.0 and 2.1 oplock/lease
break * @work: smb work containing oplock/lease break command buffer * * Return: 0 */ int smb2_oplock_break(struct ksmbd_work *work) { struct smb2_oplock_break *req; struct smb2_oplock_break *rsp; WORK_BUFFERS(work, req, rsp); switch (le16_to_cpu(req->StructureSize)) { case OP_BREAK_STRUCT_SIZE_20: smb20_oplock_break_ack(work); break; case OP_BREAK_STRUCT_SIZE_21: smb21_lease_break_ack(work); break; default: ksmbd_debug(OPLOCK, "invalid break cmd %d\n", le16_to_cpu(req->StructureSize)); rsp->hdr.Status = STATUS_INVALID_PARAMETER; smb2_set_err_rsp(work); } return 0; } /** * smb2_notify() - handler for smb2 notify request * @work: smb work containing notify command buffer * * Return: 0 */ int smb2_notify(struct ksmbd_work *work) { struct smb2_change_notify_req *req; struct smb2_change_notify_rsp *rsp; WORK_BUFFERS(work, req, rsp); if (work->next_smb2_rcv_hdr_off && req->hdr.NextCommand) { rsp->hdr.Status = STATUS_INTERNAL_ERROR; smb2_set_err_rsp(work); return 0; } smb2_set_err_rsp(work); rsp->hdr.Status = STATUS_NOT_IMPLEMENTED; return 0; } /** * smb2_is_sign_req() - handler for checking packet signing status * @work: smb work containing notify command buffer * @command: SMB2 command id * * Return: true if packet is signed, false otherwise */ bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command) { struct smb2_hdr *rcv_hdr2 = smb2_get_msg(work->request_buf); if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) && command != SMB2_NEGOTIATE_HE && command != SMB2_SESSION_SETUP_HE && command != SMB2_OPLOCK_BREAK_HE) return true; return false; } /** * smb2_check_sign_req() - handler for req packet sign processing * @work: smb work containing notify command buffer * * Return: 1 on success, 0 otherwise */ int smb2_check_sign_req(struct ksmbd_work *work) { struct smb2_hdr *hdr; char signature_req[SMB2_SIGNATURE_SIZE]; char signature[SMB2_HMACSHA256_SIZE]; struct kvec iov[1]; size_t len; hdr = smb2_get_msg(work->request_buf); if (work->next_smb2_rcv_hdr_off) hdr = ksmbd_req_buf_next(work); if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off) len = get_rfc1002_len(work->request_buf); else if (hdr->NextCommand) len = le32_to_cpu(hdr->NextCommand); else len = get_rfc1002_len(work->request_buf) - work->next_smb2_rcv_hdr_off; memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE); memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE); iov[0].iov_base = (char *)&hdr->ProtocolId; iov[0].iov_len = len; if (ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1, signature)) return 0; if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) { pr_err("bad smb2 signature\n"); return 0; } return 1; } /** * smb2_set_sign_rsp() - handler for rsp packet sign processing * @work: smb work containing notify command buffer * */ void smb2_set_sign_rsp(struct ksmbd_work *work) { struct smb2_hdr *hdr; char signature[SMB2_HMACSHA256_SIZE]; struct kvec *iov; int n_vec = 1; hdr = ksmbd_resp_buf_curr(work); hdr->Flags |= SMB2_FLAGS_SIGNED; memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE); if (hdr->Command == SMB2_READ) { iov = &work->iov[work->iov_idx - 1]; n_vec++; } else { iov = &work->iov[work->iov_idx]; } if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec, signature)) memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE); } /** * smb3_check_sign_req() - handler for req packet sign processing * @work: smb work containing notify command buffer * * Return: 1 on success, 0 otherwise */ int smb3_check_sign_req(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; char *signing_key; struct
smb2_hdr *hdr; struct channel *chann; char signature_req[SMB2_SIGNATURE_SIZE]; char signature[SMB2_CMACAES_SIZE]; struct kvec iov[1]; size_t len; hdr = smb2_get_msg(work->request_buf); if (work->next_smb2_rcv_hdr_off) hdr = ksmbd_req_buf_next(work); if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off) len = get_rfc1002_len(work->request_buf); else if (hdr->NextCommand) len = le32_to_cpu(hdr->NextCommand); else len = get_rfc1002_len(work->request_buf) - work->next_smb2_rcv_hdr_off; if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) { signing_key = work->sess->smb3signingkey; } else { chann = lookup_chann_list(work->sess, conn); if (!chann) { return 0; } signing_key = chann->smb3signingkey; } if (!signing_key) { pr_err("SMB3 signing key is not generated\n"); return 0; } memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE); memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE); iov[0].iov_base = (char *)&hdr->ProtocolId; iov[0].iov_len = len; if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature)) return 0; if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) { pr_err("bad smb2 signature\n"); return 0; } return 1; } /** * smb3_set_sign_rsp() - handler for rsp packet sign processing * @work: smb work containing notify command buffer * */ void smb3_set_sign_rsp(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct smb2_hdr *hdr; struct channel *chann; char signature[SMB2_CMACAES_SIZE]; struct kvec *iov; int n_vec = 1; char *signing_key; hdr = ksmbd_resp_buf_curr(work); if (conn->binding == false && le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) { signing_key = work->sess->smb3signingkey; } else { chann = lookup_chann_list(work->sess, work->conn); if (!chann) { return; } signing_key = chann->smb3signingkey; } if (!signing_key) return; hdr->Flags |= SMB2_FLAGS_SIGNED; memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE); if (hdr->Command == SMB2_READ) { iov = &work->iov[work->iov_idx - 1]; n_vec++; } else { iov = &work->iov[work->iov_idx]; } if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature)) memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE); } /** * smb3_preauth_hash_rsp() - handler for computing preauth hash on response * @work: smb work containing response buffer * */ void smb3_preauth_hash_rsp(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; struct smb2_hdr *req, *rsp; if (conn->dialect != SMB311_PROT_ID) return; WORK_BUFFERS(work, req, rsp); if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE && conn->preauth_info) ksmbd_gen_preauth_integrity_hash(conn, work->response_buf, conn->preauth_info->Preauth_HashValue); if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) { __u8 *hash_value; if (conn->binding) { struct preauth_session *preauth_sess; preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id); if (!preauth_sess) return; hash_value = preauth_sess->Preauth_HashValue; } else { hash_value = sess->Preauth_HashValue; if (!hash_value) return; } ksmbd_gen_preauth_integrity_hash(conn, work->response_buf, hash_value); } } static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type) { struct smb2_transform_hdr *tr_hdr = tr_buf + 4; struct smb2_hdr *hdr = smb2_get_msg(old_buf); unsigned int orig_len = get_rfc1002_len(old_buf); /* tr_buf must be cleared by the caller */ tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM; tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len); tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED); if (cipher_type == 
SMB2_ENCRYPTION_AES128_GCM || cipher_type == SMB2_ENCRYPTION_AES256_GCM) get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE); else get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE); memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8); inc_rfc1001_len(tr_buf, sizeof(struct smb2_transform_hdr)); inc_rfc1001_len(tr_buf, orig_len); } int smb3_encrypt_resp(struct ksmbd_work *work) { struct kvec *iov = work->iov; int rc = -ENOMEM; void *tr_buf; tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL); if (!tr_buf) return rc; /* fill transform header */ fill_transform_hdr(tr_buf, work->response_buf, work->conn->cipher_type); iov[0].iov_base = tr_buf; iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4; work->tr_buf = tr_buf; return ksmbd_crypt_message(work, iov, work->iov_idx + 1, 1); } bool smb3_is_transform_hdr(void *buf) { struct smb2_transform_hdr *trhdr = smb2_get_msg(buf); return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM; } int smb3_decrypt_req(struct ksmbd_work *work) { struct ksmbd_session *sess; char *buf = work->request_buf; unsigned int pdu_length = get_rfc1002_len(buf); struct kvec iov[2]; int buf_data_size = pdu_length - sizeof(struct smb2_transform_hdr); struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf); int rc = 0; if (pdu_length < sizeof(struct smb2_transform_hdr) || buf_data_size < sizeof(struct smb2_hdr)) { pr_err("Transform message is too small (%u)\n", pdu_length); return -ECONNABORTED; } if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) { pr_err("Transform message is broken\n"); return -ECONNABORTED; } sess = ksmbd_session_lookup_all(work->conn, le64_to_cpu(tr_hdr->SessionId)); if (!sess) { pr_err("invalid session id(%llx) in transform header\n", le64_to_cpu(tr_hdr->SessionId)); return -ECONNABORTED; } iov[0].iov_base = buf; iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4; iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr) + 4; iov[1].iov_len = buf_data_size; rc = ksmbd_crypt_message(work, iov, 2, 0); if (rc) return rc; memmove(buf + 4, iov[1].iov_base, buf_data_size); *(__be32 *)buf = cpu_to_be32(buf_data_size); return rc; } bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; struct smb2_hdr *rsp = smb2_get_msg(work->response_buf); if (conn->dialect < SMB30_PROT_ID) return false; if (work->next_smb2_rcv_hdr_off) rsp = ksmbd_resp_buf_next(work); if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess->user && !user_guest(sess->user) && rsp->Status == STATUS_SUCCESS) return true; return false; }
linux-master
fs/smb/server/smb2pdu.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2019 Samsung Electronics Co., Ltd. */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "glob.h" #include "vfs_cache.h" #include "oplock.h" #include "vfs.h" #include "connection.h" #include "mgmt/tree_connect.h" #include "mgmt/user_session.h" #include "smb_common.h" #define S_DEL_PENDING 1 #define S_DEL_ON_CLS 2 #define S_DEL_ON_CLS_STREAM 8 static unsigned int inode_hash_mask __read_mostly; static unsigned int inode_hash_shift __read_mostly; static struct hlist_head *inode_hashtable __read_mostly; static DEFINE_RWLOCK(inode_hash_lock); static struct ksmbd_file_table global_ft; static atomic_long_t fd_limit; static struct kmem_cache *filp_cache; void ksmbd_set_fd_limit(unsigned long limit) { limit = min(limit, get_max_files()); atomic_long_set(&fd_limit, limit); } static bool fd_limit_depleted(void) { long v = atomic_long_dec_return(&fd_limit); if (v >= 0) return false; atomic_long_inc(&fd_limit); return true; } static void fd_limit_close(void) { atomic_long_inc(&fd_limit); } /* * INODE hash */ static unsigned long inode_hash(struct super_block *sb, unsigned long hashval) { unsigned long tmp; tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES; tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift); return tmp & inode_hash_mask; } static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode) { struct hlist_head *head = inode_hashtable + inode_hash(inode->i_sb, inode->i_ino); struct ksmbd_inode *ci = NULL, *ret_ci = NULL; hlist_for_each_entry(ci, head, m_hash) { if (ci->m_inode == inode) { if (atomic_inc_not_zero(&ci->m_count)) ret_ci = ci; break; } } return ret_ci; } static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp) { return __ksmbd_inode_lookup(file_inode(fp->filp)); } static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode) { struct ksmbd_inode *ci; read_lock(&inode_hash_lock); ci = __ksmbd_inode_lookup(inode); read_unlock(&inode_hash_lock); return ci; } int ksmbd_query_inode_status(struct inode *inode) { struct ksmbd_inode *ci; int ret = KSMBD_INODE_STATUS_UNKNOWN; read_lock(&inode_hash_lock); ci = __ksmbd_inode_lookup(inode); if (ci) { ret = KSMBD_INODE_STATUS_OK; if (ci->m_flags & S_DEL_PENDING) ret = KSMBD_INODE_STATUS_PENDING_DELETE; atomic_dec(&ci->m_count); } read_unlock(&inode_hash_lock); return ret; } bool ksmbd_inode_pending_delete(struct ksmbd_file *fp) { return (fp->f_ci->m_flags & S_DEL_PENDING); } void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp) { fp->f_ci->m_flags |= S_DEL_PENDING; } void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp) { fp->f_ci->m_flags &= ~S_DEL_PENDING; } void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp, int file_info) { if (ksmbd_stream_fd(fp)) { fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM; return; } fp->f_ci->m_flags |= S_DEL_ON_CLS; } static void ksmbd_inode_hash(struct ksmbd_inode *ci) { struct hlist_head *b = inode_hashtable + inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino); hlist_add_head(&ci->m_hash, b); } static void ksmbd_inode_unhash(struct ksmbd_inode *ci) { write_lock(&inode_hash_lock); hlist_del_init(&ci->m_hash); write_unlock(&inode_hash_lock); } static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp) { ci->m_inode = file_inode(fp->filp); atomic_set(&ci->m_count, 1); atomic_set(&ci->op_count, 0); atomic_set(&ci->sop_count, 0); 
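/* a fresh entry holds one reference and no oplocks; flags, lists and the lock are set up below */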
ci->m_flags = 0; ci->m_fattr = 0; INIT_LIST_HEAD(&ci->m_fp_list); INIT_LIST_HEAD(&ci->m_op_list); rwlock_init(&ci->m_lock); return 0; } static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp) { struct ksmbd_inode *ci, *tmpci; int rc; read_lock(&inode_hash_lock); ci = ksmbd_inode_lookup(fp); read_unlock(&inode_hash_lock); if (ci) return ci; ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL); if (!ci) return NULL; rc = ksmbd_inode_init(ci, fp); if (rc) { pr_err("inode initialization failed\n"); kfree(ci); return NULL; } write_lock(&inode_hash_lock); tmpci = ksmbd_inode_lookup(fp); if (!tmpci) { ksmbd_inode_hash(ci); } else { kfree(ci); ci = tmpci; } write_unlock(&inode_hash_lock); return ci; } static void ksmbd_inode_free(struct ksmbd_inode *ci) { ksmbd_inode_unhash(ci); kfree(ci); } static void ksmbd_inode_put(struct ksmbd_inode *ci) { if (atomic_dec_and_test(&ci->m_count)) ksmbd_inode_free(ci); } int __init ksmbd_inode_hash_init(void) { unsigned int loop; unsigned long numentries = 16384; unsigned long bucketsize = sizeof(struct hlist_head); unsigned long size; inode_hash_shift = ilog2(numentries); inode_hash_mask = (1 << inode_hash_shift) - 1; size = bucketsize << inode_hash_shift; /* init master fp hash table */ inode_hashtable = vmalloc(size); if (!inode_hashtable) return -ENOMEM; for (loop = 0; loop < (1U << inode_hash_shift); loop++) INIT_HLIST_HEAD(&inode_hashtable[loop]); return 0; } void ksmbd_release_inode_hash(void) { vfree(inode_hashtable); } static void __ksmbd_inode_close(struct ksmbd_file *fp) { struct ksmbd_inode *ci = fp->f_ci; int err; struct file *filp; filp = fp->filp; if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) { ci->m_flags &= ~S_DEL_ON_CLS_STREAM; err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp), &filp->f_path, fp->stream.name); if (err) pr_err("remove xattr failed : %s\n", fp->stream.name); } if (atomic_dec_and_test(&ci->m_count)) { write_lock(&ci->m_lock); if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) { ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING); write_unlock(&ci->m_lock); ksmbd_vfs_unlink(filp); write_lock(&ci->m_lock); } write_unlock(&ci->m_lock); ksmbd_inode_free(ci); } } static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp) { if (!has_file_id(fp->persistent_id)) return; write_lock(&global_ft.lock); idr_remove(global_ft.idr, fp->persistent_id); write_unlock(&global_ft.lock); } static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp) { if (!has_file_id(fp->volatile_id)) return; write_lock(&fp->f_ci->m_lock); list_del_init(&fp->node); write_unlock(&fp->f_ci->m_lock); write_lock(&ft->lock); idr_remove(ft->idr, fp->volatile_id); write_unlock(&ft->lock); } static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp) { struct file *filp; struct ksmbd_lock *smb_lock, *tmp_lock; fd_limit_close(); __ksmbd_remove_durable_fd(fp); __ksmbd_remove_fd(ft, fp); close_id_del_oplock(fp); filp = fp->filp; __ksmbd_inode_close(fp); if (!IS_ERR_OR_NULL(filp)) fput(filp); /* because the reference count of fp is 0, it is guaranteed that * there are no accesses to fp->lock_list.
*/ list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) { spin_lock(&fp->conn->llist_lock); list_del(&smb_lock->clist); spin_unlock(&fp->conn->llist_lock); list_del(&smb_lock->flist); locks_free_lock(smb_lock->fl); kfree(smb_lock); } if (ksmbd_stream_fd(fp)) kfree(fp->stream.name); kmem_cache_free(filp_cache, fp); } static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp) { if (!atomic_inc_not_zero(&fp->refcount)) return NULL; return fp; } static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft, u64 id) { struct ksmbd_file *fp; if (!has_file_id(id)) return NULL; read_lock(&ft->lock); fp = idr_find(ft->idr, id); if (fp) fp = ksmbd_fp_get(fp); read_unlock(&ft->lock); return fp; } static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp) { __ksmbd_close_fd(&work->sess->file_table, fp); atomic_dec(&work->conn->stats.open_files_count); } static void set_close_state_blocked_works(struct ksmbd_file *fp) { struct ksmbd_work *cancel_work; spin_lock(&fp->f_lock); list_for_each_entry(cancel_work, &fp->blocked_works, fp_entry) { cancel_work->state = KSMBD_WORK_CLOSED; cancel_work->cancel_fn(cancel_work->cancel_argv); } spin_unlock(&fp->f_lock); } int ksmbd_close_fd(struct ksmbd_work *work, u64 id) { struct ksmbd_file *fp; struct ksmbd_file_table *ft; if (!has_file_id(id)) return 0; ft = &work->sess->file_table; read_lock(&ft->lock); fp = idr_find(ft->idr, id); if (fp) { set_close_state_blocked_works(fp); if (!atomic_dec_and_test(&fp->refcount)) fp = NULL; } read_unlock(&ft->lock); if (!fp) return -EINVAL; __put_fd_final(work, fp); return 0; } void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp) { if (!fp) return; if (!atomic_dec_and_test(&fp->refcount)) return; __put_fd_final(work, fp); } static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp) { if (!fp) return false; if (fp->tcon != tcon) return false; return true; } struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id) { return __ksmbd_lookup_fd(&work->sess->file_table, id); } struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id) { struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id); if (__sanity_check(work->tcon, fp)) return fp; ksmbd_fd_put(work, fp); return NULL; } struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id, u64 pid) { struct ksmbd_file *fp; if (!has_file_id(id)) { id = work->compound_fid; pid = work->compound_pfid; } fp = __ksmbd_lookup_fd(&work->sess->file_table, id); if (!__sanity_check(work->tcon, fp)) { ksmbd_fd_put(work, fp); return NULL; } if (fp->persistent_id != pid) { ksmbd_fd_put(work, fp); return NULL; } return fp; } struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id) { return __ksmbd_lookup_fd(&global_ft, id); } struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid) { struct ksmbd_file *fp = NULL; unsigned int id; read_lock(&global_ft.lock); idr_for_each_entry(global_ft.idr, fp, id) { if (!memcmp(fp->create_guid, cguid, SMB2_CREATE_GUID_SIZE)) { fp = ksmbd_fp_get(fp); break; } } read_unlock(&global_ft.lock); return fp; } struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode) { struct ksmbd_file *lfp; struct ksmbd_inode *ci; ci = ksmbd_inode_lookup_by_vfsinode(inode); if (!ci) return NULL; read_lock(&ci->m_lock); list_for_each_entry(lfp, &ci->m_fp_list, node) { if (inode == file_inode(lfp->filp)) { atomic_dec(&ci->m_count); lfp = ksmbd_fp_get(lfp); read_unlock(&ci->m_lock); return lfp; } } atomic_dec(&ci->m_count); 
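/* no open file matched this inode; the lookup reference was dropped above */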
read_unlock(&ci->m_lock); return NULL; } #define OPEN_ID_TYPE_VOLATILE_ID (0) #define OPEN_ID_TYPE_PERSISTENT_ID (1) static void __open_id_set(struct ksmbd_file *fp, u64 id, int type) { if (type == OPEN_ID_TYPE_VOLATILE_ID) fp->volatile_id = id; if (type == OPEN_ID_TYPE_PERSISTENT_ID) fp->persistent_id = id; } static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp, int type) { u64 id = 0; int ret; if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) { __open_id_set(fp, KSMBD_NO_FID, type); return -EMFILE; } idr_preload(GFP_KERNEL); write_lock(&ft->lock); ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT); if (ret >= 0) { id = ret; ret = 0; } else { id = KSMBD_NO_FID; fd_limit_close(); } __open_id_set(fp, id, type); write_unlock(&ft->lock); idr_preload_end(); return ret; } unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp) { __open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID); return fp->persistent_id; } struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp) { struct ksmbd_file *fp; int ret; fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL); if (!fp) { pr_err("Failed to allocate memory\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&fp->blocked_works); INIT_LIST_HEAD(&fp->node); INIT_LIST_HEAD(&fp->lock_list); spin_lock_init(&fp->f_lock); atomic_set(&fp->refcount, 1); fp->filp = filp; fp->conn = work->conn; fp->tcon = work->tcon; fp->volatile_id = KSMBD_NO_FID; fp->persistent_id = KSMBD_NO_FID; fp->f_ci = ksmbd_inode_get(fp); if (!fp->f_ci) { ret = -ENOMEM; goto err_out; } ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID); if (ret) { ksmbd_inode_put(fp->f_ci); goto err_out; } atomic_inc(&work->conn->stats.open_files_count); return fp; err_out: kmem_cache_free(filp_cache, fp); return ERR_PTR(ret); } static int __close_file_table_ids(struct ksmbd_file_table *ft, struct ksmbd_tree_connect *tcon, bool (*skip)(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)) { unsigned int id; struct ksmbd_file *fp; int num = 0; idr_for_each_entry(ft->idr, fp, id) { if (skip(tcon, fp)) continue; set_close_state_blocked_works(fp); if (!atomic_dec_and_test(&fp->refcount)) continue; __ksmbd_close_fd(ft, fp); num++; } return num; } static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp) { return fp->tcon != tcon; } static bool session_fd_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp) { return false; } void ksmbd_close_tree_conn_fds(struct ksmbd_work *work) { int num = __close_file_table_ids(&work->sess->file_table, work->tcon, tree_conn_fd_check); atomic_sub(num, &work->conn->stats.open_files_count); } void ksmbd_close_session_fds(struct ksmbd_work *work) { int num = __close_file_table_ids(&work->sess->file_table, work->tcon, session_fd_check); atomic_sub(num, &work->conn->stats.open_files_count); } int ksmbd_init_global_file_table(void) { return ksmbd_init_file_table(&global_ft); } void ksmbd_free_global_file_table(void) { struct ksmbd_file *fp = NULL; unsigned int id; idr_for_each_entry(global_ft.idr, fp, id) { __ksmbd_remove_durable_fd(fp); kmem_cache_free(filp_cache, fp); } ksmbd_destroy_file_table(&global_ft); } int ksmbd_init_file_table(struct ksmbd_file_table *ft) { ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL); if (!ft->idr) return -ENOMEM; idr_init(ft->idr); rwlock_init(&ft->lock); return 0; } void ksmbd_destroy_file_table(struct ksmbd_file_table *ft) { if (!ft->idr) return; __close_file_table_ids(ft, NULL, session_fd_check); idr_destroy(ft->idr); kfree(ft->idr); 
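/* clearing ->idr marks the table destroyed, so a repeated destroy call is a no-op */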
ft->idr = NULL; } int ksmbd_init_file_cache(void) { filp_cache = kmem_cache_create("ksmbd_file_cache", sizeof(struct ksmbd_file), 0, SLAB_HWCACHE_ALIGN, NULL); if (!filp_cache) goto out; return 0; out: pr_err("failed to allocate file cache\n"); return -ENOMEM; } void ksmbd_exit_file_cache(void) { kmem_cache_destroy(filp_cache); }
linux-master
fs/smb/server/vfs_cache.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/kernel.h> #include <linux/xattr.h> #include <linux/fs.h> #include <linux/unicode.h> #include "misc.h" #include "smb_common.h" #include "connection.h" #include "vfs.h" #include "mgmt/share_config.h" /** * match_pattern() - compare a string with a pattern which might include * wildcard '*' and '?' * TODO : implement consideration about DOS_DOT, DOS_QM and DOS_STAR * * @str: string to compare with a pattern * @len: string length * @pattern: pattern string which might include wildcard '*' and '?' * * Return: true if the pattern matches the string, otherwise false */ int match_pattern(const char *str, size_t len, const char *pattern) { const char *s = str; const char *p = pattern; bool star = false; while (*s && len) { switch (*p) { case '?': s++; len--; p++; break; case '*': star = true; str = s; if (!*++p) return true; pattern = p; break; default: if (tolower(*s) == tolower(*p)) { s++; len--; p++; } else { if (!star) return false; str++; s = str; p = pattern; } break; } } if (*p == '*') ++p; return !*p; } /* * is_char_allowed() - check for valid character * @ch: input character to be checked * * Return: 1 if char is allowed, otherwise 0 */ static inline int is_char_allowed(char ch) { /* check for control chars, wildcards etc. */ if (!(ch & 0x80) && (ch <= 0x1f || ch == '?' || ch == '"' || ch == '<' || ch == '>' || ch == '|' || ch == '*')) return 0; return 1; } int ksmbd_validate_filename(char *filename) { while (*filename) { char c = *filename; filename++; if (!is_char_allowed(c)) { ksmbd_debug(VFS, "File name validation failed: 0x%x\n", c); return -ENOENT; } } return 0; } static int ksmbd_validate_stream_name(char *stream_name) { while (*stream_name) { char c = *stream_name; stream_name++; if (c == '/' || c == ':' || c == '\\') { pr_err("Stream name validation failed: %c\n", c); return -ENOENT; } } return 0; } int parse_stream_name(char *filename, char **stream_name, int *s_type) { char *stream_type; char *s_name; int rc = 0; s_name = filename; filename = strsep(&s_name, ":"); ksmbd_debug(SMB, "filename : %s, streams : %s\n", filename, s_name); if (strchr(s_name, ':')) { stream_type = s_name; s_name = strsep(&stream_type, ":"); rc = ksmbd_validate_stream_name(s_name); if (rc < 0) { rc = -ENOENT; goto out; } ksmbd_debug(SMB, "stream name : %s, stream type : %s\n", s_name, stream_type); if (!strncasecmp("$data", stream_type, 5)) *s_type = DATA_STREAM; else if (!strncasecmp("$index_allocation", stream_type, 17)) *s_type = DIR_STREAM; else rc = -ENOENT; } *stream_name = s_name; out: return rc; } /** * convert_to_nt_pathname() - extract and return windows path string * whose share directory prefix was removed from file path * @share: ksmbd_share_config pointer * @path: path to report * * Return : windows path string or error */ char *convert_to_nt_pathname(struct ksmbd_share_config *share, const struct path *path) { char *pathname, *ab_pathname, *nt_pathname; int share_path_len = share->path_sz; pathname = kmalloc(PATH_MAX, GFP_KERNEL); if (!pathname) return ERR_PTR(-EACCES); ab_pathname = d_path(path, pathname, PATH_MAX); if (IS_ERR(ab_pathname)) { nt_pathname = ERR_PTR(-EACCES); goto free_pathname; } if (strncmp(ab_pathname, share->path, share_path_len)) { nt_pathname = ERR_PTR(-EACCES); goto free_pathname; } nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL); if (!nt_pathname) { nt_pathname =
ERR_PTR(-ENOMEM); goto free_pathname; } if (ab_pathname[share_path_len] == '\0') strcpy(nt_pathname, "/"); strcat(nt_pathname, &ab_pathname[share_path_len]); ksmbd_conv_path_to_windows(nt_pathname); free_pathname: kfree(pathname); return nt_pathname; } int get_nlink(struct kstat *st) { int nlink; nlink = st->nlink; if (S_ISDIR(st->mode)) nlink--; return nlink; } void ksmbd_conv_path_to_unix(char *path) { strreplace(path, '\\', '/'); } void ksmbd_strip_last_slash(char *path) { int len = strlen(path); while (len && path[len - 1] == '/') { path[len - 1] = '\0'; len--; } } void ksmbd_conv_path_to_windows(char *path) { strreplace(path, '/', '\\'); } char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name) { char *cf_name; int cf_len; cf_name = kzalloc(KSMBD_REQ_MAX_SHARE_NAME, GFP_KERNEL); if (!cf_name) return ERR_PTR(-ENOMEM); if (IS_ENABLED(CONFIG_UNICODE) && um) { const struct qstr q_name = {.name = name, .len = strlen(name)}; cf_len = utf8_casefold(um, &q_name, cf_name, KSMBD_REQ_MAX_SHARE_NAME); if (cf_len < 0) goto out_ascii; return cf_name; } out_ascii: cf_len = strscpy(cf_name, name, KSMBD_REQ_MAX_SHARE_NAME); if (cf_len < 0) { kfree(cf_name); return ERR_PTR(-E2BIG); } for (; *cf_name; ++cf_name) *cf_name = isascii(*cf_name) ? tolower(*cf_name) : *cf_name; return cf_name - cf_len; } /** * ksmbd_extract_sharename() - get share name from tree connect request * @treename: buffer containing tree name and share name * * Return: share name on success, otherwise error */ char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename) { const char *name = treename, *pos = strrchr(name, '\\'); if (pos) name = (pos + 1); /* caller has to free the memory */ return ksmbd_casefold_sharename(um, name); } /** * convert_to_unix_name() - convert windows name to unix format * @share: ksmbd_share_config pointer * @name: file name that is relative to share * * Return: converted name on success, otherwise NULL */ char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name) { int no_slash = 0, name_len, path_len; char *new_name; if (name[0] == '/') name++; path_len = share->path_sz; name_len = strlen(name); new_name = kmalloc(path_len + name_len + 2, GFP_KERNEL); if (!new_name) return new_name; memcpy(new_name, share->path, path_len); if (new_name[path_len - 1] != '/') { new_name[path_len] = '/'; no_slash = 1; } memcpy(new_name + path_len + no_slash, name, name_len); path_len += name_len + no_slash; new_name[path_len] = 0x00; return new_name; } char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info, const struct nls_table *local_nls, int *conv_len) { char *conv; int sz = min(4 * d_info->name_len, PATH_MAX); if (!sz) return NULL; conv = kmalloc(sz, GFP_KERNEL); if (!conv) return NULL; /* XXX */ *conv_len = smbConvertToUTF16((__le16 *)conv, d_info->name, d_info->name_len, local_nls, 0); *conv_len *= 2; /* We allocate buffer twice bigger than needed. */ conv[*conv_len] = 0x00; conv[*conv_len + 1] = 0x00; return conv; } /* * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units) * into Unix UTC (based 1970-01-01, in seconds). */ struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc) { struct timespec64 ts; /* Subtract the NTFS time offset, then convert to 1s intervals. 
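* (NTFS_TIME_OFFSET is the 1601-to-1970 epoch difference expressed in 100ns units.)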
*/ s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; u64 abs_t; /* * Unfortunately we cannot use normal 64-bit division on 32-bit * architectures, and the alternative, do_div(), does not work with * negative numbers, so negative values have to be special-cased. */ if (t < 0) { abs_t = -t; ts.tv_nsec = do_div(abs_t, 10000000) * 100; ts.tv_nsec = -ts.tv_nsec; ts.tv_sec = -abs_t; } else { abs_t = t; ts.tv_nsec = do_div(abs_t, 10000000) * 100; ts.tv_sec = abs_t; } return ts; } /* Convert the Unix UTC into NT UTC. */ inline u64 ksmbd_UnixTimeToNT(struct timespec64 t) { /* Convert to 100ns intervals and then add the NTFS time offset. */ return (u64)t.tv_sec * 10000000 + t.tv_nsec / 100 + NTFS_TIME_OFFSET; } inline long long ksmbd_systime(void) { struct timespec64 ts; ktime_get_real_ts64(&ts); return ksmbd_UnixTimeToNT(ts); }
linux-master
fs/smb/server/misc.c
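For reference, a minimal userspace harness (not part of the kernel build; the demo_ name is hypothetical) that exercises the wildcard semantics documented for match_pattern() above. The matcher body is copied from the kernel function with libc headers substituted: '*' matches any run of characters, '?' matches exactly one, and comparison is case-insensitive.

#include <ctype.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int demo_match_pattern(const char *str, size_t len, const char *pattern)
{
	const char *s = str;
	const char *p = pattern;
	bool star = false;

	while (*s && len) {
		switch (*p) {
		case '?':	/* consume exactly one character */
			s++;
			len--;
			p++;
			break;
		case '*':	/* remember restart point for backtracking */
			star = true;
			str = s;
			if (!*++p)
				return true;
			pattern = p;
			break;
		default:
			if (tolower(*s) == tolower(*p)) {
				s++;
				len--;
				p++;
			} else {
				if (!star)
					return false;
				/* backtrack: retry the pattern one char later */
				str++;
				s = str;
				p = pattern;
			}
			break;
		}
	}

	if (*p == '*')
		++p;
	return !*p;
}

int main(void)
{
	printf("%d\n", demo_match_pattern("Report.TXT", 10, "*.txt")); /* 1 */
	printf("%d\n", demo_match_pattern("a.doc", 5, "?.doc"));       /* 1 */
	printf("%d\n", demo_match_pattern("a.doc", 5, "*.txt"));       /* 0 */
	return 0;
}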
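Likewise, a small userspace sketch of the NT-time arithmetic behind ksmbd_NTtimeToUnix() and ksmbd_UnixTimeToNT(). It assumes the usual definition of NTFS_TIME_OFFSET (the number of 100ns ticks between 1601-01-01 and 1970-01-01) and, unlike the kernel code, handles only non-negative times, so it needs neither do_div() nor the negative special case; the demo_ names are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* 100ns ticks from 1601-01-01 to 1970-01-01: 369 years incl. 89 leap days. */
#define DEMO_NTFS_TIME_OFFSET \
	((uint64_t)(369 * 365 + 89) * 24 * 3600 * 10000000ULL)

static uint64_t demo_unix_to_nt(struct timespec t)
{
	/* Mirrors ksmbd_UnixTimeToNT(): 100ns ticks since 1601-01-01. */
	return (uint64_t)t.tv_sec * 10000000 + t.tv_nsec / 100 +
	       DEMO_NTFS_TIME_OFFSET;
}

static struct timespec demo_nt_to_unix(uint64_t ntutc)
{
	/* Positive-time-only counterpart of ksmbd_NTtimeToUnix(). */
	uint64_t t = ntutc - DEMO_NTFS_TIME_OFFSET;
	struct timespec ts;

	ts.tv_sec = t / 10000000;
	ts.tv_nsec = (t % 10000000) * 100;
	return ts;
}

int main(void)
{
	struct timespec in = { .tv_sec = 1700000000, .tv_nsec = 123456700 };
	struct timespec out = demo_nt_to_unix(demo_unix_to_nt(in));

	/* Round-trips exactly, since 100ns granularity is preserved. */
	printf("sec=%lld nsec=%ld\n", (long long)out.tv_sec, out.tv_nsec);
	return 0;
}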
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/jhash.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/hashtable.h> #include <net/net_namespace.h> #include <net/genetlink.h> #include <linux/socket.h> #include <linux/workqueue.h> #include "vfs_cache.h" #include "transport_ipc.h" #include "server.h" #include "smb_common.h" #include "mgmt/user_config.h" #include "mgmt/share_config.h" #include "mgmt/user_session.h" #include "mgmt/tree_connect.h" #include "mgmt/ksmbd_ida.h" #include "connection.h" #include "transport_tcp.h" #include "transport_rdma.h" #define IPC_WAIT_TIMEOUT (2 * HZ) #define IPC_MSG_HASH_BITS 3 static DEFINE_HASHTABLE(ipc_msg_table, IPC_MSG_HASH_BITS); static DECLARE_RWSEM(ipc_msg_table_lock); static DEFINE_MUTEX(startup_lock); static DEFINE_IDA(ipc_ida); static unsigned int ksmbd_tools_pid; static bool ksmbd_ipc_validate_version(struct genl_info *m) { if (m->genlhdr->version != KSMBD_GENL_VERSION) { pr_err("%s. ksmbd: %d, kernel module: %d. %s.\n", "Daemon and kernel module version mismatch", m->genlhdr->version, KSMBD_GENL_VERSION, "User-space ksmbd should terminate"); return false; } return true; } struct ksmbd_ipc_msg { unsigned int type; unsigned int sz; unsigned char payload[]; }; struct ipc_msg_table_entry { unsigned int handle; unsigned int type; wait_queue_head_t wait; struct hlist_node ipc_table_hlist; void *response; }; static struct delayed_work ipc_timer_work; static int handle_startup_event(struct sk_buff *skb, struct genl_info *info); static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info); static int handle_generic_event(struct sk_buff *skb, struct genl_info *info); static int ksmbd_ipc_heartbeat_request(void); static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = { [KSMBD_EVENT_UNSPEC] = { .len = 0, }, [KSMBD_EVENT_HEARTBEAT_REQUEST] = { .len = sizeof(struct ksmbd_heartbeat), }, [KSMBD_EVENT_STARTING_UP] = { .len = sizeof(struct ksmbd_startup_request), }, [KSMBD_EVENT_SHUTTING_DOWN] = { .len = sizeof(struct ksmbd_shutdown_request), }, [KSMBD_EVENT_LOGIN_REQUEST] = { .len = sizeof(struct ksmbd_login_request), }, [KSMBD_EVENT_LOGIN_RESPONSE] = { .len = sizeof(struct ksmbd_login_response), }, [KSMBD_EVENT_SHARE_CONFIG_REQUEST] = { .len = sizeof(struct ksmbd_share_config_request), }, [KSMBD_EVENT_SHARE_CONFIG_RESPONSE] = { .len = sizeof(struct ksmbd_share_config_response), }, [KSMBD_EVENT_TREE_CONNECT_REQUEST] = { .len = sizeof(struct ksmbd_tree_connect_request), }, [KSMBD_EVENT_TREE_CONNECT_RESPONSE] = { .len = sizeof(struct ksmbd_tree_connect_response), }, [KSMBD_EVENT_TREE_DISCONNECT_REQUEST] = { .len = sizeof(struct ksmbd_tree_disconnect_request), }, [KSMBD_EVENT_LOGOUT_REQUEST] = { .len = sizeof(struct ksmbd_logout_request), }, [KSMBD_EVENT_RPC_REQUEST] = { }, [KSMBD_EVENT_RPC_RESPONSE] = { }, [KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST] = { }, [KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE] = { }, }; static struct genl_ops ksmbd_genl_ops[] = { { .cmd = KSMBD_EVENT_UNSPEC, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_HEARTBEAT_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_STARTING_UP, .doit = handle_startup_event, }, { .cmd = KSMBD_EVENT_SHUTTING_DOWN, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_LOGIN_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_LOGIN_RESPONSE, .doit = handle_generic_event, }, { .cmd = KSMBD_EVENT_SHARE_CONFIG_REQUEST, 
.doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_SHARE_CONFIG_RESPONSE, .doit = handle_generic_event, }, { .cmd = KSMBD_EVENT_TREE_CONNECT_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_TREE_CONNECT_RESPONSE, .doit = handle_generic_event, }, { .cmd = KSMBD_EVENT_TREE_DISCONNECT_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_LOGOUT_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_RPC_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_RPC_RESPONSE, .doit = handle_generic_event, }, { .cmd = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST, .doit = handle_unsupported_event, }, { .cmd = KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE, .doit = handle_generic_event, }, }; static struct genl_family ksmbd_genl_family = { .name = KSMBD_GENL_NAME, .version = KSMBD_GENL_VERSION, .hdrsize = 0, .maxattr = KSMBD_EVENT_MAX, .netnsok = true, .module = THIS_MODULE, .ops = ksmbd_genl_ops, .n_ops = ARRAY_SIZE(ksmbd_genl_ops), .resv_start_op = KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE + 1, }; static void ksmbd_nl_init_fixup(void) { int i; for (i = 0; i < ARRAY_SIZE(ksmbd_genl_ops); i++) ksmbd_genl_ops[i].validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP; ksmbd_genl_family.policy = ksmbd_nl_policy; } static int rpc_context_flags(struct ksmbd_session *sess) { if (user_guest(sess->user)) return KSMBD_RPC_RESTRICTED_CONTEXT; return 0; } static void ipc_update_last_active(void) { if (server_conf.ipc_timeout) server_conf.ipc_last_active = jiffies; } static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz) { struct ksmbd_ipc_msg *msg; size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg); msg = kvzalloc(msg_sz, GFP_KERNEL); if (msg) msg->sz = sz; return msg; } static void ipc_msg_free(struct ksmbd_ipc_msg *msg) { kvfree(msg); } static void ipc_msg_handle_free(int handle) { if (handle >= 0) ksmbd_release_id(&ipc_ida, handle); } static int handle_response(int type, void *payload, size_t sz) { unsigned int handle = *(unsigned int *)payload; struct ipc_msg_table_entry *entry; int ret = 0; ipc_update_last_active(); down_read(&ipc_msg_table_lock); hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) { if (handle != entry->handle) continue; entry->response = NULL; /* * Response message type value should be equal to * request message type + 1. */ if (entry->type + 1 != type) { pr_err("Waiting for IPC type %d, got %d. 
Ignore.\n", entry->type + 1, type); } entry->response = kvzalloc(sz, GFP_KERNEL); if (!entry->response) { ret = -ENOMEM; break; } memcpy(entry->response, payload, sz); wake_up_interruptible(&entry->wait); ret = 0; break; } up_read(&ipc_msg_table_lock); return ret; } static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) { int ret; ksmbd_set_fd_limit(req->file_max); server_conf.flags = req->flags; server_conf.signing = req->signing; server_conf.tcp_port = req->tcp_port; server_conf.ipc_timeout = req->ipc_timeout * HZ; server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL; server_conf.share_fake_fscaps = req->share_fake_fscaps; ksmbd_init_domain(req->sub_auth); if (req->smb2_max_read) init_smb2_max_read_size(req->smb2_max_read); if (req->smb2_max_write) init_smb2_max_write_size(req->smb2_max_write); if (req->smb2_max_trans) init_smb2_max_trans_size(req->smb2_max_trans); if (req->smb2_max_credits) init_smb2_max_credits(req->smb2_max_credits); if (req->smbd_max_io_size) init_smbd_max_io_size(req->smbd_max_io_size); if (req->max_connections) server_conf.max_connections = req->max_connections; ret = ksmbd_set_netbios_name(req->netbios_name); ret |= ksmbd_set_server_string(req->server_string); ret |= ksmbd_set_work_group(req->work_group); ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req), req->ifc_list_sz); if (ret) { pr_err("Server configuration error: %s %s %s\n", req->netbios_name, req->server_string, req->work_group); return ret; } if (req->min_prot[0]) { ret = ksmbd_lookup_protocol_idx(req->min_prot); if (ret >= 0) server_conf.min_protocol = ret; } if (req->max_prot[0]) { ret = ksmbd_lookup_protocol_idx(req->max_prot); if (ret >= 0) server_conf.max_protocol = ret; } if (server_conf.ipc_timeout) schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout); return 0; } static int handle_startup_event(struct sk_buff *skb, struct genl_info *info) { int ret = 0; #ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; #endif if (!ksmbd_ipc_validate_version(info)) return -EINVAL; if (!info->attrs[KSMBD_EVENT_STARTING_UP]) return -EINVAL; mutex_lock(&startup_lock); if (!ksmbd_server_configurable()) { mutex_unlock(&startup_lock); pr_err("Server reset is in progress, can't start daemon\n"); return -EINVAL; } if (ksmbd_tools_pid) { if (ksmbd_ipc_heartbeat_request() == 0) { ret = -EINVAL; goto out; } pr_err("Reconnect to a new user space daemon\n"); } else { struct ksmbd_startup_request *req; req = nla_data(info->attrs[info->genlhdr->cmd]); ret = ipc_server_config_on_startup(req); if (ret) goto out; server_queue_ctrl_init_work(); } ksmbd_tools_pid = info->snd_portid; ipc_update_last_active(); out: mutex_unlock(&startup_lock); return ret; } static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info) { pr_err("Unknown IPC event: %d, ignore.\n", info->genlhdr->cmd); return -EINVAL; } static int handle_generic_event(struct sk_buff *skb, struct genl_info *info) { void *payload; int sz; int type = info->genlhdr->cmd; #ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; #endif if (type >= KSMBD_EVENT_MAX) { WARN_ON(1); return -EINVAL; } if (!ksmbd_ipc_validate_version(info)) return -EINVAL; if (!info->attrs[type]) return -EINVAL; payload = nla_data(info->attrs[info->genlhdr->cmd]); sz = nla_len(info->attrs[info->genlhdr->cmd]); return handle_response(type, payload, sz); } static int ipc_msg_send(struct ksmbd_ipc_msg *msg) { struct genlmsghdr *nlh; 
struct sk_buff *skb; int ret = -EINVAL; if (!ksmbd_tools_pid) return ret; skb = genlmsg_new(msg->sz, GFP_KERNEL); if (!skb) return -ENOMEM; nlh = genlmsg_put(skb, 0, 0, &ksmbd_genl_family, 0, msg->type); if (!nlh) goto out; ret = nla_put(skb, msg->type, msg->sz, msg->payload); if (ret) { genlmsg_cancel(skb, nlh); goto out; } genlmsg_end(skb, nlh); ret = genlmsg_unicast(&init_net, skb, ksmbd_tools_pid); if (!ret) ipc_update_last_active(); return ret; out: nlmsg_free(skb); return ret; } static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle) { struct ipc_msg_table_entry entry; int ret; if ((int)handle < 0) return NULL; entry.type = msg->type; entry.response = NULL; init_waitqueue_head(&entry.wait); down_write(&ipc_msg_table_lock); entry.handle = handle; hash_add(ipc_msg_table, &entry.ipc_table_hlist, entry.handle); up_write(&ipc_msg_table_lock); ret = ipc_msg_send(msg); if (ret) goto out; ret = wait_event_interruptible_timeout(entry.wait, entry.response != NULL, IPC_WAIT_TIMEOUT); out: down_write(&ipc_msg_table_lock); hash_del(&entry.ipc_table_hlist); up_write(&ipc_msg_table_lock); return entry.response; } static int ksmbd_ipc_heartbeat_request(void) { struct ksmbd_ipc_msg *msg; int ret; msg = ipc_msg_alloc(sizeof(struct ksmbd_heartbeat)); if (!msg) return -EINVAL; msg->type = KSMBD_EVENT_HEARTBEAT_REQUEST; ret = ipc_msg_send(msg); ipc_msg_free(msg); return ret; } struct ksmbd_login_response *ksmbd_ipc_login_request(const char *account) { struct ksmbd_ipc_msg *msg; struct ksmbd_login_request *req; struct ksmbd_login_response *resp; if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ) return NULL; msg = ipc_msg_alloc(sizeof(struct ksmbd_login_request)); if (!msg) return NULL; msg->type = KSMBD_EVENT_LOGIN_REQUEST; req = (struct ksmbd_login_request *)msg->payload; req->handle = ksmbd_acquire_id(&ipc_ida); strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_handle_free(req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_spnego_authen_response * ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len) { struct ksmbd_ipc_msg *msg; struct ksmbd_spnego_authen_request *req; struct ksmbd_spnego_authen_response *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) + blob_len + 1); if (!msg) return NULL; msg->type = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST; req = (struct ksmbd_spnego_authen_request *)msg->payload; req->handle = ksmbd_acquire_id(&ipc_ida); req->spnego_blob_len = blob_len; memcpy(req->spnego_blob, spnego_blob, blob_len); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_handle_free(req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_tree_connect_response * ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess, struct ksmbd_share_config *share, struct ksmbd_tree_connect *tree_conn, struct sockaddr *peer_addr) { struct ksmbd_ipc_msg *msg; struct ksmbd_tree_connect_request *req; struct ksmbd_tree_connect_response *resp; if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ) return NULL; if (strlen(share->name) >= KSMBD_REQ_MAX_SHARE_NAME) return NULL; msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_connect_request)); if (!msg) return NULL; msg->type = KSMBD_EVENT_TREE_CONNECT_REQUEST; req = (struct ksmbd_tree_connect_request *)msg->payload; req->handle = ksmbd_acquire_id(&ipc_ida); req->account_flags = sess->user->flags; req->session_id = sess->id; req->connect_id = tree_conn->id; strscpy(req->account, user_name(sess->user), 
KSMBD_REQ_MAX_ACCOUNT_NAME_SZ); strscpy(req->share, share->name, KSMBD_REQ_MAX_SHARE_NAME); snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr); if (peer_addr->sa_family == AF_INET6) req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_IPV6; if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2)) req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_SMB2; resp = ipc_msg_send_request(msg, req->handle); ipc_msg_handle_free(req->handle); ipc_msg_free(msg); return resp; } int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id, unsigned long long connect_id) { struct ksmbd_ipc_msg *msg; struct ksmbd_tree_disconnect_request *req; int ret; msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_disconnect_request)); if (!msg) return -ENOMEM; msg->type = KSMBD_EVENT_TREE_DISCONNECT_REQUEST; req = (struct ksmbd_tree_disconnect_request *)msg->payload; req->session_id = session_id; req->connect_id = connect_id; ret = ipc_msg_send(msg); ipc_msg_free(msg); return ret; } int ksmbd_ipc_logout_request(const char *account, int flags) { struct ksmbd_ipc_msg *msg; struct ksmbd_logout_request *req; int ret; if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ) return -EINVAL; msg = ipc_msg_alloc(sizeof(struct ksmbd_logout_request)); if (!msg) return -ENOMEM; msg->type = KSMBD_EVENT_LOGOUT_REQUEST; req = (struct ksmbd_logout_request *)msg->payload; req->account_flags = flags; strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ); ret = ipc_msg_send(msg); ipc_msg_free(msg); return ret; } struct ksmbd_share_config_response * ksmbd_ipc_share_config_request(const char *name) { struct ksmbd_ipc_msg *msg; struct ksmbd_share_config_request *req; struct ksmbd_share_config_response *resp; if (strlen(name) >= KSMBD_REQ_MAX_SHARE_NAME) return NULL; msg = ipc_msg_alloc(sizeof(struct ksmbd_share_config_request)); if (!msg) return NULL; msg->type = KSMBD_EVENT_SHARE_CONFIG_REQUEST; req = (struct ksmbd_share_config_request *)msg->payload; req->handle = ksmbd_acquire_id(&ipc_ida); strscpy(req->share_name, name, KSMBD_REQ_MAX_SHARE_NAME); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_handle_free(req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command)); if (!msg) return NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; req->flags = ksmbd_session_rpc_method(sess, handle); req->flags |= KSMBD_RPC_OPEN_METHOD; req->payload_sz = 0; resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command)); if (!msg) return NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; req->flags = ksmbd_session_rpc_method(sess, handle); req->flags |= KSMBD_RPC_CLOSE_METHOD; req->payload_sz = 0; resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle, void *payload, size_t payload_sz) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return 
NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; req->flags = ksmbd_session_rpc_method(sess, handle); req->flags |= rpc_context_flags(sess); req->flags |= KSMBD_RPC_WRITE_METHOD; req->payload_sz = payload_sz; memcpy(req->payload, payload, payload_sz); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command)); if (!msg) return NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; req->flags = ksmbd_session_rpc_method(sess, handle); req->flags |= rpc_context_flags(sess); req->flags |= KSMBD_RPC_READ_METHOD; req->payload_sz = 0; resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle, void *payload, size_t payload_sz) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; req->flags = ksmbd_session_rpc_method(sess, handle); req->flags |= rpc_context_flags(sess); req->flags |= KSMBD_RPC_IOCTL_METHOD; req->payload_sz = payload_sz; memcpy(req->payload, payload, payload_sz); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); return resp; } struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload, size_t payload_sz) { struct ksmbd_ipc_msg *msg; struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = ksmbd_acquire_id(&ipc_ida); req->flags = rpc_context_flags(sess); req->flags |= KSMBD_RPC_RAP_METHOD; req->payload_sz = payload_sz; memcpy(req->payload, payload, payload_sz); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_handle_free(req->handle); ipc_msg_free(msg); return resp; } static int __ipc_heartbeat(void) { unsigned long delta; if (!ksmbd_server_running()) return 0; if (time_after(jiffies, server_conf.ipc_last_active)) { delta = (jiffies - server_conf.ipc_last_active); } else { ipc_update_last_active(); schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout); return 0; } if (delta < server_conf.ipc_timeout) { schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout - delta); return 0; } if (ksmbd_ipc_heartbeat_request() == 0) { schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout); return 0; } mutex_lock(&startup_lock); WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING); server_conf.ipc_last_active = 0; ksmbd_tools_pid = 0; pr_err("No IPC daemon response for %lus\n", delta / HZ); mutex_unlock(&startup_lock); return -EINVAL; } static void ipc_timer_heartbeat(struct work_struct *w) { if (__ipc_heartbeat()) server_queue_ctrl_reset_work(); } int ksmbd_ipc_id_alloc(void) { return ksmbd_acquire_id(&ipc_ida); } void ksmbd_rpc_id_free(int handle) { ksmbd_release_id(&ipc_ida, handle); } void ksmbd_ipc_release(void) { cancel_delayed_work_sync(&ipc_timer_work); genl_unregister_family(&ksmbd_genl_family); } void 
ksmbd_ipc_soft_reset(void) { mutex_lock(&startup_lock); ksmbd_tools_pid = 0; cancel_delayed_work_sync(&ipc_timer_work); mutex_unlock(&startup_lock); } int ksmbd_ipc_init(void) { int ret = 0; ksmbd_nl_init_fixup(); INIT_DELAYED_WORK(&ipc_timer_work, ipc_timer_heartbeat); ret = genl_register_family(&ksmbd_genl_family); if (ret) { pr_err("Failed to register KSMBD netlink interface %d\n", ret); cancel_delayed_work_sync(&ipc_timer_work); } return ret; }
linux-master
fs/smb/server/transport_ipc.c
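The IPC path above is a classic request/response correlation pattern: ipc_msg_send_request() registers a per-handle entry in ipc_msg_table, sends the netlink message, and sleeps on entry->wait until handle_response() copies the daemon's reply into entry->response and wakes the waiter. A hedged userspace sketch of that handshake using pthreads follows (demo_ names hypothetical; the kernel side uses a hash table keyed by handle and wait_event_interruptible_timeout() rather than a condition variable):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_entry {
	unsigned int handle;
	char *response;		/* filled in by the responder thread */
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

static void *demo_daemon(void *arg)
{
	struct demo_entry *e = arg;

	/* Plays the role of handle_response(): publish reply, wake waiter. */
	pthread_mutex_lock(&e->lock);
	e->response = strdup("share config reply");
	pthread_cond_signal(&e->wait);
	pthread_mutex_unlock(&e->lock);
	return NULL;
}

int main(void)
{
	struct demo_entry e = { .handle = 1, .response = NULL,
				.lock = PTHREAD_MUTEX_INITIALIZER,
				.wait = PTHREAD_COND_INITIALIZER };
	pthread_t tid;

	pthread_create(&tid, NULL, demo_daemon, &e);

	/* Plays ipc_msg_send_request(): park until the reply is published
	 * (the kernel additionally bounds this wait with IPC_WAIT_TIMEOUT). */
	pthread_mutex_lock(&e.lock);
	while (!e.response)
		pthread_cond_wait(&e.wait, &e.lock);
	pthread_mutex_unlock(&e.lock);

	printf("handle %u: %s\n", e.handle, e.response);
	pthread_join(tid, NULL);
	free(e.response);
	return 0;
}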
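On the allocation side, struct ksmbd_ipc_msg keeps its payload in a C99 flexible array member so the header and a variably sized payload live in one block, as ipc_msg_alloc() shows. A tiny userspace illustration of the same layout (demo_ names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_ipc_msg {
	unsigned int type;
	unsigned int sz;
	unsigned char payload[];	/* flexible array member */
};

static struct demo_ipc_msg *demo_msg_alloc(size_t sz)
{
	/* Mirrors ipc_msg_alloc(): header plus payload, zeroed, one block. */
	struct demo_ipc_msg *msg = calloc(1, sizeof(*msg) + sz);

	if (msg)
		msg->sz = sz;
	return msg;
}

int main(void)
{
	struct demo_ipc_msg *msg = demo_msg_alloc(16);

	if (!msg)
		return 1;
	msg->type = 1;	/* e.g. a request type id */
	memcpy(msg->payload, "hello", 6);
	printf("type=%u sz=%u payload=%s\n",
	       msg->type, msg->sz, (char *)msg->payload);
	free(msg);
	return 0;
}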
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2017, Microsoft Corporation. * Copyright (C) 2018, LG Electronics. * * Author(s): Long Li <[email protected]>, * Hyunchul Lee <[email protected]> */ #define SUBMOD_NAME "smb_direct" #include <linux/kthread.h> #include <linux/list.h> #include <linux/mempool.h> #include <linux/highmem.h> #include <linux/scatterlist.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <rdma/rw.h> #include "glob.h" #include "connection.h" #include "smb_common.h" #include "smbstatus.h" #include "transport_rdma.h" #define SMB_DIRECT_PORT_IWARP 5445 #define SMB_DIRECT_PORT_INFINIBAND 445 #define SMB_DIRECT_VERSION_LE cpu_to_le16(0x0100) /* SMB_DIRECT negotiation timeout in seconds */ #define SMB_DIRECT_NEGOTIATE_TIMEOUT 120 #define SMB_DIRECT_MAX_SEND_SGES 6 #define SMB_DIRECT_MAX_RECV_SGES 1 /* * Default maximum number of RDMA read/write outstanding on this connection * This value is possibly decreased during QP creation on hardware limit */ #define SMB_DIRECT_CM_INITIATOR_DEPTH 8 /* Maximum number of retries on data transfer operations */ #define SMB_DIRECT_CM_RETRY 6 /* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */ #define SMB_DIRECT_CM_RNR_RETRY 0 /* * User configurable initial values per SMB_DIRECT transport connection * as defined in [MS-SMBD] 3.1.1.1 * Those may change after a SMB_DIRECT negotiation */ /* Set 445 port to SMB Direct port by default */ static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND; /* The local peer's maximum number of credits to grant to the peer */ static int smb_direct_receive_credit_max = 255; /* The remote peer's credit request of local peer */ static int smb_direct_send_credit_target = 255; /* The maximum single message size can be sent to remote peer */ static int smb_direct_max_send_size = 1364; /* The maximum fragmented upper-layer payload receive size supported */ static int smb_direct_max_fragmented_recv_size = 1024 * 1024; /* The maximum single-message size which can be received */ static int smb_direct_max_receive_size = 1364; static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE; static LIST_HEAD(smb_direct_device_list); static DEFINE_RWLOCK(smb_direct_device_lock); struct smb_direct_device { struct ib_device *ib_dev; struct list_head list; }; static struct smb_direct_listener { struct rdma_cm_id *cm_id; } smb_direct_listener; static struct workqueue_struct *smb_direct_wq; enum smb_direct_status { SMB_DIRECT_CS_NEW = 0, SMB_DIRECT_CS_CONNECTED, SMB_DIRECT_CS_DISCONNECTING, SMB_DIRECT_CS_DISCONNECTED, }; struct smb_direct_transport { struct ksmbd_transport transport; enum smb_direct_status status; bool full_packet_received; wait_queue_head_t wait_status; struct rdma_cm_id *cm_id; struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_pd *pd; struct ib_qp *qp; int max_send_size; int max_recv_size; int max_fragmented_send_size; int max_fragmented_recv_size; int max_rdma_rw_size; spinlock_t reassembly_queue_lock; struct list_head reassembly_queue; int reassembly_data_length; int reassembly_queue_length; int first_entry_offset; wait_queue_head_t wait_reassembly_queue; spinlock_t receive_credit_lock; int recv_credits; int count_avail_recvmsg; int recv_credit_max; int recv_credit_target; spinlock_t recvmsg_queue_lock; struct list_head recvmsg_queue; spinlock_t empty_recvmsg_queue_lock; struct list_head empty_recvmsg_queue; int send_credit_target; atomic_t send_credits; spinlock_t lock_new_recv_credits; int new_recv_credits; int max_rw_credits; int 
pages_per_rw_credit; atomic_t rw_credits; wait_queue_head_t wait_send_credits; wait_queue_head_t wait_rw_credits; mempool_t *sendmsg_mempool; struct kmem_cache *sendmsg_cache; mempool_t *recvmsg_mempool; struct kmem_cache *recvmsg_cache; wait_queue_head_t wait_send_pending; atomic_t send_pending; struct delayed_work post_recv_credits_work; struct work_struct send_immediate_work; struct work_struct disconnect_work; bool negotiation_requested; }; #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport)) enum { SMB_DIRECT_MSG_NEGOTIATE_REQ = 0, SMB_DIRECT_MSG_DATA_TRANSFER }; static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops; struct smb_direct_send_ctx { struct list_head msg_list; int wr_cnt; bool need_invalidate_rkey; unsigned int remote_key; }; struct smb_direct_sendmsg { struct smb_direct_transport *transport; struct ib_send_wr wr; struct list_head list; int num_sge; struct ib_sge sge[SMB_DIRECT_MAX_SEND_SGES]; struct ib_cqe cqe; u8 packet[]; }; struct smb_direct_recvmsg { struct smb_direct_transport *transport; struct list_head list; int type; struct ib_sge sge; struct ib_cqe cqe; bool first_segment; u8 packet[]; }; struct smb_direct_rdma_rw_msg { struct smb_direct_transport *t; struct ib_cqe cqe; int status; struct completion *completion; struct list_head list; struct rdma_rw_ctx rw_ctx; struct sg_table sgt; struct scatterlist sg_list[]; }; void init_smbd_max_io_size(unsigned int sz) { sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE); smb_direct_max_read_write_size = sz; } unsigned int get_smbd_max_read_write_size(void) { return smb_direct_max_read_write_size; } static inline int get_buf_page_count(void *buf, int size) { return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) - (uintptr_t)buf / PAGE_SIZE; } static void smb_direct_destroy_pools(struct smb_direct_transport *transport); static void smb_direct_post_recv_credits(struct work_struct *work); static int smb_direct_post_send_data(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx, struct kvec *iov, int niov, int remaining_data_length); static inline struct smb_direct_transport * smb_trans_direct_transfort(struct ksmbd_transport *t) { return container_of(t, struct smb_direct_transport, transport); } static inline void *smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg) { return (void *)recvmsg->packet; } static inline bool is_receive_credit_post_required(int receive_credits, int avail_recvmsg_count) { return receive_credits <= (smb_direct_receive_credit_max >> 3) && avail_recvmsg_count >= (receive_credits >> 2); } static struct smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) { struct smb_direct_recvmsg *recvmsg = NULL; spin_lock(&t->recvmsg_queue_lock); if (!list_empty(&t->recvmsg_queue)) { recvmsg = list_first_entry(&t->recvmsg_queue, struct smb_direct_recvmsg, list); list_del(&recvmsg->list); } spin_unlock(&t->recvmsg_queue_lock); return recvmsg; } static void put_recvmsg(struct smb_direct_transport *t, struct smb_direct_recvmsg *recvmsg) { ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, recvmsg->sge.length, DMA_FROM_DEVICE); spin_lock(&t->recvmsg_queue_lock); list_add(&recvmsg->list, &t->recvmsg_queue); spin_unlock(&t->recvmsg_queue_lock); } static struct smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) { struct smb_direct_recvmsg *recvmsg = NULL; spin_lock(&t->empty_recvmsg_queue_lock); if (!list_empty(&t->empty_recvmsg_queue)) { recvmsg = list_first_entry(&t->empty_recvmsg_queue, struct smb_direct_recvmsg, list); 
list_del(&recvmsg->list); } spin_unlock(&t->empty_recvmsg_queue_lock); return recvmsg; } static void put_empty_recvmsg(struct smb_direct_transport *t, struct smb_direct_recvmsg *recvmsg) { ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, recvmsg->sge.length, DMA_FROM_DEVICE); spin_lock(&t->empty_recvmsg_queue_lock); list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); spin_unlock(&t->empty_recvmsg_queue_lock); } static void enqueue_reassembly(struct smb_direct_transport *t, struct smb_direct_recvmsg *recvmsg, int data_length) { spin_lock(&t->reassembly_queue_lock); list_add_tail(&recvmsg->list, &t->reassembly_queue); t->reassembly_queue_length++; /* * Make sure reassembly_data_length is updated after list and * reassembly_queue_length are updated. On the dequeue side * reassembly_data_length is checked without a lock to determine * if reassembly_queue_length and list is up to date */ virt_wmb(); t->reassembly_data_length += data_length; spin_unlock(&t->reassembly_queue_lock); } static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) { if (!list_empty(&t->reassembly_queue)) return list_first_entry(&t->reassembly_queue, struct smb_direct_recvmsg, list); else return NULL; } static void smb_direct_disconnect_rdma_work(struct work_struct *work) { struct smb_direct_transport *t = container_of(work, struct smb_direct_transport, disconnect_work); if (t->status == SMB_DIRECT_CS_CONNECTED) { t->status = SMB_DIRECT_CS_DISCONNECTING; rdma_disconnect(t->cm_id); } } static void smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) { if (t->status == SMB_DIRECT_CS_CONNECTED) queue_work(smb_direct_wq, &t->disconnect_work); } static void smb_direct_send_immediate_work(struct work_struct *work) { struct smb_direct_transport *t = container_of(work, struct smb_direct_transport, send_immediate_work); if (t->status != SMB_DIRECT_CS_CONNECTED) return; smb_direct_post_send_data(t, NULL, NULL, 0, 0); } static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) { struct smb_direct_transport *t; struct ksmbd_conn *conn; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return NULL; t->cm_id = cm_id; cm_id->context = t; t->status = SMB_DIRECT_CS_NEW; init_waitqueue_head(&t->wait_status); spin_lock_init(&t->reassembly_queue_lock); INIT_LIST_HEAD(&t->reassembly_queue); t->reassembly_data_length = 0; t->reassembly_queue_length = 0; init_waitqueue_head(&t->wait_reassembly_queue); init_waitqueue_head(&t->wait_send_credits); init_waitqueue_head(&t->wait_rw_credits); spin_lock_init(&t->receive_credit_lock); spin_lock_init(&t->recvmsg_queue_lock); INIT_LIST_HEAD(&t->recvmsg_queue); spin_lock_init(&t->empty_recvmsg_queue_lock); INIT_LIST_HEAD(&t->empty_recvmsg_queue); init_waitqueue_head(&t->wait_send_pending); atomic_set(&t->send_pending, 0); spin_lock_init(&t->lock_new_recv_credits); INIT_DELAYED_WORK(&t->post_recv_credits_work, smb_direct_post_recv_credits); INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); conn = ksmbd_conn_alloc(); if (!conn) goto err; conn->transport = KSMBD_TRANS(t); KSMBD_TRANS(t)->conn = conn; KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; return t; err: kfree(t); return NULL; } static void free_transport(struct smb_direct_transport *t) { struct smb_direct_recvmsg *recvmsg; wake_up_interruptible(&t->wait_send_credits); ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n"); wait_event(t->wait_send_pending, 
atomic_read(&t->send_pending) == 0); cancel_work_sync(&t->disconnect_work); cancel_delayed_work_sync(&t->post_recv_credits_work); cancel_work_sync(&t->send_immediate_work); if (t->qp) { ib_drain_qp(t->qp); ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); ib_destroy_qp(t->qp); } ksmbd_debug(RDMA, "drain the reassembly queue\n"); do { spin_lock(&t->reassembly_queue_lock); recvmsg = get_first_reassembly(t); if (recvmsg) { list_del(&recvmsg->list); spin_unlock(&t->reassembly_queue_lock); put_recvmsg(t, recvmsg); } else { spin_unlock(&t->reassembly_queue_lock); } } while (recvmsg); t->reassembly_data_length = 0; if (t->send_cq) ib_free_cq(t->send_cq); if (t->recv_cq) ib_free_cq(t->recv_cq); if (t->pd) ib_dealloc_pd(t->pd); if (t->cm_id) rdma_destroy_id(t->cm_id); smb_direct_destroy_pools(t); ksmbd_conn_free(KSMBD_TRANS(t)->conn); kfree(t); } static struct smb_direct_sendmsg *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) { struct smb_direct_sendmsg *msg; msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL); if (!msg) return ERR_PTR(-ENOMEM); msg->transport = t; INIT_LIST_HEAD(&msg->list); msg->num_sge = 0; return msg; } static void smb_direct_free_sendmsg(struct smb_direct_transport *t, struct smb_direct_sendmsg *msg) { int i; if (msg->num_sge > 0) { ib_dma_unmap_single(t->cm_id->device, msg->sge[0].addr, msg->sge[0].length, DMA_TO_DEVICE); for (i = 1; i < msg->num_sge; i++) ib_dma_unmap_page(t->cm_id->device, msg->sge[i].addr, msg->sge[i].length, DMA_TO_DEVICE); } mempool_free(msg, t->sendmsg_mempool); } static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg) { switch (recvmsg->type) { case SMB_DIRECT_MSG_DATA_TRANSFER: { struct smb_direct_data_transfer *req = (struct smb_direct_data_transfer *)recvmsg->packet; struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet + le32_to_cpu(req->data_offset)); ksmbd_debug(RDMA, "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n", le16_to_cpu(req->credits_granted), le16_to_cpu(req->credits_requested), req->data_length, req->remaining_data_length, hdr->ProtocolId, hdr->Command); break; } case SMB_DIRECT_MSG_NEGOTIATE_REQ: { struct smb_direct_negotiate_req *req = (struct smb_direct_negotiate_req *)recvmsg->packet; ksmbd_debug(RDMA, "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n", le16_to_cpu(req->min_version), le16_to_cpu(req->max_version), le16_to_cpu(req->credits_requested), le32_to_cpu(req->preferred_send_size), le32_to_cpu(req->max_receive_size), le32_to_cpu(req->max_fragmented_size)); if (le16_to_cpu(req->min_version) > 0x0100 || le16_to_cpu(req->max_version) < 0x0100) return -EOPNOTSUPP; if (le16_to_cpu(req->credits_requested) <= 0 || le32_to_cpu(req->max_receive_size) <= 128 || le32_to_cpu(req->max_fragmented_size) <= 128 * 1024) return -ECONNABORTED; break; } default: return -EINVAL; } return 0; } static void recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct smb_direct_recvmsg *recvmsg; struct smb_direct_transport *t; recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe); t = recvmsg->transport; if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_err("Recv error. status='%s (%d)' opcode=%d\n", ib_wc_status_msg(wc->status), wc->status, wc->opcode); smb_direct_disconnect_rdma_connection(t); } put_empty_recvmsg(t, recvmsg); return; } ksmbd_debug(RDMA, "Recv completed. 
status='%s (%d)', opcode=%d\n", ib_wc_status_msg(wc->status), wc->status, wc->opcode); ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr, recvmsg->sge.length, DMA_FROM_DEVICE); switch (recvmsg->type) { case SMB_DIRECT_MSG_NEGOTIATE_REQ: if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) { put_empty_recvmsg(t, recvmsg); return; } t->negotiation_requested = true; t->full_packet_received = true; t->status = SMB_DIRECT_CS_CONNECTED; enqueue_reassembly(t, recvmsg, 0); wake_up_interruptible(&t->wait_status); break; case SMB_DIRECT_MSG_DATA_TRANSFER: { struct smb_direct_data_transfer *data_transfer = (struct smb_direct_data_transfer *)recvmsg->packet; unsigned int data_length; int avail_recvmsg_count, receive_credits; if (wc->byte_len < offsetof(struct smb_direct_data_transfer, padding)) { put_empty_recvmsg(t, recvmsg); return; } data_length = le32_to_cpu(data_transfer->data_length); if (data_length) { if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + (u64)data_length) { put_empty_recvmsg(t, recvmsg); return; } if (t->full_packet_received) recvmsg->first_segment = true; if (le32_to_cpu(data_transfer->remaining_data_length)) t->full_packet_received = false; else t->full_packet_received = true; enqueue_reassembly(t, recvmsg, (int)data_length); wake_up_interruptible(&t->wait_reassembly_queue); spin_lock(&t->receive_credit_lock); receive_credits = --(t->recv_credits); avail_recvmsg_count = t->count_avail_recvmsg; spin_unlock(&t->receive_credit_lock); } else { put_empty_recvmsg(t, recvmsg); spin_lock(&t->receive_credit_lock); receive_credits = --(t->recv_credits); avail_recvmsg_count = ++(t->count_avail_recvmsg); spin_unlock(&t->receive_credit_lock); } t->recv_credit_target = le16_to_cpu(data_transfer->credits_requested); atomic_add(le16_to_cpu(data_transfer->credits_granted), &t->send_credits); if (le16_to_cpu(data_transfer->flags) & SMB_DIRECT_RESPONSE_REQUESTED) queue_work(smb_direct_wq, &t->send_immediate_work); if (atomic_read(&t->send_credits) > 0) wake_up_interruptible(&t->wait_send_credits); if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count)) mod_delayed_work(smb_direct_wq, &t->post_recv_credits_work, 0); break; } default: break; } } static int smb_direct_post_recv(struct smb_direct_transport *t, struct smb_direct_recvmsg *recvmsg) { struct ib_recv_wr wr; int ret; recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, recvmsg->packet, t->max_recv_size, DMA_FROM_DEVICE); ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); if (ret) return ret; recvmsg->sge.length = t->max_recv_size; recvmsg->sge.lkey = t->pd->local_dma_lkey; recvmsg->cqe.done = recv_done; wr.wr_cqe = &recvmsg->cqe; wr.next = NULL; wr.sg_list = &recvmsg->sge; wr.num_sge = 1; ret = ib_post_recv(t->qp, &wr, NULL); if (ret) { pr_err("Can't post recv: %d\n", ret); ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, recvmsg->sge.length, DMA_FROM_DEVICE); smb_direct_disconnect_rdma_connection(t); return ret; } return ret; } static int smb_direct_read(struct ksmbd_transport *t, char *buf, unsigned int size, int unused) { struct smb_direct_recvmsg *recvmsg; struct smb_direct_data_transfer *data_transfer; int to_copy, to_read, data_read, offset; u32 data_length, remaining_data_length, data_offset; int rc; struct smb_direct_transport *st = smb_trans_direct_transfort(t); again: if (st->status != SMB_DIRECT_CS_CONNECTED) { pr_err("disconnected\n"); return -ENOTCONN; } /* * No need to hold the reassembly queue lock all the time as we are * the only one reading from the 
front of the queue. The transport * may add more entries to the back of the queue at the same time */ if (st->reassembly_data_length >= size) { int queue_length; int queue_removed = 0; /* * Need to make sure reassembly_data_length is read before * reading reassembly_queue_length and calling * get_first_reassembly. This call is lock free * as we never read at the end of the queue which are being * updated in SOFTIRQ as more data is received */ virt_rmb(); queue_length = st->reassembly_queue_length; data_read = 0; to_read = size; offset = st->first_entry_offset; while (data_read < size) { recvmsg = get_first_reassembly(st); data_transfer = smb_direct_recvmsg_payload(recvmsg); data_length = le32_to_cpu(data_transfer->data_length); remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); data_offset = le32_to_cpu(data_transfer->data_offset); /* * The upper layer expects RFC1002 length at the * beginning of the payload. Return it to indicate * the total length of the packet. This minimize the * change to upper layer packet processing logic. This * will be eventually remove when an intermediate * transport layer is added */ if (recvmsg->first_segment && size == 4) { unsigned int rfc1002_len = data_length + remaining_data_length; *((__be32 *)buf) = cpu_to_be32(rfc1002_len); data_read = 4; recvmsg->first_segment = false; ksmbd_debug(RDMA, "returning rfc1002 length %d\n", rfc1002_len); goto read_rfc1002_done; } to_copy = min_t(int, data_length - offset, to_read); memcpy(buf + data_read, (char *)data_transfer + data_offset + offset, to_copy); /* move on to the next buffer? */ if (to_copy == data_length - offset) { queue_length--; /* * No need to lock if we are not at the * end of the queue */ if (queue_length) { list_del(&recvmsg->list); } else { spin_lock_irq(&st->reassembly_queue_lock); list_del(&recvmsg->list); spin_unlock_irq(&st->reassembly_queue_lock); } queue_removed++; put_recvmsg(st, recvmsg); offset = 0; } else { offset += to_copy; } to_read -= to_copy; data_read += to_copy; } spin_lock_irq(&st->reassembly_queue_lock); st->reassembly_data_length -= data_read; st->reassembly_queue_length -= queue_removed; spin_unlock_irq(&st->reassembly_queue_lock); spin_lock(&st->receive_credit_lock); st->count_avail_recvmsg += queue_removed; if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) { spin_unlock(&st->receive_credit_lock); mod_delayed_work(smb_direct_wq, &st->post_recv_credits_work, 0); } else { spin_unlock(&st->receive_credit_lock); } st->first_entry_offset = offset; ksmbd_debug(RDMA, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n", data_read, st->reassembly_data_length, st->first_entry_offset); read_rfc1002_done: return data_read; } ksmbd_debug(RDMA, "wait_event on more data\n"); rc = wait_event_interruptible(st->wait_reassembly_queue, st->reassembly_data_length >= size || st->status != SMB_DIRECT_CS_CONNECTED); if (rc) return -EINTR; goto again; } static void smb_direct_post_recv_credits(struct work_struct *work) { struct smb_direct_transport *t = container_of(work, struct smb_direct_transport, post_recv_credits_work.work); struct smb_direct_recvmsg *recvmsg; int receive_credits, credits = 0; int ret; int use_free = 1; spin_lock(&t->receive_credit_lock); receive_credits = t->recv_credits; spin_unlock(&t->receive_credit_lock); if (receive_credits < t->recv_credit_target) { while (true) { if (use_free) recvmsg = get_free_recvmsg(t); else recvmsg = get_empty_recvmsg(t); if (!recvmsg) { if (use_free) { use_free = 
0; continue; } else { break; } } recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER; recvmsg->first_segment = false; ret = smb_direct_post_recv(t, recvmsg); if (ret) { pr_err("Can't post recv: %d\n", ret); put_recvmsg(t, recvmsg); break; } credits++; } } spin_lock(&t->receive_credit_lock); t->recv_credits += credits; t->count_avail_recvmsg -= credits; spin_unlock(&t->receive_credit_lock); spin_lock(&t->lock_new_recv_credits); t->new_recv_credits += credits; spin_unlock(&t->lock_new_recv_credits); if (credits) queue_work(smb_direct_wq, &t->send_immediate_work); } static void send_done(struct ib_cq *cq, struct ib_wc *wc) { struct smb_direct_sendmsg *sendmsg, *sibling; struct smb_direct_transport *t; struct list_head *pos, *prev, *end; sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe); t = sendmsg->transport; ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n", ib_wc_status_msg(wc->status), wc->status, wc->opcode); if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { pr_err("Send error. status='%s (%d)', opcode=%d\n", ib_wc_status_msg(wc->status), wc->status, wc->opcode); smb_direct_disconnect_rdma_connection(t); } if (atomic_dec_and_test(&t->send_pending)) wake_up(&t->wait_send_pending); /* iterate and free the list of messages in reverse. the list's head * is invalid. */ for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next; prev != end; pos = prev, prev = prev->prev) { sibling = container_of(pos, struct smb_direct_sendmsg, list); smb_direct_free_sendmsg(t, sibling); } sibling = container_of(pos, struct smb_direct_sendmsg, list); smb_direct_free_sendmsg(t, sibling); } static int manage_credits_prior_sending(struct smb_direct_transport *t) { int new_credits; spin_lock(&t->lock_new_recv_credits); new_credits = t->new_recv_credits; t->new_recv_credits = 0; spin_unlock(&t->lock_new_recv_credits); return new_credits; } static int smb_direct_post_send(struct smb_direct_transport *t, struct ib_send_wr *wr) { int ret; atomic_inc(&t->send_pending); ret = ib_post_send(t->qp, wr, NULL); if (ret) { pr_err("failed to post send: %d\n", ret); if (atomic_dec_and_test(&t->send_pending)) wake_up(&t->wait_send_pending); smb_direct_disconnect_rdma_connection(t); } return ret; } static void smb_direct_send_ctx_init(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx, bool need_invalidate_rkey, unsigned int remote_key) { INIT_LIST_HEAD(&send_ctx->msg_list); send_ctx->wr_cnt = 0; send_ctx->need_invalidate_rkey = need_invalidate_rkey; send_ctx->remote_key = remote_key; } static int smb_direct_flush_send_list(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx, bool is_last) { struct smb_direct_sendmsg *first, *last; int ret; if (list_empty(&send_ctx->msg_list)) return 0; first = list_first_entry(&send_ctx->msg_list, struct smb_direct_sendmsg, list); last = list_last_entry(&send_ctx->msg_list, struct smb_direct_sendmsg, list); last->wr.send_flags = IB_SEND_SIGNALED; last->wr.wr_cqe = &last->cqe; if (is_last && send_ctx->need_invalidate_rkey) { last->wr.opcode = IB_WR_SEND_WITH_INV; last->wr.ex.invalidate_rkey = send_ctx->remote_key; } ret = smb_direct_post_send(t, &first->wr); if (!ret) { smb_direct_send_ctx_init(t, send_ctx, send_ctx->need_invalidate_rkey, send_ctx->remote_key); } else { atomic_add(send_ctx->wr_cnt, &t->send_credits); wake_up(&t->wait_send_credits); list_for_each_entry_safe(first, last, &send_ctx->msg_list, list) { smb_direct_free_sendmsg(t, first); } } return ret; } static int wait_for_credits(struct 
smb_direct_transport *t, wait_queue_head_t *waitq, atomic_t *total_credits, int needed) { int ret; do { if (atomic_sub_return(needed, total_credits) >= 0) return 0; atomic_add(needed, total_credits); ret = wait_event_interruptible(*waitq, atomic_read(total_credits) >= needed || t->status != SMB_DIRECT_CS_CONNECTED); if (t->status != SMB_DIRECT_CS_CONNECTED) return -ENOTCONN; else if (ret < 0) return ret; } while (true); } static int wait_for_send_credits(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx) { int ret; if (send_ctx && (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { ret = smb_direct_flush_send_list(t, send_ctx, false); if (ret) return ret; } return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1); } static int wait_for_rw_credits(struct smb_direct_transport *t, int credits) { return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits); } static int calc_rw_credits(struct smb_direct_transport *t, char *buf, unsigned int len) { return DIV_ROUND_UP(get_buf_page_count(buf, len), t->pages_per_rw_credit); } static int smb_direct_create_header(struct smb_direct_transport *t, int size, int remaining_data_length, struct smb_direct_sendmsg **sendmsg_out) { struct smb_direct_sendmsg *sendmsg; struct smb_direct_data_transfer *packet; int header_length; int ret; sendmsg = smb_direct_alloc_sendmsg(t); if (IS_ERR(sendmsg)) return PTR_ERR(sendmsg); /* Fill in the packet header */ packet = (struct smb_direct_data_transfer *)sendmsg->packet; packet->credits_requested = cpu_to_le16(t->send_credit_target); packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); packet->flags = 0; packet->reserved = 0; if (!size) packet->data_offset = 0; else packet->data_offset = cpu_to_le32(24); packet->data_length = cpu_to_le32(size); packet->remaining_data_length = cpu_to_le32(remaining_data_length); packet->padding = 0; ksmbd_debug(RDMA, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n", le16_to_cpu(packet->credits_requested), le16_to_cpu(packet->credits_granted), le32_to_cpu(packet->data_offset), le32_to_cpu(packet->data_length), le32_to_cpu(packet->remaining_data_length)); /* Map the packet to DMA */ header_length = sizeof(struct smb_direct_data_transfer); /* If this is a packet without payload, don't send padding */ if (!size) header_length = offsetof(struct smb_direct_data_transfer, padding); sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, (void *)packet, header_length, DMA_TO_DEVICE); ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); if (ret) { smb_direct_free_sendmsg(t, sendmsg); return ret; } sendmsg->num_sge = 1; sendmsg->sge[0].length = header_length; sendmsg->sge[0].lkey = t->pd->local_dma_lkey; *sendmsg_out = sendmsg; return 0; } static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries) { bool high = is_vmalloc_addr(buf); struct page *page; int offset, len; int i = 0; if (size <= 0 || nentries < get_buf_page_count(buf, size)) return -EINVAL; offset = offset_in_page(buf); buf -= offset; while (size > 0) { len = min_t(int, PAGE_SIZE - offset, size); if (high) page = vmalloc_to_page(buf); else page = kmap_to_page(buf); if (!sg_list) return -EINVAL; sg_set_page(sg_list, page, len, offset); sg_list = sg_next(sg_list); buf += PAGE_SIZE; size -= len; offset = 0; i++; } return i; } static int get_mapped_sg_list(struct ib_device *device, void *buf, int size, struct scatterlist *sg_list, int nentries, enum dma_data_direction 
dir) { int npages; npages = get_sg_list(buf, size, sg_list, nentries); if (npages < 0) return -EINVAL; return ib_dma_map_sg(device, sg_list, npages, dir); } static int post_sendmsg(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx, struct smb_direct_sendmsg *msg) { int i; for (i = 0; i < msg->num_sge; i++) ib_dma_sync_single_for_device(t->cm_id->device, msg->sge[i].addr, msg->sge[i].length, DMA_TO_DEVICE); msg->cqe.done = send_done; msg->wr.opcode = IB_WR_SEND; msg->wr.sg_list = &msg->sge[0]; msg->wr.num_sge = msg->num_sge; msg->wr.next = NULL; if (send_ctx) { msg->wr.wr_cqe = NULL; msg->wr.send_flags = 0; if (!list_empty(&send_ctx->msg_list)) { struct smb_direct_sendmsg *last; last = list_last_entry(&send_ctx->msg_list, struct smb_direct_sendmsg, list); last->wr.next = &msg->wr; } list_add_tail(&msg->list, &send_ctx->msg_list); send_ctx->wr_cnt++; return 0; } msg->wr.wr_cqe = &msg->cqe; msg->wr.send_flags = IB_SEND_SIGNALED; return smb_direct_post_send(t, &msg->wr); } static int smb_direct_post_send_data(struct smb_direct_transport *t, struct smb_direct_send_ctx *send_ctx, struct kvec *iov, int niov, int remaining_data_length) { int i, j, ret; struct smb_direct_sendmsg *msg; int data_length; struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1]; ret = wait_for_send_credits(t, send_ctx); if (ret) return ret; data_length = 0; for (i = 0; i < niov; i++) data_length += iov[i].iov_len; ret = smb_direct_create_header(t, data_length, remaining_data_length, &msg); if (ret) { atomic_inc(&t->send_credits); return ret; } for (i = 0; i < niov; i++) { struct ib_sge *sge; int sg_cnt; sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1); sg_cnt = get_mapped_sg_list(t->cm_id->device, iov[i].iov_base, iov[i].iov_len, sg, SMB_DIRECT_MAX_SEND_SGES - 1, DMA_TO_DEVICE); if (sg_cnt <= 0) { pr_err("failed to map buffer\n"); ret = -ENOMEM; goto err; } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) { pr_err("buffer not fitted into sges\n"); ret = -E2BIG; ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt, DMA_TO_DEVICE); goto err; } for (j = 0; j < sg_cnt; j++) { sge = &msg->sge[msg->num_sge]; sge->addr = sg_dma_address(&sg[j]); sge->length = sg_dma_len(&sg[j]); sge->lkey = t->pd->local_dma_lkey; msg->num_sge++; } } ret = post_sendmsg(t, send_ctx, msg); if (ret) goto err; return 0; err: smb_direct_free_sendmsg(t, msg); atomic_inc(&t->send_credits); return ret; } static int smb_direct_writev(struct ksmbd_transport *t, struct kvec *iov, int niovs, int buflen, bool need_invalidate, unsigned int remote_key) { struct smb_direct_transport *st = smb_trans_direct_transfort(t); int remaining_data_length; int start, i, j; int max_iov_size = st->max_send_size - sizeof(struct smb_direct_data_transfer); int ret; struct kvec vec; struct smb_direct_send_ctx send_ctx; if (st->status != SMB_DIRECT_CS_CONNECTED) return -ENOTCONN; //FIXME: skip RFC1002 header.. 
buflen -= 4; remaining_data_length = buflen; ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen); smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key); start = i = 1; buflen = 0; while (true) { buflen += iov[i].iov_len; if (buflen > max_iov_size) { if (i > start) { remaining_data_length -= (buflen - iov[i].iov_len); ret = smb_direct_post_send_data(st, &send_ctx, &iov[start], i - start, remaining_data_length); if (ret) goto done; } else { /* iov[start] is too big, break it */ int nvec = (buflen + max_iov_size - 1) / max_iov_size; for (j = 0; j < nvec; j++) { vec.iov_base = (char *)iov[start].iov_base + j * max_iov_size; vec.iov_len = min_t(int, max_iov_size, buflen - max_iov_size * j); remaining_data_length -= vec.iov_len; ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1, remaining_data_length); if (ret) goto done; } i++; if (i == niovs) break; } start = i; buflen = 0; } else { i++; if (i == niovs) { /* send out all remaining vecs */ remaining_data_length -= buflen; ret = smb_direct_post_send_data(st, &send_ctx, &iov[start], i - start, remaining_data_length); if (ret) goto done; break; } } } done: ret = smb_direct_flush_send_list(st, &send_ctx, true); /* * As an optimization, we don't wait for individual I/O to finish * before sending the next one. * Send them all and wait for pending send count to get to 0 * that means all the I/Os have been out and we are good to return */ wait_event(st->wait_send_pending, atomic_read(&st->send_pending) == 0); return ret; } static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t, struct smb_direct_rdma_rw_msg *msg, enum dma_data_direction dir) { rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, msg->sgt.sgl, msg->sgt.nents, dir); sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); kfree(msg); } static void read_write_done(struct ib_cq *cq, struct ib_wc *wc, enum dma_data_direction dir) { struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe, struct smb_direct_rdma_rw_msg, cqe); struct smb_direct_transport *t = msg->t; if (wc->status != IB_WC_SUCCESS) { msg->status = -EIO; pr_err("read/write error. opcode = %d, status = %s(%d)\n", wc->opcode, ib_wc_status_msg(wc->status), wc->status); if (wc->status != IB_WC_WR_FLUSH_ERR) smb_direct_disconnect_rdma_connection(t); } complete(msg->completion); } static void read_done(struct ib_cq *cq, struct ib_wc *wc) { read_write_done(cq, wc, DMA_FROM_DEVICE); } static void write_done(struct ib_cq *cq, struct ib_wc *wc) { read_write_done(cq, wc, DMA_TO_DEVICE); } static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf, int buf_len, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len, bool is_read) { struct smb_direct_rdma_rw_msg *msg, *next_msg; int i, ret; DECLARE_COMPLETION_ONSTACK(completion); struct ib_send_wr *first_wr; LIST_HEAD(msg_list); char *desc_buf; int credits_needed; unsigned int desc_buf_len, desc_num = 0; if (t->status != SMB_DIRECT_CS_CONNECTED) return -ENOTCONN; if (buf_len > t->max_rdma_rw_size) return -EINVAL; /* calculate needed credits */ credits_needed = 0; desc_buf = buf; for (i = 0; i < desc_len / sizeof(*desc); i++) { if (!buf_len) break; desc_buf_len = le32_to_cpu(desc[i].length); if (!desc_buf_len) return -EINVAL; if (desc_buf_len > buf_len) { desc_buf_len = buf_len; desc[i].length = cpu_to_le32(desc_buf_len); buf_len = 0; } credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len); desc_buf += desc_buf_len; buf_len -= desc_buf_len; desc_num++; } ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n", is_read ? 
"read" : "write", buf_len, credits_needed); ret = wait_for_rw_credits(t, credits_needed); if (ret < 0) return ret; /* build rdma_rw_ctx for each descriptor */ desc_buf = buf; for (i = 0; i < desc_num; i++) { msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) + sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto out; } desc_buf_len = le32_to_cpu(desc[i].length); msg->t = t; msg->cqe.done = is_read ? read_done : write_done; msg->completion = &completion; msg->sgt.sgl = &msg->sg_list[0]; ret = sg_alloc_table_chained(&msg->sgt, get_buf_page_count(desc_buf, desc_buf_len), msg->sg_list, SG_CHUNK_SIZE); if (ret) { kfree(msg); ret = -ENOMEM; goto out; } ret = get_sg_list(desc_buf, desc_buf_len, msg->sgt.sgl, msg->sgt.orig_nents); if (ret < 0) { sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); kfree(msg); goto out; } ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, msg->sgt.sgl, get_buf_page_count(desc_buf, desc_buf_len), 0, le64_to_cpu(desc[i].offset), le32_to_cpu(desc[i].token), is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); if (ret < 0) { pr_err("failed to init rdma_rw_ctx: %d\n", ret); sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); kfree(msg); goto out; } list_add_tail(&msg->list, &msg_list); desc_buf += desc_buf_len; } /* concatenate work requests of rdma_rw_ctxs */ first_wr = NULL; list_for_each_entry_reverse(msg, &msg_list, list) { first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, &msg->cqe, first_wr); } ret = ib_post_send(t->qp, first_wr, NULL); if (ret) { pr_err("failed to post send wr for RDMA R/W: %d\n", ret); goto out; } msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list); wait_for_completion(&completion); ret = msg->status; out: list_for_each_entry_safe(msg, next_msg, &msg_list, list) { list_del(&msg->list); smb_direct_free_rdma_rw_msg(t, msg, is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } atomic_add(credits_needed, &t->rw_credits); wake_up(&t->wait_rw_credits); return ret; } static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf, unsigned int buflen, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len) { return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, desc, desc_len, false); } static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf, unsigned int buflen, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len) { return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, desc, desc_len, true); } static void smb_direct_disconnect(struct ksmbd_transport *t) { struct smb_direct_transport *st = smb_trans_direct_transfort(t); ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id); smb_direct_disconnect_rdma_work(&st->disconnect_work); wait_event_interruptible(st->wait_status, st->status == SMB_DIRECT_CS_DISCONNECTED); free_transport(st); } static void smb_direct_shutdown(struct ksmbd_transport *t) { struct smb_direct_transport *st = smb_trans_direct_transfort(t); ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id); smb_direct_disconnect_rdma_work(&st->disconnect_work); } static int smb_direct_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct smb_direct_transport *t = cm_id->context; ksmbd_debug(RDMA, "RDMA CM event. 
cm_id=%p event=%s (%d)\n", cm_id, rdma_event_msg(event->event), event->event); switch (event->event) { case RDMA_CM_EVENT_ESTABLISHED: { t->status = SMB_DIRECT_CS_CONNECTED; wake_up_interruptible(&t->wait_status); break; } case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_DISCONNECTED: { ib_drain_qp(t->qp); t->status = SMB_DIRECT_CS_DISCONNECTED; wake_up_interruptible(&t->wait_status); wake_up_interruptible(&t->wait_reassembly_queue); wake_up(&t->wait_send_credits); break; } case RDMA_CM_EVENT_CONNECT_ERROR: { t->status = SMB_DIRECT_CS_DISCONNECTED; wake_up_interruptible(&t->wait_status); break; } default: pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n", cm_id, rdma_event_msg(event->event), event->event); break; } return 0; } static void smb_direct_qpair_handler(struct ib_event *event, void *context) { struct smb_direct_transport *t = context; ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n", t->cm_id, ib_event_msg(event->event), event->event); switch (event->event) { case IB_EVENT_CQ_ERR: case IB_EVENT_QP_FATAL: smb_direct_disconnect_rdma_connection(t); break; default: break; } } static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, int failed) { struct smb_direct_sendmsg *sendmsg; struct smb_direct_negotiate_resp *resp; int ret; sendmsg = smb_direct_alloc_sendmsg(t); if (IS_ERR(sendmsg)) return -ENOMEM; resp = (struct smb_direct_negotiate_resp *)sendmsg->packet; if (failed) { memset(resp, 0, sizeof(*resp)); resp->min_version = cpu_to_le16(0x0100); resp->max_version = cpu_to_le16(0x0100); resp->status = STATUS_NOT_SUPPORTED; } else { resp->status = STATUS_SUCCESS; resp->min_version = SMB_DIRECT_VERSION_LE; resp->max_version = SMB_DIRECT_VERSION_LE; resp->negotiated_version = SMB_DIRECT_VERSION_LE; resp->reserved = 0; resp->credits_requested = cpu_to_le16(t->send_credit_target); resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); resp->preferred_send_size = cpu_to_le32(t->max_send_size); resp->max_receive_size = cpu_to_le32(t->max_recv_size); resp->max_fragmented_size = cpu_to_le32(t->max_fragmented_recv_size); } sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, (void *)resp, sizeof(*resp), DMA_TO_DEVICE); ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); if (ret) { smb_direct_free_sendmsg(t, sendmsg); return ret; } sendmsg->num_sge = 1; sendmsg->sge[0].length = sizeof(*resp); sendmsg->sge[0].lkey = t->pd->local_dma_lkey; ret = post_sendmsg(t, NULL, sendmsg); if (ret) { smb_direct_free_sendmsg(t, sendmsg); return ret; } wait_event(t->wait_send_pending, atomic_read(&t->send_pending) == 0); return 0; } static int smb_direct_accept_client(struct smb_direct_transport *t) { struct rdma_conn_param conn_param; struct ib_port_immutable port_immutable; u32 ird_ord_hdr[2]; int ret; memset(&conn_param, 0, sizeof(conn_param)); conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, SMB_DIRECT_CM_INITIATOR_DEPTH); conn_param.responder_resources = 0; t->cm_id->device->ops.get_port_immutable(t->cm_id->device, t->cm_id->port_num, &port_immutable); if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { ird_ord_hdr[0] = conn_param.responder_resources; ird_ord_hdr[1] = 1; conn_param.private_data = ird_ord_hdr; conn_param.private_data_len = sizeof(ird_ord_hdr); } else { conn_param.private_data = NULL; conn_param.private_data_len = 0; } conn_param.retry_count = SMB_DIRECT_CM_RETRY; conn_param.rnr_retry_count = 
SMB_DIRECT_CM_RNR_RETRY; conn_param.flow_control = 0; ret = rdma_accept(t->cm_id, &conn_param); if (ret) { pr_err("error at rdma_accept: %d\n", ret); return ret; } return 0; } static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) { int ret; struct smb_direct_recvmsg *recvmsg; recvmsg = get_free_recvmsg(t); if (!recvmsg) return -ENOMEM; recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ; ret = smb_direct_post_recv(t, recvmsg); if (ret) { pr_err("Can't post recv: %d\n", ret); goto out_err; } t->negotiation_requested = false; ret = smb_direct_accept_client(t); if (ret) { pr_err("Can't accept client\n"); goto out_err; } smb_direct_post_recv_credits(&t->post_recv_credits_work.work); return 0; out_err: put_recvmsg(t, recvmsg); return ret; } static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t) { return min_t(unsigned int, t->cm_id->device->attrs.max_fast_reg_page_list_len, 256); } static int smb_direct_init_params(struct smb_direct_transport *t, struct ib_qp_cap *cap) { struct ib_device *device = t->cm_id->device; int max_send_sges, max_rw_wrs, max_send_wrs; unsigned int max_sge_per_wr, wrs_per_credit; /* Need 3 more SGEs, because the SMB_DIRECT header, the SMB2 header, * and the SMB2 response could each be mapped. */ t->max_send_size = smb_direct_max_send_size; max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3; if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) { pr_err("max_send_size %d is too large\n", t->max_send_size); return -EINVAL; } /* Calculate the number of work requests for RDMA R/W. * The maximum number of pages which can be registered * with one Memory region can be transferred with one * R/W credit. And at least 4 work requests for each credit * are needed for MR registration, RDMA R/W, local & remote * MR invalidation. 
*/ t->max_rdma_rw_size = smb_direct_max_read_write_size; t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t); t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size, (t->pages_per_rw_credit - 1) * PAGE_SIZE); max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge, device->attrs.max_sge_rd); max_sge_per_wr = max_t(unsigned int, max_sge_per_wr, max_send_sges); wrs_per_credit = max_t(unsigned int, 4, DIV_ROUND_UP(t->pages_per_rw_credit, max_sge_per_wr) + 1); max_rw_wrs = t->max_rw_credits * wrs_per_credit; max_send_wrs = smb_direct_send_credit_target + max_rw_wrs; if (max_send_wrs > device->attrs.max_cqe || max_send_wrs > device->attrs.max_qp_wr) { pr_err("consider lowering send_credit_target = %d\n", smb_direct_send_credit_target); pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", device->attrs.max_cqe, device->attrs.max_qp_wr); return -EINVAL; } if (smb_direct_receive_credit_max > device->attrs.max_cqe || smb_direct_receive_credit_max > device->attrs.max_qp_wr) { pr_err("consider lowering receive_credit_max = %d\n", smb_direct_receive_credit_max); pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", device->attrs.max_cqe, device->attrs.max_qp_wr); return -EINVAL; } if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) { pr_err("device max_recv_sge = %d is too small\n", device->attrs.max_recv_sge); return -EINVAL; } t->recv_credits = 0; t->count_avail_recvmsg = 0; t->recv_credit_max = smb_direct_receive_credit_max; t->recv_credit_target = 10; t->new_recv_credits = 0; t->send_credit_target = smb_direct_send_credit_target; atomic_set(&t->send_credits, 0); atomic_set(&t->rw_credits, t->max_rw_credits); t->max_send_size = smb_direct_max_send_size; t->max_recv_size = smb_direct_max_receive_size; t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; cap->max_send_wr = max_send_wrs; cap->max_recv_wr = t->recv_credit_max; cap->max_send_sge = max_sge_per_wr; cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES; cap->max_inline_data = 0; cap->max_rdma_ctxs = t->max_rw_credits; return 0; } static void smb_direct_destroy_pools(struct smb_direct_transport *t) { struct smb_direct_recvmsg *recvmsg; while ((recvmsg = get_free_recvmsg(t))) mempool_free(recvmsg, t->recvmsg_mempool); while ((recvmsg = get_empty_recvmsg(t))) mempool_free(recvmsg, t->recvmsg_mempool); mempool_destroy(t->recvmsg_mempool); t->recvmsg_mempool = NULL; kmem_cache_destroy(t->recvmsg_cache); t->recvmsg_cache = NULL; mempool_destroy(t->sendmsg_mempool); t->sendmsg_mempool = NULL; kmem_cache_destroy(t->sendmsg_cache); t->sendmsg_cache = NULL; } static int smb_direct_create_pools(struct smb_direct_transport *t) { char name[80]; int i; struct smb_direct_recvmsg *recvmsg; snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); t->sendmsg_cache = kmem_cache_create(name, sizeof(struct smb_direct_sendmsg) + sizeof(struct smb_direct_negotiate_resp), 0, SLAB_HWCACHE_ALIGN, NULL); if (!t->sendmsg_cache) return -ENOMEM; t->sendmsg_mempool = mempool_create(t->send_credit_target, mempool_alloc_slab, mempool_free_slab, t->sendmsg_cache); if (!t->sendmsg_mempool) goto err; snprintf(name, sizeof(name), "smb_direct_resp_%p", t); t->recvmsg_cache = kmem_cache_create(name, sizeof(struct smb_direct_recvmsg) + t->max_recv_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!t->recvmsg_cache) goto err; t->recvmsg_mempool = mempool_create(t->recv_credit_max, mempool_alloc_slab, mempool_free_slab, t->recvmsg_cache); if (!t->recvmsg_mempool) goto err; INIT_LIST_HEAD(&t->recvmsg_queue); for (i = 
0; i < t->recv_credit_max; i++) { recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL); if (!recvmsg) goto err; recvmsg->transport = t; list_add(&recvmsg->list, &t->recvmsg_queue); } t->count_avail_recvmsg = t->recv_credit_max; return 0; err: smb_direct_destroy_pools(t); return -ENOMEM; } static int smb_direct_create_qpair(struct smb_direct_transport *t, struct ib_qp_cap *cap) { int ret; struct ib_qp_init_attr qp_attr; int pages_per_rw; t->pd = ib_alloc_pd(t->cm_id->device, 0); if (IS_ERR(t->pd)) { pr_err("Can't create RDMA PD\n"); ret = PTR_ERR(t->pd); t->pd = NULL; return ret; } t->send_cq = ib_alloc_cq(t->cm_id->device, t, smb_direct_send_credit_target + cap->max_rdma_ctxs, 0, IB_POLL_WORKQUEUE); if (IS_ERR(t->send_cq)) { pr_err("Can't create RDMA send CQ\n"); ret = PTR_ERR(t->send_cq); t->send_cq = NULL; goto err; } t->recv_cq = ib_alloc_cq(t->cm_id->device, t, t->recv_credit_max, 0, IB_POLL_WORKQUEUE); if (IS_ERR(t->recv_cq)) { pr_err("Can't create RDMA recv CQ\n"); ret = PTR_ERR(t->recv_cq); t->recv_cq = NULL; goto err; } memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = smb_direct_qpair_handler; qp_attr.qp_context = t; qp_attr.cap = *cap; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = t->send_cq; qp_attr.recv_cq = t->recv_cq; qp_attr.port_num = ~0; ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); if (ret) { pr_err("Can't create RDMA QP: %d\n", ret); goto err; } t->qp = t->cm_id->qp; t->cm_id->event_handler = smb_direct_cm_handler; pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) { ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, t->max_rw_credits, IB_MR_TYPE_MEM_REG, t->pages_per_rw_credit, 0); if (ret) { pr_err("failed to init mr pool count %d pages %d\n", t->max_rw_credits, t->pages_per_rw_credit); goto err; } } return 0; err: if (t->qp) { ib_destroy_qp(t->qp); t->qp = NULL; } if (t->recv_cq) { ib_destroy_cq(t->recv_cq); t->recv_cq = NULL; } if (t->send_cq) { ib_destroy_cq(t->send_cq); t->send_cq = NULL; } if (t->pd) { ib_dealloc_pd(t->pd); t->pd = NULL; } return ret; } static int smb_direct_prepare(struct ksmbd_transport *t) { struct smb_direct_transport *st = smb_trans_direct_transfort(t); struct smb_direct_recvmsg *recvmsg; struct smb_direct_negotiate_req *req; int ret; ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n"); ret = wait_event_interruptible_timeout(st->wait_status, st->negotiation_requested || st->status == SMB_DIRECT_CS_DISCONNECTED, SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ); if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED) return ret < 0 ? 
ret : -ETIMEDOUT; recvmsg = get_first_reassembly(st); if (!recvmsg) return -ECONNABORTED; ret = smb_direct_check_recvmsg(recvmsg); if (ret == -ECONNABORTED) goto out; req = (struct smb_direct_negotiate_req *)recvmsg->packet; st->max_recv_size = min_t(int, st->max_recv_size, le32_to_cpu(req->preferred_send_size)); st->max_send_size = min_t(int, st->max_send_size, le32_to_cpu(req->max_receive_size)); st->max_fragmented_send_size = le32_to_cpu(req->max_fragmented_size); st->max_fragmented_recv_size = (st->recv_credit_max * st->max_recv_size) / 2; ret = smb_direct_send_negotiate_response(st, ret); out: spin_lock_irq(&st->reassembly_queue_lock); st->reassembly_queue_length--; list_del(&recvmsg->list); spin_unlock_irq(&st->reassembly_queue_lock); put_recvmsg(st, recvmsg); return ret; } static int smb_direct_connect(struct smb_direct_transport *st) { int ret; struct ib_qp_cap qp_cap; ret = smb_direct_init_params(st, &qp_cap); if (ret) { pr_err("Can't configure RDMA parameters\n"); return ret; } ret = smb_direct_create_pools(st); if (ret) { pr_err("Can't init RDMA pool: %d\n", ret); return ret; } ret = smb_direct_create_qpair(st, &qp_cap); if (ret) { pr_err("Can't accept RDMA client: %d\n", ret); return ret; } ret = smb_direct_prepare_negotiation(st); if (ret) { pr_err("Can't negotiate: %d\n", ret); return ret; } return 0; } static bool rdma_frwr_is_supported(struct ib_device_attr *attrs) { if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) return false; if (attrs->max_fast_reg_page_list_len == 0) return false; return true; } static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id) { struct smb_direct_transport *t; int ret; if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) { ksmbd_debug(RDMA, "Fast Registration Work Requests are not supported. device capabilities=%llx\n", new_cm_id->device->attrs.device_cap_flags); return -EPROTONOSUPPORT; } t = alloc_transport(new_cm_id); if (!t) return -ENOMEM; ret = smb_direct_connect(t); if (ret) goto out_err; KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop, KSMBD_TRANS(t)->conn, "ksmbd:r%u", smb_direct_port); if (IS_ERR(KSMBD_TRANS(t)->handler)) { ret = PTR_ERR(KSMBD_TRANS(t)->handler); pr_err("Can't start thread\n"); goto out_err; } return 0; out_err: free_transport(t); return ret; } static int smb_direct_listen_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: { int ret = smb_direct_handle_connect_request(cm_id); if (ret) { pr_err("Can't create transport: %d\n", ret); return ret; } ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n", cm_id); break; } default: pr_err("Unexpected listen event. 
cm_id=%p, event=%s (%d)\n", cm_id, rdma_event_msg(event->event), event->event); break; } return 0; } static int smb_direct_listen(int port) { int ret; struct rdma_cm_id *cm_id; struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; cm_id = rdma_create_id(&init_net, smb_direct_listen_handler, &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id)); return PTR_ERR(cm_id); } ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); if (ret) { pr_err("Can't bind: %d\n", ret); goto err; } smb_direct_listener.cm_id = cm_id; ret = rdma_listen(cm_id, 10); if (ret) { pr_err("Can't listen: %d\n", ret); goto err; } return 0; err: smb_direct_listener.cm_id = NULL; rdma_destroy_id(cm_id); return ret; } static int smb_direct_ib_client_add(struct ib_device *ib_dev) { struct smb_direct_device *smb_dev; /* Use port 5445 if the device type is iWARP (no IB) */ if (ib_dev->node_type != RDMA_NODE_IB_CA) smb_direct_port = SMB_DIRECT_PORT_IWARP; if (!ib_dev->ops.get_netdev || !rdma_frwr_is_supported(&ib_dev->attrs)) return 0; smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL); if (!smb_dev) return -ENOMEM; smb_dev->ib_dev = ib_dev; write_lock(&smb_direct_device_lock); list_add(&smb_dev->list, &smb_direct_device_list); write_unlock(&smb_direct_device_lock); ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name); return 0; } static void smb_direct_ib_client_remove(struct ib_device *ib_dev, void *client_data) { struct smb_direct_device *smb_dev, *tmp; write_lock(&smb_direct_device_lock); list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) { if (smb_dev->ib_dev == ib_dev) { list_del(&smb_dev->list); kfree(smb_dev); break; } } write_unlock(&smb_direct_device_lock); } static struct ib_client smb_direct_ib_client = { .name = "ksmbd_smb_direct_ib", .add = smb_direct_ib_client_add, .remove = smb_direct_ib_client_remove, }; int ksmbd_rdma_init(void) { int ret; smb_direct_listener.cm_id = NULL; ret = ib_register_client(&smb_direct_ib_client); if (ret) { pr_err("failed to ib_register_client\n"); return ret; } /* When a client runs out of send credits, the server grants more * credits by sending a packet on this queue. This avoids a situation * where clients cannot send packets for lack of credits. */ smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq", WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); if (!smb_direct_wq) return -ENOMEM; ret = smb_direct_listen(smb_direct_port); if (ret) { destroy_workqueue(smb_direct_wq); smb_direct_wq = NULL; pr_err("Can't listen: %d\n", ret); return ret; } ksmbd_debug(RDMA, "init RDMA listener. 
cm_id=%p\n", smb_direct_listener.cm_id); return 0; } void ksmbd_rdma_destroy(void) { if (!smb_direct_listener.cm_id) return; ib_unregister_client(&smb_direct_ib_client); rdma_destroy_id(smb_direct_listener.cm_id); smb_direct_listener.cm_id = NULL; if (smb_direct_wq) { destroy_workqueue(smb_direct_wq); smb_direct_wq = NULL; } } bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { struct smb_direct_device *smb_dev; int i; bool rdma_capable = false; read_lock(&smb_direct_device_lock); list_for_each_entry(smb_dev, &smb_direct_device_list, list) { for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) { struct net_device *ndev; ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev, i + 1); if (!ndev) continue; if (ndev == netdev) { dev_put(ndev); rdma_capable = true; goto out; } dev_put(ndev); } } out: read_unlock(&smb_direct_device_lock); if (!rdma_capable) { struct ib_device *ibdev; ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN); if (ibdev) { if (rdma_frwr_is_supported(&ibdev->attrs)) rdma_capable = true; ib_device_put(ibdev); } } return rdma_capable; } static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = { .prepare = smb_direct_prepare, .disconnect = smb_direct_disconnect, .shutdown = smb_direct_shutdown, .writev = smb_direct_writev, .read = smb_direct_read, .rdma_read = smb_direct_rdma_read, .rdma_write = smb_direct_rdma_write, };
linux-master
fs/smb/server/transport_rdma.c
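/*
 * Illustrative userspace sketch (not part of the kernel sources): it
 * re-derives the R/W credit math used by smb_direct_init_params() in
 * transport_rdma.c above. The input values below are hypothetical
 * defaults chosen only for the demo, and the "-1 leaves room for an
 * unaligned page" rationale is an assumption, not a statement from
 * the source.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int max_rdma_rw_size = 1024 * 1024;	/* 1 MiB payload cap */
	unsigned int pages_per_rw_credit = 256;		/* one MR's page list */
	unsigned int max_sge_per_wr = 30;		/* device SGE limit */

	/* One credit moves at most (pages_per_rw_credit - 1) full pages of
	 * payload; the -1 presumably accounts for a buffer that does not
	 * start on a page boundary and so straddles one extra page.
	 */
	unsigned int max_rw_credits =
		DIV_ROUND_UP(max_rdma_rw_size,
			     (pages_per_rw_credit - 1) * page_size);

	/* At least 4 WRs per credit: MR registration, the RDMA R/W itself,
	 * and local plus remote MR invalidation.
	 */
	unsigned int wrs_per_credit =
		DIV_ROUND_UP(pages_per_rw_credit, max_sge_per_wr) + 1;
	if (wrs_per_credit < 4)
		wrs_per_credit = 4;

	printf("max_rw_credits=%u wrs_per_credit=%u total_wrs=%u\n",
	       max_rw_credits, wrs_per_credit,
	       max_rw_credits * wrs_per_credit);
	return 0;
}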
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/uaccess.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #include <linux/xattr.h> #include <linux/falloc.h> #include <linux/fsnotify.h> #include <linux/dcache.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/sched/xacct.h> #include <linux/crc32c.h> #include <linux/namei.h> #include "glob.h" #include "oplock.h" #include "connection.h" #include "vfs.h" #include "vfs_cache.h" #include "smbacl.h" #include "ndr.h" #include "auth.h" #include "misc.h" #include "smb_common.h" #include "mgmt/share_config.h" #include "mgmt/tree_connect.h" #include "mgmt/user_session.h" #include "mgmt/user_config.h" static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work, struct inode *parent_inode, struct inode *inode) { if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_INHERIT_OWNER)) return; i_uid_write(inode, i_uid_read(parent_inode)); } /** * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable */ int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child) { inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); if (child->d_parent != parent) { inode_unlock(d_inode(parent)); return -ENOENT; } return 0; } static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, char *pathname, unsigned int flags, struct path *parent_path, struct path *path) { struct qstr last; struct filename *filename; struct path *root_share_path = &share_conf->vfs_path; int err, type; struct dentry *d; if (pathname[0] == '\0') { pathname = share_conf->path; root_share_path = NULL; } else { flags |= LOOKUP_BENEATH; } filename = getname_kernel(pathname); if (IS_ERR(filename)) return PTR_ERR(filename); err = vfs_path_parent_lookup(filename, flags, parent_path, &last, &type, root_share_path); if (err) { putname(filename); return err; } if (unlikely(type != LAST_NORM)) { path_put(parent_path); putname(filename); return -ENOENT; } inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT); d = lookup_one_qstr_excl(&last, parent_path->dentry, 0); if (IS_ERR(d)) goto err_out; if (d_is_negative(d)) { dput(d); goto err_out; } path->dentry = d; path->mnt = mntget(parent_path->mnt); if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) { err = follow_down(path, 0); if (err < 0) { path_put(path); goto err_out; } } putname(filename); return 0; err_out: inode_unlock(d_inode(parent_path->dentry)); path_put(parent_path); putname(filename); return -ENOENT; } void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap, struct dentry *dentry, __le32 *daccess) { *daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL); if (!inode_permission(idmap, d_inode(dentry), MAY_OPEN | MAY_WRITE)) *daccess |= cpu_to_le32(WRITE_DAC | WRITE_OWNER | SYNCHRONIZE | FILE_WRITE_DATA | FILE_APPEND_DATA | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES | FILE_DELETE_CHILD); if (!inode_permission(idmap, d_inode(dentry), MAY_OPEN | MAY_READ)) *daccess |= FILE_READ_DATA_LE | FILE_READ_EA_LE; if (!inode_permission(idmap, d_inode(dentry), MAY_OPEN | MAY_EXEC)) *daccess |= FILE_EXECUTE_LE; if (!inode_permission(idmap, d_inode(dentry->d_parent), MAY_EXEC | MAY_WRITE)) *daccess |= FILE_DELETE_LE; } /** * ksmbd_vfs_create() - vfs helper for smb create file * @work: work * @name: file name that is relative to share * @mode: file create mode * * 
Return: 0 on success, otherwise error */ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode) { struct path path; struct dentry *dentry; int err; dentry = ksmbd_vfs_kern_path_create(work, name, LOOKUP_NO_SYMLINKS, &path); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); if (err != -ENOENT) pr_err("path create failed for %s, err %d\n", name, err); return err; } err = mnt_want_write(path.mnt); if (err) goto out_err; mode |= S_IFREG; err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry), dentry, mode, true); if (!err) { ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(dentry)); } else { pr_err("File(%s): creation failed (err:%d)\n", name, err); } mnt_drop_write(path.mnt); out_err: done_path_create(&path, dentry); return err; } /** * ksmbd_vfs_mkdir() - vfs helper for smb create directory * @work: work * @name: directory name that is relative to share * @mode: directory create mode * * Return: 0 on success, otherwise error */ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode) { struct mnt_idmap *idmap; struct path path; struct dentry *dentry; int err; dentry = ksmbd_vfs_kern_path_create(work, name, LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY, &path); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); if (err != -EEXIST) ksmbd_debug(VFS, "path create failed for %s, err %d\n", name, err); return err; } err = mnt_want_write(path.mnt); if (err) goto out_err2; idmap = mnt_idmap(path.mnt); mode |= S_IFDIR; err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode); if (!err && d_unhashed(dentry)) { struct dentry *d; d = lookup_one(idmap, dentry->d_name.name, dentry->d_parent, dentry->d_name.len); if (IS_ERR(d)) { err = PTR_ERR(d); goto out_err1; } if (unlikely(d_is_negative(d))) { dput(d); err = -ENOENT; goto out_err1; } ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d)); dput(d); } out_err1: mnt_drop_write(path.mnt); out_err2: done_path_create(&path, dentry); if (err) pr_err("mkdir(%s): creation failed (err:%d)\n", name, err); return err; } static ssize_t ksmbd_vfs_getcasexattr(struct mnt_idmap *idmap, struct dentry *dentry, char *attr_name, int attr_name_len, char **attr_value) { char *name, *xattr_list = NULL; ssize_t value_len = -ENOENT, xattr_list_len; xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list); if (xattr_list_len <= 0) goto out; for (name = xattr_list; name - xattr_list < xattr_list_len; name += strlen(name) + 1) { ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name)); if (strncasecmp(attr_name, name, attr_name_len)) continue; value_len = ksmbd_vfs_getxattr(idmap, dentry, name, attr_value); if (value_len < 0) pr_err("failed to get xattr in file\n"); break; } out: kvfree(xattr_list); return value_len; } static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos, size_t count) { ssize_t v_len; char *stream_buf = NULL; ksmbd_debug(VFS, "read stream data pos : %llu, count : %zd\n", *pos, count); v_len = ksmbd_vfs_getcasexattr(file_mnt_idmap(fp->filp), fp->filp->f_path.dentry, fp->stream.name, fp->stream.size, &stream_buf); if ((int)v_len <= 0) return (int)v_len; if (v_len <= *pos) { count = -EINVAL; goto free_buf; } if (v_len - *pos < count) count = v_len - *pos; memcpy(buf, &stream_buf[*pos], count); free_buf: kvfree(stream_buf); return count; } /** * check_lock_range() - vfs helper for smb byte range file locking * @filp: the file to apply the lock to * @start: lock start byte offset * @end: lock end byte offset * @type: byte range type read/write * * Return: 0 on success, otherwise error */ static 
int check_lock_range(struct file *filp, loff_t start, loff_t end, unsigned char type) { struct file_lock *flock; struct file_lock_context *ctx = locks_inode_context(file_inode(filp)); int error = 0; if (!ctx || list_empty_careful(&ctx->flc_posix)) return 0; spin_lock(&ctx->flc_lock); list_for_each_entry(flock, &ctx->flc_posix, fl_list) { /* check conflict locks */ if (flock->fl_end >= start && end >= flock->fl_start) { if (flock->fl_type == F_RDLCK) { if (type == WRITE) { pr_err("write not allowed due to a shared lock\n"); error = 1; goto out; } } else if (flock->fl_type == F_WRLCK) { /* check owner in lock */ if (flock->fl_file != filp) { error = 1; pr_err("rw access not allowed due to an exclusive lock held by another open\n"); goto out; } } } } out: spin_unlock(&ctx->flc_lock); return error; } /** * ksmbd_vfs_read() - vfs helper for smb file read * @work: smb work * @fp: ksmbd file pointer of open file * @count: read byte count * @pos: file pos * @rbuf: read data buffer * * Return: number of read bytes on success, otherwise error */ int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count, loff_t *pos, char *rbuf) { struct file *filp = fp->filp; ssize_t nbytes = 0; struct inode *inode = file_inode(filp); if (S_ISDIR(inode->i_mode)) return -EISDIR; if (unlikely(count == 0)) return 0; if (work->conn->connection_type) { if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) { pr_err("no right to read(%pD)\n", fp->filp); return -EACCES; } } if (ksmbd_stream_fd(fp)) return ksmbd_vfs_stream_read(fp, rbuf, pos, count); if (!work->tcon->posix_extensions) { int ret; ret = check_lock_range(filp, *pos, *pos + count - 1, READ); if (ret) { pr_err("unable to read due to lock\n"); return -EAGAIN; } } nbytes = kernel_read(filp, rbuf, count, pos); if (nbytes < 0) { pr_err("smb read failed, err = %zd\n", nbytes); return nbytes; } filp->f_pos = *pos; return nbytes; } static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, size_t count) { char *stream_buf = NULL, *wbuf; struct mnt_idmap *idmap = file_mnt_idmap(fp->filp); size_t size; ssize_t v_len; int err = 0; ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n", *pos, count); size = *pos + count; if (size > XATTR_SIZE_MAX) { size = XATTR_SIZE_MAX; count = (*pos + count) - XATTR_SIZE_MAX; } v_len = ksmbd_vfs_getcasexattr(idmap, fp->filp->f_path.dentry, fp->stream.name, fp->stream.size, &stream_buf); if (v_len < 0) { pr_err("stream not found in xattr : %zd\n", v_len); err = v_len; goto out; } if (v_len < size) { wbuf = kvzalloc(size, GFP_KERNEL); if (!wbuf) { err = -ENOMEM; goto out; } if (v_len > 0) memcpy(wbuf, stream_buf, v_len); kvfree(stream_buf); stream_buf = wbuf; } memcpy(&stream_buf[*pos], buf, count); err = ksmbd_vfs_setxattr(idmap, &fp->filp->f_path, fp->stream.name, (void *)stream_buf, size, 0); if (err < 0) goto out; fp->filp->f_pos = *pos; err = 0; out: kvfree(stream_buf); return err; } /** * ksmbd_vfs_write() - vfs helper for smb file write * @work: work * @fp: ksmbd file pointer of open file * @buf: buf containing data for writing * @count: write byte count * @pos: file pos * @sync: fsync after write * @written: number of bytes written * * Return: 0 on success, otherwise error */ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, char *buf, size_t count, loff_t *pos, bool sync, ssize_t *written) { struct file *filp; loff_t offset = *pos; int err = 0; if (work->conn->connection_type) { if (!(fp->daccess & FILE_WRITE_DATA_LE)) { pr_err("no right to write(%pD)\n", fp->filp); err = -EACCES; goto out; } } 
filp = fp->filp; if (ksmbd_stream_fd(fp)) { err = ksmbd_vfs_stream_write(fp, buf, pos, count); if (!err) *written = count; goto out; } if (!work->tcon->posix_extensions) { err = check_lock_range(filp, *pos, *pos + count - 1, WRITE); if (err) { pr_err("unable to write due to lock\n"); err = -EAGAIN; goto out; } } /* Do we need to break any of a levelII oplock? */ smb_break_all_levII_oplock(work, fp, 1); err = kernel_write(filp, buf, count, pos); if (err < 0) { ksmbd_debug(VFS, "smb write failed, err = %d\n", err); goto out; } filp->f_pos = *pos; *written = err; err = 0; if (sync) { err = vfs_fsync_range(filp, offset, offset + *written, 0); if (err < 0) pr_err("fsync failed for filename = %pD, err = %d\n", fp->filp, err); } out: return err; } /** * ksmbd_vfs_getattr() - vfs helper for smb getattr * @work: work * @fid: file id of open file * @attrs: inode attributes * * Return: 0 on success, otherwise error */ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat) { int err; err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT); if (err) pr_err("getattr failed, err %d\n", err); return err; } /** * ksmbd_vfs_fsync() - vfs helper for smb fsync * @work: work * @fid: file id of open file * * Return: 0 on success, otherwise error */ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id) { struct ksmbd_file *fp; int err; fp = ksmbd_lookup_fd_slow(work, fid, p_id); if (!fp) { pr_err("failed to get filp for fid %llu\n", fid); return -ENOENT; } err = vfs_fsync(fp->filp, 0); if (err < 0) pr_err("smb fsync failed, err = %d\n", err); ksmbd_fd_put(work, fp); return err; } /** * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink * @name: directory or file name that is relative to share * * Return: 0 on success, otherwise error */ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path) { struct mnt_idmap *idmap; struct dentry *parent = path->dentry->d_parent; int err; if (ksmbd_override_fsids(work)) return -ENOMEM; if (!d_inode(path->dentry)->i_nlink) { err = -ENOENT; goto out_err; } err = mnt_want_write(path->mnt); if (err) goto out_err; idmap = mnt_idmap(path->mnt); if (S_ISDIR(d_inode(path->dentry)->i_mode)) { err = vfs_rmdir(idmap, d_inode(parent), path->dentry); if (err && err != -ENOTEMPTY) ksmbd_debug(VFS, "rmdir failed, err %d\n", err); } else { err = vfs_unlink(idmap, d_inode(parent), path->dentry, NULL); if (err) ksmbd_debug(VFS, "unlink failed, err %d\n", err); } mnt_drop_write(path->mnt); out_err: ksmbd_revert_fsids(work); return err; } /** * ksmbd_vfs_link() - vfs helper for creating smb hardlink * @oldname: source file name * @newname: hardlink name that is relative to share * * Return: 0 on success, otherwise error */ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname, const char *newname) { struct path oldpath, newpath; struct dentry *dentry; int err; if (ksmbd_override_fsids(work)) return -ENOMEM; err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath); if (err) { pr_err("cannot get linux path for %s, err = %d\n", oldname, err); goto out1; } dentry = ksmbd_vfs_kern_path_create(work, newname, LOOKUP_NO_SYMLINKS | LOOKUP_REVAL, &newpath); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); pr_err("path create err for %s, err %d\n", newname, err); goto out2; } err = -EXDEV; if (oldpath.mnt != newpath.mnt) { pr_err("vfs_link failed err %d\n", err); goto out3; } err = mnt_want_write(newpath.mnt); if (err) goto out3; err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt), d_inode(newpath.dentry), dentry, NULL); if (err) 
ksmbd_debug(VFS, "vfs_link failed err %d\n", err); mnt_drop_write(newpath.mnt); out3: done_path_create(&newpath, dentry); out2: path_put(&oldpath); out1: ksmbd_revert_fsids(work); return err; } int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path, char *newname, int flags) { struct dentry *old_parent, *new_dentry, *trap; struct dentry *old_child = old_path->dentry; struct path new_path; struct qstr new_last; struct renamedata rd; struct filename *to; struct ksmbd_share_config *share_conf = work->tcon->share_conf; struct ksmbd_file *parent_fp; int new_type; int err, lookup_flags = LOOKUP_NO_SYMLINKS; if (ksmbd_override_fsids(work)) return -ENOMEM; to = getname_kernel(newname); if (IS_ERR(to)) { err = PTR_ERR(to); goto revert_fsids; } retry: err = vfs_path_parent_lookup(to, lookup_flags | LOOKUP_BENEATH, &new_path, &new_last, &new_type, &share_conf->vfs_path); if (err) goto out1; if (old_path->mnt != new_path.mnt) { err = -EXDEV; goto out2; } err = mnt_want_write(old_path->mnt); if (err) goto out2; trap = lock_rename_child(old_child, new_path.dentry); old_parent = dget(old_child->d_parent); if (d_unhashed(old_child)) { err = -EINVAL; goto out3; } parent_fp = ksmbd_lookup_fd_inode(d_inode(old_child->d_parent)); if (parent_fp) { if (parent_fp->daccess & FILE_DELETE_LE) { pr_err("parent dir is opened with delete access\n"); err = -ESHARE; ksmbd_fd_put(work, parent_fp); goto out3; } ksmbd_fd_put(work, parent_fp); } new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry, lookup_flags | LOOKUP_RENAME_TARGET); if (IS_ERR(new_dentry)) { err = PTR_ERR(new_dentry); goto out3; } if (d_is_symlink(new_dentry)) { err = -EACCES; goto out4; } if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) { err = -EEXIST; goto out4; } if (old_child == trap) { err = -EINVAL; goto out4; } if (new_dentry == trap) { err = -ENOTEMPTY; goto out4; } rd.old_mnt_idmap = mnt_idmap(old_path->mnt), rd.old_dir = d_inode(old_parent), rd.old_dentry = old_child, rd.new_mnt_idmap = mnt_idmap(new_path.mnt), rd.new_dir = new_path.dentry->d_inode, rd.new_dentry = new_dentry, rd.flags = flags, rd.delegated_inode = NULL, err = vfs_rename(&rd); if (err) ksmbd_debug(VFS, "vfs_rename failed err %d\n", err); out4: dput(new_dentry); out3: dput(old_parent); unlock_rename(old_parent, new_path.dentry); mnt_drop_write(old_path->mnt); out2: path_put(&new_path); if (retry_estale(err, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out1: putname(to); revert_fsids: ksmbd_revert_fsids(work); return err; } /** * ksmbd_vfs_truncate() - vfs helper for smb file truncate * @work: work * @fid: file id of old file * @size: truncate to given size * * Return: 0 on success, otherwise error */ int ksmbd_vfs_truncate(struct ksmbd_work *work, struct ksmbd_file *fp, loff_t size) { int err = 0; struct file *filp; filp = fp->filp; /* Do we need to break any of a levelII oplock? 
*/ smb_break_all_levII_oplock(work, fp, 1); if (!work->tcon->posix_extensions) { struct inode *inode = file_inode(filp); if (size < inode->i_size) { err = check_lock_range(filp, size, inode->i_size - 1, WRITE); } else { err = check_lock_range(filp, inode->i_size, size - 1, WRITE); } if (err) { pr_err("failed due to lock\n"); return -EAGAIN; } } err = vfs_truncate(&filp->f_path, size); if (err) pr_err("truncate failed, err %d\n", err); return err; } /** * ksmbd_vfs_listxattr() - vfs helper for smb list extended attributes * @dentry: dentry of file for listing xattrs * @list: destination buffer * @size: destination buffer length * * Return: xattr list length on success, otherwise error */ ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list) { ssize_t size; char *vlist = NULL; size = vfs_listxattr(dentry, NULL, 0); if (size <= 0) return size; vlist = kvzalloc(size, GFP_KERNEL); if (!vlist) return -ENOMEM; *list = vlist; size = vfs_listxattr(dentry, vlist, size); if (size < 0) { ksmbd_debug(VFS, "listxattr failed\n"); kvfree(vlist); *list = NULL; } return size; } static ssize_t ksmbd_vfs_xattr_len(struct mnt_idmap *idmap, struct dentry *dentry, char *xattr_name) { return vfs_getxattr(idmap, dentry, xattr_name, NULL, 0); } /** * ksmbd_vfs_getxattr() - vfs helper for smb get extended attributes value * @idmap: idmap * @dentry: dentry of file for getting xattrs * @xattr_name: name of xattr name to query * @xattr_buf: destination buffer xattr value * * Return: read xattr value length on success, otherwise error */ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap, struct dentry *dentry, char *xattr_name, char **xattr_buf) { ssize_t xattr_len; char *buf; *xattr_buf = NULL; xattr_len = ksmbd_vfs_xattr_len(idmap, dentry, xattr_name); if (xattr_len < 0) return xattr_len; buf = kmalloc(xattr_len + 1, GFP_KERNEL); if (!buf) return -ENOMEM; xattr_len = vfs_getxattr(idmap, dentry, xattr_name, (void *)buf, xattr_len); if (xattr_len > 0) *xattr_buf = buf; else kfree(buf); return xattr_len; } /** * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value * @idmap: idmap of the relevant mount * @dentry: dentry to set XATTR at * @attr_name: xattr name for setxattr * @attr_value: xattr value to set * @attr_size: size of xattr value * @flags: destination buffer length * * Return: 0 on success, otherwise error */ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap, const struct path *path, const char *attr_name, void *attr_value, size_t attr_size, int flags) { int err; err = mnt_want_write(path->mnt); if (err) return err; err = vfs_setxattr(idmap, path->dentry, attr_name, attr_value, attr_size, flags); if (err) ksmbd_debug(VFS, "setxattr failed, err %d\n", err); mnt_drop_write(path->mnt); return err; } /** * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options * @filp: file pointer for IO * @options: smb IO options */ void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option) { struct address_space *mapping; mapping = filp->f_mapping; if (!option || !mapping) return; if (option & FILE_WRITE_THROUGH_LE) { filp->f_flags |= O_SYNC; } else if (option & FILE_SEQUENTIAL_ONLY_LE) { filp->f_ra.ra_pages = inode_to_bdi(mapping->host)->ra_pages * 2; spin_lock(&filp->f_lock); filp->f_mode &= ~FMODE_RANDOM; spin_unlock(&filp->f_lock); } else if (option & FILE_RANDOM_ACCESS_LE) { spin_lock(&filp->f_lock); filp->f_mode |= FMODE_RANDOM; spin_unlock(&filp->f_lock); } } int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp, loff_t off, loff_t len) { 
smb_break_all_levII_oplock(work, fp, 1); if (fp->f_ci->m_fattr & FILE_ATTRIBUTE_SPARSE_FILE_LE) return vfs_fallocate(fp->filp, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len); return vfs_fallocate(fp->filp, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, off, len); } int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length, struct file_allocated_range_buffer *ranges, unsigned int in_count, unsigned int *out_count) { struct file *f = fp->filp; struct inode *inode = file_inode(fp->filp); loff_t maxbytes = (u64)inode->i_sb->s_maxbytes, end; loff_t extent_start, extent_end; int ret = 0; if (start > maxbytes) return -EFBIG; if (!in_count) return 0; /* * Shrink request scope to what the fs can actually handle. */ if (length > maxbytes || (maxbytes - length) < start) length = maxbytes - start; if (start + length > inode->i_size) length = inode->i_size - start; *out_count = 0; end = start + length; while (start < end && *out_count < in_count) { extent_start = vfs_llseek(f, start, SEEK_DATA); if (extent_start < 0) { if (extent_start != -ENXIO) ret = (int)extent_start; break; } if (extent_start >= end) break; extent_end = vfs_llseek(f, extent_start, SEEK_HOLE); if (extent_end < 0) { if (extent_end != -ENXIO) ret = (int)extent_end; break; } else if (extent_start >= extent_end) { break; } ranges[*out_count].file_offset = cpu_to_le64(extent_start); ranges[(*out_count)++].length = cpu_to_le64(min(extent_end, end) - extent_start); start = extent_end; } return ret; } int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, const struct path *path, char *attr_name) { int err; err = mnt_want_write(path->mnt); if (err) return err; err = vfs_removexattr(idmap, path->dentry, attr_name); mnt_drop_write(path->mnt); return err; } int ksmbd_vfs_unlink(struct file *filp) { int err = 0; struct dentry *dir, *dentry = filp->f_path.dentry; struct mnt_idmap *idmap = file_mnt_idmap(filp); err = mnt_want_write(filp->f_path.mnt); if (err) return err; dir = dget_parent(dentry); err = ksmbd_vfs_lock_parent(dir, dentry); if (err) goto out; dget(dentry); if (S_ISDIR(d_inode(dentry)->i_mode)) err = vfs_rmdir(idmap, d_inode(dir), dentry); else err = vfs_unlink(idmap, d_inode(dir), dentry, NULL); dput(dentry); inode_unlock(d_inode(dir)); if (err) ksmbd_debug(VFS, "failed to delete, err %d\n", err); out: dput(dir); mnt_drop_write(filp->f_path.mnt); return err; } static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct ksmbd_readdir_data *buf; buf = container_of(ctx, struct ksmbd_readdir_data, ctx); buf->dirent_count++; return buf->dirent_count <= 2; } /** * ksmbd_vfs_empty_dir() - check for empty directory * @fp: ksmbd file pointer * * Return: true if directory empty, otherwise false */ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp) { int err; struct ksmbd_readdir_data readdir_data; memset(&readdir_data, 0, sizeof(struct ksmbd_readdir_data)); set_ctx_actor(&readdir_data.ctx, __dir_empty); readdir_data.dirent_count = 0; err = iterate_dir(fp->filp, &readdir_data.ctx); if (readdir_data.dirent_count > 2) err = -ENOTEMPTY; else err = 0; return err; } static bool __caseless_lookup(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct ksmbd_readdir_data *buf; int cmp = -EINVAL; buf = container_of(ctx, struct ksmbd_readdir_data, ctx); if (buf->used != namlen) return true; if (IS_ENABLED(CONFIG_UNICODE) && buf->um) { const struct qstr q_buf = {.name = buf->private, .len = buf->used}; const 
struct qstr q_name = {.name = name, .len = namlen}; cmp = utf8_strncasecmp(buf->um, &q_buf, &q_name); } if (cmp < 0) cmp = strncasecmp((char *)buf->private, name, namlen); if (!cmp) { memcpy((char *)buf->private, name, namlen); buf->dirent_count = 1; return false; } return true; } /** * ksmbd_vfs_lookup_in_dir() - lookup a file in a directory * @dir: path info * @name: filename to lookup * @namelen: filename length * * Return: 0 on success, otherwise error */ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name, size_t namelen, struct unicode_map *um) { int ret; struct file *dfilp; int flags = O_RDONLY | O_LARGEFILE; struct ksmbd_readdir_data readdir_data = { .ctx.actor = __caseless_lookup, .private = name, .used = namelen, .dirent_count = 0, .um = um, }; dfilp = dentry_open(dir, flags, current_cred()); if (IS_ERR(dfilp)) return PTR_ERR(dfilp); ret = iterate_dir(dfilp, &readdir_data.ctx); if (readdir_data.dirent_count > 0) ret = 0; fput(dfilp); return ret; } /** * ksmbd_vfs_kern_path_locked() - lookup a file and get path info * @name: file path that is relative to share * @flags: lookup flags * @path: if lookup succeed, return path info * @caseless: caseless filename lookup * * Return: 0 on success, otherwise error */ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, unsigned int flags, struct path *parent_path, struct path *path, bool caseless) { struct ksmbd_share_config *share_conf = work->tcon->share_conf; int err; err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path, path); if (!err) return 0; if (caseless) { char *filepath; size_t path_len, remain_len; filepath = kstrdup(name, GFP_KERNEL); if (!filepath) return -ENOMEM; path_len = strlen(filepath); remain_len = path_len; *parent_path = share_conf->vfs_path; path_get(parent_path); while (d_can_lookup(parent_path->dentry)) { char *filename = filepath + path_len - remain_len; char *next = strchrnul(filename, '/'); size_t filename_len = next - filename; bool is_last = !next[0]; if (filename_len == 0) break; err = ksmbd_vfs_lookup_in_dir(parent_path, filename, filename_len, work->conn->um); if (err) goto out2; next[0] = '\0'; err = vfs_path_lookup(share_conf->vfs_path.dentry, share_conf->vfs_path.mnt, filepath, flags, path); if (err) goto out2; else if (is_last) goto out1; path_put(parent_path); *parent_path = *path; next[0] = '/'; remain_len -= filename_len + 1; } err = -EINVAL; out2: path_put(parent_path); out1: kfree(filepath); } if (!err) { err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); if (err) { path_put(path); path_put(parent_path); } } return err; } struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, const char *name, unsigned int flags, struct path *path) { char *abs_name; struct dentry *dent; abs_name = convert_to_unix_name(work->tcon->share_conf, name); if (!abs_name) return ERR_PTR(-ENOMEM); dent = kern_path_create(AT_FDCWD, abs_name, path, flags); kfree(abs_name); return dent; } int ksmbd_vfs_remove_acl_xattrs(struct mnt_idmap *idmap, const struct path *path) { char *name, *xattr_list = NULL; ssize_t xattr_list_len; int err = 0; xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; } else if (!xattr_list_len) { ksmbd_debug(SMB, "empty xattr in the file\n"); goto out; } err = mnt_want_write(path->mnt); if (err) goto out; for (name = xattr_list; name - xattr_list < xattr_list_len; name += strlen(name) + 1) { ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name)); if (!strncmp(name, 
XATTR_NAME_POSIX_ACL_ACCESS, sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1) || !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1)) { err = vfs_remove_acl(idmap, path->dentry, name); if (err) ksmbd_debug(SMB, "remove acl xattr failed : %s\n", name); } } mnt_drop_write(path->mnt); out: kvfree(xattr_list); return err; } int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path) { char *name, *xattr_list = NULL; ssize_t xattr_list_len; int err = 0; xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; } else if (!xattr_list_len) { ksmbd_debug(SMB, "empty xattr in the file\n"); goto out; } for (name = xattr_list; name - xattr_list < xattr_list_len; name += strlen(name) + 1) { ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name)); if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) { err = ksmbd_vfs_remove_xattr(idmap, path, name); if (err) ksmbd_debug(SMB, "remove xattr failed : %s\n", name); } } out: kvfree(xattr_list); return err; } static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct mnt_idmap *idmap, struct inode *inode, int acl_type) { struct xattr_smb_acl *smb_acl = NULL; struct posix_acl *posix_acls; struct posix_acl_entry *pa_entry; struct xattr_acl_entry *xa_entry; int i; if (!IS_ENABLED(CONFIG_FS_POSIX_ACL)) return NULL; posix_acls = get_inode_acl(inode, acl_type); if (IS_ERR_OR_NULL(posix_acls)) return NULL; smb_acl = kzalloc(sizeof(struct xattr_smb_acl) + sizeof(struct xattr_acl_entry) * posix_acls->a_count, GFP_KERNEL); if (!smb_acl) goto out; smb_acl->count = posix_acls->a_count; pa_entry = posix_acls->a_entries; xa_entry = smb_acl->entries; for (i = 0; i < posix_acls->a_count; i++, pa_entry++, xa_entry++) { switch (pa_entry->e_tag) { case ACL_USER: xa_entry->type = SMB_ACL_USER; xa_entry->uid = posix_acl_uid_translate(idmap, pa_entry); break; case ACL_USER_OBJ: xa_entry->type = SMB_ACL_USER_OBJ; break; case ACL_GROUP: xa_entry->type = SMB_ACL_GROUP; xa_entry->gid = posix_acl_gid_translate(idmap, pa_entry); break; case ACL_GROUP_OBJ: xa_entry->type = SMB_ACL_GROUP_OBJ; break; case ACL_OTHER: xa_entry->type = SMB_ACL_OTHER; break; case ACL_MASK: xa_entry->type = SMB_ACL_MASK; break; default: pr_err("unknown type : 0x%x\n", pa_entry->e_tag); goto out; } if (pa_entry->e_perm & ACL_READ) xa_entry->perm |= SMB_ACL_READ; if (pa_entry->e_perm & ACL_WRITE) xa_entry->perm |= SMB_ACL_WRITE; if (pa_entry->e_perm & ACL_EXECUTE) xa_entry->perm |= SMB_ACL_EXECUTE; } out: posix_acl_release(posix_acls); return smb_acl; } int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn, struct mnt_idmap *idmap, const struct path *path, struct smb_ntsd *pntsd, int len) { int rc; struct ndr sd_ndr = {0}, acl_ndr = {0}; struct xattr_ntacl acl = {0}; struct xattr_smb_acl *smb_acl, *def_smb_acl = NULL; struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); acl.version = 4; acl.hash_type = XATTR_SD_HASH_TYPE_SHA256; acl.current_time = ksmbd_UnixTimeToNT(current_time(inode)); memcpy(acl.desc, "posix_acl", 9); acl.desc_len = 10; pntsd->osidoffset = cpu_to_le32(le32_to_cpu(pntsd->osidoffset) + NDR_NTSD_OFFSETOF); pntsd->gsidoffset = cpu_to_le32(le32_to_cpu(pntsd->gsidoffset) + NDR_NTSD_OFFSETOF); pntsd->dacloffset = cpu_to_le32(le32_to_cpu(pntsd->dacloffset) + NDR_NTSD_OFFSETOF); acl.sd_buf = (char *)pntsd; acl.sd_size = len; rc = ksmbd_gen_sd_hash(conn, acl.sd_buf, acl.sd_size, acl.hash); if (rc) { pr_err("failed to generate hash for ndr acl\n"); return rc; } smb_acl = 
ksmbd_vfs_make_xattr_posix_acl(idmap, inode, ACL_TYPE_ACCESS); if (S_ISDIR(inode->i_mode)) def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(idmap, inode, ACL_TYPE_DEFAULT); rc = ndr_encode_posix_acl(&acl_ndr, idmap, inode, smb_acl, def_smb_acl); if (rc) { pr_err("failed to encode ndr to posix acl\n"); goto out; } rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, acl.posix_acl_hash); if (rc) { pr_err("failed to generate hash for ndr acl\n"); goto out; } rc = ndr_encode_v4_ntacl(&sd_ndr, &acl); if (rc) { pr_err("failed to encode ndr to posix acl\n"); goto out; } rc = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_SD, sd_ndr.data, sd_ndr.offset, 0); if (rc < 0) pr_err("Failed to store XATTR ntacl :%d\n", rc); kfree(sd_ndr.data); out: kfree(acl_ndr.data); kfree(smb_acl); kfree(def_smb_acl); return rc; } int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn, struct mnt_idmap *idmap, struct dentry *dentry, struct smb_ntsd **pntsd) { int rc; struct ndr n; struct inode *inode = d_inode(dentry); struct ndr acl_ndr = {0}; struct xattr_ntacl acl; struct xattr_smb_acl *smb_acl = NULL, *def_smb_acl = NULL; __u8 cmp_hash[XATTR_SD_HASH_SIZE] = {0}; rc = ksmbd_vfs_getxattr(idmap, dentry, XATTR_NAME_SD, &n.data); if (rc <= 0) return rc; n.length = rc; rc = ndr_decode_v4_ntacl(&n, &acl); if (rc) goto free_n_data; smb_acl = ksmbd_vfs_make_xattr_posix_acl(idmap, inode, ACL_TYPE_ACCESS); if (S_ISDIR(inode->i_mode)) def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(idmap, inode, ACL_TYPE_DEFAULT); rc = ndr_encode_posix_acl(&acl_ndr, idmap, inode, smb_acl, def_smb_acl); if (rc) { pr_err("failed to encode ndr to posix acl\n"); goto out_free; } rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, cmp_hash); if (rc) { pr_err("failed to generate hash for ndr acl\n"); goto out_free; } if (memcmp(cmp_hash, acl.posix_acl_hash, XATTR_SD_HASH_SIZE)) { pr_err("hash value diff\n"); rc = -EINVAL; goto out_free; } *pntsd = acl.sd_buf; if (acl.sd_size < sizeof(struct smb_ntsd)) { pr_err("sd size is invalid\n"); goto out_free; } (*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) - NDR_NTSD_OFFSETOF); (*pntsd)->gsidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->gsidoffset) - NDR_NTSD_OFFSETOF); (*pntsd)->dacloffset = cpu_to_le32(le32_to_cpu((*pntsd)->dacloffset) - NDR_NTSD_OFFSETOF); rc = acl.sd_size; out_free: kfree(acl_ndr.data); kfree(smb_acl); kfree(def_smb_acl); if (rc < 0) { kfree(acl.sd_buf); *pntsd = NULL; } free_n_data: kfree(n.data); return rc; } int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap, const struct path *path, struct xattr_dos_attrib *da) { struct ndr n; int err; err = ndr_encode_dos_attr(&n, da); if (err) return err; err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE, (void *)n.data, n.offset, 0); if (err) ksmbd_debug(SMB, "failed to store dos attribute in xattr\n"); kfree(n.data); return err; } int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap, struct dentry *dentry, struct xattr_dos_attrib *da) { struct ndr n; int err; err = ksmbd_vfs_getxattr(idmap, dentry, XATTR_NAME_DOS_ATTRIBUTE, (char **)&n.data); if (err > 0) { n.length = err; if (ndr_decode_dos_attr(&n, da)) err = -EINVAL; kfree(n.data); } else { ksmbd_debug(SMB, "failed to load dos attribute in xattr\n"); } return err; } /** * ksmbd_vfs_init_kstat() - convert unix stat information to smb stat format * @p: destination buffer * @ksmbd_kstat: ksmbd kstat wrapper */ void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat) { struct file_directory_info *info = (struct file_directory_info *)(*p); 
struct kstat *kstat = ksmbd_kstat->kstat; u64 time; info->FileIndex = 0; info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time); time = ksmbd_UnixTimeToNT(kstat->atime); info->LastAccessTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(kstat->mtime); info->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(kstat->ctime); info->ChangeTime = cpu_to_le64(time); if (ksmbd_kstat->file_attributes & FILE_ATTRIBUTE_DIRECTORY_LE) { info->EndOfFile = 0; info->AllocationSize = 0; } else { info->EndOfFile = cpu_to_le64(kstat->size); info->AllocationSize = cpu_to_le64(kstat->blocks << 9); } info->ExtFileAttributes = ksmbd_kstat->file_attributes; return info; } int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work, struct mnt_idmap *idmap, struct dentry *dentry, struct ksmbd_kstat *ksmbd_kstat) { u64 time; int rc; generic_fillattr(idmap, STATX_BASIC_STATS, d_inode(dentry), ksmbd_kstat->kstat); time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime); ksmbd_kstat->create_time = time; /* * Set default attribute values for the case that the 'store dos * attributes' option is disabled, or that ACLs are disabled in the * server's filesystem while the option is enabled. */ if (S_ISDIR(ksmbd_kstat->kstat->mode)) ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_DIRECTORY_LE; else ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_ARCHIVE_LE; if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) { struct xattr_dos_attrib da; rc = ksmbd_vfs_get_dos_attrib_xattr(idmap, dentry, &da); if (rc > 0) { ksmbd_kstat->file_attributes = cpu_to_le32(da.attr); ksmbd_kstat->create_time = da.create_time; } else { ksmbd_debug(VFS, "failed to load dos attribute\n"); } } return 0; } ssize_t ksmbd_vfs_casexattr_len(struct mnt_idmap *idmap, struct dentry *dentry, char *attr_name, int attr_name_len) { char *name, *xattr_list = NULL; ssize_t value_len = -ENOENT, xattr_list_len; xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list); if (xattr_list_len <= 0) goto out; for (name = xattr_list; name - xattr_list < xattr_list_len; name += strlen(name) + 1) { ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name)); if (strncasecmp(attr_name, name, attr_name_len)) continue; value_len = ksmbd_vfs_xattr_len(idmap, dentry, name); break; } out: kvfree(xattr_list); return value_len; } int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name, size_t *xattr_stream_name_size, int s_type) { char *type, *buf; if (s_type == DIR_STREAM) type = ":$INDEX_ALLOCATION"; else type = ":$DATA"; buf = kasprintf(GFP_KERNEL, "%s%s%s", XATTR_NAME_STREAM, stream_name, type); if (!buf) return -ENOMEM; *xattr_stream_name = buf; *xattr_stream_name_size = strlen(buf) + 1; return 0; } int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work, struct ksmbd_file *src_fp, struct ksmbd_file *dst_fp, struct srv_copychunk *chunks, unsigned int chunk_count, unsigned int *chunk_count_written, unsigned int *chunk_size_written, loff_t *total_size_written) { unsigned int i; loff_t src_off, dst_off, src_file_size; size_t len; int ret; *chunk_count_written = 0; *chunk_size_written = 0; *total_size_written = 0; if (!(src_fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) { pr_err("no right to read(%pD)\n", src_fp->filp); return -EACCES; } if (!(dst_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { pr_err("no right to write(%pD)\n", dst_fp->filp); return -EACCES; } if (ksmbd_stream_fd(src_fp) || ksmbd_stream_fd(dst_fp)) return -EBADF; smb_break_all_levII_oplock(work, dst_fp, 1); if (!work->tcon->posix_extensions) { for (i = 0; i < chunk_count; i++) { src_off 
= le64_to_cpu(chunks[i].SourceOffset); dst_off = le64_to_cpu(chunks[i].TargetOffset); len = le32_to_cpu(chunks[i].Length); if (check_lock_range(src_fp->filp, src_off, src_off + len - 1, READ)) return -EAGAIN; if (check_lock_range(dst_fp->filp, dst_off, dst_off + len - 1, WRITE)) return -EAGAIN; } } src_file_size = i_size_read(file_inode(src_fp->filp)); for (i = 0; i < chunk_count; i++) { src_off = le64_to_cpu(chunks[i].SourceOffset); dst_off = le64_to_cpu(chunks[i].TargetOffset); len = le32_to_cpu(chunks[i].Length); if (src_off + len > src_file_size) return -E2BIG; ret = vfs_copy_file_range(src_fp->filp, src_off, dst_fp->filp, dst_off, len, 0); if (ret == -EOPNOTSUPP || ret == -EXDEV) ret = vfs_copy_file_range(src_fp->filp, src_off, dst_fp->filp, dst_off, len, COPY_FILE_SPLICE); if (ret < 0) return ret; *chunk_count_written += 1; *total_size_written += ret; } return 0; } void ksmbd_vfs_posix_lock_wait(struct file_lock *flock) { wait_event(flock->fl_wait, !flock->fl_blocker); } int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout) { return wait_event_interruptible_timeout(flock->fl_wait, !flock->fl_blocker, timeout); } void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock) { locks_delete_block(flock); } int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap, struct path *path) { struct posix_acl_state acl_state; struct posix_acl *acls; struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); int rc; if (!IS_ENABLED(CONFIG_FS_POSIX_ACL)) return -EOPNOTSUPP; ksmbd_debug(SMB, "Set posix acls\n"); rc = init_acl_state(&acl_state, 1); if (rc) return rc; /* Set default owner group */ acl_state.owner.allow = (inode->i_mode & 0700) >> 6; acl_state.group.allow = (inode->i_mode & 0070) >> 3; acl_state.other.allow = inode->i_mode & 0007; acl_state.users->aces[acl_state.users->n].uid = inode->i_uid; acl_state.users->aces[acl_state.users->n++].perms.allow = acl_state.owner.allow; acl_state.groups->aces[acl_state.groups->n].gid = inode->i_gid; acl_state.groups->aces[acl_state.groups->n++].perms.allow = acl_state.group.allow; acl_state.mask.allow = 0x07; acls = posix_acl_alloc(6, GFP_KERNEL); if (!acls) { free_acl_state(&acl_state); return -ENOMEM; } posix_state_to_acl(&acl_state, acls->a_entries); rc = mnt_want_write(path->mnt); if (rc) goto out_err; rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls); if (rc < 0) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n", rc); else if (S_ISDIR(inode->i_mode)) { posix_state_to_acl(&acl_state, acls->a_entries); rc = set_posix_acl(idmap, dentry, ACL_TYPE_DEFAULT, acls); if (rc < 0) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n", rc); } mnt_drop_write(path->mnt); out_err: free_acl_state(&acl_state); posix_acl_release(acls); return rc; } int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap, struct path *path, struct inode *parent_inode) { struct posix_acl *acls; struct posix_acl_entry *pace; struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); int rc, i; if (!IS_ENABLED(CONFIG_FS_POSIX_ACL)) return -EOPNOTSUPP; acls = get_inode_acl(parent_inode, ACL_TYPE_DEFAULT); if (IS_ERR_OR_NULL(acls)) return -ENOENT; pace = acls->a_entries; for (i = 0; i < acls->a_count; i++, pace++) { if (pace->e_tag == ACL_MASK) { pace->e_perm = 0x07; break; } } rc = mnt_want_write(path->mnt); if (rc) goto out_err; rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls); if (rc < 0) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n", rc); if 
(S_ISDIR(inode->i_mode)) { rc = set_posix_acl(idmap, dentry, ACL_TYPE_DEFAULT, acls); if (rc < 0) ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n", rc); } mnt_drop_write(path->mnt); out_err: posix_acl_release(acls); return rc; }
linux-master
fs/smb/server/vfs.c
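
The server-side copy loop in ksmbd_vfs_copy_file_ranges() above uses a two-step fallback that is easy to miss in the flattened text: try vfs_copy_file_range() with no flags first, then retry with COPY_FILE_SPLICE when the filesystems cannot copy directly (-EOPNOTSUPP) or the copy crosses superblocks (-EXDEV). A minimal sketch of just that pattern; the helper name is hypothetical, only the two vfs_copy_file_range() calls mirror the file above.

#include <linux/fs.h>

/* hypothetical helper: copy one chunk, falling back to a splice copy */
static ssize_t copy_chunk_with_fallback(struct file *src, loff_t src_off,
					struct file *dst, loff_t dst_off,
					size_t len)
{
	ssize_t ret;

	ret = vfs_copy_file_range(src, src_off, dst, dst_off, len, 0);
	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = vfs_copy_file_range(src, src_off, dst, dst_off, len,
					  COPY_FILE_SPLICE);
	return ret;	/* bytes copied, or negative errno */
}

COPY_FILE_SPLICE is a kernel-internal flag that permits the pagecache/splice fallback for in-kernel callers, which is why ksmbd can still satisfy a cross-filesystem server-side copy request.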
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "user_config.h"
#include "../transport_ipc.h"

struct ksmbd_user *ksmbd_login_user(const char *account)
{
	struct ksmbd_login_response *resp;
	struct ksmbd_user *user = NULL;

	resp = ksmbd_ipc_login_request(account);
	if (!resp)
		return NULL;

	if (!(resp->status & KSMBD_USER_FLAG_OK))
		goto out;

	user = ksmbd_alloc_user(resp);
out:
	kvfree(resp);
	return user;
}

struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
{
	struct ksmbd_user *user = NULL;

	user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
	if (!user)
		return NULL;

	user->name = kstrdup(resp->account, GFP_KERNEL);
	user->flags = resp->status;
	user->gid = resp->gid;
	user->uid = resp->uid;
	user->passkey_sz = resp->hash_sz;
	user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
	if (user->passkey)
		memcpy(user->passkey, resp->hash, resp->hash_sz);

	if (!user->name || !user->passkey) {
		kfree(user->name);
		kfree(user->passkey);
		kfree(user);
		user = NULL;
	}
	return user;
}

void ksmbd_free_user(struct ksmbd_user *user)
{
	ksmbd_ipc_logout_request(user->name, user->flags);
	kfree(user->name);
	kfree(user->passkey);
	kfree(user);
}

int ksmbd_anonymous_user(struct ksmbd_user *user)
{
	if (user->name[0] == '\0')
		return 1;
	return 0;
}

bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2)
{
	if (strcmp(u1->name, u2->name))
		return false;
	if (memcmp(u1->passkey, u2->passkey, u1->passkey_sz))
		return false;

	return true;
}
linux-master
fs/smb/server/mgmt/user_config.c
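
A hedged usage sketch for the helpers above: resolve an account through the IPC daemon, reject anonymous logins, and release the reference when done. The caller name and the reject-anonymous policy are illustrative, not part of the file above.

/* hypothetical caller built on ksmbd_login_user()/ksmbd_free_user() */
static struct ksmbd_user *lookup_named_user(const char *account)
{
	struct ksmbd_user *user;

	user = ksmbd_login_user(account);	/* NULL on IPC or auth failure */
	if (!user)
		return NULL;

	if (ksmbd_anonymous_user(user)) {	/* empty account name */
		ksmbd_free_user(user);		/* also sends a logout request */
		return NULL;
	}
	return user;
}

Note that ksmbd_compare_user() compares the stored password hash with memcmp(); a constant-time helper such as crypto_memneq() is the usual choice where timing side channels matter, though the code above keeps plain memcmp().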
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include "ksmbd_ida.h"

static inline int __acquire_id(struct ida *ida, int from, int to)
{
	return ida_simple_get(ida, from, to, GFP_KERNEL);
}

int ksmbd_acquire_smb2_tid(struct ida *ida)
{
	int id;

	id = __acquire_id(ida, 1, 0xFFFFFFFF);

	return id;
}

int ksmbd_acquire_smb2_uid(struct ida *ida)
{
	int id;

	id = __acquire_id(ida, 1, 0);
	if (id == 0xFFFE)
		id = __acquire_id(ida, 1, 0);

	return id;
}

int ksmbd_acquire_async_msg_id(struct ida *ida)
{
	return __acquire_id(ida, 1, 0);
}

int ksmbd_acquire_id(struct ida *ida)
{
	return __acquire_id(ida, 0, 0);
}

void ksmbd_release_id(struct ida *ida, int id)
{
	ida_simple_remove(ida, id);
}
linux-master
fs/smb/server/mgmt/ksmbd_ida.c
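
ida_simple_get()/ida_simple_remove() used above are deprecated wrappers around ida_alloc_range()/ida_free(). A sketch of the same helper on the modern API, with a hypothetical name; note that ida_simple_get()'s "end" is exclusive with 0 meaning "no limit", while ida_alloc_range()'s "max" is inclusive, so the unsigned wrap below preserves both conventions.

#include <linux/idr.h>

/* sketch: modern equivalent of __acquire_id() above (hypothetical name) */
static inline int __acquire_id_modern(struct ida *ida, int from, int to)
{
	/* to == 0 wraps to UINT_MAX, i.e. "no upper bound" */
	return ida_alloc_range(ida, from, (unsigned int)to - 1, GFP_KERNEL);
}

/* the release side pairs with ida_free(ida, id) instead of ida_simple_remove() */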
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/list.h> #include <linux/slab.h> #include <linux/xarray.h> #include "../transport_ipc.h" #include "../connection.h" #include "tree_connect.h" #include "user_config.h" #include "share_config.h" #include "user_session.h" struct ksmbd_tree_conn_status ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess, const char *share_name) { struct ksmbd_tree_conn_status status = {-ENOENT, NULL}; struct ksmbd_tree_connect_response *resp = NULL; struct ksmbd_share_config *sc; struct ksmbd_tree_connect *tree_conn = NULL; struct sockaddr *peer_addr; int ret; sc = ksmbd_share_config_get(conn->um, share_name); if (!sc) return status; tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL); if (!tree_conn) { status.ret = -ENOMEM; goto out_error; } tree_conn->id = ksmbd_acquire_tree_conn_id(sess); if (tree_conn->id < 0) { status.ret = -EINVAL; goto out_error; } peer_addr = KSMBD_TCP_PEER_SOCKADDR(conn); resp = ksmbd_ipc_tree_connect_request(sess, sc, tree_conn, peer_addr); if (!resp) { status.ret = -EINVAL; goto out_error; } status.ret = resp->status; if (status.ret != KSMBD_TREE_CONN_STATUS_OK) goto out_error; tree_conn->flags = resp->connection_flags; if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) { struct ksmbd_share_config *new_sc; ksmbd_share_config_del(sc); new_sc = ksmbd_share_config_get(conn->um, share_name); if (!new_sc) { pr_err("Failed to update stale share config\n"); status.ret = -ESTALE; goto out_error; } ksmbd_share_config_put(sc); sc = new_sc; } tree_conn->user = sess->user; tree_conn->share_conf = sc; status.tree_conn = tree_conn; ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn, GFP_KERNEL)); if (ret) { status.ret = -ENOMEM; goto out_error; } kvfree(resp); return status; out_error: if (tree_conn) ksmbd_release_tree_conn_id(sess, tree_conn->id); ksmbd_share_config_put(sc); kfree(tree_conn); kvfree(resp); return status; } int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, struct ksmbd_tree_connect *tree_conn) { int ret; ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id); ksmbd_release_tree_conn_id(sess, tree_conn->id); xa_erase(&sess->tree_conns, tree_conn->id); ksmbd_share_config_put(tree_conn->share_conf); kfree(tree_conn); return ret; } struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess, unsigned int id) { struct ksmbd_tree_connect *tcon; tcon = xa_load(&sess->tree_conns, id); if (tcon) { if (test_bit(TREE_CONN_EXPIRE, &tcon->status)) tcon = NULL; } return tcon; } int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess) { int ret = 0; struct ksmbd_tree_connect *tc; unsigned long id; if (!sess) return -EINVAL; xa_for_each(&sess->tree_conns, id, tc) ret |= ksmbd_tree_conn_disconnect(sess, tc); xa_destroy(&sess->tree_conns); return ret; }
linux-master
fs/smb/server/mgmt/tree_connect.c
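
An illustrative caller for ksmbd_tree_conn_connect() above, showing only the status contract: status.ret carries either a negative errno or a KSMBD_TREE_CONN_STATUS_* code, and a successful connect hands back a tree_conn that must eventually go through ksmbd_tree_conn_disconnect(). The real SMB2 TREE_CONNECT handler does considerably more (share-path parsing, access checks), so treat this as a sketch with a hypothetical function name.

static int tree_connect_example(struct ksmbd_conn *conn,
				struct ksmbd_session *sess,
				const char *share)
{
	struct ksmbd_tree_conn_status status;

	status = ksmbd_tree_conn_connect(conn, sess, share);
	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
		return -ENOENT;	/* status.ret holds the specific failure */

	/* ... serve requests via status.tree_conn->share_conf ... */

	return ksmbd_tree_conn_disconnect(sess, status.tree_conn);
}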
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/list.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/xarray.h> #include "ksmbd_ida.h" #include "user_session.h" #include "user_config.h" #include "tree_connect.h" #include "../transport_ipc.h" #include "../connection.h" #include "../vfs_cache.h" static DEFINE_IDA(session_ida); #define SESSION_HASH_BITS 3 static DEFINE_HASHTABLE(sessions_table, SESSION_HASH_BITS); static DECLARE_RWSEM(sessions_table_lock); struct ksmbd_session_rpc { int id; unsigned int method; }; static void free_channel_list(struct ksmbd_session *sess) { struct channel *chann; unsigned long index; xa_for_each(&sess->ksmbd_chann_list, index, chann) { xa_erase(&sess->ksmbd_chann_list, index); kfree(chann); } xa_destroy(&sess->ksmbd_chann_list); } static void __session_rpc_close(struct ksmbd_session *sess, struct ksmbd_session_rpc *entry) { struct ksmbd_rpc_command *resp; resp = ksmbd_rpc_close(sess, entry->id); if (!resp) pr_err("Unable to close RPC pipe %d\n", entry->id); kvfree(resp); ksmbd_rpc_id_free(entry->id); kfree(entry); } static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess) { struct ksmbd_session_rpc *entry; long index; xa_for_each(&sess->rpc_handle_list, index, entry) { xa_erase(&sess->rpc_handle_list, index); __session_rpc_close(sess, entry); } xa_destroy(&sess->rpc_handle_list); } static int __rpc_method(char *rpc_name) { if (!strcmp(rpc_name, "\\srvsvc") || !strcmp(rpc_name, "srvsvc")) return KSMBD_RPC_SRVSVC_METHOD_INVOKE; if (!strcmp(rpc_name, "\\wkssvc") || !strcmp(rpc_name, "wkssvc")) return KSMBD_RPC_WKSSVC_METHOD_INVOKE; if (!strcmp(rpc_name, "LANMAN") || !strcmp(rpc_name, "lanman")) return KSMBD_RPC_RAP_METHOD; if (!strcmp(rpc_name, "\\samr") || !strcmp(rpc_name, "samr")) return KSMBD_RPC_SAMR_METHOD_INVOKE; if (!strcmp(rpc_name, "\\lsarpc") || !strcmp(rpc_name, "lsarpc")) return KSMBD_RPC_LSARPC_METHOD_INVOKE; pr_err("Unsupported RPC: %s\n", rpc_name); return 0; } int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) { struct ksmbd_session_rpc *entry; struct ksmbd_rpc_command *resp; int method; method = __rpc_method(rpc_name); if (!method) return -EINVAL; entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL); if (!entry) return -ENOMEM; entry->method = method; entry->id = ksmbd_ipc_id_alloc(); if (entry->id < 0) goto free_entry; xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL); resp = ksmbd_rpc_open(sess, entry->id); if (!resp) goto free_id; kvfree(resp); return entry->id; free_id: xa_erase(&sess->rpc_handle_list, entry->id); ksmbd_rpc_id_free(entry->id); free_entry: kfree(entry); return -EINVAL; } void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id) { struct ksmbd_session_rpc *entry; entry = xa_erase(&sess->rpc_handle_list, id); if (entry) __session_rpc_close(sess, entry); } int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id) { struct ksmbd_session_rpc *entry; entry = xa_load(&sess->rpc_handle_list, id); return entry ? 
entry->method : 0; } void ksmbd_session_destroy(struct ksmbd_session *sess) { if (!sess) return; if (sess->user) ksmbd_free_user(sess->user); ksmbd_tree_conn_session_logoff(sess); ksmbd_destroy_file_table(&sess->file_table); ksmbd_session_rpc_clear_list(sess); free_channel_list(sess); kfree(sess->Preauth_HashValue); ksmbd_release_id(&session_ida, sess->id); kfree(sess); } static struct ksmbd_session *__session_lookup(unsigned long long id) { struct ksmbd_session *sess; hash_for_each_possible(sessions_table, sess, hlist, id) { if (id == sess->id) { sess->last_active = jiffies; return sess; } } return NULL; } static void ksmbd_expire_session(struct ksmbd_conn *conn) { unsigned long id; struct ksmbd_session *sess; down_write(&sessions_table_lock); xa_for_each(&conn->sessions, id, sess) { if (sess->state != SMB2_SESSION_VALID || time_after(jiffies, sess->last_active + SMB2_SESSION_TIMEOUT)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); ksmbd_session_destroy(sess); continue; } } up_write(&sessions_table_lock); } int ksmbd_session_register(struct ksmbd_conn *conn, struct ksmbd_session *sess) { sess->dialect = conn->dialect; memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE); ksmbd_expire_session(conn); return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL)); } static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess) { struct channel *chann; chann = xa_erase(&sess->ksmbd_chann_list, (long)conn); if (!chann) return -ENOENT; kfree(chann); return 0; } void ksmbd_sessions_deregister(struct ksmbd_conn *conn) { struct ksmbd_session *sess; unsigned long id; down_write(&sessions_table_lock); if (conn->binding) { int bkt; struct hlist_node *tmp; hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) { if (!ksmbd_chann_del(conn, sess) && xa_empty(&sess->ksmbd_chann_list)) { hash_del(&sess->hlist); ksmbd_session_destroy(sess); } } } xa_for_each(&conn->sessions, id, sess) { unsigned long chann_id; struct channel *chann; xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) { if (chann->conn != conn) ksmbd_conn_set_exiting(chann->conn); } ksmbd_chann_del(conn, sess); if (xa_empty(&sess->ksmbd_chann_list)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); ksmbd_session_destroy(sess); } } up_write(&sessions_table_lock); } struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { struct ksmbd_session *sess; sess = xa_load(&conn->sessions, id); if (sess) sess->last_active = jiffies; return sess; } struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id) { struct ksmbd_session *sess; down_read(&sessions_table_lock); sess = __session_lookup(id); if (sess) sess->last_active = jiffies; up_read(&sessions_table_lock); return sess; } struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn, unsigned long long id) { struct ksmbd_session *sess; sess = ksmbd_session_lookup(conn, id); if (!sess && conn->binding) sess = ksmbd_session_lookup_slowpath(id); if (sess && sess->state != SMB2_SESSION_VALID) sess = NULL; return sess; } struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn, u64 sess_id) { struct preauth_session *sess; sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL); if (!sess) return NULL; sess->id = sess_id; memcpy(sess->Preauth_HashValue, conn->preauth_info->Preauth_HashValue, PREAUTH_HASHVALUE_SIZE); list_add(&sess->preauth_entry, &conn->preauth_sess_table); return sess; } static bool ksmbd_preauth_session_id_match(struct 
preauth_session *sess, unsigned long long id) { return sess->id == id; } struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { struct preauth_session *sess = NULL; list_for_each_entry(sess, &conn->preauth_sess_table, preauth_entry) { if (ksmbd_preauth_session_id_match(sess, id)) return sess; } return NULL; } static int __init_smb2_session(struct ksmbd_session *sess) { int id = ksmbd_acquire_smb2_uid(&session_ida); if (id < 0) return -EINVAL; sess->id = id; return 0; } static struct ksmbd_session *__session_create(int protocol) { struct ksmbd_session *sess; int ret; if (protocol != CIFDS_SESSION_FLAG_SMB2) return NULL; sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL); if (!sess) return NULL; if (ksmbd_init_file_table(&sess->file_table)) goto error; sess->last_active = jiffies; sess->state = SMB2_SESSION_IN_PROGRESS; set_session_flag(sess, protocol); xa_init(&sess->tree_conns); xa_init(&sess->ksmbd_chann_list); xa_init(&sess->rpc_handle_list); sess->sequence_number = 1; ret = __init_smb2_session(sess); if (ret) goto error; ida_init(&sess->tree_conn_ida); down_write(&sessions_table_lock); hash_add(sessions_table, &sess->hlist, sess->id); up_write(&sessions_table_lock); return sess; error: ksmbd_session_destroy(sess); return NULL; } struct ksmbd_session *ksmbd_smb2_session_create(void) { return __session_create(CIFDS_SESSION_FLAG_SMB2); } int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess) { int id = -EINVAL; if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2)) id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida); return id; } void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id) { if (id >= 0) ksmbd_release_id(&sess->tree_conn_ida, id); }
linux-master
fs/smb/server/mgmt/user_session.c
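
A condensed sketch of the session lifetime implied by the file above: __session_create() hashes the new session into the global table, ksmbd_session_register() binds it to one connection (expiring stale sessions as a side effect), per-request lookups go through ksmbd_session_lookup() which refreshes last_active, and teardown flows through ksmbd_sessions_deregister() into ksmbd_session_destroy(). Error unwinding is deliberately trimmed and the function name is hypothetical.

static int session_lifetime_example(struct ksmbd_conn *conn)
{
	struct ksmbd_session *sess;

	sess = ksmbd_smb2_session_create();
	if (!sess)
		return -ENOMEM;

	if (ksmbd_session_register(conn, sess))	/* stores in conn->sessions */
		return -ENOMEM;			/* (unwinding trimmed) */

	/* later, per request: */
	sess = ksmbd_session_lookup(conn, sess->id);	/* bumps last_active */

	/* on connection teardown: */
	ksmbd_sessions_deregister(conn);	/* drops channels, frees sessions */
	return 0;
}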
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include <linux/list.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/parser.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/mm.h> #include "share_config.h" #include "user_config.h" #include "user_session.h" #include "../transport_ipc.h" #include "../misc.h" #define SHARE_HASH_BITS 3 static DEFINE_HASHTABLE(shares_table, SHARE_HASH_BITS); static DECLARE_RWSEM(shares_table_lock); struct ksmbd_veto_pattern { char *pattern; struct list_head list; }; static unsigned int share_name_hash(const char *name) { return jhash(name, strlen(name), 0); } static void kill_share(struct ksmbd_share_config *share) { while (!list_empty(&share->veto_list)) { struct ksmbd_veto_pattern *p; p = list_entry(share->veto_list.next, struct ksmbd_veto_pattern, list); list_del(&p->list); kfree(p->pattern); kfree(p); } if (share->path) path_put(&share->vfs_path); kfree(share->name); kfree(share->path); kfree(share); } void ksmbd_share_config_del(struct ksmbd_share_config *share) { down_write(&shares_table_lock); hash_del(&share->hlist); up_write(&shares_table_lock); } void __ksmbd_share_config_put(struct ksmbd_share_config *share) { ksmbd_share_config_del(share); kill_share(share); } static struct ksmbd_share_config * __get_share_config(struct ksmbd_share_config *share) { if (!atomic_inc_not_zero(&share->refcount)) return NULL; return share; } static struct ksmbd_share_config *__share_lookup(const char *name) { struct ksmbd_share_config *share; unsigned int key = share_name_hash(name); hash_for_each_possible(shares_table, share, hlist, key) { if (!strcmp(name, share->name)) return share; } return NULL; } static int parse_veto_list(struct ksmbd_share_config *share, char *veto_list, int veto_list_sz) { int sz = 0; if (!veto_list_sz) return 0; while (veto_list_sz > 0) { struct ksmbd_veto_pattern *p; sz = strlen(veto_list); if (!sz) break; p = kzalloc(sizeof(struct ksmbd_veto_pattern), GFP_KERNEL); if (!p) return -ENOMEM; p->pattern = kstrdup(veto_list, GFP_KERNEL); if (!p->pattern) { kfree(p); return -ENOMEM; } list_add(&p->list, &share->veto_list); veto_list += sz + 1; veto_list_sz -= (sz + 1); } return 0; } static struct ksmbd_share_config *share_config_request(struct unicode_map *um, const char *name) { struct ksmbd_share_config_response *resp; struct ksmbd_share_config *share = NULL; struct ksmbd_share_config *lookup; int ret; resp = ksmbd_ipc_share_config_request(name); if (!resp) return NULL; if (resp->flags == KSMBD_SHARE_FLAG_INVALID) goto out; if (*resp->share_name) { char *cf_resp_name; bool equal; cf_resp_name = ksmbd_casefold_sharename(um, resp->share_name); if (IS_ERR(cf_resp_name)) goto out; equal = !strcmp(cf_resp_name, name); kfree(cf_resp_name); if (!equal) goto out; } share = kzalloc(sizeof(struct ksmbd_share_config), GFP_KERNEL); if (!share) goto out; share->flags = resp->flags; atomic_set(&share->refcount, 1); INIT_LIST_HEAD(&share->veto_list); share->name = kstrdup(name, GFP_KERNEL); if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) { share->path = kstrdup(ksmbd_share_config_path(resp), GFP_KERNEL); if (share->path) share->path_sz = strlen(share->path); share->create_mask = resp->create_mask; share->directory_mask = resp->directory_mask; share->force_create_mode = resp->force_create_mode; share->force_directory_mode = resp->force_directory_mode; share->force_uid = resp->force_uid; share->force_gid = resp->force_gid; ret = 
parse_veto_list(share, KSMBD_SHARE_CONFIG_VETO_LIST(resp), resp->veto_list_sz); if (!ret && share->path) { ret = kern_path(share->path, 0, &share->vfs_path); if (ret) { ksmbd_debug(SMB, "failed to access '%s'\n", share->path); /* Avoid put_path() */ kfree(share->path); share->path = NULL; } } if (ret || !share->name) { kill_share(share); share = NULL; goto out; } } down_write(&shares_table_lock); lookup = __share_lookup(name); if (lookup) lookup = __get_share_config(lookup); if (!lookup) { hash_add(shares_table, &share->hlist, share_name_hash(name)); } else { kill_share(share); share = lookup; } up_write(&shares_table_lock); out: kvfree(resp); return share; } struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um, const char *name) { struct ksmbd_share_config *share; down_read(&shares_table_lock); share = __share_lookup(name); if (share) share = __get_share_config(share); up_read(&shares_table_lock); if (share) return share; return share_config_request(um, name); } bool ksmbd_share_veto_filename(struct ksmbd_share_config *share, const char *filename) { struct ksmbd_veto_pattern *p; list_for_each_entry(p, &share->veto_list, list) { if (match_wildcard(p->pattern, filename)) return true; } return false; }
linux-master
fs/smb/server/mgmt/share_config.c
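
Usage sketch for the share cache above: ksmbd_share_config_get() either takes a reference on a cached entry or asks the user-space daemon for a fresh one, and every successful get must be paired with ksmbd_share_config_put() (declared in share_config.h), which tears the entry down once the last reference drops. The wrapper name below is hypothetical; the get/put pairing mirrors how tree_connect.c uses the API.

static bool share_is_vetoed(struct unicode_map *um, const char *share_name,
			    const char *filename)
{
	struct ksmbd_share_config *share;
	bool vetoed;

	share = ksmbd_share_config_get(um, share_name);
	if (!share)
		return false;

	vetoed = ksmbd_share_veto_filename(share, filename);
	ksmbd_share_config_put(share);
	return vetoed;
}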
// SPDX-License-Identifier: GPL-2.0 /* * Cryptographic API. * * MD4 Message Digest Algorithm (RFC1320). * * Implementation derived from Andrew Tridgell and Steve French's * CIFS MD4 implementation, and the cryptoapi implementation * originally based on the public domain implementation written * by Colin Plumb in 1993. * * Copyright (c) Andrew Tridgell 1997-1998. * Modified by Steve French ([email protected]) 2002 * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 David S. Miller ([email protected]) * Copyright (c) 2002 James Morris <[email protected]> * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> #include "md4.h" MODULE_LICENSE("GPL"); static inline u32 lshift(u32 x, unsigned int s) { x &= 0xFFFFFFFF; return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s)); } static inline u32 F(u32 x, u32 y, u32 z) { return (x & y) | ((~x) & z); } static inline u32 G(u32 x, u32 y, u32 z) { return (x & y) | (x & z) | (y & z); } static inline u32 H(u32 x, u32 y, u32 z) { return x ^ y ^ z; } #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) static void md4_transform(u32 *hash, u32 const *in) { u32 a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; ROUND1(a, b, c, d, in[0], 3); ROUND1(d, a, b, c, in[1], 7); ROUND1(c, d, a, b, in[2], 11); ROUND1(b, c, d, a, in[3], 19); ROUND1(a, b, c, d, in[4], 3); ROUND1(d, a, b, c, in[5], 7); ROUND1(c, d, a, b, in[6], 11); ROUND1(b, c, d, a, in[7], 19); ROUND1(a, b, c, d, in[8], 3); ROUND1(d, a, b, c, in[9], 7); ROUND1(c, d, a, b, in[10], 11); ROUND1(b, c, d, a, in[11], 19); ROUND1(a, b, c, d, in[12], 3); ROUND1(d, a, b, c, in[13], 7); ROUND1(c, d, a, b, in[14], 11); ROUND1(b, c, d, a, in[15], 19); ROUND2(a, b, c, d, in[0], 3); ROUND2(d, a, b, c, in[4], 5); ROUND2(c, d, a, b, in[8], 9); ROUND2(b, c, d, a, in[12], 13); ROUND2(a, b, c, d, in[1], 3); ROUND2(d, a, b, c, in[5], 5); ROUND2(c, d, a, b, in[9], 9); ROUND2(b, c, d, a, in[13], 13); ROUND2(a, b, c, d, in[2], 3); ROUND2(d, a, b, c, in[6], 5); ROUND2(c, d, a, b, in[10], 9); ROUND2(b, c, d, a, in[14], 13); ROUND2(a, b, c, d, in[3], 3); ROUND2(d, a, b, c, in[7], 5); ROUND2(c, d, a, b, in[11], 9); ROUND2(b, c, d, a, in[15], 13); ROUND3(a, b, c, d, in[0], 3); ROUND3(d, a, b, c, in[8], 9); ROUND3(c, d, a, b, in[4], 11); ROUND3(b, c, d, a, in[12], 15); ROUND3(a, b, c, d, in[2], 3); ROUND3(d, a, b, c, in[10], 9); ROUND3(c, d, a, b, in[6], 11); ROUND3(b, c, d, a, in[14], 15); ROUND3(a, b, c, d, in[1], 3); ROUND3(d, a, b, c, in[9], 9); ROUND3(c, d, a, b, in[5], 11); ROUND3(b, c, d, a, in[13], 15); ROUND3(a, b, c, d, in[3], 3); ROUND3(d, a, b, c, in[11], 9); ROUND3(c, d, a, b, in[7], 11); ROUND3(b, c, d, a, in[15], 15); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } static inline void md4_transform_helper(struct md4_ctx *ctx) { le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block)); md4_transform(ctx->hash, ctx->block); } int cifs_md4_init(struct md4_ctx *mctx) { memset(mctx, 0, sizeof(struct md4_ctx)); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; return 0; } EXPORT_SYMBOL_GPL(cifs_md4_init); int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len) { const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); 
mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md4_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md4_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); return 0; } EXPORT_SYMBOL_GPL(cifs_md4_update); int cifs_md4_final(struct md4_ctx *mctx, u8 *out) { const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof(u64)); md4_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32)); md4_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); return 0; } EXPORT_SYMBOL_GPL(cifs_md4_final);
linux-master
fs/smb/common/cifs_md4.c
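
Usage sketch for the three-call MD4 API exported above. MD4 is kept only for NTLM compatibility and must not be used in new designs; the digest is always 16 bytes. The wrapper name is hypothetical, the cifs_md4_*() calls are exactly the exports above.

static int md4_digest_example(const u8 *data, unsigned int len, u8 out[16])
{
	struct md4_ctx mctx;
	int rc;

	rc = cifs_md4_init(&mctx);
	if (rc)
		return rc;
	rc = cifs_md4_update(&mctx, data, len);	/* may be called repeatedly */
	if (rc)
		return rc;
	return cifs_md4_final(&mctx, out);	/* also wipes the context */
}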
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API
 *
 * ARC4 Cipher Algorithm
 *
 * Jon Oberheide <[email protected]>
 */

#include <linux/module.h>
#include "arc4.h"

MODULE_LICENSE("GPL");

int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
{
	int i, j = 0, k = 0;

	ctx->x = 1;
	ctx->y = 0;

	for (i = 0; i < 256; i++)
		ctx->S[i] = i;

	for (i = 0; i < 256; i++) {
		u32 a = ctx->S[i];

		j = (j + in_key[k] + a) & 0xff;
		ctx->S[i] = ctx->S[j];
		ctx->S[j] = a;
		if (++k >= key_len)
			k = 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cifs_arc4_setkey);

void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
{
	u32 *const S = ctx->S;
	u32 x, y, a, b;
	u32 ty, ta, tb;

	if (len == 0)
		return;

	x = ctx->x;
	y = ctx->y;

	a = S[x];
	y = (y + a) & 0xff;
	b = S[y];

	do {
		S[y] = a;
		a = (a + b) & 0xff;
		S[x] = b;
		x = (x + 1) & 0xff;
		ta = S[x];
		ty = (y + ta) & 0xff;
		tb = S[ty];
		*out++ = *in++ ^ S[a];
		if (--len == 0)
			break;
		y = ty;
		a = ta;
		b = tb;
	} while (true);

	ctx->x = x;
	ctx->y = y;
}
EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
linux-master
fs/smb/common/cifs_arc4.c
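
Usage sketch for the unaccelerated ARC4 above (retained only for NTLMSSP compatibility, not for general-purpose encryption). In-place operation is safe because cifs_arc4_crypt() reads each input byte before writing the corresponding output byte; the wrapper name is hypothetical.

#include <linux/string.h>

static void arc4_example(const u8 *key, unsigned int key_len,
			 u8 *buf, unsigned int len)
{
	struct arc4_ctx ctx;

	cifs_arc4_setkey(&ctx, key, key_len);
	cifs_arc4_crypt(&ctx, buf, buf, len);	/* encrypt == decrypt */
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leak key state */
}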
// SPDX-License-Identifier: GPL-2.0-or-later /* Unix SMB/Netbios implementation. Version 1.9. SMB parameters and setup Copyright (C) Andrew Tridgell 1992-2000 Copyright (C) Luke Kenneth Casson Leighton 1996-2000 Modified by Jeremy Allison 1995. Copyright (C) Andrew Bartlett <[email protected]> 2002-2003 Modified by Steve French ([email protected]) 2002-2003 */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fips.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/random.h> #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifsproto.h" #include "../common/md4.h" #ifndef false #define false 0 #endif #ifndef true #define true 1 #endif /* following came from the other byteorder.h to avoid include conflicts */ #define CVAL(buf,pos) (((unsigned char *)(buf))[pos]) #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8) #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val))) /* produce a md4 message digest from data of length n bytes */ static int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; struct md4_ctx mctx; rc = cifs_md4_init(&mctx); if (rc) { cifs_dbg(VFS, "%s: Could not init MD4\n", __func__); goto mdfour_err; } rc = cifs_md4_update(&mctx, link_str, link_len); if (rc) { cifs_dbg(VFS, "%s: Could not update MD4\n", __func__); goto mdfour_err; } rc = cifs_md4_final(&mctx, md4_hash); if (rc) cifs_dbg(VFS, "%s: Could not finalize MD4\n", __func__); mdfour_err: return rc; } /* * Creates the MD4 Hash of the users password in NT UNICODE. */ int E_md4hash(const unsigned char *passwd, unsigned char *p16, const struct nls_table *codepage) { int rc; int len; __le16 wpwd[129]; /* Password cannot be longer than 128 characters */ if (passwd) /* Password must be converted to NT unicode */ len = cifs_strtoUTF16(wpwd, passwd, 128, codepage); else { len = 0; *wpwd = 0; /* Ensure string is null terminated */ } rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); memzero_explicit(wpwd, sizeof(wpwd)); return rc; }
linux-master
fs/smb/client/smbencrypt.c
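
A sketch of what E_md4hash() above produces: the NT password hash, i.e. MD4 over the password encoded as little-endian UTF-16 with no terminator. p16 must have room for 16 bytes, and codepage selects the local-charset-to-UTF-16 conversion done by cifs_strtoUTF16(). The function name and the literal password are illustrative only; real callers pass the session password.

static int nt_hash_example(const struct nls_table *codepage,
			   unsigned char nt_hash[16])
{
	/* hedged example password, not from the file above */
	return E_md4hash((const unsigned char *)"secret", nt_hash, codepage);
}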
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Microsoft Corporation.
 *
 * Author(s): Steve French <[email protected]>
 */
#define CREATE_TRACE_POINTS
#include "trace.h"
linux-master
fs/smb/client/trace.c
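
The two lines above are the standard ftrace idiom: trace.h declares events with TRACE_EVENT(), and exactly one translation unit defines CREATE_TRACE_POINTS before including it so the tracepoint bodies are emitted once. A minimal sketch of what such a header looks like; the event name and fields here are illustrative, not taken from the real fs/smb/client/trace.h.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM smb3

#if !defined(_EXAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(smb3_example_enter,			/* hypothetical event */
	TP_PROTO(unsigned int xid, __u32 tid),
	TP_ARGS(xid, tid),
	TP_STRUCT__entry(
		__field(unsigned int, xid)
		__field(__u32, tid)
	),
	TP_fast_assign(
		__entry->xid = xid;
		__entry->tid = tid;
	),
	TP_printk("xid=%u tid=0x%x", __entry->xid, __entry->tid)
);

#endif /* _EXAMPLE_TRACE_H */

/*
 * Must be outside the include guard; headers living outside
 * include/trace/events also need TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE
 * defines here.
 */
#include <trace/define_trace.h>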
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002, 2011 * Etersoft, 2012 * Author(s): Pavel Shilovsky ([email protected]), * Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "fscache.h" #include "smb2glob.h" #include "smb2pdu.h" #include "smb2proto.h" #include "cached_dir.h" #include "smb2status.h" static void free_set_inf_compound(struct smb_rqst *rqst) { if (rqst[1].rq_iov) SMB2_set_info_free(&rqst[1]); if (rqst[2].rq_iov) SMB2_close_free(&rqst[2]); } /* * note: If cfile is passed, the reference to it is dropped here. * So make sure that you do not reuse cfile after return from this func. * * If passing @out_iov and @out_buftype, ensure to make them both large enough * (>= 3) to hold all compounded responses. Caller is also responsible for * freeing them up with free_rsp_buf(). */ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, __u32 desired_access, __u32 create_disposition, __u32 create_options, umode_t mode, void *ptr, int command, struct cifsFileInfo *cfile, __u8 **extbuf, size_t *extbuflen, struct kvec *out_iov, int *out_buftype) { struct smb2_compound_vars *vars = NULL; struct kvec *rsp_iov; struct smb_rqst *rqst; int rc; __le16 *utf16_path = NULL; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_fid fid; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server; int num_rqst = 0; int resp_buftype[3]; struct smb2_query_info_rsp *qi_rsp = NULL; struct cifs_open_info_data *idata; int flags = 0; __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0}; unsigned int size[2]; void *data[2]; int len; vars = kzalloc(sizeof(*vars), GFP_ATOMIC); if (vars == NULL) return -ENOMEM; rqst = &vars->rqst[0]; rsp_iov = &vars->rsp_iov[0]; server = cifs_pick_channel(ses); if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; /* We already have a handle so we can skip the open */ if (cfile) goto after_open; /* Open */ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); if (!utf16_path) { rc = -ENOMEM; goto finished; } vars->oparms = (struct cifs_open_parms) { .tcon = tcon, .path = full_path, .desired_access = desired_access, .disposition = create_disposition, .create_options = cifs_create_options(cifs_sb, create_options), .fid = &fid, .mode = mode, .cifs_sb = cifs_sb, }; rqst[num_rqst].rq_iov = &vars->open_iov[0]; rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE; rc = SMB2_open_init(tcon, server, &rqst[num_rqst], &oplock, &vars->oparms, utf16_path); kfree(utf16_path); if (rc) goto finished; smb2_set_next_command(tcon, &rqst[num_rqst]); after_open: num_rqst++; rc = 0; /* Operation */ switch (command) { case SMB2_OP_QUERY_INFO: rqst[num_rqst].rq_iov = &vars->qi_iov; rqst[num_rqst].rq_nvec = 1; if (cfile) rc = SMB2_query_info_init(tcon, server, &rqst[num_rqst], cfile->fid.persistent_fid, cfile->fid.volatile_fid, FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 0, NULL); else { rc = SMB2_query_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 0, NULL); if (!rc) { 
smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst]); } } if (rc) goto finished; num_rqst++; trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_POSIX_QUERY_INFO: rqst[num_rqst].rq_iov = &vars->qi_iov; rqst[num_rqst].rq_nvec = 1; if (cfile) rc = SMB2_query_info_init(tcon, server, &rqst[num_rqst], cfile->fid.persistent_fid, cfile->fid.volatile_fid, SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, /* TBD: fix following to allow for longer SIDs */ sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) + (sizeof(struct cifs_sid) * 2), 0, NULL); else { rc = SMB2_query_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) + (sizeof(struct cifs_sid) * 2), 0, NULL); if (!rc) { smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst]); } } if (rc) goto finished; num_rqst++; trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_DELETE: trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_MKDIR: /* * Directories are created through parameters in the * SMB2_open() call. */ trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_RMDIR: rqst[num_rqst].rq_iov = &vars->si_iov[0]; rqst[num_rqst].rq_nvec = 1; size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */ data[0] = &delete_pending[0]; rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_DISPOSITION_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (rc) goto finished; smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst++]); trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_SET_EOF: rqst[num_rqst].rq_iov = &vars->si_iov[0]; rqst[num_rqst].rq_nvec = 1; size[0] = 8; /* sizeof __le64 */ data[0] = ptr; if (cfile) { rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); } else { rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (!rc) { smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst]); } } if (rc) goto finished; num_rqst++; trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_SET_INFO: rqst[num_rqst].rq_iov = &vars->si_iov[0]; rqst[num_rqst].rq_nvec = 1; size[0] = sizeof(FILE_BASIC_INFO); data[0] = ptr; if (cfile) rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, FILE_BASIC_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); else { rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_BASIC_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (!rc) { smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst]); } } if (rc) goto finished; num_rqst++; trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_RENAME: rqst[num_rqst].rq_iov = &vars->si_iov[0]; rqst[num_rqst].rq_nvec = 2; len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX)); vars->rename_info.ReplaceIfExists = 1; vars->rename_info.RootDirectory = 0; vars->rename_info.FileNameLength = cpu_to_le32(len); size[0] = 
sizeof(struct smb2_file_rename_info); data[0] = &vars->rename_info; size[1] = len + 2 /* null */; data[1] = (__le16 *)ptr; if (cfile) rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, FILE_RENAME_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); else { rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_RENAME_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (!rc) { smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst]); } } if (rc) goto finished; num_rqst++; trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_HARDLINK: rqst[num_rqst].rq_iov = &vars->si_iov[0]; rqst[num_rqst].rq_nvec = 2; len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX)); vars->link_info.ReplaceIfExists = 0; vars->link_info.RootDirectory = 0; vars->link_info.FileNameLength = cpu_to_le32(len); size[0] = sizeof(struct smb2_file_link_info); data[0] = &vars->link_info; size[1] = len + 2 /* null */; data[1] = (__le16 *)ptr; rc = SMB2_set_info_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_LINK_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (rc) goto finished; smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst++]); trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path); break; default: cifs_dbg(VFS, "Invalid command\n"); rc = -EINVAL; } if (rc) goto finished; /* We already have a handle so we can skip the close */ if (cfile) goto after_close; /* Close */ flags |= CIFS_CP_CREATE_CLOSE_OP; rqst[num_rqst].rq_iov = &vars->close_iov; rqst[num_rqst].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[num_rqst], COMPOUND_FID, COMPOUND_FID, false); smb2_set_related(&rqst[num_rqst]); if (rc) goto finished; after_close: num_rqst++; if (cfile) { rc = compound_send_recv(xid, ses, server, flags, num_rqst - 2, &rqst[1], &resp_buftype[1], &rsp_iov[1]); } else rc = compound_send_recv(xid, ses, server, flags, num_rqst, rqst, resp_buftype, rsp_iov); finished: SMB2_open_free(&rqst[0]); if (rc == -EREMCHG) { pr_warn_once("server share %s deleted\n", tcon->tree_name); tcon->need_reconnect = true; } switch (command) { case SMB2_OP_QUERY_INFO: idata = ptr; if (rc == 0 && cfile && cfile->symlink_target) { idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!idata->symlink_target) rc = -ENOMEM; } if (rc == 0) { qi_rsp = (struct smb2_query_info_rsp *) rsp_iov[1].iov_base; rc = smb2_validate_and_copy_iov( le16_to_cpu(qi_rsp->OutputBufferOffset), le32_to_cpu(qi_rsp->OutputBufferLength), &rsp_iov[1], sizeof(idata->fi), (char *)&idata->fi); } if (rqst[1].rq_iov) SMB2_query_info_free(&rqst[1]); if (rqst[2].rq_iov) SMB2_close_free(&rqst[2]); if (rc) trace_smb3_query_info_compound_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_query_info_compound_done(xid, ses->Suid, tcon->tid); break; case SMB2_OP_POSIX_QUERY_INFO: idata = ptr; if (rc == 0 && cfile && cfile->symlink_target) { idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!idata->symlink_target) rc = -ENOMEM; } if (rc == 0) { qi_rsp = (struct smb2_query_info_rsp *) rsp_iov[1].iov_base; rc = smb2_validate_and_copy_iov( le16_to_cpu(qi_rsp->OutputBufferOffset), le32_to_cpu(qi_rsp->OutputBufferLength), &rsp_iov[1], sizeof(idata->posix_fi) /* add SIDs */, (char *)&idata->posix_fi); } if (rc == 0) { unsigned int length = le32_to_cpu(qi_rsp->OutputBufferLength); if (length > sizeof(idata->posix_fi)) { char *base = 
(char *)rsp_iov[1].iov_base + le16_to_cpu(qi_rsp->OutputBufferOffset) + sizeof(idata->posix_fi); *extbuflen = length - sizeof(idata->posix_fi); *extbuf = kmemdup(base, *extbuflen, GFP_KERNEL); if (!*extbuf) rc = -ENOMEM; } else { rc = -EINVAL; } } if (rqst[1].rq_iov) SMB2_query_info_free(&rqst[1]); if (rqst[2].rq_iov) SMB2_close_free(&rqst[2]); if (rc) trace_smb3_posix_query_info_compound_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_posix_query_info_compound_done(xid, ses->Suid, tcon->tid); break; case SMB2_OP_DELETE: if (rc) trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_delete_done(xid, ses->Suid, tcon->tid); if (rqst[1].rq_iov) SMB2_close_free(&rqst[1]); break; case SMB2_OP_MKDIR: if (rc) trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid); if (rqst[1].rq_iov) SMB2_close_free(&rqst[1]); break; case SMB2_OP_HARDLINK: if (rc) trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid); free_set_inf_compound(rqst); break; case SMB2_OP_RENAME: if (rc) trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_rename_done(xid, ses->Suid, tcon->tid); free_set_inf_compound(rqst); break; case SMB2_OP_RMDIR: if (rc) trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid); free_set_inf_compound(rqst); break; case SMB2_OP_SET_EOF: if (rc) trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid); free_set_inf_compound(rqst); break; case SMB2_OP_SET_INFO: if (rc) trace_smb3_set_info_compound_err(xid, ses->Suid, tcon->tid, rc); else trace_smb3_set_info_compound_done(xid, ses->Suid, tcon->tid); free_set_inf_compound(rqst); break; } if (cfile) cifsFileInfo_put(cfile); if (out_iov && out_buftype) { memcpy(out_iov, rsp_iov, 3 * sizeof(*out_iov)); memcpy(out_buftype, resp_buftype, 3 * sizeof(*out_buftype)); } else { free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); } kfree(vars); return rc; } static int parse_create_response(struct cifs_open_info_data *data, struct cifs_sb_info *cifs_sb, const struct kvec *iov) { struct smb2_create_rsp *rsp = iov->iov_base; bool reparse_point = false; u32 tag = 0; int rc = 0; switch (rsp->hdr.Status) { case STATUS_IO_REPARSE_TAG_NOT_HANDLED: reparse_point = true; break; case STATUS_STOPPED_ON_SYMLINK: rc = smb2_parse_symlink_response(cifs_sb, iov, &data->symlink_target); if (rc) return rc; tag = IO_REPARSE_TAG_SYMLINK; reparse_point = true; break; case STATUS_SUCCESS: reparse_point = !!(rsp->Flags & SMB2_CREATE_FLAG_REPARSEPOINT); break; } data->reparse_point = reparse_point; data->reparse_tag = tag; return rc; } int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data) { __u32 create_options = 0; struct cifsFileInfo *cfile; struct cached_fid *cfid = NULL; struct smb2_hdr *hdr; struct kvec out_iov[3] = {}; int out_buftype[3] = {}; bool islink; int rc, rc2; data->adjust_tz = false; data->reparse_point = false; if (strcmp(full_path, "")) rc = -ENOENT; else rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); /* If it is a root and its handle is cached then use it */ if (!rc) { if (cfid->file_all_info_is_valid) { memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi)); } else { rc = SMB2_query_info(xid, 
tcon, cfid->fid.persistent_fid, cfid->fid.volatile_fid, &data->fi); } close_cached_dir(cfid); return rc; } cifs_get_readable_path(tcon, full_path, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN, create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile, NULL, NULL, out_iov, out_buftype); hdr = out_iov[0].iov_base; /* * If first iov is unset, then SMB session was dropped or we've got a * cached open file (@cfile). */ if (!hdr || out_buftype[0] == CIFS_NO_BUFFER) goto out; switch (rc) { case 0: case -EOPNOTSUPP: rc = parse_create_response(data, cifs_sb, &out_iov[0]); if (rc || !data->reparse_point) goto out; create_options |= OPEN_REPARSE_POINT; /* Failed on a symbolic link - query a reparse point info */ cifs_get_readable_path(tcon, full_path, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN, create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile, NULL, NULL, NULL, NULL); break; case -EREMOTE: break; default: if (hdr->Status != STATUS_OBJECT_NAME_INVALID) break; rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb, full_path, &islink); if (rc2) { rc = rc2; goto out; } if (islink) rc = -EREMOTE; } out: free_rsp_buf(out_buftype[0], out_iov[0].iov_base); free_rsp_buf(out_buftype[1], out_iov[1].iov_base); free_rsp_buf(out_buftype[2], out_iov[2].iov_base); return rc; } int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data, struct cifs_sid *owner, struct cifs_sid *group) { int rc; __u32 create_options = 0; struct cifsFileInfo *cfile; struct kvec out_iov[3] = {}; int out_buftype[3] = {}; __u8 *sidsbuf = NULL; __u8 *sidsbuf_end = NULL; size_t sidsbuflen = 0; size_t owner_len, group_len; data->adjust_tz = false; data->reparse_point = false; /* * BB TODO: Add support for using the cached root handle. * Create SMB2_query_posix_info worker function to do non-compounded query * when we already have an open file handle for this. For now this is fast enough * (always using the compounded version). */ cifs_get_readable_path(tcon, full_path, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN, create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile, &sidsbuf, &sidsbuflen, out_iov, out_buftype); /* * If first iov is unset, then SMB session was dropped or we've got a * cached open file (@cfile). 
*/ if (!out_iov[0].iov_base || out_buftype[0] == CIFS_NO_BUFFER) goto out; switch (rc) { case 0: case -EOPNOTSUPP: /* BB TODO: When support for special files added to Samba re-verify this path */ rc = parse_create_response(data, cifs_sb, &out_iov[0]); if (rc || !data->reparse_point) goto out; create_options |= OPEN_REPARSE_POINT; /* Failed on a symbolic link - query a reparse point info */ cifs_get_readable_path(tcon, full_path, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN, create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile, &sidsbuf, &sidsbuflen, NULL, NULL); break; } out: if (rc == 0) { sidsbuf_end = sidsbuf + sidsbuflen; owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end); if (owner_len == -1) { rc = -EINVAL; goto out; } memcpy(owner, sidsbuf, owner_len); group_len = posix_info_sid_size( sidsbuf + owner_len, sidsbuf_end); if (group_len == -1) { rc = -EINVAL; goto out; } memcpy(group, sidsbuf + owner_len, group_len); } kfree(sidsbuf); free_rsp_buf(out_buftype[0], out_iov[0].iov_base); free_rsp_buf(out_buftype[1], out_iov[1].iov_base); free_rsp_buf(out_buftype[2], out_iov[2].iov_base); return rc; } int smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { return smb2_compound_op(xid, tcon, cifs_sb, name, FILE_WRITE_ATTRIBUTES, FILE_CREATE, CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR, NULL, NULL, NULL, NULL, NULL); } void smb2_mkdir_setinfo(struct inode *inode, const char *name, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, const unsigned int xid) { FILE_BASIC_INFO data; struct cifsInodeInfo *cifs_i; struct cifsFileInfo *cfile; u32 dosattrs; int tmprc; memset(&data, 0, sizeof(data)); cifs_i = CIFS_I(inode); dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; data.Attributes = cpu_to_le32(dosattrs); cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile); tmprc = smb2_compound_op(xid, tcon, cifs_sb, name, FILE_WRITE_ATTRIBUTES, FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE, &data, SMB2_OP_SET_INFO, cfile, NULL, NULL, NULL, NULL); if (tmprc == 0) cifs_i->cifsAttrs = dosattrs; } int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { drop_cached_dir_by_name(xid, tcon, name, cifs_sb); return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE, NULL, SMB2_OP_RMDIR, NULL, NULL, NULL, NULL, NULL); } int smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT, ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL, NULL, NULL, NULL, NULL); } static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb, __u32 access, int command, struct cifsFileInfo *cfile) { __le16 *smb2_to_name = NULL; int rc; smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb); if (smb2_to_name == NULL) { rc = -ENOMEM; goto smb2_rename_path; } rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access, FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name, command, cfile, NULL, NULL, NULL, NULL); smb2_rename_path: kfree(smb2_to_name); return rc; } int smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb) { struct cifsFileInfo *cfile; 
drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb); cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, DELETE, SMB2_OP_RENAME, cfile); } int smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb) { return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK, NULL); } int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon, const char *full_path, __u64 size, struct cifs_sb_info *cifs_sb, bool set_alloc) { __le64 eof = cpu_to_le64(size); struct cifsFileInfo *cfile; cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); return smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE, &eof, SMB2_OP_SET_EOF, cfile, NULL, NULL, NULL, NULL); } int smb2_set_file_info(struct inode *inode, const char *full_path, FILE_BASIC_INFO *buf, const unsigned int xid) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct cifs_tcon *tcon; struct cifsFileInfo *cfile; int rc; if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && (buf->Attributes == 0)) return 0; /* would be a no op, no sense sending this */ tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile, NULL, NULL, NULL, NULL); cifs_put_tlink(tlink); return rc; }
linux-master
fs/smb/client/smb2inode.c
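
A sketch of the compounding contract used throughout smb2inode.c above: the opening CREATE gets smb2_set_next_command(), and each follow-on PDU gets smb2_set_related() (plus smb2_set_next_command() if more PDUs follow) so it reuses the just-created handle via COMPOUND_FID, letting open + operation + close travel in one round trip. smb2_compound_op() is static to that file, so this caller is purely illustrative; the argument list mirrors the real smb2_unlink() wrapper above.

static int unlink_example(const unsigned int xid, struct cifs_tcon *tcon,
			  const char *name, struct cifs_sb_info *cifs_sb)
{
	/* one round trip: CREATE (DELETE_ON_CLOSE) + CLOSE */
	return smb2_compound_op(xid, tcon, cifs_sb, name,
				DELETE, FILE_OPEN,
				CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
				ACL_NO_MODE, NULL, SMB2_OP_DELETE,
				NULL,		/* cfile: no cached handle */
				NULL, NULL,	/* extbuf, extbuflen */
				NULL, NULL);	/* out_iov, out_buftype */
}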
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French ([email protected]) * Jeremy Allison ([email protected]) 2006. * */ #include <linux/fs.h> #include <linux/list.h> #include <linux/gfp.h> #include <linux/wait.h> #include <linux/net.h> #include <linux/delay.h> #include <linux/freezer.h> #include <linux/tcp.h> #include <linux/bvec.h> #include <linux/highmem.h> #include <linux/uaccess.h> #include <linux/processor.h> #include <linux/mempool.h> #include <linux/sched/signal.h> #include <linux/task_io_accounting_ops.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "smb2proto.h" #include "smbdirect.h" /* Max number of iovectors we can use off the stack when sending requests. */ #define CIFS_MAX_IOV_SIZE 8 void cifs_wake_up_task(struct mid_q_entry *mid) { if (mid->mid_state == MID_RESPONSE_RECEIVED) mid->mid_state = MID_RESPONSE_READY; wake_up_process(mid->callback_data); } static struct mid_q_entry * alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; if (server == NULL) { cifs_dbg(VFS, "%s: null TCP session\n", __func__); return NULL; } temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); kref_init(&temp->refcount); temp->mid = get_mid(smb_buffer); temp->pid = current->pid; temp->command = cpu_to_le16(smb_buffer->Command); cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command); /* easier to use jiffies */ /* when mid allocated can be before when sent */ temp->when_alloc = jiffies; temp->server = server; /* * The default is for the mid to be synchronous, so the * default callback just wakes up the current task. */ get_task_struct(current); temp->creator = current; temp->callback = cifs_wake_up_task; temp->callback_data = current; atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; return temp; } static void __release_mid(struct kref *refcount) { struct mid_q_entry *midEntry = container_of(refcount, struct mid_q_entry, refcount); #ifdef CONFIG_CIFS_STATS2 __le16 command = midEntry->server->vals->lock_cmd; __u16 smb_cmd = le16_to_cpu(midEntry->command); unsigned long now; unsigned long roundtrip_time; #endif struct TCP_Server_Info *server = midEntry->server; if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) && (midEntry->mid_state == MID_RESPONSE_RECEIVED || midEntry->mid_state == MID_RESPONSE_READY) && server->ops->handle_cancelled_mid) server->ops->handle_cancelled_mid(midEntry, server); midEntry->mid_state = MID_FREE; atomic_dec(&mid_count); if (midEntry->large_buf) cifs_buf_release(midEntry->resp_buf); else cifs_small_buf_release(midEntry->resp_buf); #ifdef CONFIG_CIFS_STATS2 now = jiffies; if (now < midEntry->when_alloc) cifs_server_dbg(VFS, "Invalid mid allocation time\n"); roundtrip_time = now - midEntry->when_alloc; if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) { if (atomic_read(&server->num_cmds[smb_cmd]) == 0) { server->slowest_cmd[smb_cmd] = roundtrip_time; server->fastest_cmd[smb_cmd] = roundtrip_time; } else { if (server->slowest_cmd[smb_cmd] < roundtrip_time) server->slowest_cmd[smb_cmd] = roundtrip_time; else if (server->fastest_cmd[smb_cmd] > roundtrip_time) server->fastest_cmd[smb_cmd] = roundtrip_time; } cifs_stats_inc(&server->num_cmds[smb_cmd]); server->time_per_cmd[smb_cmd] += roundtrip_time; } /* * commands taking longer than one second (default) can be indications * that something is wrong, unless it is quite a slow link or a 
very * busy server. Note that this calc is unlikely or impossible to wrap * as long as slow_rsp_threshold is not set way above recommended max * value (32767 ie 9 hours) and is generally harmless even if wrong * since only affects debug counters - so leaving the calc as simple * comparison rather than doing multiple conversions and overflow * checks */ if ((slow_rsp_threshold != 0) && time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) && (midEntry->command != command)) { /* * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command * NB: le16_to_cpu returns unsigned so can not be negative below */ if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) cifs_stats_inc(&server->smb2slowcmd[smb_cmd]); trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid, midEntry->when_sent, midEntry->when_received); if (cifsFYI & CIFS_TIMER) { pr_debug("slow rsp: cmd %d mid %llu", midEntry->command, midEntry->mid); cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n", now - midEntry->when_alloc, now - midEntry->when_sent, now - midEntry->when_received); } } #endif put_task_struct(midEntry->creator); mempool_free(midEntry, cifs_mid_poolp); } void release_mid(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->server; spin_lock(&server->mid_lock); kref_put(&mid->refcount, __release_mid); spin_unlock(&server->mid_lock); } void delete_mid(struct mid_q_entry *mid) { spin_lock(&mid->server->mid_lock); if (!(mid->mid_flags & MID_DELETED)) { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } spin_unlock(&mid->server->mid_lock); release_mid(mid); } /* * smb_send_kvec - send an array of kvecs to the server * @server: Server to send the data to * @smb_msg: Message to send * @sent: amount of data sent on socket is stored here * * Our basic "send data to server" function. Should be called with srv_mutex * held. The caller is responsible for handling the results. */ static int smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, size_t *sent) { int rc = 0; int retries = 0; struct socket *ssocket = server->ssocket; *sent = 0; if (server->noblocksnd) smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else smb_msg->msg_flags = MSG_NOSIGNAL; while (msg_data_left(smb_msg)) { /* * If blocking send, we try 3 times, since each can block * for 5 seconds. For nonblocking we have to try more * but wait increasing amounts of time allowing time for * socket to clear. The overall time we wait in either * case to send on the socket is about 15 seconds. * Similarly we wait for 15 seconds for a response from * the server in SendReceive[2] for the server to send * a response back for most types of requests (except * SMB Write past end of file which can be slow, and * blocking lock operations). NFS waits slightly longer * than CIFS, but this can make it take longer for * nonresponsive servers to be detected and 15 seconds * is more than enough time for modern networks to * send a packet. In most cases if we fail to send * after the retries we will kill the socket and * reconnect which may clear the network problem. 
*/ rc = sock_sendmsg(ssocket, smb_msg); if (rc == -EAGAIN) { retries++; if (retries >= 14 || (!server->noblocksnd && (retries > 2))) { cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n", ssocket); return -EAGAIN; } msleep(1 << retries); continue; } if (rc < 0) return rc; if (rc == 0) { /* should never happen, letting socket clear before retrying is our only obvious option here */ cifs_server_dbg(VFS, "tcp sent no data\n"); msleep(500); continue; } /* send was at least partially successful */ *sent += rc; retries = 0; /* in case we get ENOSPC on the next send */ } return 0; } unsigned long smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst) { unsigned int i; struct kvec *iov; int nvec; unsigned long buflen = 0; if (!is_smb1(server) && rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { iov = &rqst->rq_iov[1]; nvec = rqst->rq_nvec - 1; } else { iov = rqst->rq_iov; nvec = rqst->rq_nvec; } /* total up iov array first */ for (i = 0; i < nvec; i++) buflen += iov[i].iov_len; buflen += iov_iter_count(&rqst->rq_iter); return buflen; } static int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) { int rc; struct kvec *iov; int n_vec; unsigned int send_length = 0; unsigned int i, j; sigset_t mask, oldmask; size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; struct msghdr smb_msg = {}; __be32 rfc1002_marker; cifs_in_send_inc(server); if (cifs_rdma_enabled(server)) { /* return -EAGAIN when connecting or reconnecting */ rc = -EAGAIN; if (server->smbd_conn) rc = smbd_send(server, num_rqst, rqst); goto smbd_done; } rc = -EAGAIN; if (ssocket == NULL) goto out; rc = -ERESTARTSYS; if (fatal_signal_pending(current)) { cifs_dbg(FYI, "signal pending before send request\n"); goto out; } rc = 0; /* cork the socket */ tcp_sock_set_cork(ssocket->sk, true); for (j = 0; j < num_rqst; j++) send_length += smb_rqst_len(server, &rqst[j]); rfc1002_marker = cpu_to_be32(send_length); /* * We should not allow signals to interrupt the network send because * any partial send will cause session reconnects thus increasing * latency of system calls and overload a server with unnecessary * requests. */ sigfillset(&mask); sigprocmask(SIG_BLOCK, &mask, &oldmask); /* Generate a rfc1002 marker for SMB2+ */ if (!is_smb1(server)) { struct kvec hiov = { .iov_base = &rfc1002_marker, .iov_len = 4 }; iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4); rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) goto unmask; total_len += sent; send_length += 4; } cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length); for (j = 0; j < num_rqst; j++) { iov = rqst[j].rq_iov; n_vec = rqst[j].rq_nvec; size = 0; for (i = 0; i < n_vec; i++) { dump_smb(iov[i].iov_base, iov[i].iov_len); size += iov[i].iov_len; } iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size); rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) goto unmask; total_len += sent; if (iov_iter_count(&rqst[j].rq_iter) > 0) { smb_msg.msg_iter = rqst[j].rq_iter; rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) break; total_len += sent; } } unmask: sigprocmask(SIG_SETMASK, &oldmask, NULL); /* * If signal is pending but we have already sent the whole packet to * the server we need to return success status to allow a corresponding * mid entry to be kept in the pending requests queue thus allowing * to handle responses from the server by the client. 
* * If only part of the packet has been sent there is no need to hide * interrupt because the session will be reconnected anyway, so there * won't be any response from the server to handle. */ if (signal_pending(current) && (total_len != send_length)) { cifs_dbg(FYI, "signal is pending after attempt to send\n"); rc = -ERESTARTSYS; } /* uncork it */ tcp_sock_set_cork(ssocket->sk, false); if ((total_len > 0) && (total_len != send_length)) { cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n", send_length, total_len); /* * If we have only sent part of an SMB then the next SMB could * be taken as the remainder of this one. We need to kill the * socket so the server throws away the partial SMB */ cifs_signal_cifsd_for_reconnect(server, false); trace_smb3_partial_send_reconnect(server->CurrentMid, server->conn_id, server->hostname); } smbd_done: if (rc < 0 && rc != -EINTR) cifs_server_dbg(VFS, "Error %d sending data on socket to server\n", rc); else if (rc > 0) rc = 0; out: cifs_in_send_dec(server); return rc; } struct send_req_vars { struct smb2_transform_hdr tr_hdr; struct smb_rqst rqst[MAX_COMPOUND]; struct kvec iov; }; static int smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst, int flags) { struct send_req_vars *vars; struct smb_rqst *cur_rqst; struct kvec *iov; int rc; if (!(flags & CIFS_TRANSFORM_REQ)) return __smb_send_rqst(server, num_rqst, rqst); if (num_rqst > MAX_COMPOUND - 1) return -ENOMEM; if (!server->ops->init_transform_rq) { cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n"); return -EIO; } vars = kzalloc(sizeof(*vars), GFP_NOFS); if (!vars) return -ENOMEM; cur_rqst = vars->rqst; iov = &vars->iov; iov->iov_base = &vars->tr_hdr; iov->iov_len = sizeof(vars->tr_hdr); cur_rqst[0].rq_iov = iov; cur_rqst[0].rq_nvec = 1; rc = server->ops->init_transform_rq(server, num_rqst + 1, &cur_rqst[0], rqst); if (rc) goto out; rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]); smb3_free_compound_rqst(num_rqst, &cur_rqst[1]); out: kfree(vars); return rc; } int smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, unsigned int smb_buf_length) { struct kvec iov[2]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; iov[0].iov_base = smb_buffer; iov[0].iov_len = 4; iov[1].iov_base = (char *)smb_buffer + 4; iov[1].iov_len = smb_buf_length; return __smb_send_rqst(server, 1, &rqst); } static int wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, const int timeout, const int flags, unsigned int *instance) { long rc; int *credits; int optype; long int t; int scredits, in_flight; if (timeout < 0) t = MAX_JIFFY_OFFSET; else t = msecs_to_jiffies(timeout); optype = flags & CIFS_OP_MASK; *instance = 0; credits = server->ops->get_credits_field(server, optype); /* Since an echo is already inflight, no need to wait to send another */ if (*credits <= 0 && optype == CIFS_ECHO_OP) return -EAGAIN; spin_lock(&server->req_lock); if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) { /* oplock breaks must not be held up */ server->in_flight++; if (server->in_flight > server->max_in_flight) server->max_in_flight = server->in_flight; *credits -= 1; *instance = server->reconnect_instance; scredits = *credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_nblk_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, -1, in_flight); cifs_dbg(FYI, "%s: remove %u credits total=%d\n", __func__, 1, scredits); return 0; } while (1) { 
spin_unlock(&server->req_lock); spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } spin_unlock(&server->srv_lock); spin_lock(&server->req_lock); if (*credits < num_credits) { scredits = *credits; spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); rc = wait_event_killable_timeout(server->request_q, has_credits(server, credits, num_credits), t); cifs_num_waiters_dec(server); if (!rc) { spin_lock(&server->req_lock); scredits = *credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_credit_timeout(server->CurrentMid, server->conn_id, server->hostname, scredits, num_credits, in_flight); cifs_server_dbg(VFS, "wait timed out after %d ms\n", timeout); return -EBUSY; } if (rc == -ERESTARTSYS) return -ERESTARTSYS; spin_lock(&server->req_lock); } else { /* * For normal commands, reserve the last MAX_COMPOUND * credits to compound requests. * Otherwise these compounds could be permanently * starved for credits by single-credit requests. * * To prevent spinning CPU, block this thread until * there are >MAX_COMPOUND credits available. * But only do this if we already have a lot of * credits in flight to avoid triggering this check * for servers that are slow to hand out credits on * new sessions. */ if (!optype && num_credits == 1 && server->in_flight > 2 * MAX_COMPOUND && *credits <= MAX_COMPOUND) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); rc = wait_event_killable_timeout( server->request_q, has_credits(server, credits, MAX_COMPOUND + 1), t); cifs_num_waiters_dec(server); if (!rc) { spin_lock(&server->req_lock); scredits = *credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_credit_timeout( server->CurrentMid, server->conn_id, server->hostname, scredits, num_credits, in_flight); cifs_server_dbg(VFS, "wait timed out after %d ms\n", timeout); return -EBUSY; } if (rc == -ERESTARTSYS) return -ERESTARTSYS; spin_lock(&server->req_lock); continue; } /* * Can not count locking commands against total * as they are allowed to block on server. */ /* update # of requests on the wire to server */ if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) { *credits -= num_credits; server->in_flight += num_credits; if (server->in_flight > server->max_in_flight) server->max_in_flight = server->in_flight; *instance = server->reconnect_instance; } scredits = *credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_waitff_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, -(num_credits), in_flight); cifs_dbg(FYI, "%s: remove %u credits total=%d\n", __func__, num_credits, scredits); break; } } return 0; } static int wait_for_free_request(struct TCP_Server_Info *server, const int flags, unsigned int *instance) { return wait_for_free_credits(server, 1, -1, flags, instance); } static int wait_for_compound_request(struct TCP_Server_Info *server, int num, const int flags, unsigned int *instance) { int *credits; int scredits, in_flight; credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK); spin_lock(&server->req_lock); scredits = *credits; in_flight = server->in_flight; if (*credits < num) { /* * If the server is tight on resources or just gives us less * credits for other reasons (e.g. 
requests are coming out of * order and the server delays granting more credits until it * processes a missing mid) and we exhausted most available * credits there may be situations when we try to send * a compound request but we don't have enough credits. At this * point the client needs to decide if it should wait for * additional credits or fail the request. If at least one * request is in flight there is a high probability that the * server will return enough credits to satisfy this compound * request. * * Return immediately if no requests in flight since we will be * stuck on waiting for credits. */ if (server->in_flight == 0) { spin_unlock(&server->req_lock); trace_smb3_insufficient_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, num, in_flight); cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n", __func__, in_flight, num, scredits); return -EDEADLK; } } spin_unlock(&server->req_lock); return wait_for_free_credits(server, num, 60000, flags, instance); } int cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, unsigned int *num, struct cifs_credits *credits) { *num = size; credits->value = 0; credits->instance = server->reconnect_instance; return 0; } static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, struct mid_q_entry **ppmidQ) { spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) { spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are setting up session */ } if (ses->ses_status == SES_EXITING) { /* check if SMB session is bad because we are setting it up */ if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down session */ } spin_unlock(&ses->ses_lock); *ppmidQ = alloc_mid(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; spin_lock(&ses->server->mid_lock); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); spin_unlock(&ses->server->mid_lock); return 0; } static int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) { int error; error = wait_event_state(server->response_q, midQ->mid_state != MID_REQUEST_SUBMITTED && midQ->mid_state != MID_RESPONSE_RECEIVED, (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE)); if (error < 0) return -ERESTARTSYS; return 0; } struct mid_q_entry * cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) { int rc; struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; if (rqst->rq_iov[0].iov_len != 4 || rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) return ERR_PTR(-EIO); /* enable signing if server requires it */ if (server->sign) hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; mid = alloc_mid(hdr, server); if (mid == NULL) return ERR_PTR(-ENOMEM); rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); if (rc) { release_mid(mid); return ERR_PTR(rc); } return mid; } /* * Send a SMB request and set the callback function in the mid to handle * the result. Caller is responsible for dealing with timeouts. 
*/ int cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, mid_receive_t *receive, mid_callback_t *callback, mid_handle_t *handle, void *cbdata, const int flags, const struct cifs_credits *exist_credits) { int rc; struct mid_q_entry *mid; struct cifs_credits credits = { .value = 0, .instance = 0 }; unsigned int instance; int optype; optype = flags & CIFS_OP_MASK; if ((flags & CIFS_HAS_CREDITS) == 0) { rc = wait_for_free_request(server, flags, &instance); if (rc) return rc; credits.value = 1; credits.instance = instance; } else instance = exist_credits->instance; cifs_server_lock(server); /* * We can't use credits obtained from the previous session to send this * request. Check if there were reconnects after we obtained credits and * return -EAGAIN in such cases to let callers handle it. */ if (instance != server->reconnect_instance) { cifs_server_unlock(server); add_credits_and_wake_if(server, &credits, optype); return -EAGAIN; } mid = server->ops->setup_async_request(server, rqst); if (IS_ERR(mid)) { cifs_server_unlock(server); add_credits_and_wake_if(server, &credits, optype); return PTR_ERR(mid); } mid->receive = receive; mid->callback = callback; mid->callback_data = cbdata; mid->handle = handle; mid->mid_state = MID_REQUEST_SUBMITTED; /* put it on the pending_mid_q */ spin_lock(&server->mid_lock); list_add_tail(&mid->qhead, &server->pending_mid_q); spin_unlock(&server->mid_lock); /* * Need to store the time in mid before calling I/O. For call_async, * I/O response may come back and free the mid entry on another thread. */ cifs_save_when_sent(mid); rc = smb_send_rqst(server, 1, rqst, flags); if (rc < 0) { revert_current_mid(server, mid->credits); server->sequence_number -= 2; delete_mid(mid); } cifs_server_unlock(server); if (rc == 0) return 0; add_credits_and_wake_if(server, &credits, optype); return rc; } /* * * Send an SMB Request. No response info (other than return code) * needs to be parsed. * * flags indicate the type of request buffer and how long to wait * and whether to log NT STATUS code (error) before mapping it to POSIX error * */ int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, char *in_buf, int flags) { int rc; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; iov[0].iov_base = in_buf; iov[0].iov_len = get_rfc1002_length(in_buf) + 4; flags |= CIFS_NO_RSP_BUF; rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc); return rc; } static int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) { int rc = 0; cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); spin_lock(&server->mid_lock); switch (mid->mid_state) { case MID_RESPONSE_READY: spin_unlock(&server->mid_lock); return rc; case MID_RETRY_NEEDED: rc = -EAGAIN; break; case MID_RESPONSE_MALFORMED: rc = -EIO; break; case MID_SHUTDOWN: rc = -EHOSTDOWN; break; default: if (!(mid->mid_flags & MID_DELETED)) { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n", __func__, mid->mid, mid->mid_state); rc = -EIO; } spin_unlock(&server->mid_lock); release_mid(mid); return rc; } static inline int send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst, struct mid_q_entry *mid) { return server->ops->send_cancel ? 
server->ops->send_cancel(server, rqst, mid) : 0; } int cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, bool log_error) { unsigned int len = get_rfc1002_length(mid->resp_buf) + 4; dump_smb(mid->resp_buf, min_t(u32, 92, len)); /* convert the length into a more usable form */ if (server->sign) { struct kvec iov[2]; int rc = 0; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; iov[0].iov_base = mid->resp_buf; iov[0].iov_len = 4; iov[1].iov_base = (char *)mid->resp_buf + 4; iov[1].iov_len = len - 4; /* FIXME: add code to kill session */ rc = cifs_verify_signature(&rqst, server, mid->sequence_number); if (rc) cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* BB special case reconnect tid and uid here? */ return map_and_check_smb_error(mid, log_error); } struct mid_q_entry * cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored, struct smb_rqst *rqst) { int rc; struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; if (rqst->rq_iov[0].iov_len != 4 || rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) return ERR_PTR(-EIO); rc = allocate_mid(ses, hdr, &mid); if (rc) return ERR_PTR(rc); rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); if (rc) { delete_mid(mid); return ERR_PTR(rc); } return mid; } static void cifs_compound_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->server; struct cifs_credits credits; credits.value = server->ops->get_credits(mid); credits.instance = server->reconnect_instance; add_credits(server, &credits, mid->optype); if (mid->mid_state == MID_RESPONSE_RECEIVED) mid->mid_state = MID_RESPONSE_READY; } static void cifs_compound_last_callback(struct mid_q_entry *mid) { cifs_compound_callback(mid); cifs_wake_up_task(mid); } static void cifs_cancelled_callback(struct mid_q_entry *mid) { cifs_compound_callback(mid); release_mid(mid); } /* * Return a channel (master if none) of @ses that can be used to send * regular requests. * * If we are currently binding a new channel (negprot/sess.setup), * return the new incomplete channel. */ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) { uint index = 0; unsigned int min_in_flight = UINT_MAX, max_in_flight = 0; struct TCP_Server_Info *server = NULL; int i; if (!ses) return NULL; spin_lock(&ses->chan_lock); for (i = 0; i < ses->chan_count; i++) { server = ses->chans[i].server; if (!server) continue; /* * strictly speaking, we should pick up req_lock to read * server->in_flight. But it shouldn't matter much here if we * race while reading this data. The worst that can happen is * that we could use a channel that's not least loaded. 
Avoiding * taking the lock could help reduce wait time, which is * important for this function */ if (server->in_flight < min_in_flight) { min_in_flight = server->in_flight; index = i; } if (server->in_flight > max_in_flight) max_in_flight = server->in_flight; } /* if all channels are equally loaded, fall back to round-robin */ if (min_in_flight == max_in_flight) { index = (uint)atomic_inc_return(&ses->chan_seq); index %= ses->chan_count; } spin_unlock(&ses->chan_lock); return ses->chans[index].server; } int compound_send_recv(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server, const int flags, const int num_rqst, struct smb_rqst *rqst, int *resp_buf_type, struct kvec *resp_iov) { int i, j, optype, rc = 0; struct mid_q_entry *midQ[MAX_COMPOUND]; bool cancelled_mid[MAX_COMPOUND] = {false}; struct cifs_credits credits[MAX_COMPOUND] = { { .value = 0, .instance = 0 } }; unsigned int instance; char *buf; optype = flags & CIFS_OP_MASK; for (i = 0; i < num_rqst; i++) resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */ if (!ses || !ses->server || !server) { cifs_dbg(VFS, "Null session\n"); return -EIO; } spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } spin_unlock(&server->srv_lock); /* * Wait for all the requests to become available. * This approach still leaves the possibility to be stuck waiting for * credits if the server doesn't grant credits to the outstanding * requests and if the client is completely idle, not generating any * other requests. * This can be handled by the eventual session reconnect. */ rc = wait_for_compound_request(server, num_rqst, flags, &instance); if (rc) return rc; for (i = 0; i < num_rqst; i++) { credits[i].value = 1; credits[i].instance = instance; } /* * Make sure that we sign in the same order that we send on this socket * and avoid races inside tcp sendmsg code that could cause corruption * of smb data. */ cifs_server_lock(server); /* * All the parts of the compound chain must have obtained credits from * the same session. We can not use credits obtained from the previous * session to send this request. Check if there were reconnects after * we obtained credits and return -EAGAIN in such cases to let callers * handle it. */ if (instance != server->reconnect_instance) { cifs_server_unlock(server); for (j = 0; j < num_rqst; j++) add_credits(server, &credits[j], optype); return -EAGAIN; } for (i = 0; i < num_rqst; i++) { midQ[i] = server->ops->setup_request(ses, server, &rqst[i]); if (IS_ERR(midQ[i])) { revert_current_mid(server, i); for (j = 0; j < i; j++) delete_mid(midQ[j]); cifs_server_unlock(server); /* Update # of requests on wire to server */ for (j = 0; j < num_rqst; j++) add_credits(server, &credits[j], optype); return PTR_ERR(midQ[i]); } midQ[i]->mid_state = MID_REQUEST_SUBMITTED; midQ[i]->optype = optype; /* * Invoke callback for every part of the compound chain * to calculate credits properly. Wake up this thread only when * the last element is received. 
*/ if (i < num_rqst - 1) midQ[i]->callback = cifs_compound_callback; else midQ[i]->callback = cifs_compound_last_callback; } rc = smb_send_rqst(server, num_rqst, rqst, flags); for (i = 0; i < num_rqst; i++) cifs_save_when_sent(midQ[i]); if (rc < 0) { revert_current_mid(server, num_rqst); server->sequence_number -= 2; } cifs_server_unlock(server); /* * If sending failed for some reason or it is an oplock break that we * will not receive a response to - return credits back */ if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) { for (i = 0; i < num_rqst; i++) add_credits(server, &credits[i], optype); goto out; } /* * At this point the request is passed to the network stack - we assume * that any credits taken from the server structure on the client have * been spent and we can't return them back. Once we receive responses * we will collect credits granted by the server in the mid callbacks * and add those credits to the server structure. */ /* * Compounding is never used during session establish. */ spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec); cifs_server_unlock(server); spin_lock(&ses->ses_lock); } spin_unlock(&ses->ses_lock); for (i = 0; i < num_rqst; i++) { rc = wait_for_response(server, midQ[i]); if (rc != 0) break; } if (rc != 0) { for (; i < num_rqst; i++) { cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); spin_lock(&server->mid_lock); midQ[i]->mid_flags |= MID_WAIT_CANCELLED; if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED || midQ[i]->mid_state == MID_RESPONSE_RECEIVED) { midQ[i]->callback = cifs_cancelled_callback; cancelled_mid[i] = true; credits[i].value = 0; } spin_unlock(&server->mid_lock); } } for (i = 0; i < num_rqst; i++) { if (rc < 0) goto out; rc = cifs_sync_mid_result(midQ[i], server); if (rc != 0) { /* mark this mid as cancelled to not free it below */ cancelled_mid[i] = true; goto out; } if (!midQ[i]->resp_buf || midQ[i]->mid_state != MID_RESPONSE_READY) { rc = -EIO; cifs_dbg(FYI, "Bad MID state?\n"); goto out; } buf = (char *)midQ[i]->resp_buf; resp_iov[i].iov_base = buf; resp_iov[i].iov_len = midQ[i]->resp_buf_size + HEADER_PREAMBLE_SIZE(server); if (midQ[i]->large_buf) resp_buf_type[i] = CIFS_LARGE_BUFFER; else resp_buf_type[i] = CIFS_SMALL_BUFFER; rc = server->ops->check_receive(midQ[i], server, flags & CIFS_LOG_ERROR); /* mark it so buf will not be freed by delete_mid */ if ((flags & CIFS_NO_RSP_BUF) == 0) midQ[i]->resp_buf = NULL; } /* * Compounding is never used during session establish. */ spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { struct kvec iov = { .iov_base = resp_iov[0].iov_base, .iov_len = resp_iov[0].iov_len }; spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, &iov, 1); cifs_server_unlock(server); spin_lock(&ses->ses_lock); } spin_unlock(&ses->ses_lock); out: /* * This will dequeue all mids. After this it is important that the * demultiplex_thread will not process any of these mids any further. * This is prevented above by using a noop callback that will not * wake this thread except for the very last PDU. 
*/ for (i = 0; i < num_rqst; i++) { if (!cancelled_mid[i]) delete_mid(midQ[i]); } return rc; } int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server, struct smb_rqst *rqst, int *resp_buf_type, const int flags, struct kvec *resp_iov) { return compound_send_recv(xid, ses, server, flags, 1, rqst, resp_buf_type, resp_iov); } int SendReceive2(const unsigned int xid, struct cifs_ses *ses, struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, const int flags, struct kvec *resp_iov) { struct smb_rqst rqst; struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov; int rc; if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec), GFP_KERNEL); if (!new_iov) { /* otherwise cifs_send_recv below sets resp_buf_type */ *resp_buf_type = CIFS_NO_BUFFER; return -ENOMEM; } } else new_iov = s_iov; /* 1st iov is a RFC1001 length followed by the rest of the packet */ memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); new_iov[0].iov_base = new_iov[1].iov_base; new_iov[0].iov_len = 4; new_iov[1].iov_base += 4; new_iov[1].iov_len -= 4; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = new_iov; rqst.rq_nvec = n_vec + 1; rc = cifs_send_recv(xid, ses, ses->server, &rqst, resp_buf_type, flags, resp_iov); if (n_vec + 1 > CIFS_MAX_IOV_SIZE) kfree(new_iov); return rc; } int SendReceive(const unsigned int xid, struct cifs_ses *ses, struct smb_hdr *in_buf, struct smb_hdr *out_buf, int *pbytes_returned, const int flags) { int rc = 0; struct mid_q_entry *midQ; unsigned int len = be32_to_cpu(in_buf->smb_buf_length); struct kvec iov = { .iov_base = in_buf, .iov_len = len }; struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; struct cifs_credits credits = { .value = 1, .instance = 0 }; struct TCP_Server_Info *server; if (ses == NULL) { cifs_dbg(VFS, "Null smb session\n"); return -EIO; } server = ses->server; if (server == NULL) { cifs_dbg(VFS, "Null tcp session\n"); return -EIO; } spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. 
We may make this configurable later or use ses->maxReq */ if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", len); return -EIO; } rc = wait_for_free_request(server, flags, &credits.instance); if (rc) return rc; /* make sure that we sign in the same order that we send on this socket and avoid races inside tcp sendmsg code that could cause corruption of smb data */ cifs_server_lock(server); rc = allocate_mid(ses, in_buf, &midQ); if (rc) { cifs_server_unlock(server); /* Update # of requests on wire to server */ add_credits(server, &credits, 0); return rc; } rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); if (rc) { cifs_server_unlock(server); goto out; } midQ->mid_state = MID_REQUEST_SUBMITTED; rc = smb_send(server, in_buf, len); cifs_save_when_sent(midQ); if (rc < 0) server->sequence_number -= 2; cifs_server_unlock(server); if (rc < 0) goto out; rc = wait_for_response(server, midQ); if (rc != 0) { send_cancel(server, &rqst, midQ); spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED || midQ->mid_state == MID_RESPONSE_RECEIVED) { /* no longer considered to be "in-flight" */ midQ->callback = release_mid; spin_unlock(&server->mid_lock); add_credits(server, &credits, 0); return rc; } spin_unlock(&server->mid_lock); } rc = cifs_sync_mid_result(midQ, server); if (rc != 0) { add_credits(server, &credits, 0); return rc; } if (!midQ->resp_buf || !out_buf || midQ->mid_state != MID_RESPONSE_READY) { rc = -EIO; cifs_server_dbg(VFS, "Bad MID state?\n"); goto out; } *pbytes_returned = get_rfc1002_length(midQ->resp_buf); memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: delete_mid(midQ); add_credits(server, &credits, 0); return rc; } /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows blocking lock to return. */ static int send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, struct smb_hdr *in_buf, struct smb_hdr *out_buf) { int bytes_returned; struct cifs_ses *ses = tcon->ses; LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; /* We just modify the current in_buf to change the type of lock from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK to LOCKING_ANDX_CANCEL_LOCK. */ pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; pSMB->Timeout = 0; pSMB->hdr.Mid = get_next_mid(ses->server); return SendReceive(xid, ses, in_buf, out_buf, &bytes_returned, 0); } int SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, struct smb_hdr *in_buf, struct smb_hdr *out_buf, int *pbytes_returned) { int rc = 0; int rstart = 0; struct mid_q_entry *midQ; struct cifs_ses *ses; unsigned int len = be32_to_cpu(in_buf->smb_buf_length); struct kvec iov = { .iov_base = in_buf, .iov_len = len }; struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; unsigned int instance; struct TCP_Server_Info *server; if (tcon == NULL || tcon->ses == NULL) { cifs_dbg(VFS, "Null smb session\n"); return -EIO; } ses = tcon->ses; server = ses->server; if (server == NULL) { cifs_dbg(VFS, "Null tcp session\n"); return -EIO; } spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. 
We may make this configurable later or use ses->maxReq */ if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", len); return -EIO; } rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance); if (rc) return rc; /* make sure that we sign in the same order that we send on this socket and avoid races inside tcp sendmsg code that could cause corruption of smb data */ cifs_server_lock(server); rc = allocate_mid(ses, in_buf, &midQ); if (rc) { cifs_server_unlock(server); return rc; } rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); if (rc) { delete_mid(midQ); cifs_server_unlock(server); return rc; } midQ->mid_state = MID_REQUEST_SUBMITTED; rc = smb_send(server, in_buf, len); cifs_save_when_sent(midQ); if (rc < 0) server->sequence_number -= 2; cifs_server_unlock(server); if (rc < 0) { delete_mid(midQ); return rc; } /* Wait for a reply - allow signals to interrupt. */ rc = wait_event_interruptible(server->response_q, (!(midQ->mid_state == MID_REQUEST_SUBMITTED || midQ->mid_state == MID_RESPONSE_RECEIVED)) || ((server->tcpStatus != CifsGood) && (server->tcpStatus != CifsNew))); /* Were we interrupted by a signal ? */ spin_lock(&server->srv_lock); if ((rc == -ERESTARTSYS) && (midQ->mid_state == MID_REQUEST_SUBMITTED || midQ->mid_state == MID_RESPONSE_RECEIVED) && ((server->tcpStatus == CifsGood) || (server->tcpStatus == CifsNew))) { spin_unlock(&server->srv_lock); if (in_buf->Command == SMB_COM_TRANSACTION2) { /* POSIX lock. We send a NT_CANCEL SMB to cause the blocking lock to return. */ rc = send_cancel(server, &rqst, midQ); if (rc) { delete_mid(midQ); return rc; } } else { /* Windows lock. We send a LOCKINGX_CANCEL_LOCK to cause the blocking lock to return. */ rc = send_lock_cancel(xid, tcon, in_buf, out_buf); /* If we get -ENOLCK back the lock may have already been removed. Don't exit in this case. */ if (rc && rc != -ENOLCK) { delete_mid(midQ); return rc; } } rc = wait_for_response(server, midQ); if (rc) { send_cancel(server, &rqst, midQ); spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED || midQ->mid_state == MID_RESPONSE_RECEIVED) { /* no longer considered to be "in-flight" */ midQ->callback = release_mid; spin_unlock(&server->mid_lock); return rc; } spin_unlock(&server->mid_lock); } /* We got the response - restart system call. */ rstart = 1; spin_lock(&server->srv_lock); } spin_unlock(&server->srv_lock); rc = cifs_sync_mid_result(midQ, server); if (rc != 0) return rc; /* rcvd frame is ok */ if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) { rc = -EIO; cifs_tcon_dbg(VFS, "Bad MID state?\n"); goto out; } *pbytes_returned = get_rfc1002_length(midQ->resp_buf); memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: delete_mid(midQ); if (rstart && rc == -EACCES) return -ERESTARTSYS; return rc; } /* * Discard any remaining data in the current SMB. To do this, we borrow the * current bigbuf. 
*/ int cifs_discard_remaining_data(struct TCP_Server_Info *server) { unsigned int rfclen = server->pdu_size; size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) - server->total_read; while (remaining > 0) { ssize_t length; length = cifs_discard_from_socket(server, min_t(size_t, remaining, CIFSMaxBufSize + MAX_HEADER_SIZE(server))); if (length < 0) return length; server->total_read += length; remaining -= length; } return 0; } static int __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, bool malformed) { int length; length = cifs_discard_remaining_data(server); dequeue_mid(mid, malformed); mid->resp_buf = server->smallbuf; server->smallbuf = NULL; return length; } static int cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; return __cifs_readv_discard(server, mid, rdata->result); } int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) { int length, len; unsigned int data_offset, data_len; struct cifs_readdata *rdata = mid->callback_data; char *buf = server->smallbuf; unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server); bool use_rdma_mr = false; cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", __func__, mid->mid, rdata->offset, rdata->bytes); /* * read the rest of READ_RSP header (sans Data array), or whatever we * can if there's not enough data. At this point, we've read down to * the Mid. */ len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - HEADER_SIZE(server) + 1; length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len); if (length < 0) return length; server->total_read += length; if (server->ops->is_session_expired && server->ops->is_session_expired(buf)) { cifs_reconnect(server, true); return -1; } if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server)) { cifs_discard_remaining_data(server); return -1; } /* set up first two iov for signature check and to get credits */ rdata->iov[0].iov_base = buf; rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server); rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server); rdata->iov[1].iov_len = server->total_read - HEADER_PREAMBLE_SIZE(server); cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", rdata->iov[0].iov_base, rdata->iov[0].iov_len); cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", rdata->iov[1].iov_base, rdata->iov[1].iov_len); /* Was the SMB read successful? */ rdata->result = server->ops->map_error(buf, false); if (rdata->result != 0) { cifs_dbg(FYI, "%s: server returned error %d\n", __func__, rdata->result); /* normal error on read response */ return __cifs_readv_discard(server, mid, false); } /* Is there enough to get to the rest of the READ_RSP header? */ if (server->total_read < server->vals->read_rsp_size) { cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", __func__, server->total_read, server->vals->read_rsp_size); rdata->result = -EIO; return cifs_readv_discard(server, mid); } data_offset = server->ops->read_data_offset(buf) + HEADER_PREAMBLE_SIZE(server); if (data_offset < server->total_read) { /* * win2k8 sometimes sends an offset of 0 when the read * is beyond the EOF. Treat it as if the data starts just after * the header. 
*/ cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", __func__, data_offset); data_offset = server->total_read; } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { /* data_offset is beyond the end of smallbuf */ cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", __func__, data_offset); rdata->result = -EIO; return cifs_readv_discard(server, mid); } cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", __func__, server->total_read, data_offset); len = data_offset - server->total_read; if (len > 0) { /* read any junk before data into the rest of smallbuf */ length = cifs_read_from_socket(server, buf + server->total_read, len); if (length < 0) return length; server->total_read += length; } /* how much data is in the response? */ #ifdef CONFIG_CIFS_SMB_DIRECT use_rdma_mr = rdata->mr; #endif data_len = server->ops->read_data_length(buf, use_rdma_mr); if (!use_rdma_mr && (data_offset + data_len > buflen)) { /* data_len is corrupt -- discard frame */ rdata->result = -EIO; return cifs_readv_discard(server, mid); } #ifdef CONFIG_CIFS_SMB_DIRECT if (rdata->mr) length = data_len; /* An RDMA read is already done. */ else #endif length = cifs_read_iter_from_socket(server, &rdata->iter, data_len); if (length > 0) rdata->got_bytes += length; server->total_read += length; cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", server->total_read, buflen, data_len); /* discard anything left over */ if (server->total_read < buflen) return cifs_readv_discard(server, mid); dequeue_mid(mid, false); mid->resp_buf = server->smallbuf; server->smallbuf = NULL; return length; }
linux-master
fs/smb/client/transport.c
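/*
 * Illustrative sketch, not part of the kernel sources: the nonblocking
 * send path of smb_send_kvec() above retries sock_sendmsg() on -EAGAIN,
 * sleeping msleep(1 << retries) between attempts and giving up once
 * retries reaches 14.  This small userspace program reproduces that
 * backoff schedule to show why the in-code comment says the overall
 * wait is "about 15 seconds".
 */
#include <stdio.h>

int main(void)
{
	unsigned long total_ms = 0;
	int retries;

	/* retries 1..13 each sleep before the next attempt; 14 fails */
	for (retries = 1; retries < 14; retries++)
		total_ms += 1UL << retries;	/* models msleep(1 << retries) */

	/* 2 + 4 + ... + 8192 = 2^14 - 2 = 16382 ms, roughly 15 seconds */
	printf("worst-case backoff before -EAGAIN: %lu ms\n", total_ms);
	return 0;
}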
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002, 2011 * Author(s): Steve French ([email protected]), * Pavel Shilovsky (([email protected]) 2012 * */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "fscache.h" #include "smb2proto.h" #include "smb2status.h" static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov) { struct smb2_err_rsp *err = iov->iov_base; struct smb2_symlink_err_rsp *sym = ERR_PTR(-EINVAL); u32 len; if (err->ErrorContextCount) { struct smb2_error_context_rsp *p, *end; len = (u32)err->ErrorContextCount * (offsetof(struct smb2_error_context_rsp, ErrorContextData) + sizeof(struct smb2_symlink_err_rsp)); if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err) + 1) return ERR_PTR(-EINVAL); p = (struct smb2_error_context_rsp *)err->ErrorData; end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len); do { if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) { sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData; break; } cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n", __func__, le32_to_cpu(p->ErrorId)); len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8); p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len); } while (p < end); } else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) && iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) { sym = (struct smb2_symlink_err_rsp *)err->ErrorData; } if (!IS_ERR(sym) && (le32_to_cpu(sym->SymLinkErrorTag) != SYMLINK_ERROR_TAG || le32_to_cpu(sym->ReparseTag) != IO_REPARSE_TAG_SYMLINK)) sym = ERR_PTR(-EINVAL); return sym; } int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path) { struct smb2_symlink_err_rsp *sym; unsigned int sub_offs, sub_len; unsigned int print_offs, print_len; char *s; if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path) return -EINVAL; sym = symlink_data(iov); if (IS_ERR(sym)) return PTR_ERR(sym); sub_len = le16_to_cpu(sym->SubstituteNameLength); sub_offs = le16_to_cpu(sym->SubstituteNameOffset); print_len = le16_to_cpu(sym->PrintNameLength); print_offs = le16_to_cpu(sym->PrintNameOffset); if (iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offs + sub_len || iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len) return -EINVAL; s = cifs_strndup_from_utf16((char *)sym->PathBuffer + sub_offs, sub_len, true, cifs_sb->local_nls); if (!s) return -ENOMEM; convert_delimiter(s, '/'); cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, s); *path = s; return 0; } int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf) { int rc; __le16 *smb2_path; __u8 smb2_oplock; struct cifs_open_info_data *data = buf; struct smb2_file_all_info file_info = {}; struct smb2_file_all_info *smb2_data = data ? 
&file_info : NULL; struct kvec err_iov = {}; int err_buftype = CIFS_NO_BUFFER; struct cifs_fid *fid = oparms->fid; struct network_resiliency_req nr_ioctl_req; smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb); if (smb2_path == NULL) return -ENOMEM; oparms->desired_access |= FILE_READ_ATTRIBUTES; smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); if (rc && data) { struct smb2_hdr *hdr = err_iov.iov_base; if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER)) goto out; if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) { rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov, &data->symlink_target); if (!rc) { memset(smb2_data, 0, sizeof(*smb2_data)); oparms->create_options |= OPEN_REPARSE_POINT; rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, NULL, NULL); oparms->create_options &= ~OPEN_REPARSE_POINT; } } } if (rc) goto out; if (oparms->tcon->use_resilient) { /* default timeout is 0, servers pick default (120 seconds) */ nr_ioctl_req.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), CIFSMaxBufSize, NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { cifs_dbg(VFS, "resiliency not supported by server, disabling\n"); oparms->tcon->use_resilient = false; } else if (rc) cifs_dbg(FYI, "error %d setting resiliency\n", rc); rc = 0; } if (smb2_data) { /* if open response does not have IndexNumber field - get it */ if (smb2_data->IndexNumber == 0) { rc = SMB2_get_srv_num(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, &smb2_data->IndexNumber); if (rc) { /* * let get_inode_info disable server inode * numbers */ smb2_data->IndexNumber = 0; rc = 0; } } memcpy(&data->fi, smb2_data, sizeof(data->fi)); } *oplock = smb2_oplock; out: free_rsp_buf(err_buftype, err_iov.iov_base); kfree(smb2_path); return rc; } int smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, const unsigned int xid) { int rc = 0, stored_rc; unsigned int max_num, num = 0, max_buf; struct smb2_lock_element *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifsLockInfo *li, *tmp; __u64 length = 1 + flock->fl_end - flock->fl_start; struct list_head tmp_llist; INIT_LIST_HEAD(&tmp_llist); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it before using. */ max_buf = tcon->ses->server->maxBuf; if (max_buf < sizeof(struct smb2_lock_element)) return -EINVAL; BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); max_num = max_buf / sizeof(struct smb2_lock_element); buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); if (!buf) return -ENOMEM; cur = buf; cifs_down_write(&cinode->lock_sem); list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset || (flock->fl_start + length) < (li->offset + li->length)) continue; if (current->tgid != li->pid) /* * flock and OFD lock are associated with an open * file description, not the process. */ if (!(flock->fl_flags & (FL_FLOCK | FL_OFDLCK))) continue; if (cinode->can_cache_brlcks) { /* * We can cache brlock requests - simply remove a lock * from the file's list. 
*/ list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); continue; } cur->Length = cpu_to_le64(li->length); cur->Offset = cpu_to_le64(li->offset); cur->Flags = cpu_to_le32(SMB2_LOCKFLAG_UNLOCK); /* * We need to save a lock here to let us add it again to the * file's list if the unlock range request fails on the server. */ list_move(&li->llist, &tmp_llist); if (++num == max_num) { stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, num, buf); if (stored_rc) { /* * We failed on the unlock range request - add * all locks from the tmp list to the head of * the file's list. */ cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else /* * The unlock range request succeeded - free the * tmp list. */ cifs_free_llist(&tmp_llist); cur = buf; num = 0; } else cur++; } if (num) { stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, num, buf); if (stored_rc) { cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else cifs_free_llist(&tmp_llist); } up_write(&cinode->lock_sem); kfree(buf); return rc; } static int smb2_push_mand_fdlocks(struct cifs_fid_locks *fdlocks, const unsigned int xid, struct smb2_lock_element *buf, unsigned int max_num) { int rc = 0, stored_rc; struct cifsFileInfo *cfile = fdlocks->cfile; struct cifsLockInfo *li; unsigned int num = 0; struct smb2_lock_element *cur = buf; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); list_for_each_entry(li, &fdlocks->locks, llist) { cur->Length = cpu_to_le64(li->length); cur->Offset = cpu_to_le64(li->offset); cur->Flags = cpu_to_le32(li->type | SMB2_LOCKFLAG_FAIL_IMMEDIATELY); if (++num == max_num) { stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, num, buf); if (stored_rc) rc = stored_rc; cur = buf; num = 0; } else cur++; } if (num) { stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, num, buf); if (stored_rc) rc = stored_rc; } return rc; } int smb2_push_mandatory_locks(struct cifsFileInfo *cfile) { int rc = 0, stored_rc; unsigned int xid; unsigned int max_num, max_buf; struct smb2_lock_element *buf; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_fid_locks *fdlocks; xid = get_xid(); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it for zero before using. */ max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf; if (max_buf < sizeof(struct smb2_lock_element)) { free_xid(xid); return -EINVAL; } BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); max_num = max_buf / sizeof(struct smb2_lock_element); buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); if (!buf) { free_xid(xid); return -ENOMEM; } list_for_each_entry(fdlocks, &cinode->llist, llist) { stored_rc = smb2_push_mand_fdlocks(fdlocks, xid, buf, max_num); if (stored_rc) rc = stored_rc; } kfree(buf); free_xid(xid); return rc; }
linux-master
fs/smb/client/smb2file.c
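/*
 * Illustrative sketch, not from the kernel tree: smb2_unlock_range() and
 * smb2_push_mand_fdlocks() above cap each SMB2_LOCK request at
 * max_buf / sizeof(struct smb2_lock_element) entries and flush the array
 * whenever it fills, then once more for any remainder.  The standalone
 * program below demonstrates that batching pattern; struct lock_element
 * and send_batch() are stand-ins for struct smb2_lock_element and
 * smb2_lockv().
 */
#include <stdio.h>

struct lock_element {			/* stand-in for struct smb2_lock_element */
	unsigned long long offset;
	unsigned long long length;
};

static void send_batch(const struct lock_element *buf, unsigned int num)
{
	printf("sending batch of %u lock elements\n", num);
}

static void push_locks(const struct lock_element *locks, unsigned int count,
		       unsigned int max_buf)
{
	struct lock_element buf[64];
	unsigned int max_num = max_buf / sizeof(struct lock_element);
	unsigned int num = 0, i;

	if (max_num > 64)
		max_num = 64;		/* keep the demo buffer bounded */

	for (i = 0; i < count; i++) {
		buf[num] = locks[i];
		if (++num == max_num) {	/* array full: flush and restart */
			send_batch(buf, num);
			num = 0;
		}
	}
	if (num)			/* final, partially filled batch */
		send_batch(buf, num);
}

int main(void)
{
	struct lock_element locks[10] = { { 0, 0 } };

	/* room for 4 elements per request -> batches of 4, 4 and 2 */
	push_locks(locks, 10, 4 * sizeof(struct lock_element));
	return 0;
}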
// SPDX-License-Identifier: LGPL-2.1 /* * CIFS filesystem cache interface * * Copyright (c) 2010 Novell, Inc. * Author(s): Suresh Jayaraman <[email protected]> * */ #include "fscache.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifsproto.h" static void cifs_fscache_fill_volume_coherency( struct cifs_tcon *tcon, struct cifs_fscache_volume_coherency_data *cd) { memset(cd, 0, sizeof(*cd)); cd->resource_id = cpu_to_le64(tcon->resource_id); cd->vol_create_time = tcon->vol_create_time; cd->vol_serial_number = cpu_to_le32(tcon->vol_serial_number); } int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { struct cifs_fscache_volume_coherency_data cd; struct TCP_Server_Info *server = tcon->ses->server; struct fscache_volume *vcookie; const struct sockaddr *sa = (struct sockaddr *)&server->dstaddr; size_t slen, i; char *sharename; char *key; int ret = -ENOMEM; tcon->fscache = NULL; switch (sa->sa_family) { case AF_INET: case AF_INET6: break; default: cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family); return -EINVAL; } memset(&key, 0, sizeof(key)); sharename = extract_sharename(tcon->tree_name); if (IS_ERR(sharename)) { cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__); return PTR_ERR(sharename); } slen = strlen(sharename); for (i = 0; i < slen; i++) if (sharename[i] == '/') sharename[i] = ';'; key = kasprintf(GFP_KERNEL, "cifs,%pISpc,%s", sa, sharename); if (!key) goto out; cifs_fscache_fill_volume_coherency(tcon, &cd); vcookie = fscache_acquire_volume(key, NULL, /* preferred_cache */ &cd, sizeof(cd)); cifs_dbg(FYI, "%s: (%s/0x%p)\n", __func__, key, vcookie); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { ret = PTR_ERR(vcookie); goto out_2; } pr_err("Cache volume key already in use (%s)\n", key); vcookie = NULL; } tcon->fscache = vcookie; ret = 0; out_2: kfree(key); out: kfree(sharename); return ret; } void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) { struct cifs_fscache_volume_coherency_data cd; cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache); cifs_fscache_fill_volume_coherency(tcon, &cd); fscache_relinquish_volume(tcon->fscache, &cd, false); tcon->fscache = NULL; } void cifs_fscache_get_inode_cookie(struct inode *inode) { struct cifs_fscache_inode_coherency_data cd; struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd); cifsi->netfs.cache = fscache_acquire_cookie(tcon->fscache, 0, &cifsi->uniqueid, sizeof(cifsi->uniqueid), &cd, sizeof(cd), i_size_read(&cifsi->netfs.inode)); if (cifsi->netfs.cache) mapping_set_release_always(inode->i_mapping); } void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) { if (update) { struct cifs_fscache_inode_coherency_data cd; loff_t i_size = i_size_read(inode); cifs_fscache_fill_coherency(inode, &cd); fscache_unuse_cookie(cifs_inode_cookie(inode), &cd, &i_size); } else { fscache_unuse_cookie(cifs_inode_cookie(inode), NULL, NULL); } } void cifs_fscache_release_inode_cookie(struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); struct fscache_cookie *cookie = cifs_inode_cookie(inode); if (cookie) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie); fscache_relinquish_cookie(cookie, false); cifsi->netfs.cache = NULL; } } /* * Fallback page reading interface. 
*/ static int fscache_fallback_read_page(struct inode *inode, struct page *page) { struct netfs_cache_resources cres; struct fscache_cookie *cookie = cifs_inode_cookie(inode); struct iov_iter iter; struct bio_vec bvec; int ret; memset(&cres, 0, sizeof(cres)); bvec_set_page(&bvec, page, PAGE_SIZE, 0); iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE); ret = fscache_begin_read_operation(&cres, cookie); if (ret < 0) return ret; ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL, NULL, NULL); fscache_end_operation(&cres); return ret; } /* * Fallback page writing interface. */ static int fscache_fallback_write_pages(struct inode *inode, loff_t start, size_t len, bool no_space_allocated_yet) { struct netfs_cache_resources cres; struct fscache_cookie *cookie = cifs_inode_cookie(inode); struct iov_iter iter; int ret; memset(&cres, 0, sizeof(cres)); iov_iter_xarray(&iter, ITER_SOURCE, &inode->i_mapping->i_pages, start, len); ret = fscache_begin_write_operation(&cres, cookie); if (ret < 0) return ret; ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode), no_space_allocated_yet); if (ret == 0) ret = fscache_write(&cres, start, &iter, NULL, NULL); fscache_end_operation(&cres); return ret; } /* * Retrieve a page from FS-Cache */ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) { int ret; cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n", __func__, cifs_inode_cookie(inode), page, inode); ret = fscache_fallback_read_page(inode, page); if (ret < 0) return ret; /* Read completed synchronously */ SetPageUptodate(page); return 0; } void __cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) { cifs_dbg(FYI, "%s: (fsc: %p, p: %llx, l: %zx, i: %p)\n", __func__, cifs_inode_cookie(inode), pos, len, inode); fscache_fallback_write_pages(inode, pos, len, true); } /* * Query the cache occupancy. */ int __cifs_fscache_query_occupancy(struct inode *inode, pgoff_t first, unsigned int nr_pages, pgoff_t *_data_first, unsigned int *_data_nr_pages) { struct netfs_cache_resources cres; struct fscache_cookie *cookie = cifs_inode_cookie(inode); loff_t start, data_start; size_t len, data_len; int ret; ret = fscache_begin_read_operation(&cres, cookie); if (ret < 0) return ret; start = first * PAGE_SIZE; len = nr_pages * PAGE_SIZE; ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE, &data_start, &data_len); if (ret == 0) { *_data_first = data_start / PAGE_SIZE; *_data_nr_pages = len / PAGE_SIZE; } fscache_end_operation(&cres); return ret; }
linux-master
fs/smb/client/fscache.c
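/*
 * Illustrative sketch, not kernel code: cifs_fscache_get_super_cookie()
 * above builds the fscache volume key as "cifs,<address>,<sharename>"
 * after replacing every '/' in the share name with ';' so the key carries
 * no path separators.  A userspace approximation follows; the address is
 * passed as a plain string here instead of being formatted from a
 * struct sockaddr with the kernel's %pISpc printk extension.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *make_volume_key(const char *addr, const char *sharename)
{
	size_t i, len;
	char *share = strdup(sharename);
	char *key = NULL;

	if (!share)
		return NULL;
	for (i = 0; share[i]; i++)	/* ';' is safe in a cache key, '/' is not */
		if (share[i] == '/')
			share[i] = ';';

	/* mirrors kasprintf(GFP_KERNEL, "cifs,%pISpc,%s", sa, sharename) */
	len = strlen("cifs,") + strlen(addr) + 1 + strlen(share) + 1;
	key = malloc(len);
	if (key)
		snprintf(key, len, "cifs,%s,%s", addr, share);
	free(share);
	return key;
}

int main(void)
{
	char *key = make_volume_key("192.0.2.1:445", "srv/share");

	if (key) {
		printf("%s\n", key);	/* prints: cifs,192.0.2.1:445,srv;share */
		free(key);
	}
	return 0;
}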
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (c) International Business Machines Corp., 2003, 2007 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/posix_acl_xattr.h> #include <linux/slab.h> #include <linux/xattr.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "cifs_ioctl.h" #define MAX_EA_VALUE_SIZE CIFSMaxBufSize #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */ #define CIFS_XATTR_CIFS_NTSD "system.cifs_ntsd" /* owner plus DACL */ #define CIFS_XATTR_CIFS_NTSD_FULL "system.cifs_ntsd_full" /* owner/DACL/SACL */ #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */ /* * Although these three are just aliases for the above, need to move away from * confusing users and using the 20+ year old term 'cifs' when it is no longer * secure, replaced by SMB2 (then even more highly secure SMB3) many years ago */ #define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */ #define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */ #define SMB3_XATTR_CIFS_NTSD_FULL "system.smb3_ntsd_full" /* owner/DACL/SACL */ #define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */ #define SMB3_XATTR_CREATETIME "smb3.creationtime" /* user.smb3.creationtime */ /* BB need to add server (Samba e.g) support for security and trusted prefix */ enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT, XATTR_CIFS_NTSD, XATTR_CIFS_NTSD_FULL }; static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon, struct inode *inode, const char *full_path, const void *value, size_t size) { ssize_t rc = -EOPNOTSUPP; __u32 *pattrib = (__u32 *)value; __u32 attrib; FILE_BASIC_INFO info_buf; if ((value == NULL) || (size != sizeof(__u32))) return -ERANGE; memset(&info_buf, 0, sizeof(info_buf)); attrib = *pattrib; info_buf.Attributes = cpu_to_le32(attrib); if (pTcon->ses->server->ops->set_file_info) rc = pTcon->ses->server->ops->set_file_info(inode, full_path, &info_buf, xid); if (rc == 0) CIFS_I(inode)->cifsAttrs = attrib; return rc; } static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon, struct inode *inode, const char *full_path, const void *value, size_t size) { ssize_t rc = -EOPNOTSUPP; __u64 *pcreation_time = (__u64 *)value; __u64 creation_time; FILE_BASIC_INFO info_buf; if ((value == NULL) || (size != sizeof(__u64))) return -ERANGE; memset(&info_buf, 0, sizeof(info_buf)); creation_time = *pcreation_time; info_buf.CreationTime = cpu_to_le64(creation_time); if (pTcon->ses->server->ops->set_file_info) rc = pTcon->ses->server->ops->set_file_info(inode, full_path, &info_buf, xid); if (rc == 0) CIFS_I(inode)->createtime = creation_time; return rc; } static int cifs_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { int rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = 
PTR_ERR(full_path); goto out; } /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ if (size > MAX_EA_VALUE_SIZE) { cifs_dbg(FYI, "size of EA value too large\n"); rc = -EOPNOTSUPP; goto out; } switch (handler->flags) { case XATTR_USER: cifs_dbg(FYI, "%s:setting user xattr %s\n", __func__, name); if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) || (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) { rc = cifs_attrib_set(xid, pTcon, inode, full_path, value, size); if (rc == 0) /* force revalidate of the inode */ CIFS_I(inode)->time = 0; break; } else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) || (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) { rc = cifs_creation_time_set(xid, pTcon, inode, full_path, value, size); if (rc == 0) /* force revalidate of the inode */ CIFS_I(inode)->time = 0; break; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) goto out; if (pTcon->ses->server->ops->set_EA) rc = pTcon->ses->server->ops->set_EA(xid, pTcon, full_path, name, value, (__u16)size, cifs_sb->local_nls, cifs_sb); break; case XATTR_CIFS_ACL: case XATTR_CIFS_NTSD: case XATTR_CIFS_NTSD_FULL: { struct cifs_ntsd *pacl; if (!value) goto out; pacl = kmalloc(size, GFP_KERNEL); if (!pacl) { rc = -ENOMEM; } else { memcpy(pacl, value, size); if (pTcon->ses->server->ops->set_acl) { int aclflags = 0; rc = 0; switch (handler->flags) { case XATTR_CIFS_NTSD_FULL: aclflags = (CIFS_ACL_OWNER | CIFS_ACL_GROUP | CIFS_ACL_DACL | CIFS_ACL_SACL); break; case XATTR_CIFS_NTSD: aclflags = (CIFS_ACL_OWNER | CIFS_ACL_GROUP | CIFS_ACL_DACL); break; case XATTR_CIFS_ACL: default: aclflags = CIFS_ACL_DACL; } rc = pTcon->ses->server->ops->set_acl(pacl, size, inode, full_path, aclflags); } else { rc = -EOPNOTSUPP; } if (rc == 0) /* force revalidate of the inode */ CIFS_I(inode)->time = 0; kfree(pacl); } break; } } out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; } static int cifs_attrib_get(struct dentry *dentry, struct inode *inode, void *value, size_t size) { ssize_t rc; __u32 *pattribute; rc = cifs_revalidate_dentry_attr(dentry); if (rc) return rc; if ((value == NULL) || (size == 0)) return sizeof(__u32); else if (size < sizeof(__u32)) return -ERANGE; /* return dos attributes as pseudo xattr */ pattribute = (__u32 *)value; *pattribute = CIFS_I(inode)->cifsAttrs; return sizeof(__u32); } static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode, void *value, size_t size) { ssize_t rc; __u64 *pcreatetime; rc = cifs_revalidate_dentry_attr(dentry); if (rc) return rc; if ((value == NULL) || (size == 0)) return sizeof(__u64); else if (size < sizeof(__u64)) return -ERANGE; /* return dos attributes as pseudo xattr */ pcreatetime = (__u64 *)value; *pcreatetime = CIFS_I(inode)->createtime; return sizeof(__u64); } static int cifs_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *value, size_t size) { ssize_t rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } /* return alt name if available as pseudo 
attr */ switch (handler->flags) { case XATTR_USER: cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name); if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) || (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) { rc = cifs_attrib_get(dentry, inode, value, size); break; } else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) || (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) { rc = cifs_creation_time_get(dentry, inode, value, size); break; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) goto out; if (pTcon->ses->server->ops->query_all_EAs) rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, full_path, name, value, size, cifs_sb); break; case XATTR_CIFS_ACL: case XATTR_CIFS_NTSD: case XATTR_CIFS_NTSD_FULL: { /* * fetch owner, DACL, and SACL if asked for full descriptor, * fetch owner and DACL otherwise */ u32 acllen, extra_info; struct cifs_ntsd *pacl; if (pTcon->ses->server->ops->get_acl == NULL) goto out; /* rc already EOPNOTSUPP */ if (handler->flags == XATTR_CIFS_NTSD_FULL) { extra_info = SACL_SECINFO; } else { extra_info = 0; } pacl = pTcon->ses->server->ops->get_acl(cifs_sb, inode, full_path, &acllen, extra_info); if (IS_ERR(pacl)) { rc = PTR_ERR(pacl); cifs_dbg(VFS, "%s: error %zd getting sec desc\n", __func__, rc); } else { if (value) { if (acllen > size) acllen = -ERANGE; else memcpy(value, pacl, acllen); } rc = acllen; kfree(pacl); } break; } } /* We could add an additional check for streams ie if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ if (rc == -EINVAL) rc = -EOPNOTSUPP; out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; } ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) { ssize_t rc = -EOPNOTSUPP; unsigned int xid; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) return -EOPNOTSUPP; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto list_ea_exit; } /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ if (pTcon->ses->server->ops->query_all_EAs) rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, full_path, NULL, data, buf_size, cifs_sb); list_ea_exit: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; } static const struct xattr_handler cifs_user_xattr_handler = { .prefix = XATTR_USER_PREFIX, .flags = XATTR_USER, .get = cifs_xattr_get, .set = cifs_xattr_set, }; /* os2.* attributes are treated like user.* attributes */ static const struct xattr_handler cifs_os2_xattr_handler = { .prefix = XATTR_OS2_PREFIX, .flags = XATTR_USER, .get = cifs_xattr_get, .set = cifs_xattr_set, }; static const struct xattr_handler cifs_cifs_acl_xattr_handler = { .name = CIFS_XATTR_CIFS_ACL, .flags = XATTR_CIFS_ACL, .get = cifs_xattr_get, .set = cifs_xattr_set, }; /* * Although this is just an alias for the above, need to move away from * confusing users and using the 20 year old term 'cifs' when it is no * longer secure and was replaced by SMB2/SMB3 a long time ago, and * SMB3 and later are highly secure. 
*/ static const struct xattr_handler smb3_acl_xattr_handler = { .name = SMB3_XATTR_CIFS_ACL, .flags = XATTR_CIFS_ACL, .get = cifs_xattr_get, .set = cifs_xattr_set, }; static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = { .name = CIFS_XATTR_CIFS_NTSD, .flags = XATTR_CIFS_NTSD, .get = cifs_xattr_get, .set = cifs_xattr_set, }; /* * Although this is just an alias for the above, need to move away from * confusing users and using the 20 year old term 'cifs' when it is no * longer secure and was replaced by SMB2/SMB3 a long time ago, and * SMB3 and later are highly secure. */ static const struct xattr_handler smb3_ntsd_xattr_handler = { .name = SMB3_XATTR_CIFS_NTSD, .flags = XATTR_CIFS_NTSD, .get = cifs_xattr_get, .set = cifs_xattr_set, }; static const struct xattr_handler cifs_cifs_ntsd_full_xattr_handler = { .name = CIFS_XATTR_CIFS_NTSD_FULL, .flags = XATTR_CIFS_NTSD_FULL, .get = cifs_xattr_get, .set = cifs_xattr_set, }; /* * Although this is just an alias for the above, need to move away from * confusing users and using the 20 year old term 'cifs' when it is no * longer secure and was replaced by SMB2/SMB3 a long time ago, and * SMB3 and later are highly secure. */ static const struct xattr_handler smb3_ntsd_full_xattr_handler = { .name = SMB3_XATTR_CIFS_NTSD_FULL, .flags = XATTR_CIFS_NTSD_FULL, .get = cifs_xattr_get, .set = cifs_xattr_set, }; const struct xattr_handler *cifs_xattr_handlers[] = { &cifs_user_xattr_handler, &cifs_os2_xattr_handler, &cifs_cifs_acl_xattr_handler, &smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */ &cifs_cifs_ntsd_xattr_handler, &smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */ &cifs_cifs_ntsd_full_xattr_handler, &smb3_ntsd_full_xattr_handler, /* alias for above since avoiding "cifs" */ NULL };
linux-master
fs/smb/client/xattr.c
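The user.* handler above intercepts two pseudo attribute names before falling through to real server EAs: user.cifs.dosattrib (alias user.smb3.dosattrib) round-trips the 32-bit DOS attribute word through cifs_attrib_get()/cifs_attrib_set(), and user.cifs.creationtime (alias user.smb3.creationtime) does the same for the 64-bit creation time. A minimal userspace sketch of reading both follows; the mount-point path is hypothetical, and the buffer sizes matter because the handlers return exactly sizeof(__u32) and sizeof(__u64) on success.

/* Userspace sketch, not kernel code; any file on a cifs/smb3 mount works. */
#include <stdint.h>
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/share/file.txt";	/* hypothetical mount */
	uint32_t dosattrib;
	uint64_t createtime;

	/* Served by cifs_attrib_get(); returns sizeof(__u32) on success. */
	if (getxattr(path, "user.cifs.dosattrib",
		     &dosattrib, sizeof(dosattrib)) == sizeof(dosattrib))
		printf("DOS attributes: 0x%x\n", dosattrib);

	/* Served by cifs_creation_time_get(); returns sizeof(__u64). */
	if (getxattr(path, "user.cifs.creationtime",
		     &createtime, sizeof(createtime)) == sizeof(createtime))
		printf("creation time: %llu\n", (unsigned long long)createtime);

	return 0;
}

As with any xattr, calling getxattr() with a NULL buffer or zero size first returns the required size; both handlers implement that probe explicitly.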
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2011 * Etersoft, 2012 * Author(s): Steve French ([email protected]) * Pavel Shilovsky ([email protected]) 2012 * */ #include <linux/ctype.h> #include "cifsglob.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_debug.h" #include "cifs_unicode.h" #include "smb2status.h" #include "smb2glob.h" #include "nterr.h" #include "cached_dir.h" static int check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid) { __u64 wire_mid = le64_to_cpu(shdr->MessageId); /* * Make sure that this really is an SMB, that it is a response, * and that the message ids match. */ if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) && (mid == wire_mid)) { if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) return 0; else { /* only one valid case where server sends us request */ if (shdr->Command == SMB2_OPLOCK_BREAK) return 0; else cifs_dbg(VFS, "Received Request not response\n"); } } else { /* bad signature or mid */ if (shdr->ProtocolId != SMB2_PROTO_NUMBER) cifs_dbg(VFS, "Bad protocol string signature header %x\n", le32_to_cpu(shdr->ProtocolId)); if (mid != wire_mid) cifs_dbg(VFS, "Mids do not match: %llu and %llu\n", mid, wire_mid); } cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid); return 1; } /* * The following table defines the expected "StructureSize" of SMB2 responses * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order */ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ cpu_to_le16(65), /* SMB2_SESSION_SETUP */ cpu_to_le16(9), /* SMB2_LOGOFF */ cpu_to_le16(4), /* SMB2_TREE_CONNECT */ cpu_to_le16(16), /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4), /* SMB2_CREATE */ cpu_to_le16(89), /* SMB2_CLOSE */ cpu_to_le16(60), /* SMB2_FLUSH */ cpu_to_le16(4), /* SMB2_READ */ cpu_to_le16(17), /* SMB2_WRITE */ cpu_to_le16(17), /* SMB2_LOCK */ cpu_to_le16(4), /* SMB2_IOCTL */ cpu_to_le16(49), /* BB CHECK this ... 
not listed in documentation */ /* SMB2_CANCEL */ cpu_to_le16(0), /* SMB2_ECHO */ cpu_to_le16(4), /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9), /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9), /* SMB2_QUERY_INFO */ cpu_to_le16(9), /* SMB2_SET_INFO */ cpu_to_le16(2), /* BB FIXME can also be 44 for lease break */ /* SMB2_OPLOCK_BREAK */ cpu_to_le16(24) }; #define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_hdr) + sizeof(struct smb2_negotiate_rsp)) static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len, __u32 non_ctxlen) { __u16 neg_count; __u32 nc_offset, size_of_pad_before_neg_ctxts; struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr; /* Negotiate contexts are only valid for latest dialect SMB3.11 */ neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount); if ((neg_count == 0) || (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID))) return 0; /* * if SPNEGO blob present (ie the RFC2478 GSS info which indicates * which security mechanisms the server supports) make sure that * the negotiate contexts start after it */ nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset); /* * non_ctxlen is at least shdr->StructureSize + pdu->StructureSize2 * and the latter is 1 byte bigger than the fix-sized area of the * NEGOTIATE response */ if (nc_offset + 1 < non_ctxlen) { pr_warn_once("Invalid negotiate context offset %d\n", nc_offset); return 0; } else if (nc_offset + 1 == non_ctxlen) { cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n"); size_of_pad_before_neg_ctxts = 0; } else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE + 1) /* has padding, but no SPNEGO blob */ size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1; else size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen; /* Verify that at least minimal negotiate contexts fit within frame */ if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) { pr_warn_once("negotiate context goes beyond end\n"); return 0; } cifs_dbg(FYI, "length of negcontexts %d pad %d\n", len - nc_offset, size_of_pad_before_neg_ctxts); /* length of negcontexts including pad from end of sec blob to them */ return (len - nc_offset) + size_of_pad_before_neg_ctxts; } int smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server) { struct TCP_Server_Info *pserver; struct smb2_hdr *shdr = (struct smb2_hdr *)buf; struct smb2_pdu *pdu = (struct smb2_pdu *)shdr; int hdr_size = sizeof(struct smb2_hdr); int pdu_size = sizeof(struct smb2_pdu); int command; __u32 calc_len; /* calculated length */ __u64 mid; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; /* * Add function to do table lookup of StructureSize by command * ie Validate the wct via smb2_struct_sizes table above */ if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { struct smb2_transform_hdr *thdr = (struct smb2_transform_hdr *)buf; struct cifs_ses *ses = NULL; struct cifs_ses *iter; /* decrypt frame now that it is completely read in */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) { if (iter->Suid == le64_to_cpu(thdr->SessionId)) { ses = iter; break; } } spin_unlock(&cifs_tcp_ses_lock); if (!ses) { cifs_dbg(VFS, "no decryption - session id not found\n"); return 1; } } mid = le64_to_cpu(shdr->MessageId); if (len < pdu_size) { if ((len >= hdr_size) && (shdr->Status != 0)) { pdu->StructureSize2 = 0; /* * As with SMB/CIFS, on some error cases servers may * not return wct properly */ return 0; } else { cifs_dbg(VFS, "Length less than SMB header size\n"); } return 1; } if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) { cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n", mid); return 1; } if (check_smb2_hdr(shdr, mid)) return 1; if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { cifs_dbg(VFS, "Invalid structure size %u\n", le16_to_cpu(shdr->StructureSize)); return 1; } command = le16_to_cpu(shdr->Command); if (command >= NUMBER_OF_SMB2_COMMANDS) { cifs_dbg(VFS, "Invalid SMB2 command %d\n", command); return 1; } if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) { if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 || pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) { /* error packets have 9 byte structure size */ cifs_dbg(VFS, "Invalid response size %u for command %d\n", le16_to_cpu(pdu->StructureSize2), command); return 1; } else if (command == SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0) && (le16_to_cpu(pdu->StructureSize2) != 44) && (le16_to_cpu(pdu->StructureSize2) != 36)) { /* special case for SMB2.1 lease break message */ cifs_dbg(VFS, "Invalid response size %d for oplock break\n", le16_to_cpu(pdu->StructureSize2)); return 1; } } calc_len = smb2_calc_size(buf); /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might * be 0, and not a real miscalculation */ if (command == SMB2_IOCTL_HE && calc_len == 0) return 0; if (command == SMB2_NEGOTIATE_HE) calc_len += get_neg_ctxt_len(shdr, len, calc_len); if (len != calc_len) { /* create failed on symlink */ if (command == SMB2_CREATE_HE && shdr->Status == STATUS_STOPPED_ON_SYMLINK) return 0; /* Windows 7 server returns 24 bytes more */ if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) return 0; /* server can return one byte more due to implied bcc[0] */ if (calc_len == len + 1) return 0; /* * Some windows servers (win2016) will pad also the final * PDU in a compound to 8 bytes. */ if (ALIGN(calc_len, 8) == len) return 0; /* * MacOS server pads after SMB2.1 write response with 3 bytes * of junk. Other servers match RFC1001 len to actual * SMB2/SMB3 frame length (header + smb2 response specific data) * Some windows servers also pad up to 8 bytes when compounding. 
*/ if (calc_len < len) return 0; /* Only log a message if len was really miscalculated */ if (unlikely(cifsFYI)) cifs_dbg(FYI, "Server response too short: calculated " "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n", calc_len, len, command, mid); else pr_warn("Server response too short: calculated length " "%u doesn't match read length %u (cmd=%d, mid=%llu)\n", calc_len, len, command, mid); return 1; } return 0; } /* * The size of the variable area depends on the offset and length fields * located in different fields for various SMB2 responses. SMB2 responses * with no variable length info, show an offset of zero for the offset field. */ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ true, /* SMB2_SESSION_SETUP */ true, /* SMB2_LOGOFF */ false, /* SMB2_TREE_CONNECT */ false, /* SMB2_TREE_DISCONNECT */ false, /* SMB2_CREATE */ true, /* SMB2_CLOSE */ false, /* SMB2_FLUSH */ false, /* SMB2_READ */ true, /* SMB2_WRITE */ false, /* SMB2_LOCK */ false, /* SMB2_IOCTL */ true, /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */ /* SMB2_ECHO */ false, /* SMB2_QUERY_DIRECTORY */ true, /* SMB2_CHANGE_NOTIFY */ true, /* SMB2_QUERY_INFO */ true, /* SMB2_SET_INFO */ false, /* SMB2_OPLOCK_BREAK */ false }; /* * Returns the pointer to the beginning of the data area. Length of the data * area and the offset to it (from the beginning of the smb are also returned. */ char * smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr) { *off = 0; *len = 0; /* error responses do not have data area */ if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED && (((struct smb2_err_rsp *)shdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE) return NULL; /* * Following commands have data areas so we have to get the location * of the data buffer offset and data buffer length for the particular * command. */ switch (shdr->Command) { case SMB2_NEGOTIATE: *off = le16_to_cpu( ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset); *len = le16_to_cpu( ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength); break; case SMB2_SESSION_SETUP: *off = le16_to_cpu( ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset); *len = le16_to_cpu( ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength); break; case SMB2_CREATE: *off = le32_to_cpu( ((struct smb2_create_rsp *)shdr)->CreateContextsOffset); *len = le32_to_cpu( ((struct smb2_create_rsp *)shdr)->CreateContextsLength); break; case SMB2_QUERY_INFO: *off = le16_to_cpu( ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset); *len = le32_to_cpu( ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength); break; case SMB2_READ: /* TODO: is this a bug ? 
*/ *off = ((struct smb2_read_rsp *)shdr)->DataOffset; *len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength); break; case SMB2_QUERY_DIRECTORY: *off = le16_to_cpu( ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset); *len = le32_to_cpu( ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength); break; case SMB2_IOCTL: *off = le32_to_cpu( ((struct smb2_ioctl_rsp *)shdr)->OutputOffset); *len = le32_to_cpu( ((struct smb2_ioctl_rsp *)shdr)->OutputCount); break; case SMB2_CHANGE_NOTIFY: *off = le16_to_cpu( ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset); *len = le32_to_cpu( ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength); break; default: cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command)); break; } /* * Invalid length or offset probably means data area is invalid, but * we have little choice but to ignore the data area in this case. */ if (*off > 4096) { cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off); *len = 0; *off = 0; } else if (*off < 0) { cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n", *off); *off = 0; *len = 0; } else if (*len < 0) { cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n", *len); *len = 0; } else if (*len > 128 * 1024) { cifs_dbg(VFS, "data area larger than 128K: %d\n", *len); *len = 0; } /* return pointer to beginning of data area, ie offset from SMB start */ if ((*off != 0) && (*len != 0)) return (char *)shdr + *off; else return NULL; } /* * Calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message. */ unsigned int smb2_calc_size(void *buf) { struct smb2_pdu *pdu = buf; struct smb2_hdr *shdr = &pdu->hdr; int offset; /* the offset from the beginning of SMB to data area */ int data_length; /* the length of the variable length data area */ /* Structure Size has already been checked to make sure it is 64 */ int len = le16_to_cpu(shdr->StructureSize); /* * StructureSize2, ie length of fixed parameter area has already * been checked to make sure it is the correct length. */ len += le16_to_cpu(pdu->StructureSize2); if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false) goto calc_size_exit; smb2_get_data_area_len(&offset, &data_length, shdr); cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset); if (data_length > 0) { /* * Check to make sure that data area begins after fixed area, * Note that last byte of the fixed area is part of data area * for some commands, typically those with odd StructureSize, * so we must add one to the calculation. 
*/ if (offset + 1 < len) { cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n", offset + 1, len); data_length = 0; } else { len = offset + data_length; } } calc_size_exit: cifs_dbg(FYI, "SMB2 len %d\n", len); return len; } /* Note: caller must free return buffer */ __le16 * cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb) { int len; const char *start_of_path; __le16 *to; int map_type; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) map_type = SFM_MAP_UNI_RSVD; else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) map_type = SFU_MAP_UNI_RSVD; else map_type = NO_MAP_UNI_RSVD; /* Windows doesn't allow paths beginning with \ */ if (from[0] == '\\') start_of_path = from + 1; /* SMB311 POSIX extensions paths do not include leading slash */ else if (cifs_sb_master_tlink(cifs_sb) && cifs_sb_master_tcon(cifs_sb)->posix_extensions && (from[0] == '/')) { start_of_path = from + 1; } else start_of_path = from; to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len, cifs_sb->local_nls, map_type); return to; } __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode) { __le32 lease = 0; if (CIFS_CACHE_WRITE(cinode)) lease |= SMB2_LEASE_WRITE_CACHING_LE; if (CIFS_CACHE_HANDLE(cinode)) lease |= SMB2_LEASE_HANDLE_CACHING_LE; if (CIFS_CACHE_READ(cinode)) lease |= SMB2_LEASE_READ_CACHING_LE; return lease; } struct smb2_lease_break_work { struct work_struct lease_break; struct tcon_link *tlink; __u8 lease_key[16]; __le32 lease_state; }; static void cifs_ses_oplock_break(struct work_struct *work) { struct smb2_lease_break_work *lw = container_of(work, struct smb2_lease_break_work, lease_break); int rc = 0; rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key, lw->lease_state); cifs_dbg(FYI, "Lease release rc %d\n", rc); cifs_put_tlink(lw->tlink); kfree(lw); } static void smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, __le32 new_lease_state) { struct smb2_lease_break_work *lw; lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); if (!lw) { cifs_put_tlink(tlink); return; } INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); lw->tlink = tlink; lw->lease_state = new_lease_state; memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); queue_work(cifsiod_wq, &lw->lease_break); } static bool smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { __u8 lease_state; struct cifsFileInfo *cfile; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); lease_state = le32_to_cpu(rsp->NewLeaseState); list_for_each_entry(cfile, &tcon->openFileList, tlist) { cinode = CIFS_I(d_inode(cfile->dentry)); if (memcmp(cinode->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; cifs_dbg(FYI, "found in the open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", lease_state); if (ack_req) cfile->oplock_break_cancelled = false; else cfile->oplock_break_cancelled = true; set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); cfile->oplock_level = lease_state; cifs_queue_oplock_break(cfile); return true; } return false; } static struct cifs_pending_open * smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); struct cifs_pending_open *open; struct cifs_pending_open *found = NULL; list_for_each_entry(open, &tcon->pending_opens, olist) { if 
(memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { found = open; } cifs_dbg(FYI, "found in the pending open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", lease_state); open->oplock = lease_state; } return found; } static bool smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server) { struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer; struct TCP_Server_Info *pserver; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } open = smb2_tcon_find_pending_open_lease(tcon, rsp); if (open) { __u8 lease_key[SMB2_LEASE_KEY_SIZE]; struct tcon_link *tlink; tlink = cifs_get_tlink(open->tlink); memcpy(lease_key, open->lease_key, SMB2_LEASE_KEY_SIZE); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); smb2_queue_pending_open_break(tlink, lease_key, rsp->NewLeaseState); return true; } spin_unlock(&tcon->open_file_lock); if (cached_dir_lease_break(tcon, rsp->LeaseKey)) { spin_unlock(&cifs_tcp_ses_lock); return true; } } } spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState), le32_to_cpu(rsp->hdr.Id.SyncId.TreeId), le64_to_cpu(rsp->hdr.SessionId), *((u64 *)rsp->LeaseKey), *((u64 *)&rsp->LeaseKey[8])); return false; } bool smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) { struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer; struct TCP_Server_Info *pserver; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *cinode; struct cifsFileInfo *cfile; cifs_dbg(FYI, "Checking for oplock break\n"); if (rsp->hdr.Command != SMB2_OPLOCK_BREAK) return false; if (rsp->StructureSize != smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { if (le16_to_cpu(rsp->StructureSize) == 44) return smb2_is_valid_lease_break(buffer, server); else return false; } cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel); /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { if (rsp->PersistentFid != cfile->fid.persistent_fid || rsp->VolatileFid != cfile->fid.volatile_fid) continue; cifs_dbg(FYI, "file id match, oplock break\n"); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); cinode = CIFS_I(d_inode(cfile->dentry)); spin_lock(&cfile->file_info_lock); if (!CIFS_CACHE_WRITE(cinode) && rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE) cfile->oplock_break_cancelled = true; else cfile->oplock_break_cancelled = false; set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); cfile->oplock_epoch = 0; cfile->oplock_level = rsp->OplockLevel; spin_unlock(&cfile->file_info_lock); cifs_queue_oplock_break(cfile); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } spin_unlock(&tcon->open_file_lock); } } spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "No file id matched, oplock break ignored\n"); trace_smb3_oplock_not_found(0 /* no xid */, rsp->PersistentFid, le32_to_cpu(rsp->hdr.Id.SyncId.TreeId), le64_to_cpu(rsp->hdr.SessionId)); return true; } void smb2_cancelled_close_fid(struct work_struct *work) { struct close_cancelled_open *cancelled = container_of(work, struct close_cancelled_open, work); struct cifs_tcon *tcon = cancelled->tcon; int rc; if (cancelled->mid) cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n", cancelled->mid); else cifs_tcon_dbg(VFS, "Close interrupted close\n"); rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid, cancelled->fid.volatile_fid); if (rc) cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc); cifs_put_tcon(tcon); kfree(cancelled); } /* * Caller should already has an extra reference to @tcon * This function is used to queue work to close a handle to prevent leaks * on the server. * We handle two cases. If an open was interrupted after we sent the * SMB2_CREATE to the server but before we processed the reply, and second * if a close was interrupted before we sent the SMB2_CLOSE to the server. 
*/ static int __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid, __u64 persistent_fid, __u64 volatile_fid) { struct close_cancelled_open *cancelled; cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC); if (!cancelled) return -ENOMEM; cancelled->fid.persistent_fid = persistent_fid; cancelled->fid.volatile_fid = volatile_fid; cancelled->tcon = tcon; cancelled->cmd = cmd; cancelled->mid = mid; INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false); return 0; } int smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid) { int rc; cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); if (tcon->tc_count <= 0) { struct TCP_Server_Info *server = NULL; WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative"); spin_unlock(&cifs_tcp_ses_lock); if (tcon->ses) server = tcon->ses->server; cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n", tcon->tid, persistent_fid, volatile_fid); return 0; } tcon->tc_count++; spin_unlock(&cifs_tcp_ses_lock); rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0, persistent_fid, volatile_fid); if (rc) cifs_put_tcon(tcon); return rc; } int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server) { struct smb2_hdr *hdr = mid->resp_buf; struct smb2_create_rsp *rsp = mid->resp_buf; struct cifs_tcon *tcon; int rc; if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS) return 0; tcon = smb2_find_smb_tcon(server, le64_to_cpu(hdr->SessionId), le32_to_cpu(hdr->Id.SyncId.TreeId)); if (!tcon) return -ENOENT; rc = __smb2_handle_cancelled_cmd(tcon, le16_to_cpu(hdr->Command), le64_to_cpu(hdr->MessageId), rsp->PersistentFileId, rsp->VolatileFileId); if (rc) cifs_put_tcon(tcon); return rc; } /** * smb311_update_preauth_hash - update @ses hash with the packet data in @iov * * Assumes @iov does not contain the rfc1002 length and iov[0] has the * SMB2 header. * * @ses: server session structure * @server: pointer to server info * @iov: array containing the SMB request we will send to the server * @nvec: number of array entries for the iov */ int smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server, struct kvec *iov, int nvec) { int i, rc; struct smb2_hdr *hdr; struct shash_desc *sha512 = NULL; hdr = (struct smb2_hdr *)iov[0].iov_base; /* neg prot are always taken */ if (hdr->Command == SMB2_NEGOTIATE) goto ok; /* * If we process a command which wasn't a negprot it means the * neg prot was already done, so the server dialect was set * and we can test it. Preauth requires 3.1.1 for now. 
*/ if (server->dialect != SMB311_PROT_ID) return 0; if (hdr->Command != SMB2_SESSION_SETUP) return 0; /* skip last sess setup response */ if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) && (hdr->Status == NT_STATUS_OK || (hdr->Status != cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED)))) return 0; ok: rc = smb311_crypto_shash_allocate(server); if (rc) return rc; sha512 = server->secmech.sha512; rc = crypto_shash_init(sha512); if (rc) { cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__); return rc; } rc = crypto_shash_update(sha512, ses->preauth_sha_hash, SMB2_PREAUTH_HASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__); return rc; } for (i = 0; i < nvec; i++) { rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len); if (rc) { cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__); return rc; } } rc = crypto_shash_final(sha512, ses->preauth_sha_hash); if (rc) { cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n", __func__); return rc; } return 0; }
linux-master
fs/smb/client/smb2misc.c
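smb2_check_message() above rejects a response before any further parsing unless its fixed parameter area matches smb2_rsp_struct_sizes[] for the command, with two escapes: error packets always carry the 9-byte error structure, and oplock-break responses of size 44 or 36 are really lease breaks. A standalone sketch of that table check, with the constants copied from the table above and reduced to host byte order (the kernel compares __le16 values directly):

/* Standalone sketch of the per-command StructureSize2 validation. */
#include <stdbool.h>
#include <stdint.h>

#define SMB2_ERROR_STRUCTURE_SIZE2	9
#define NUMBER_OF_SMB2_COMMANDS		19

static const uint16_t rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	65,	/* NEGOTIATE */
	9,	/* SESSION_SETUP */
	4,	/* LOGOFF */
	16,	/* TREE_CONNECT */
	4,	/* TREE_DISCONNECT */
	89,	/* CREATE */
	60,	/* CLOSE */
	4,	/* FLUSH */
	17,	/* READ */
	17,	/* WRITE */
	4,	/* LOCK */
	49,	/* IOCTL */
	0,	/* CANCEL */
	4,	/* ECHO */
	9,	/* QUERY_DIRECTORY */
	9,	/* CHANGE_NOTIFY */
	9,	/* QUERY_INFO */
	2,	/* SET_INFO */
	24,	/* OPLOCK_BREAK; lease breaks arrive as 44, or 36 on SMB2.1 */
};

static bool rsp_struct_size_ok(uint16_t command, uint32_t status,
			       uint16_t structure_size2)
{
	if (command >= NUMBER_OF_SMB2_COMMANDS)
		return false;
	if (structure_size2 == rsp_struct_sizes[command])
		return true;
	/* Error responses substitute the fixed 9-byte error structure. */
	return status != 0 && structure_size2 == SMB2_ERROR_STRUCTURE_SIZE2;
}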
// SPDX-License-Identifier: GPL-2.0 /* * Witness Service client for CIFS * * Copyright (c) 2020 Samuel Cabrero <[email protected]> */ #include <linux/kref.h> #include <net/genetlink.h> #include <uapi/linux/cifs/cifs_netlink.h> #include "cifs_swn.h" #include "cifsglob.h" #include "cifsproto.h" #include "fscache.h" #include "cifs_debug.h" #include "netlink.h" static DEFINE_IDR(cifs_swnreg_idr); static DEFINE_MUTEX(cifs_swnreg_idr_mutex); struct cifs_swn_reg { int id; struct kref ref_count; const char *net_name; const char *share_name; bool net_name_notify; bool share_name_notify; bool ip_notify; struct cifs_tcon *tcon; }; static int cifs_swn_auth_info_krb(struct cifs_tcon *tcon, struct sk_buff *skb) { int ret; ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_KRB_AUTH); if (ret < 0) return ret; return 0; } static int cifs_swn_auth_info_ntlm(struct cifs_tcon *tcon, struct sk_buff *skb) { int ret; if (tcon->ses->user_name != NULL) { ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_USER_NAME, tcon->ses->user_name); if (ret < 0) return ret; } if (tcon->ses->password != NULL) { ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_PASSWORD, tcon->ses->password); if (ret < 0) return ret; } if (tcon->ses->domainName != NULL) { ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_DOMAIN_NAME, tcon->ses->domainName); if (ret < 0) return ret; } return 0; } /* * Sends a register message to the userspace daemon based on the registration. * The authentication information to connect to the witness service is bundled * into the message. */ static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg) { struct sk_buff *skb; struct genlmsghdr *hdr; enum securityEnum authtype; struct sockaddr_storage *addr; int ret; skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (skb == NULL) { ret = -ENOMEM; goto fail; } hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_REGISTER); if (hdr == NULL) { ret = -ENOMEM; goto nlmsg_fail; } ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id); if (ret < 0) goto nlmsg_fail; ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name); if (ret < 0) goto nlmsg_fail; ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name); if (ret < 0) goto nlmsg_fail; /* * If there is an address stored use it instead of the server address, because we are * in the process of reconnecting to it after a share has been moved or we have been * told to switch to it (client move message). In these cases we unregister from the * server address and register to the new address when we receive the notification. 
*/ if (swnreg->tcon->ses->server->use_swn_dstaddr) addr = &swnreg->tcon->ses->server->swn_dstaddr; else addr = &swnreg->tcon->ses->server->dstaddr; ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage), addr); if (ret < 0) goto nlmsg_fail; if (swnreg->net_name_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY); if (ret < 0) goto nlmsg_fail; } if (swnreg->share_name_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY); if (ret < 0) goto nlmsg_fail; } if (swnreg->ip_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY); if (ret < 0) goto nlmsg_fail; } authtype = cifs_select_sectype(swnreg->tcon->ses->server, swnreg->tcon->ses->sectype); switch (authtype) { case Kerberos: ret = cifs_swn_auth_info_krb(swnreg->tcon, skb); if (ret < 0) { cifs_dbg(VFS, "%s: Failed to get kerberos auth info: %d\n", __func__, ret); goto nlmsg_fail; } break; case NTLMv2: case RawNTLMSSP: ret = cifs_swn_auth_info_ntlm(swnreg->tcon, skb); if (ret < 0) { cifs_dbg(VFS, "%s: Failed to get NTLM auth info: %d\n", __func__, ret); goto nlmsg_fail; } break; default: cifs_dbg(VFS, "%s: secType %d not supported!\n", __func__, authtype); ret = -EINVAL; goto nlmsg_fail; } genlmsg_end(skb, hdr); genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC); cifs_dbg(FYI, "%s: Message to register for network name %s with id %d sent\n", __func__, swnreg->net_name, swnreg->id); return 0; nlmsg_fail: genlmsg_cancel(skb, hdr); nlmsg_free(skb); fail: return ret; } /* * Sends an uregister message to the userspace daemon based on the registration */ static int cifs_swn_send_unregister_message(struct cifs_swn_reg *swnreg) { struct sk_buff *skb; struct genlmsghdr *hdr; int ret; skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (skb == NULL) return -ENOMEM; hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_UNREGISTER); if (hdr == NULL) { ret = -ENOMEM; goto nlmsg_fail; } ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id); if (ret < 0) goto nlmsg_fail; ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name); if (ret < 0) goto nlmsg_fail; ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name); if (ret < 0) goto nlmsg_fail; ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage), &swnreg->tcon->ses->server->dstaddr); if (ret < 0) goto nlmsg_fail; if (swnreg->net_name_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY); if (ret < 0) goto nlmsg_fail; } if (swnreg->share_name_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY); if (ret < 0) goto nlmsg_fail; } if (swnreg->ip_notify) { ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY); if (ret < 0) goto nlmsg_fail; } genlmsg_end(skb, hdr); genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC); cifs_dbg(FYI, "%s: Message to unregister for network name %s with id %d sent\n", __func__, swnreg->net_name, swnreg->id); return 0; nlmsg_fail: genlmsg_cancel(skb, hdr); nlmsg_free(skb); return ret; } /* * Try to find a matching registration for the tcon's server name and share name. * Calls to this function must be protected by cifs_swnreg_idr_mutex. 
* TODO Try to avoid memory allocations */ static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon) { struct cifs_swn_reg *swnreg; int id; const char *share_name; const char *net_name; net_name = extract_hostname(tcon->tree_name); if (IS_ERR(net_name)) { int ret; ret = PTR_ERR(net_name); cifs_dbg(VFS, "%s: failed to extract host name from target '%s': %d\n", __func__, tcon->tree_name, ret); return ERR_PTR(-EINVAL); } share_name = extract_sharename(tcon->tree_name); if (IS_ERR(share_name)) { int ret; ret = PTR_ERR(share_name); cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n", __func__, tcon->tree_name, ret); kfree(net_name); return ERR_PTR(-EINVAL); } idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) { if (strcasecmp(swnreg->net_name, net_name) != 0 || strcasecmp(swnreg->share_name, share_name) != 0) { continue; } cifs_dbg(FYI, "Existing swn registration for %s:%s found\n", swnreg->net_name, swnreg->share_name); kfree(net_name); kfree(share_name); return swnreg; } kfree(net_name); kfree(share_name); return ERR_PTR(-EEXIST); } /* * Get a registration for the tcon's server and share name, allocating a new one if it does not * exists */ static struct cifs_swn_reg *cifs_get_swn_reg(struct cifs_tcon *tcon) { struct cifs_swn_reg *reg = NULL; int ret; mutex_lock(&cifs_swnreg_idr_mutex); /* Check if we are already registered for this network and share names */ reg = cifs_find_swn_reg(tcon); if (!IS_ERR(reg)) { kref_get(&reg->ref_count); mutex_unlock(&cifs_swnreg_idr_mutex); return reg; } else if (PTR_ERR(reg) != -EEXIST) { mutex_unlock(&cifs_swnreg_idr_mutex); return reg; } reg = kmalloc(sizeof(struct cifs_swn_reg), GFP_ATOMIC); if (reg == NULL) { mutex_unlock(&cifs_swnreg_idr_mutex); return ERR_PTR(-ENOMEM); } kref_init(&reg->ref_count); reg->id = idr_alloc(&cifs_swnreg_idr, reg, 1, 0, GFP_ATOMIC); if (reg->id < 0) { cifs_dbg(FYI, "%s: failed to allocate registration id\n", __func__); ret = reg->id; goto fail; } reg->net_name = extract_hostname(tcon->tree_name); if (IS_ERR(reg->net_name)) { ret = PTR_ERR(reg->net_name); cifs_dbg(VFS, "%s: failed to extract host name from target: %d\n", __func__, ret); goto fail_idr; } reg->share_name = extract_sharename(tcon->tree_name); if (IS_ERR(reg->share_name)) { ret = PTR_ERR(reg->share_name); cifs_dbg(VFS, "%s: failed to extract share name from target: %d\n", __func__, ret); goto fail_net_name; } reg->net_name_notify = true; reg->share_name_notify = true; reg->ip_notify = (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT); reg->tcon = tcon; mutex_unlock(&cifs_swnreg_idr_mutex); return reg; fail_net_name: kfree(reg->net_name); fail_idr: idr_remove(&cifs_swnreg_idr, reg->id); fail: kfree(reg); mutex_unlock(&cifs_swnreg_idr_mutex); return ERR_PTR(ret); } static void cifs_swn_reg_release(struct kref *ref) { struct cifs_swn_reg *swnreg = container_of(ref, struct cifs_swn_reg, ref_count); int ret; ret = cifs_swn_send_unregister_message(swnreg); if (ret < 0) cifs_dbg(VFS, "%s: Failed to send unregister message: %d\n", __func__, ret); idr_remove(&cifs_swnreg_idr, swnreg->id); kfree(swnreg->net_name); kfree(swnreg->share_name); kfree(swnreg); } static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg) { mutex_lock(&cifs_swnreg_idr_mutex); kref_put(&swnreg->ref_count, cifs_swn_reg_release); mutex_unlock(&cifs_swnreg_idr_mutex); } static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state) { switch (state) { case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE: cifs_dbg(FYI, "%s: resource name '%s' become 
unavailable\n", __func__, name); cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true); break; case CIFS_SWN_RESOURCE_STATE_AVAILABLE: cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name); cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true); break; case CIFS_SWN_RESOURCE_STATE_UNKNOWN: cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name); break; } return 0; } static bool cifs_sockaddr_equal(struct sockaddr_storage *addr1, struct sockaddr_storage *addr2) { if (addr1->ss_family != addr2->ss_family) return false; if (addr1->ss_family == AF_INET) { return (memcmp(&((const struct sockaddr_in *)addr1)->sin_addr, &((const struct sockaddr_in *)addr2)->sin_addr, sizeof(struct in_addr)) == 0); } if (addr1->ss_family == AF_INET6) { return (memcmp(&((const struct sockaddr_in6 *)addr1)->sin6_addr, &((const struct sockaddr_in6 *)addr2)->sin6_addr, sizeof(struct in6_addr)) == 0); } return false; } static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new, const struct sockaddr_storage *old, struct sockaddr_storage *dst) { __be16 port = cpu_to_be16(CIFS_PORT); if (old->ss_family == AF_INET) { struct sockaddr_in *ipv4 = (struct sockaddr_in *)old; port = ipv4->sin_port; } else if (old->ss_family == AF_INET6) { struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old; port = ipv6->sin6_port; } if (new->ss_family == AF_INET) { struct sockaddr_in *ipv4 = (struct sockaddr_in *)new; ipv4->sin_port = port; } else if (new->ss_family == AF_INET6) { struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new; ipv6->sin6_port = port; } *dst = *new; return 0; } static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *addr) { int ret = 0; /* Store the reconnect address */ cifs_server_lock(tcon->ses->server); if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr)) goto unlock; ret = cifs_swn_store_swn_addr(addr, &tcon->ses->server->dstaddr, &tcon->ses->server->swn_dstaddr); if (ret < 0) { cifs_dbg(VFS, "%s: failed to store address: %d\n", __func__, ret); goto unlock; } tcon->ses->server->use_swn_dstaddr = true; /* * Unregister to stop receiving notifications for the old IP address. */ ret = cifs_swn_unregister(tcon); if (ret < 0) { cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n", __func__, ret); goto unlock; } /* * And register to receive notifications for the new IP address now that we have * stored the new address. 
*/ ret = cifs_swn_register(tcon); if (ret < 0) { cifs_dbg(VFS, "%s: Failed to register for witness notifications: %d\n", __func__, ret); goto unlock; } cifs_signal_cifsd_for_reconnect(tcon->ses->server, false); unlock: cifs_server_unlock(tcon->ses->server); return ret; } static int cifs_swn_client_move(struct cifs_swn_reg *swnreg, struct sockaddr_storage *addr) { struct sockaddr_in *ipv4 = (struct sockaddr_in *)addr; struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)addr; if (addr->ss_family == AF_INET) cifs_dbg(FYI, "%s: move to %pI4\n", __func__, &ipv4->sin_addr); else if (addr->ss_family == AF_INET6) cifs_dbg(FYI, "%s: move to %pI6\n", __func__, &ipv6->sin6_addr); return cifs_swn_reconnect(swnreg->tcon, addr); } int cifs_swn_notify(struct sk_buff *skb, struct genl_info *info) { struct cifs_swn_reg *swnreg; char name[256]; int type; if (info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]) { int swnreg_id; swnreg_id = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]); mutex_lock(&cifs_swnreg_idr_mutex); swnreg = idr_find(&cifs_swnreg_idr, swnreg_id); mutex_unlock(&cifs_swnreg_idr_mutex); if (swnreg == NULL) { cifs_dbg(FYI, "%s: registration id %d not found\n", __func__, swnreg_id); return -EINVAL; } } else { cifs_dbg(FYI, "%s: missing registration id attribute\n", __func__); return -EINVAL; } if (info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]) { type = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]); } else { cifs_dbg(FYI, "%s: missing notification type attribute\n", __func__); return -EINVAL; } switch (type) { case CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE: { int state; if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME]) { nla_strscpy(name, info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME], sizeof(name)); } else { cifs_dbg(FYI, "%s: missing resource name attribute\n", __func__); return -EINVAL; } if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]) { state = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]); } else { cifs_dbg(FYI, "%s: missing resource state attribute\n", __func__); return -EINVAL; } return cifs_swn_resource_state_changed(swnreg, name, state); } case CIFS_SWN_NOTIFICATION_CLIENT_MOVE: { struct sockaddr_storage addr; if (info->attrs[CIFS_GENL_ATTR_SWN_IP]) { nla_memcpy(&addr, info->attrs[CIFS_GENL_ATTR_SWN_IP], sizeof(addr)); } else { cifs_dbg(FYI, "%s: missing IP address attribute\n", __func__); return -EINVAL; } return cifs_swn_client_move(swnreg, &addr); } default: cifs_dbg(FYI, "%s: unknown notification type %d\n", __func__, type); break; } return 0; } int cifs_swn_register(struct cifs_tcon *tcon) { struct cifs_swn_reg *swnreg; int ret; swnreg = cifs_get_swn_reg(tcon); if (IS_ERR(swnreg)) return PTR_ERR(swnreg); ret = cifs_swn_send_register_message(swnreg); if (ret < 0) { cifs_dbg(VFS, "%s: Failed to send swn register message: %d\n", __func__, ret); /* Do not put the swnreg or return error, the echo task will retry */ } return 0; } int cifs_swn_unregister(struct cifs_tcon *tcon) { struct cifs_swn_reg *swnreg; mutex_lock(&cifs_swnreg_idr_mutex); swnreg = cifs_find_swn_reg(tcon); if (IS_ERR(swnreg)) { mutex_unlock(&cifs_swnreg_idr_mutex); return PTR_ERR(swnreg); } mutex_unlock(&cifs_swnreg_idr_mutex); cifs_put_swn_reg(swnreg); return 0; } void cifs_swn_dump(struct seq_file *m) { struct cifs_swn_reg *swnreg; struct sockaddr_in *sa; struct sockaddr_in6 *sa6; int id; seq_puts(m, "Witness registrations:"); mutex_lock(&cifs_swnreg_idr_mutex); idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) { seq_printf(m, "\nId: %u Refs: %u Network name: '%s'%s 
Share name: '%s'%s Ip address: ", id, kref_read(&swnreg->ref_count), swnreg->net_name, swnreg->net_name_notify ? "(y)" : "(n)", swnreg->share_name, swnreg->share_name_notify ? "(y)" : "(n)"); switch (swnreg->tcon->ses->server->dstaddr.ss_family) { case AF_INET: sa = (struct sockaddr_in *) &swnreg->tcon->ses->server->dstaddr; seq_printf(m, "%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: sa6 = (struct sockaddr_in6 *) &swnreg->tcon->ses->server->dstaddr; seq_printf(m, "%pI6", &sa6->sin6_addr.s6_addr); if (sa6->sin6_scope_id) seq_printf(m, "%%%u", sa6->sin6_scope_id); break; default: seq_puts(m, "(unknown)"); } seq_printf(m, "%s", swnreg->ip_notify ? "(y)" : "(n)"); } mutex_unlock(&cifs_swnreg_idr_mutex); seq_puts(m, "\n"); } void cifs_swn_check(void) { struct cifs_swn_reg *swnreg; int id; int ret; mutex_lock(&cifs_swnreg_idr_mutex); idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) { ret = cifs_swn_send_register_message(swnreg); if (ret < 0) cifs_dbg(FYI, "%s: Failed to send register message: %d\n", __func__, ret); } mutex_unlock(&cifs_swnreg_idr_mutex); }
linux-master
fs/smb/client/cifs_swn.c
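A witness notification carries only the new IP address, so before cifs_swn_reconnect() stores it, cifs_swn_store_swn_addr() copies the port from the current connection's address into the new one. A userspace sketch of that step follows; unlike the kernel helper, which writes the port into the caller's buffer before assigning, this version goes through a temporary, and 445 (the standard SMB port, CIFS_PORT in the kernel) is the fallback when the old address family is unrecognized.

/* Userspace sketch of the port-preserving address move. */
#include <netinet/in.h>
#include <sys/socket.h>

static void store_swn_addr(const struct sockaddr_storage *new_addr,
			   const struct sockaddr_storage *old_addr,
			   struct sockaddr_storage *dst)
{
	struct sockaddr_storage tmp = *new_addr;
	in_port_t port = htons(445);	/* default SMB port */

	/* Keep the port we are already connected to. */
	if (old_addr->ss_family == AF_INET)
		port = ((const struct sockaddr_in *)old_addr)->sin_port;
	else if (old_addr->ss_family == AF_INET6)
		port = ((const struct sockaddr_in6 *)old_addr)->sin6_port;

	/* Graft it onto the witness-supplied address. */
	if (tmp.ss_family == AF_INET)
		((struct sockaddr_in *)&tmp)->sin_port = port;
	else if (tmp.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&tmp)->sin6_port = port;

	*dst = tmp;
}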
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/namei.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "smb2proto.h" #include "cifs_ioctl.h" /* * M-F Symlink Functions - Begin */ #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1) #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1)) #define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1)) #define CIFS_MF_SYMLINK_LINK_MAXLEN (1024) #define CIFS_MF_SYMLINK_FILE_SIZE \ (CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN) #define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n" #define CIFS_MF_SYMLINK_MD5_FORMAT "%16phN\n" #define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) md5_hash static int symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) { int rc; struct shash_desc *md5 = NULL; rc = cifs_alloc_hash("md5", &md5); if (rc) goto symlink_hash_err; rc = crypto_shash_init(md5); if (rc) { cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__); goto symlink_hash_err; } rc = crypto_shash_update(md5, link_str, link_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__); goto symlink_hash_err; } rc = crypto_shash_final(md5, md5_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); symlink_hash_err: cifs_free_hash(&md5); return rc; } static int parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len, char **_link_str) { int rc; unsigned int link_len; const char *md5_str1; const char *link_str; u8 md5_hash[16]; char md5_str2[34]; if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE) return -EINVAL; md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET]; link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET]; rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len); if (rc != 1) return -EINVAL; if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) return -EINVAL; rc = symlink_hash(link_len, link_str, md5_hash); if (rc) { cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc); return rc; } scnprintf(md5_str2, sizeof(md5_str2), CIFS_MF_SYMLINK_MD5_FORMAT, CIFS_MF_SYMLINK_MD5_ARGS(md5_hash)); if (strncmp(md5_str1, md5_str2, 17) != 0) return -EINVAL; if (_link_str) { *_link_str = kstrndup(link_str, link_len, GFP_KERNEL); if (!*_link_str) return -ENOMEM; } *_link_len = link_len; return 0; } static int format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str) { int rc; unsigned int link_len; unsigned int ofs; u8 md5_hash[16]; if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE) return -EINVAL; link_len = strlen(link_str); if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) return -ENAMETOOLONG; rc = symlink_hash(link_len, link_str, md5_hash); if (rc) { cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc); return rc; } scnprintf(buf, buf_len, CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT, link_len, CIFS_MF_SYMLINK_MD5_ARGS(md5_hash)); ofs = CIFS_MF_SYMLINK_LINK_OFFSET; memcpy(buf + ofs, link_str, link_len); ofs += link_len; if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) { buf[ofs] = '\n'; ofs++; } while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) { buf[ofs] = ' '; ofs++; } return 0; } bool couldbe_mf_symlink(const struct cifs_fattr *fattr) { if (!S_ISREG(fattr->cf_mode)) /* it's not a symlink */ return false; if (fattr->cf_eof != CIFS_MF_SYMLINK_FILE_SIZE) /* it's not 
a symlink */ return false; return true; } static int create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *fromName, const char *toName) { int rc; u8 *buf; unsigned int bytes_written = 0; buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; rc = format_mf_symlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName); if (rc) goto out; if (tcon->ses->server->ops->create_mf_symlink) rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb, fromName, buf, &bytes_written); else rc = -EOPNOTSUPP; if (rc) goto out; if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE) rc = -EIO; out: kfree(buf); return rc; } int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, const unsigned char *path) { int rc; u8 *buf = NULL; unsigned int link_len = 0; unsigned int bytes_read = 0; char *symlink = NULL; if (!couldbe_mf_symlink(fattr)) /* it's not a symlink */ return 0; buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; if (tcon->ses->server->ops->query_mf_symlink) rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon, cifs_sb, path, buf, &bytes_read); else rc = -ENOSYS; if (rc) goto out; if (bytes_read == 0) /* not a symlink */ goto out; rc = parse_mf_symlink(buf, bytes_read, &link_len, &symlink); if (rc == -EINVAL) { /* it's not a symlink */ rc = 0; goto out; } if (rc != 0) goto out; /* it is a symlink */ fattr->cf_eof = link_len; fattr->cf_mode &= ~S_IFMT; fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; fattr->cf_dtype = DT_LNK; fattr->cf_symlink_target = symlink; out: kfree(buf); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * SMB 1.0 Protocol specific functions */ int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_read) { int rc; int oplock = 0; struct cifs_fid fid; struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; int buf_type = CIFS_NO_BUFFER; FILE_ALL_INFO file_info; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_READ, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, &file_info); if (rc) return rc; if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { rc = -ENOENT; /* it's not a symlink */ goto out; } io_parms.netfid = fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); out: CIFSSMBClose(xid, tcon, fid.netfid); return rc; } int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_written) { int rc; int oplock = 0; struct cifs_fid fid; struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_WRITE, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_CREATE, .path = path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc) return rc; io_parms.netfid = fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf); 
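/*
 * Close the fid whether or not the write succeeded; the caller,
 * create_mf_symlink(), turns a short write into -EIO by checking
 * bytes_written against CIFS_MF_SYMLINK_FILE_SIZE.
 */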
CIFSSMBClose(xid, tcon, fid.netfid); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * SMB 2.1/SMB3 Protocol specific functions */ int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_read) { int rc; struct cifs_fid fid; struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; int buf_type = CIFS_NO_BUFFER; __le16 *utf16_path; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct smb2_file_all_info *pfile_info = NULL; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .path = path, .desired_access = GENERIC_READ, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .fid = &fid, }; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (utf16_path == NULL) return -ENOMEM; pfile_info = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, GFP_KERNEL); if (pfile_info == NULL) { kfree(utf16_path); return -ENOMEM; } rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL, NULL, NULL); if (rc) goto qmf_out_open_fail; if (pfile_info->EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { /* it's not a symlink */ rc = -ENOENT; /* Is there a better rc to return? */ goto qmf_out; } io_parms.netfid = fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; io_parms.persistent_fid = fid.persistent_fid; io_parms.volatile_fid = fid.volatile_fid; rc = SMB2_read(xid, &io_parms, pbytes_read, &pbuf, &buf_type); qmf_out: SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); qmf_out_open_fail: kfree(utf16_path); kfree(pfile_info); return rc; } int smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_written) { int rc; struct cifs_fid fid; struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; __le16 *utf16_path; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct kvec iov[2]; cifs_dbg(FYI, "%s: path: %s\n", __func__, path); utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .path = path, .desired_access = GENERIC_WRITE, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_CREATE, .fid = &fid, .mode = 0644, }; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); if (rc) { kfree(utf16_path); return rc; } io_parms.netfid = fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; io_parms.persistent_fid = fid.persistent_fid; io_parms.volatile_fid = fid.volatile_fid; /* iov[0] is reserved for smb header */ iov[1].iov_base = pbuf; iov[1].iov_len = CIFS_MF_SYMLINK_FILE_SIZE; rc = SMB2_write(xid, &io_parms, pbytes_written, iov, 1); /* Make sure we wrote all of the symlink data */ if ((rc == 0) && (*pbytes_written != CIFS_MF_SYMLINK_FILE_SIZE)) rc = -EIO; SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); kfree(utf16_path); return rc; } /* * M-F Symlink Functions - End */ int cifs_hardlink(struct dentry *old_file, struct inode *inode, struct dentry *direntry) { int rc = -EACCES; unsigned int xid; const char *from_name, *to_name; void *page1, *page2; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsInodeInfo 
*cifsInode; if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); page1 = alloc_dentry_path(); page2 = alloc_dentry_path(); from_name = build_path_from_dentry(old_file, page1); if (IS_ERR(from_name)) { rc = PTR_ERR(from_name); goto cifs_hl_exit; } to_name = build_path_from_dentry(direntry, page2); if (IS_ERR(to_name)) { rc = PTR_ERR(to_name); goto cifs_hl_exit; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name, cifs_sb->local_nls, cifs_remap(cifs_sb)); else { #else { #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ server = tcon->ses->server; if (!server->ops->create_hardlink) { rc = -ENOSYS; goto cifs_hl_exit; } rc = server->ops->create_hardlink(xid, tcon, from_name, to_name, cifs_sb); if ((rc == -EIO) || (rc == -EINVAL)) rc = -EOPNOTSUPP; } d_drop(direntry); /* force new lookup from server of target */ /* * if the source file is cached (oplocked), revalidate will not go to the * server until the file is closed or the oplock is broken, so update * nlink locally */ if (d_really_is_positive(old_file)) { cifsInode = CIFS_I(d_inode(old_file)); if (rc == 0) { spin_lock(&d_inode(old_file)->i_lock); inc_nlink(d_inode(old_file)); spin_unlock(&d_inode(old_file)->i_lock); /* * parent dir timestamps will update from srv within a * second; would it really be worth it to set the parent * dir cifs inode time to zero to force revalidate * (faster) for it too? */ } /* * if not oplocked, this will force a revalidate to get info on the * source file from the server. Note that Samba servers prior to 4.2 * have a bug: they do not update the src file ctime on hardlinks, but * Windows servers handle it properly */ cifsInode->time = 0; /* * Will update parent dir timestamps from srv within a second. * Would it really be worth it to set the parent dir (cifs * inode) time field to zero to force revalidate on parent * directory faster, i.e. * * CIFS_I(inode)->time = 0; */ } cifs_hl_exit: free_dentry_path(page1); free_dentry_path(page2); free_xid(xid); cifs_put_tlink(tlink); return rc; } int cifs_symlink(struct mnt_idmap *idmap, struct inode *inode, struct dentry *direntry, const char *symname) { int rc = -EOPNOTSUPP; unsigned int xid; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; struct inode *newinode = NULL; if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; page = alloc_dentry_path(); if (!page) return -ENOMEM; xid = get_xid(); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto symlink_exit; } pTcon = tlink_tcon(tlink); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto symlink_exit; } cifs_dbg(FYI, "Full path: %s\n", full_path); cifs_dbg(FYI, "symname is %s\n", symname); /* BB what if DFS and this volume is on different share?
BB */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (pTcon->unix_ext) rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, cifs_sb->local_nls, cifs_remap(cifs_sb)); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* else rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, cifs_sb_target->local_nls); */ if (rc == 0) { if (pTcon->posix_extensions) rc = smb311_posix_get_inode_info(&newinode, full_path, inode->i_sb, xid); else if (pTcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&newinode, full_path, NULL, inode->i_sb, xid, NULL); if (rc != 0) { cifs_dbg(FYI, "Create symlink ok, getinodeinfo fail rc = %d\n", rc); } else { d_instantiate(direntry, newinode); } } symlink_exit: free_dentry_path(page); cifs_put_tlink(tlink); free_xid(xid); return rc; }
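/*
 * Illustrative sketch (not part of the driver): the Minshall-French ("M-F")
 * symlink layout that create_mf_symlink()/query_mf_symlink() above read and
 * write. The link target is stored in an ordinary file of exactly
 * CIFS_MF_SYMLINK_FILE_SIZE (1067) bytes: the "XSym" magic, a 4-digit
 * decimal target length, the 32-hex-digit MD5 of the target, then the
 * target path. In this userspace sketch the MD5 digest is passed in
 * precomputed as a hex string (a simplification; the driver hashes the
 * target itself) and the padding byte value is left unspecified.
 */
#include <stdio.h>
#include <string.h>

#define MF_SYMLINK_FILE_SIZE 1067	/* mirrors CIFS_MF_SYMLINK_FILE_SIZE */
#define MF_SYMLINK_LINK_MAXLEN 1024

/* Fill buf (MF_SYMLINK_FILE_SIZE bytes) with an M-F symlink blob. */
static int format_mf_symlink_sketch(unsigned char *buf, const char *target,
				    const char *md5_hex /* 32 hex chars */)
{
	size_t len = strlen(target);
	int hdr;

	if (len >= MF_SYMLINK_LINK_MAXLEN || strlen(md5_hex) != 32)
		return -1;
	/* real files are padded out to the full fixed size */
	memset(buf, 0, MF_SYMLINK_FILE_SIZE);
	/* header is always 43 bytes: "XSym\n" + "%04u\n" + 32 hex + "\n" */
	hdr = snprintf((char *)buf, MF_SYMLINK_FILE_SIZE, "XSym\n%04zu\n%s\n",
		       len, md5_hex);
	memcpy(buf + hdr, target, len);
	buf[hdr + len] = '\n';
	return 0;
}
/* e.g. format_mf_symlink_sketch(buf, "target.txt", "<32-hex-digit digest>") */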
linux-master
fs/smb/client/link.c
// SPDX-License-Identifier: GPL-2.0 /* * SMB2 version specific operations * * Copyright (c) 2012, Jeff Layton <[email protected]> */ #include <linux/pagemap.h> #include <linux/vfs.h> #include <linux/falloc.h> #include <linux/scatterlist.h> #include <linux/uuid.h> #include <linux/sort.h> #include <crypto/aead.h> #include <linux/fiemap.h> #include <uapi/linux/magic.h> #include "cifsfs.h" #include "cifsglob.h" #include "smb2pdu.h" #include "smb2proto.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_unicode.h" #include "smb2status.h" #include "smb2glob.h" #include "cifs_ioctl.h" #include "smbdirect.h" #include "fscache.h" #include "fs_context.h" #include "cached_dir.h" /* Change credits for different ops and return the total number of credits */ static int change_conf(struct TCP_Server_Info *server) { server->credits += server->echo_credits + server->oplock_credits; if (server->credits > server->max_credits) server->credits = server->max_credits; server->oplock_credits = server->echo_credits = 0; switch (server->credits) { case 0: return 0; case 1: server->echoes = false; server->oplocks = false; break; case 2: server->echoes = true; server->oplocks = false; server->echo_credits = 1; break; default: server->echoes = true; if (enable_oplocks) { server->oplocks = true; server->oplock_credits = 1; } else server->oplocks = false; server->echo_credits = 1; } server->credits -= server->echo_credits + server->oplock_credits; return server->credits + server->echo_credits + server->oplock_credits; } static void smb2_add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits, const int optype) { int *val, rc = -1; int scredits, in_flight; unsigned int add = credits->value; unsigned int instance = credits->instance; bool reconnect_detected = false; bool reconnect_with_invalid_credits = false; spin_lock(&server->req_lock); val = server->ops->get_credits_field(server, optype); /* eg found case where write overlapping reconnect messed up credits */ if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0)) reconnect_with_invalid_credits = true; if ((instance == 0) || (instance == server->reconnect_instance)) *val += add; else reconnect_detected = true; if (*val > 65000) { *val = 65000; /* Don't get near 64K credits, avoid srv bugs */ pr_warn_once("server overflowed SMB3 credits\n"); trace_smb3_overflow_credits(server->CurrentMid, server->conn_id, server->hostname, *val, add, server->in_flight); } WARN_ON_ONCE(server->in_flight == 0); server->in_flight--; if (server->in_flight == 0 && ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) && ((optype & CIFS_OP_MASK) != CIFS_SESS_OP)) rc = change_conf(server); /* * Sometimes server returns 0 credits on oplock break ack - we need to * rebalance credits in this case. 
*/ else if (server->in_flight > 0 && server->oplock_credits == 0 && server->oplocks) { if (server->credits > 1) { server->credits--; server->oplock_credits++; } } else if ((server->in_flight > 0) && (server->oplock_credits > 3) && ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP)) /* if we now have too many oplock credits, rebalance so we don't starve normal ops */ change_conf(server); scredits = *val; in_flight = server->in_flight; spin_unlock(&server->req_lock); wake_up(&server->request_q); if (reconnect_detected) { trace_smb3_reconnect_detected(server->CurrentMid, server->conn_id, server->hostname, scredits, add, in_flight); cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n", add, instance); } if (reconnect_with_invalid_credits) { trace_smb3_reconnect_with_invalid_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, add, in_flight); cifs_dbg(FYI, "Negotiate operation when server credits are non-zero. Optype: %d, server credits: %d, credits added: %d\n", optype, scredits, add); } spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect || server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return; } spin_unlock(&server->srv_lock); switch (rc) { case -1: /* change_conf hasn't been executed */ break; case 0: cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n"); break; case 1: cifs_server_dbg(VFS, "disabling echoes and oplocks\n"); break; case 2: cifs_dbg(FYI, "disabling oplocks\n"); break; default: /* change_conf rebalanced credits for different types */ break; } trace_smb3_add_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, add, in_flight); cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits); } static void smb2_set_credits(struct TCP_Server_Info *server, const int val) { int scredits, in_flight; spin_lock(&server->req_lock); server->credits = val; if (val == 1) { server->reconnect_instance++; /* * ChannelSequence is updated in the primary channel for all channels * so that it is consistent across SMB3 requests sent on any channel.
See MS-SMB2 3.2.4.1 and 3.2.7.1 */ if (SERVER_IS_CHAN(server)) server->primary_server->channel_sequence_num++; else server->channel_sequence_num++; } scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_set_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, val, in_flight); cifs_dbg(FYI, "%s: set %u credits\n", __func__, val); /* don't log while holding the lock */ if (val == 1) cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n"); } static int * smb2_get_credits_field(struct TCP_Server_Info *server, const int optype) { switch (optype) { case CIFS_ECHO_OP: return &server->echo_credits; case CIFS_OBREAK_OP: return &server->oplock_credits; default: return &server->credits; } } static unsigned int smb2_get_credits(struct mid_q_entry *mid) { return mid->credits_received; } static int smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, unsigned int *num, struct cifs_credits *credits) { int rc = 0; unsigned int scredits, in_flight; spin_lock(&server->req_lock); while (1) { spin_unlock(&server->req_lock); spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } spin_unlock(&server->srv_lock); spin_lock(&server->req_lock); if (server->credits <= 0) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); rc = wait_event_killable(server->request_q, has_credits(server, &server->credits, 1)); cifs_num_waiters_dec(server); if (rc) return rc; spin_lock(&server->req_lock); } else { scredits = server->credits; /* can deadlock with reopen */ if (scredits <= 8) { *num = SMB2_MAX_BUFFER_SIZE; credits->value = 0; credits->instance = 0; break; } /* leave some credits for reopen and other ops */ scredits -= 8; *num = min_t(unsigned int, size, scredits * SMB2_MAX_BUFFER_SIZE); credits->value = DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE); credits->instance = server->reconnect_instance; server->credits -= credits->value; server->in_flight++; if (server->in_flight > server->max_in_flight) server->max_in_flight = server->in_flight; break; } } scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_wait_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, -(credits->value), in_flight); cifs_dbg(FYI, "%s: removed %u credits total=%d\n", __func__, credits->value, scredits); return rc; } static int smb2_adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits, const unsigned int payload_size) { int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE); int scredits, in_flight; if (!credits->value || credits->value == new_val) return 0; if (credits->value < new_val) { trace_smb3_too_many_credits(server->CurrentMid, server->conn_id, server->hostname, 0, credits->value - new_val, 0); cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)", credits->value, new_val); return -EOPNOTSUPP; } spin_lock(&server->req_lock); if (server->reconnect_instance != credits->instance) { scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); trace_smb3_reconnect_detected(server->CurrentMid, server->conn_id, server->hostname, scredits, credits->value - new_val, in_flight); cifs_server_dbg(VFS, "trying to return %d credits to old session\n", credits->value - new_val); return -EAGAIN; } server->credits += credits->value - new_val; scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); 
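/*
 * For intuition: MTU credits scale with payload size in units of
 * SMB2_MAX_BUFFER_SIZE (64KiB), so a request that shrank from 1MiB to
 * 64KiB before sending still holds DIV_ROUND_UP(1048576, 65536) = 16
 * credits but now needs only 1; the surplus 15 were just returned to
 * the pool above, hence the wake_up of anyone waiting on the request
 * queue below.
 */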
wake_up(&server->request_q); trace_smb3_adj_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, credits->value - new_val, in_flight); cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n", __func__, credits->value - new_val, scredits); credits->value = new_val; return 0; } static __u64 smb2_get_next_mid(struct TCP_Server_Info *server) { __u64 mid; /* for SMB2 we need the current value */ spin_lock(&server->mid_lock); mid = server->CurrentMid++; spin_unlock(&server->mid_lock); return mid; } static void smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) { spin_lock(&server->mid_lock); if (server->CurrentMid >= val) server->CurrentMid -= val; spin_unlock(&server->mid_lock); } static struct mid_q_entry * __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) { struct mid_q_entry *mid; struct smb2_hdr *shdr = (struct smb2_hdr *)buf; __u64 wire_mid = le64_to_cpu(shdr->MessageId); if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n"); return NULL; } spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if ((mid->mid == wire_mid) && (mid->mid_state == MID_REQUEST_SUBMITTED) && (mid->command == shdr->Command)) { kref_get(&mid->refcount); if (dequeue) { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } spin_unlock(&server->mid_lock); return mid; } } spin_unlock(&server->mid_lock); return NULL; } static struct mid_q_entry * smb2_find_mid(struct TCP_Server_Info *server, char *buf) { return __smb2_find_mid(server, buf, false); } static struct mid_q_entry * smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf) { return __smb2_find_mid(server, buf, true); } static void smb2_dump_detail(void *buf, struct TCP_Server_Info *server) { #ifdef CONFIG_CIFS_DEBUG2 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n", shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId, shdr->Id.SyncId.ProcessId); cifs_server_dbg(VFS, "smb buf %p len %u\n", buf, server->ops->calc_smb_size(buf)); #endif } static bool smb2_need_neg(struct TCP_Server_Info *server) { return server->max_read == 0; } static int smb2_negotiate(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { int rc; spin_lock(&server->mid_lock); server->CurrentMid = 0; spin_unlock(&server->mid_lock); rc = SMB2_negotiate(xid, ses, server); /* BB we probably don't need to retry with modern servers */ if (rc == -EAGAIN) rc = -EHOSTDOWN; return rc; } static unsigned int smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int wsize; /* start with specified wsize, or default */ wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); return wsize; } static unsigned int smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int wsize; /* start with specified wsize, or default */ wsize = ctx->wsize ? 
ctx->wsize : SMB3_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->rdma) { if (server->sign) /* * Account for SMB2 data transfer packet header and * possible encryption header */ wsize = min_t(unsigned int, wsize, server->smbd_conn->max_fragmented_send_size - SMB2_READWRITE_PDU_HEADER_SIZE - sizeof(struct smb2_transform_hdr)); else wsize = min_t(unsigned int, wsize, server->smbd_conn->max_readwrite_size); } #endif if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); return wsize; } static unsigned int smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int rsize; /* start with specified rsize, or default */ rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read); if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); return rsize; } static unsigned int smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int rsize; /* start with specified rsize, or default */ rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->rdma) { if (server->sign) /* * Account for SMB2 data transfer packet header and * possible encryption header */ rsize = min_t(unsigned int, rsize, server->smbd_conn->max_fragmented_recv_size - SMB2_READWRITE_PDU_HEADER_SIZE - sizeof(struct smb2_transform_hdr)); else rsize = min_t(unsigned int, rsize, server->smbd_conn->max_readwrite_size); } #endif if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); return rsize; } /* * compare two interfaces a and b * return 0 if everything matches. * return 1 if a is rdma capable, or rss capable, or has higher link speed * return -1 otherwise. */ static int iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b) { int cmp_ret = 0; WARN_ON(!a || !b); if (a->rdma_capable == b->rdma_capable) { if (a->rss_capable == b->rss_capable) { if (a->speed == b->speed) { cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr, (struct sockaddr *) &b->sockaddr); if (!cmp_ret) return 0; else if (cmp_ret > 0) return 1; else return -1; } else if (a->speed > b->speed) return 1; else return -1; } else if (a->rss_capable > b->rss_capable) return 1; else return -1; } else if (a->rdma_capable > b->rdma_capable) return 1; else return -1; } static int parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, size_t buf_len, struct cifs_ses *ses, bool in_mount) { struct network_interface_info_ioctl_rsp *p; struct sockaddr_in *addr4; struct sockaddr_in6 *addr6; struct iface_info_ipv4 *p4; struct iface_info_ipv6 *p6; struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL; struct cifs_server_iface tmp_iface; ssize_t bytes_left; size_t next = 0; int nb_iface = 0; int rc = 0, ret = 0; bytes_left = buf_len; p = buf; spin_lock(&ses->iface_lock); /* do not query too frequently, this time with lock held */ if (ses->iface_last_update && time_before(jiffies, ses->iface_last_update + (SMB_INTERFACE_POLL_INTERVAL * HZ))) { spin_unlock(&ses->iface_lock); return 0; } /* * Go through iface_list and do kref_put to remove * any unused ifaces. 
ifaces in use will be removed * when the last user calls a kref_put on it */ list_for_each_entry_safe(iface, niface, &ses->iface_list, iface_head) { iface->is_active = 0; kref_put(&iface->refcount, release_iface); ses->iface_count--; } spin_unlock(&ses->iface_lock); /* * Samba server e.g. can return an empty interface list in some cases, * which would only be a problem if we were requesting multichannel */ if (bytes_left == 0) { /* avoid spamming logs every 10 minutes, so log only in mount */ if ((ses->chan_max > 1) && in_mount) cifs_dbg(VFS, "multichannel not available\n" "Empty network interface list returned by server %s\n", ses->server->hostname); rc = -EINVAL; goto out; } while (bytes_left >= sizeof(*p)) { memset(&tmp_iface, 0, sizeof(tmp_iface)); tmp_iface.speed = le64_to_cpu(p->LinkSpeed); tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0; switch (p->Family) { /* * The kernel and wire socket structures have the same * layout and use network byte order but make the * conversion explicit in case either one changes. */ case INTERNETWORK: addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr; p4 = (struct iface_info_ipv4 *)p->Buffer; addr4->sin_family = AF_INET; memcpy(&addr4->sin_addr, &p4->IPv4Address, 4); /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */ addr4->sin_port = cpu_to_be16(CIFS_PORT); cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__, &addr4->sin_addr); break; case INTERNETWORKV6: addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr; p6 = (struct iface_info_ipv6 *)p->Buffer; addr6->sin6_family = AF_INET6; memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16); /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */ addr6->sin6_flowinfo = 0; addr6->sin6_scope_id = 0; addr6->sin6_port = cpu_to_be16(CIFS_PORT); cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__, &addr6->sin6_addr); break; default: cifs_dbg(VFS, "%s: skipping unsupported socket family\n", __func__); goto next_iface; } /* * The iface_list is assumed to be sorted by speed. * Check if the new interface exists in that list. * NEVER change iface. it could be in use. * Add a new one instead */ spin_lock(&ses->iface_lock); list_for_each_entry_safe(iface, niface, &ses->iface_list, iface_head) { ret = iface_cmp(iface, &tmp_iface); if (!ret) { /* just get a ref so that it doesn't get picked/freed */ iface->is_active = 1; kref_get(&iface->refcount); ses->iface_count++; spin_unlock(&ses->iface_lock); goto next_iface; } else if (ret < 0) { /* all remaining ifaces are slower */ kref_get(&iface->refcount); break; } } spin_unlock(&ses->iface_lock); /* no match. 
insert the entry in the list */ info = kmalloc(sizeof(struct cifs_server_iface), GFP_KERNEL); if (!info) { rc = -ENOMEM; goto out; } memcpy(info, &tmp_iface, sizeof(tmp_iface)); /* add this new entry to the list */ kref_init(&info->refcount); info->is_active = 1; cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count); cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed); cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__, le32_to_cpu(p->Capability)); spin_lock(&ses->iface_lock); if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) { list_add_tail(&info->iface_head, &iface->iface_head); kref_put(&iface->refcount, release_iface); } else list_add_tail(&info->iface_head, &ses->iface_list); ses->iface_count++; spin_unlock(&ses->iface_lock); ses->iface_last_update = jiffies; next_iface: nb_iface++; next = le32_to_cpu(p->Next); if (!next) { bytes_left -= sizeof(*p); break; } p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); bytes_left -= next; } if (!nb_iface) { cifs_dbg(VFS, "%s: malformed interface info\n", __func__); rc = -EINVAL; goto out; } /* Azure rounds the buffer size up 8, to a 16 byte boundary */ if ((bytes_left > 8) || p->Next) cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); if (!ses->iface_count) { rc = -EINVAL; goto out; } out: return rc; } int SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount) { int rc; unsigned int ret_data_len = 0; struct network_interface_info_ioctl_rsp *out_buf = NULL; struct cifs_ses *ses = tcon->ses; /* do not query too frequently */ if (ses->iface_last_update && time_before(jiffies, ses->iface_last_update + (SMB_INTERFACE_POLL_INTERVAL * HZ))) return 0; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_QUERY_NETWORK_INTERFACE_INFO, NULL /* no data input */, 0 /* no data input */, CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) { cifs_dbg(FYI, "server does not support query network interfaces\n"); ret_data_len = 0; } else if (rc != 0) { cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc); goto out; } rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount); if (rc) goto out; out: kfree(out_buf); return rc; } static void smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb) { int rc; __le16 srch_path = 0; /* Null - open root of share */ u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; struct cached_fid *cfid = NULL; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = "", .desired_access = FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid); if (rc == 0) memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid)); else rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL, NULL); if (rc) return; SMB3_request_interfaces(xid, tcon, true /* called during mount */); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_ATTRIBUTE_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_DEVICE_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_VOLUME_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */ if (cfid == NULL) SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); else close_cached_dir(cfid); } static void smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon 
*tcon, struct cifs_sb_info *cifs_sb) { int rc; __le16 srch_path = 0; /* Null - open root of share */ u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = "", .desired_access = FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL, NULL); if (rc) return; SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_ATTRIBUTE_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_DEVICE_INFORMATION); SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } static int smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { __le16 *utf16_path; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; int err_buftype = CIFS_NO_BUFFER; struct cifs_open_parms oparms; struct kvec err_iov = {}; struct cifs_fid fid; struct cached_fid *cfid; bool islink; int rc, rc2; rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid); if (!rc) { if (cfid->has_lease) { close_cached_dir(cfid); return 0; } close_cached_dir(cfid); } utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); if (!utf16_path) return -ENOMEM; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = full_path, .desired_access = FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, &err_iov, &err_buftype); if (rc) { struct smb2_hdr *hdr = err_iov.iov_base; if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER)) goto out; if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) { rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb, full_path, &islink); if (rc2) { rc = rc2; goto out; } if (islink) rc = -EREMOTE; } if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)) rc = -EOPNOTSUPP; goto out; } rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); out: free_rsp_buf(err_buftype, err_iov.iov_base); kfree(utf16_path); return rc; } static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid, struct cifs_open_info_data *data) { *uniqueid = le64_to_cpu(data->fi.IndexNumber); return 0; } static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct cifs_open_info_data *data) { struct cifs_fid *fid = &cfile->fid; if (cfile->symlink_target) { data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!data->symlink_target) return -ENOMEM; } return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi); } #ifdef CONFIG_CIFS_XATTR static ssize_t move_smb2_ea_to_cifs(char *dst, size_t dst_size, struct smb2_file_full_ea_info *src, size_t src_size, const unsigned char *ea_name) { int rc = 0; unsigned int ea_name_len = ea_name ? 
strlen(ea_name) : 0; char *name, *value; size_t buf_size = dst_size; size_t name_len, value_len, user_name_len; while (src_size > 0) { name_len = (size_t)src->ea_name_length; value_len = (size_t)le16_to_cpu(src->ea_value_length); if (name_len == 0) break; if (src_size < 8 + name_len + 1 + value_len) { cifs_dbg(FYI, "EA entry goes beyond length of list\n"); rc = -EIO; goto out; } name = &src->ea_data[0]; value = &src->ea_data[src->ea_name_length + 1]; if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, name, name_len) == 0) { rc = value_len; if (dst_size == 0) goto out; if (dst_size < value_len) { rc = -ERANGE; goto out; } memcpy(dst, value, value_len); goto out; } } else { /* 'user.' plus a terminating null */ user_name_len = 5 + 1 + name_len; if (buf_size == 0) { /* skip copy - calc size only */ rc += user_name_len; } else if (dst_size >= user_name_len) { dst_size -= user_name_len; memcpy(dst, "user.", 5); dst += 5; memcpy(dst, src->ea_data, name_len); dst += name_len; *dst = 0; ++dst; rc += user_name_len; } else { /* stop before overrun buffer */ rc = -ERANGE; break; } } if (!src->next_entry_offset) break; if (src_size < le32_to_cpu(src->next_entry_offset)) { /* stop before overrun buffer */ rc = -ERANGE; break; } src_size -= le32_to_cpu(src->next_entry_offset); src = (void *)((char *)src + le32_to_cpu(src->next_entry_offset)); } /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; out: return (ssize_t)rc; } static ssize_t smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *path, const unsigned char *ea_name, char *ea_data, size_t buf_size, struct cifs_sb_info *cifs_sb) { int rc; struct kvec rsp_iov = {NULL, 0}; int buftype = CIFS_NO_BUFFER; struct smb2_query_info_rsp *rsp; struct smb2_file_full_ea_info *info = NULL; rc = smb2_query_info_compound(xid, tcon, path, FILE_READ_EA, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE, &rsp_iov, &buftype, cifs_sb); if (rc) { /* * If ea_name is NULL (listxattr) and there are no EAs, * return 0 as it's not an error. Otherwise, the specified * ea_name was not found. 
*/ if (!ea_name && rc == -ENODATA) rc = 0; goto qeas_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(struct smb2_file_full_ea_info)); if (rc) goto qeas_exit; info = (struct smb2_file_full_ea_info *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = move_smb2_ea_to_cifs(ea_data, buf_size, info, le32_to_cpu(rsp->OutputBufferLength), ea_name); qeas_exit: free_rsp_buf(buftype, rsp_iov.iov_base); return rc; } static int smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, const char *path, const char *ea_name, const void *ea_value, const __u16 ea_value_len, const struct nls_table *nls_codepage, struct cifs_sb_info *cifs_sb) { struct smb2_compound_vars *vars; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); struct smb_rqst *rqst; struct kvec *rsp_iov; __le16 *utf16_path = NULL; int ea_name_len = strlen(ea_name); int flags = CIFS_CP_CREATE_CLOSE_OP; int len; int resp_buftype[3]; struct cifs_open_parms oparms; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_fid fid; unsigned int size[1]; void *data[1]; struct smb2_file_full_ea_info *ea = NULL; struct smb2_query_info_rsp *rsp; int rc, used_len = 0; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; if (ea_name_len > 255) return -EINVAL; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM; resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; vars = kzalloc(sizeof(*vars), GFP_KERNEL); if (!vars) { rc = -ENOMEM; goto out_free_path; } rqst = vars->rqst; rsp_iov = vars->rsp_iov; if (ses->server->ops->query_all_EAs) { if (!ea_value) { rc = ses->server->ops->query_all_EAs(xid, tcon, path, ea_name, NULL, 0, cifs_sb); if (rc == -ENODATA) goto sea_exit; } else { /* If we are adding an attribute we should first check * if there will be enough space available to store * the new EA. If not, we should not add it since we * would not be able to even read the EAs back. */ rc = smb2_query_info_compound(xid, tcon, path, FILE_READ_EA, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE, &rsp_iov[1], &resp_buftype[1], cifs_sb); if (rc == 0) { rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; used_len = le32_to_cpu(rsp->OutputBufferLength); } free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); resp_buftype[1] = CIFS_NO_BUFFER; memset(&rsp_iov[1], 0, sizeof(rsp_iov[1])); rc = 0; /* Use a fudge factor of 256 bytes in case we collide * with a different set_EAs command.
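* The 256 bytes of headroom are a client-side heuristic (hence "fudge
* factor"), not a limit imposed by the SMB protocol.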
*/ if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 < used_len + ea_name_len + ea_value_len + 1) { rc = -ENOSPC; goto sea_exit; } } } /* Open */ rqst[0].rq_iov = vars->open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = path, .desired_access = FILE_WRITE_EA, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path); if (rc) goto sea_exit; smb2_set_next_command(tcon, &rqst[0]); /* Set Info */ rqst[1].rq_iov = vars->si_iov; rqst[1].rq_nvec = 1; len = sizeof(*ea) + ea_name_len + ea_value_len + 1; ea = kzalloc(len, GFP_KERNEL); if (ea == NULL) { rc = -ENOMEM; goto sea_exit; } ea->ea_name_length = ea_name_len; ea->ea_value_length = cpu_to_le16(ea_value_len); memcpy(ea->ea_data, ea_name, ea_name_len + 1); memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len); size[0] = len; data[0] = ea; rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); if (rc) goto sea_exit; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); /* Close */ rqst[2].rq_iov = &vars->close_iov; rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) goto sea_exit; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); /* no need to bump num_remote_opens because handle immediately closed */ sea_exit: kfree(ea); SMB2_open_free(&rqst[0]); SMB2_set_info_free(&rqst[1]); SMB2_close_free(&rqst[2]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); kfree(vars); out_free_path: kfree(utf16_path); return rc; } #endif static bool smb2_can_echo(struct TCP_Server_Info *server) { return server->echoes; } static void smb2_clear_stats(struct cifs_tcon *tcon) { int i; for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0); atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0); } } static void smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon) { seq_puts(m, "\n\tShare Capabilities:"); if (tcon->capabilities & SMB2_SHARE_CAP_DFS) seq_puts(m, " DFS,"); if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) seq_puts(m, " CONTINUOUS AVAILABILITY,"); if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT) seq_puts(m, " SCALEOUT,"); if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) seq_puts(m, " CLUSTER,"); if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC) seq_puts(m, " ASYMMETRIC,"); if (tcon->capabilities == 0) seq_puts(m, " None"); if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE) seq_puts(m, " Aligned,"); if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE) seq_puts(m, " Partition Aligned,"); if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY) seq_puts(m, " SSD,"); if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED) seq_puts(m, " TRIM-support,"); seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags); seq_printf(m, "\n\ttid: 0x%x", tcon->tid); if (tcon->perf_sector_size) seq_printf(m, "\tOptimal sector size: 0x%x", tcon->perf_sector_size); seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access); } static void smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon) { atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent; atomic_t 
*failed = tcon->stats.smb2_stats.smb2_com_failed; /* * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO * totals (requests sent) since those SMBs are per-session not per tcon */ seq_printf(m, "\nBytes read: %llu Bytes written: %llu", (long long)(tcon->bytes_read), (long long)(tcon->bytes_written)); seq_printf(m, "\nOpen files: %d total (local), %d open on server", atomic_read(&tcon->num_local_opens), atomic_read(&tcon->num_remote_opens)); seq_printf(m, "\nTreeConnects: %d total %d failed", atomic_read(&sent[SMB2_TREE_CONNECT_HE]), atomic_read(&failed[SMB2_TREE_CONNECT_HE])); seq_printf(m, "\nTreeDisconnects: %d total %d failed", atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]), atomic_read(&failed[SMB2_TREE_DISCONNECT_HE])); seq_printf(m, "\nCreates: %d total %d failed", atomic_read(&sent[SMB2_CREATE_HE]), atomic_read(&failed[SMB2_CREATE_HE])); seq_printf(m, "\nCloses: %d total %d failed", atomic_read(&sent[SMB2_CLOSE_HE]), atomic_read(&failed[SMB2_CLOSE_HE])); seq_printf(m, "\nFlushes: %d total %d failed", atomic_read(&sent[SMB2_FLUSH_HE]), atomic_read(&failed[SMB2_FLUSH_HE])); seq_printf(m, "\nReads: %d total %d failed", atomic_read(&sent[SMB2_READ_HE]), atomic_read(&failed[SMB2_READ_HE])); seq_printf(m, "\nWrites: %d total %d failed", atomic_read(&sent[SMB2_WRITE_HE]), atomic_read(&failed[SMB2_WRITE_HE])); seq_printf(m, "\nLocks: %d total %d failed", atomic_read(&sent[SMB2_LOCK_HE]), atomic_read(&failed[SMB2_LOCK_HE])); seq_printf(m, "\nIOCTLs: %d total %d failed", atomic_read(&sent[SMB2_IOCTL_HE]), atomic_read(&failed[SMB2_IOCTL_HE])); seq_printf(m, "\nQueryDirectories: %d total %d failed", atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]), atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE])); seq_printf(m, "\nChangeNotifies: %d total %d failed", atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]), atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE])); seq_printf(m, "\nQueryInfos: %d total %d failed", atomic_read(&sent[SMB2_QUERY_INFO_HE]), atomic_read(&failed[SMB2_QUERY_INFO_HE])); seq_printf(m, "\nSetInfos: %d total %d failed", atomic_read(&sent[SMB2_SET_INFO_HE]), atomic_read(&failed[SMB2_SET_INFO_HE])); seq_printf(m, "\nOplockBreaks: %d sent %d failed", atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]), atomic_read(&failed[SMB2_OPLOCK_BREAK_HE])); } static void smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; cfile->fid.persistent_fid = fid->persistent_fid; cfile->fid.volatile_fid = fid->volatile_fid; cfile->fid.access = fid->access; #ifdef CONFIG_CIFS_DEBUG2 cfile->fid.mid = fid->mid; #endif /* CIFS_DEBUG2 */ server->ops->set_oplock_level(cinode, oplock, fid->epoch, &fid->purge_cache); cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); memcpy(cfile->fid.create_guid, fid->create_guid, 16); } static void smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } static void smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile) { struct smb2_file_network_open_info file_inf; struct inode *inode; int rc; rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, &file_inf); if (rc) return; inode = d_inode(cfile->dentry); spin_lock(&inode->i_lock); CIFS_I(inode)->time = jiffies; /* Creation time should not need to be updated on close */ if (file_inf.LastWriteTime) inode->i_mtime = 
cifs_NTtimeToUnix(file_inf.LastWriteTime); if (file_inf.ChangeTime) inode_set_ctime_to_ts(inode, cifs_NTtimeToUnix(file_inf.ChangeTime)); if (file_inf.LastAccessTime) inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime); /* * i_blocks is not related to (i_size / i_blksize), * but instead 512 byte (2**9) size is required for * calculating num blocks. */ if (le64_to_cpu(file_inf.AllocationSize) > 4096) inode->i_blocks = (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9; /* End of file and Attributes should not have to be updated on close */ spin_unlock(&inode->i_lock); } static int SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct copychunk_ioctl *pcchunk) { int rc; unsigned int ret_data_len; struct resume_key_req *res_key; rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, CIFSMaxBufSize, (char **)&res_key, &ret_data_len); if (rc == -EOPNOTSUPP) { pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name); goto req_res_key_exit; } else if (rc) { cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc); goto req_res_key_exit; } if (ret_data_len < sizeof(struct resume_key_req)) { cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n"); rc = -EINVAL; goto req_res_key_exit; } memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE); req_res_key_exit: kfree(res_key); return rc; } static int smb2_ioctl_query_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, __le16 *path, int is_dir, unsigned long p) { struct smb2_compound_vars *vars; struct smb_rqst *rqst; struct kvec *rsp_iov; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); char __user *arg = (char __user *)p; struct smb_query_info qi; struct smb_query_info __user *pqi; int rc = 0; int flags = CIFS_CP_CREATE_CLOSE_OP; struct smb2_query_info_rsp *qi_rsp = NULL; struct smb2_ioctl_rsp *io_rsp = NULL; void *buffer = NULL; int resp_buftype[3]; struct cifs_open_parms oparms; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_fid fid; unsigned int size[2]; void *data[2]; int create_options = is_dir ? 
CREATE_NOT_FILE : CREATE_NOT_DIR; void (*free_req1_func)(struct smb_rqst *r); vars = kzalloc(sizeof(*vars), GFP_ATOMIC); if (vars == NULL) return -ENOMEM; rqst = &vars->rqst[0]; rsp_iov = &vars->rsp_iov[0]; resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) { rc = -EFAULT; goto free_vars; } if (qi.output_buffer_length > 1024) { rc = -EINVAL; goto free_vars; } if (!ses || !server) { rc = -EIO; goto free_vars; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; if (qi.output_buffer_length) { buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length); if (IS_ERR(buffer)) { rc = PTR_ERR(buffer); goto free_vars; } } /* Open */ rqst[0].rq_iov = &vars->open_iov[0]; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, create_options), .fid = &fid, }; if (qi.flags & PASSTHRU_FSCTL) { switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) { case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS: oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE; break; case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS: oparms.desired_access = GENERIC_ALL; break; case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS: oparms.desired_access = GENERIC_READ; break; case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS: oparms.desired_access = GENERIC_WRITE; break; } } else if (qi.flags & PASSTHRU_SET_INFO) { oparms.desired_access = GENERIC_WRITE; } else { oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL; } rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, path); if (rc) goto free_output_buffer; smb2_set_next_command(tcon, &rqst[0]); /* Query */ if (qi.flags & PASSTHRU_FSCTL) { /* Can eventually relax perm check since server enforces too */ if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; goto free_open_req; } rqst[1].rq_iov = &vars->io_iov[0]; rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, qi.info_type, buffer, qi.output_buffer_length, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); free_req1_func = SMB2_ioctl_free; } else if (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; goto free_open_req; } if (qi.output_buffer_length < 8) { rc = -EINVAL; goto free_open_req; } rqst[1].rq_iov = vars->si_iov; rqst[1].rq_nvec = 1; /* MS-FSCC 2.4.13 FileEndOfFileInformation */ size[0] = 8; data[0] = buffer; rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, current->tgid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); free_req1_func = SMB2_set_info_free; } else if (qi.flags == PASSTHRU_QUERY_INFO) { rqst[1].rq_iov = &vars->qi_iov; rqst[1].rq_nvec = 1; rc = SMB2_query_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, qi.file_info_class, qi.info_type, qi.additional_information, qi.input_buffer_length, qi.output_buffer_length, buffer); free_req1_func = SMB2_query_info_free; } else { /* unknown flags */ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n", qi.flags); rc = -EINVAL; } if (rc) goto free_open_req; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); /* Close */ rqst[2].rq_iov = &vars->close_iov; rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) goto free_req_1; 
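/*
 * The three requests built above form one compound chain:
 * open -> query/ioctl/set-info -> close. The middle and final requests
 * pass COMPOUND_FID, so the server resolves them against the handle
 * produced by the preceding create, keeping the whole exchange to a
 * single round trip.
 */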
smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); if (rc) goto out; /* No need to bump num_remote_opens since handle immediately closed */ if (qi.flags & PASSTHRU_FSCTL) { pqi = (struct smb_query_info __user *)arg; io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base; if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) { rc = -EFAULT; goto out; } if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, sizeof(qi.input_buffer_length))) { rc = -EFAULT; goto out; } if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), qi.input_buffer_length)) rc = -EFAULT; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, sizeof(qi.input_buffer_length))) { rc = -EFAULT; goto out; } if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) rc = -EFAULT; } out: free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); SMB2_close_free(&rqst[2]); free_req_1: free_req1_func(&rqst[1]); free_open_req: SMB2_open_free(&rqst[0]); free_output_buffer: kfree(buffer); free_vars: kfree(vars); return rc; } static ssize_t smb2_copychunk_range(const unsigned int xid, struct cifsFileInfo *srcfile, struct cifsFileInfo *trgtfile, u64 src_off, u64 len, u64 dest_off) { int rc; unsigned int ret_data_len; struct copychunk_ioctl *pcchunk; struct copychunk_ioctl_rsp *retbuf = NULL; struct cifs_tcon *tcon; int chunks_copied = 0; bool chunk_sizes_updated = false; ssize_t bytes_written, total_bytes_written = 0; pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); if (pcchunk == NULL) return -ENOMEM; cifs_dbg(FYI, "%s: about to call request res key\n", __func__); /* Request a key from the server to identify the source of the copy */ rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), srcfile->fid.persistent_fid, srcfile->fid.volatile_fid, pcchunk); /* Note: request_res_key sets res_key null only if rc !=0 */ if (rc) goto cchunk_out; /* For now array only one chunk long, will make more flexible later */ pcchunk->ChunkCount = cpu_to_le32(1); pcchunk->Reserved = 0; pcchunk->Reserved2 = 0; tcon = tlink_tcon(trgtfile->tlink); while (len > 0) { pcchunk->SourceOffset = cpu_to_le64(src_off); pcchunk->TargetOffset = cpu_to_le64(dest_off); pcchunk->Length = cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk)); /* Request server copy to target from src identified by key */ kfree(retbuf); retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, (char *)pcchunk, sizeof(struct copychunk_ioctl), CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); if (rc == 0) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) { cifs_tcon_dbg(VFS, "Invalid cchunk response size\n"); rc = -EIO; goto cchunk_out; } if (retbuf->TotalBytesWritten == 0) { cifs_dbg(FYI, "no bytes copied\n"); rc = -EIO; goto cchunk_out; } /* * Check if server claimed to write more than we asked */ if 
(le32_to_cpu(retbuf->TotalBytesWritten) > le32_to_cpu(pcchunk->Length)) { cifs_tcon_dbg(VFS, "Invalid copy chunk response\n"); rc = -EIO; goto cchunk_out; } if (le32_to_cpu(retbuf->ChunksWritten) != 1) { cifs_tcon_dbg(VFS, "Invalid num chunks written\n"); rc = -EIO; goto cchunk_out; } chunks_copied++; bytes_written = le32_to_cpu(retbuf->TotalBytesWritten); src_off += bytes_written; dest_off += bytes_written; len -= bytes_written; total_bytes_written += bytes_written; cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n", le32_to_cpu(retbuf->ChunksWritten), le32_to_cpu(retbuf->ChunkBytesWritten), bytes_written); } else if (rc == -EINVAL) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) goto cchunk_out; cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n", le32_to_cpu(retbuf->ChunksWritten), le32_to_cpu(retbuf->ChunkBytesWritten), le32_to_cpu(retbuf->TotalBytesWritten)); /* * Check if this is the first request using these sizes * (i.e. check if the copy succeeded once with the original sizes, * and check if the server gave us different sizes after * we already updated max sizes on a previous request). * If not, why is the server returning an error now? */ if ((chunks_copied != 0) || chunk_sizes_updated) goto cchunk_out; /* Check that server is not asking us to grow size */ if (le32_to_cpu(retbuf->ChunkBytesWritten) < tcon->max_bytes_chunk) tcon->max_bytes_chunk = le32_to_cpu(retbuf->ChunkBytesWritten); else goto cchunk_out; /* server gave us bogus size */ /* No need to change MaxChunks since already set to 1 */ chunk_sizes_updated = true; } else goto cchunk_out; } cchunk_out: kfree(pcchunk); kfree(retbuf); if (rc) return rc; else return total_bytes_written; } static int smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid); } static unsigned int smb2_read_data_offset(char *buf) { struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; return rsp->DataOffset; } static unsigned int smb2_read_data_length(char *buf, bool in_remaining) { struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; if (in_remaining) return le32_to_cpu(rsp->DataRemaining); return le32_to_cpu(rsp->DataLength); } static int smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid, struct cifs_io_parms *parms, unsigned int *bytes_read, char **buf, int *buf_type) { parms->persistent_fid = pfid->persistent_fid; parms->volatile_fid = pfid->volatile_fid; return SMB2_read(xid, parms, bytes_read, buf, buf_type); } static int smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid, struct cifs_io_parms *parms, unsigned int *written, struct kvec *iov, unsigned long nr_segs) { parms->persistent_fid = pfid->persistent_fid; parms->volatile_fid = pfid->volatile_fid; return SMB2_write(xid, parms, written, iov, nr_segs); } /* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse) { struct cifsInodeInfo *cifsi; int rc; cifsi = CIFS_I(inode); /* if file already sparse don't bother setting sparse again */ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse) return true; /* already sparse */ if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse) return true; /* already not sparse */ /* * Can't check for sparse support on the share the usual way via the * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share, * since the Samba server doesn't set the flag on the share, yet * supports the set sparse FSCTL and returns sparse correctly * in the file attributes. If we fail setting sparse, though, we * mark that the server does not support sparse files for this share * to avoid repeatedly sending the unsupported fsctl to the server * if the file is repeatedly extended. */ if (tcon->broken_sparse_sup) return false; rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, &setsparse, 1, CIFSMaxBufSize, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; cifs_dbg(FYI, "set sparse rc = %d\n", rc); return false; } if (setsparse) cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE; else cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE); return true; } static int smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, __u64 size, bool set_alloc) { __le64 eof = cpu_to_le64(size); struct inode *inode; /* * If extending the file by more than one page, make it sparse. Many Linux * filesystems make files sparse by default when extending via ftruncate */ inode = d_inode(cfile->dentry); if (!set_alloc && (size > inode->i_size + 8192)) { __u8 set_sparse = 1; /* whether set sparse succeeds or not, extend the file */ smb2_set_sparse(xid, tcon, cfile, inode, set_sparse); } return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); } static int smb2_duplicate_extents(const unsigned int xid, struct cifsFileInfo *srcfile, struct cifsFileInfo *trgtfile, u64 src_off, u64 len, u64 dest_off) { int rc; unsigned int ret_data_len; struct inode *inode; struct duplicate_extents_to_file dup_ext_buf; struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink); /* server filesystems advertise duplicate extent support with this flag */ if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0) return -EOPNOTSUPP; dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid; dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid; dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off); dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off); dup_ext_buf.ByteCount = cpu_to_le64(len); cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n", src_off, dest_off, len); inode = d_inode(trgtfile->dentry); if (inode->i_size < dest_off + len) { rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false); if (rc) goto duplicate_extents_out; /* * Although we could also set a plausible allocation size (i_blocks) * here in addition to setting the file size, in reflink * it is likely that the target file is sparse.
Its allocation * size will be queried on the next revalidate, but it is important * to make sure that the file's cached size is updated immediately */ cifs_setsize(inode, dest_off + len); } rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), CIFSMaxBufSize, NULL, &ret_data_len); if (ret_data_len > 0) cifs_dbg(FYI, "Non-zero response length in duplicate extents\n"); duplicate_extents_out: return rc; } static int smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile) { return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid); } static int smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile) { struct fsctl_set_integrity_information_req integr_info; unsigned int ret_data_len; integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED); integr_info.Flags = 0; integr_info.Reserved = 0; return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), CIFSMaxBufSize, NULL, &ret_data_len); } /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ #define GMT_TOKEN_SIZE 50 #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */ /* * Input buffer contains (empty) struct smb_snapshot array with size filled in * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 */ static int smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, void __user *ioc_buf) { char *retbuf = NULL; unsigned int ret_data_len = 0; int rc; u32 max_response_size; struct smb_snapshot_array snapshot_in; /* * On the first query to enumerate the list of snapshots available * for this volume, the buffer begins with 0 (the number of snapshots * which can be returned is zero, since at that point we do not know * how big the buffer needs to be). On the second query, * it (ret_data_len) is set to the number of snapshots so we * know to set the maximum response size larger (see below). */ if (get_user(ret_data_len, (unsigned int __user *)ioc_buf)) return -EFAULT; /* * Note that for snapshot queries, servers like Azure expect the * first query to be minimal size (and just used to get the number/size * of previous versions), so the response size must be specified as EXACTLY * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple * of eight bytes. */ if (ret_data_len == 0) max_response_size = MIN_SNAPSHOT_ARRAY_SIZE; else max_response_size = CIFSMaxBufSize; rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, NULL, 0 /* no input data */, max_response_size, (char **)&retbuf, &ret_data_len); cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n", rc, ret_data_len); if (rc) return rc; if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) { /* Fixup buffer */ if (copy_from_user(&snapshot_in, ioc_buf, sizeof(struct smb_snapshot_array))) { rc = -EFAULT; kfree(retbuf); return rc; } /* * Check for min size, i.e. not large enough to fit even one GMT * token (snapshot).
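* Each token is a UTF-16 string of the form @GMT-YYYY.MM.DD-HH.MM.SS,
* occupying GMT_TOKEN_SIZE (50) bytes including the two-byte null
* terminator, per the define above.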
On the first ioctl some users may pass in * smaller size (or zero) to simply get the size of the array * so the user space caller can allocate sufficient memory * and retry the ioctl again with larger array size sufficient * to hold all of the snapshot GMT tokens on the second try. */ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) ret_data_len = sizeof(struct smb_snapshot_array); /* * We return struct SRV_SNAPSHOT_ARRAY, followed by * the snapshot array (of 50 byte GMT tokens) each * representing an available previous version of the data */ if (ret_data_len > (snapshot_in.snapshot_array_size + sizeof(struct smb_snapshot_array))) ret_data_len = snapshot_in.snapshot_array_size + sizeof(struct smb_snapshot_array); if (copy_to_user(ioc_buf, retbuf, ret_data_len)) rc = -EFAULT; } kfree(retbuf); return rc; } static int smb3_notify(const unsigned int xid, struct file *pfile, void __user *ioc_buf, bool return_changes) { struct smb3_notify_info notify; struct smb3_notify_info __user *pnotify_buf; struct dentry *dentry = pfile->f_path.dentry; struct inode *inode = file_inode(pfile); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_open_parms oparms; struct cifs_fid fid; struct cifs_tcon *tcon; const unsigned char *path; char *returned_ioctl_info = NULL; void *page = alloc_dentry_path(); __le16 *utf16_path = NULL; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; int rc = 0; __u32 ret_len = 0; path = build_path_from_dentry(dentry, page); if (IS_ERR(path)) { rc = PTR_ERR(path); goto notify_exit; } utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (utf16_path == NULL) { rc = -ENOMEM; goto notify_exit; } if (return_changes) { if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) { rc = -EFAULT; goto notify_exit; } } else { if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) { rc = -EFAULT; goto notify_exit; } notify.data_len = 0; } tcon = cifs_sb_master_tcon(cifs_sb); oparms = (struct cifs_open_parms) { .tcon = tcon, .path = path, .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); if (rc) goto notify_exit; rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid, notify.watch_tree, notify.completion_filter, notify.data_len, &returned_ioctl_info, &ret_len); SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc); if (return_changes && (ret_len > 0) && (notify.data_len > 0)) { if (ret_len > notify.data_len) ret_len = notify.data_len; pnotify_buf = (struct smb3_notify_info __user *)ioc_buf; if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len)) rc = -EFAULT; else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len))) rc = -EFAULT; } kfree(returned_ioctl_info); notify_exit: free_dentry_path(page); kfree(utf16_path); return rc; } static int smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, const char *path, struct cifs_sb_info *cifs_sb, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { __le16 *utf16_path; struct smb_rqst rqst[2]; struct kvec rsp_iov[2]; int resp_buftype[2]; struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; int rc, flags = 0; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct smb2_query_directory_rsp *qd_rsp = NULL; struct smb2_create_rsp *op_rsp = 
NULL; struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses); int retry_count = 0; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(rqst, 0, sizeof(rqst)); resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); /* Open */ memset(&open_iov, 0, sizeof(open_iov)); rqst[0].rq_iov = open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = path, .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = fid, }; rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path); if (rc) goto qdf_free; smb2_set_next_command(tcon, &rqst[0]); /* Query directory */ srch_inf->entries_in_buffer = 0; srch_inf->index_of_last_entry = 2; memset(&qd_iov, 0, sizeof(qd_iov)); rqst[1].rq_iov = qd_iov; rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; rc = SMB2_query_directory_init(xid, tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, 0, srch_inf->info_level); if (rc) goto qdf_free; smb2_set_related(&rqst[1]); again: rc = compound_send_recv(xid, tcon->ses, server, flags, 2, rqst, resp_buftype, rsp_iov); if (rc == -EAGAIN && retry_count++ < 10) goto again; /* If the open failed there is nothing to do */ op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) { cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc); goto qdf_free; } fid->persistent_fid = op_rsp->PersistentFileId; fid->volatile_fid = op_rsp->VolatileFileId; /* Anything else than ENODATA means a genuine error */ if (rc && rc != -ENODATA) { SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc); trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid, tcon->ses->Suid, 0, 0, rc); goto qdf_free; } atomic_inc(&tcon->num_remote_opens); qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base; if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) { trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid, tcon->ses->Suid, 0, 0); srch_inf->endOfSearch = true; rc = 0; goto qdf_free; } rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1], srch_inf); if (rc) { trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid, tcon->ses->Suid, 0, 0, rc); goto qdf_free; } resp_buftype[1] = CIFS_NO_BUFFER; trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid, tcon->ses->Suid, 0, srch_inf->entries_in_buffer); qdf_free: kfree(utf16_path); SMB2_open_free(&rqst[0]); SMB2_query_directory_free(&rqst[1]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); return rc; } static int smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { return SMB2_query_directory(xid, tcon, fid->persistent_fid, fid->volatile_fid, 0, srch_inf); } static int smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } /* * If we negotiate SMB2 protocol and get STATUS_PENDING - update * the number of credits and return true. Otherwise - return false. 
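 *
 * Worked example (illustrative numbers): with server->credits == 5 and
 * an interim STATUS_PENDING response carrying CreditRequest == 10, the
 * accounting below leaves credits == 15 and wakes any senders blocked
 * on request_q; the real response for the pending operation still
 * arrives later under the same MessageId.
 *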
*/ static bool smb2_is_status_pending(char *buf, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; int scredits, in_flight; if (shdr->Status != STATUS_PENDING) return false; if (shdr->CreditRequest) { spin_lock(&server->req_lock); server->credits += le16_to_cpu(shdr->CreditRequest); scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); wake_up(&server->request_q); trace_smb3_pend_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, le16_to_cpu(shdr->CreditRequest), in_flight); cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n", __func__, le16_to_cpu(shdr->CreditRequest), scredits); } return true; } static bool smb2_is_session_expired(char *buf) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED && shdr->Status != STATUS_USER_SESSION_DELETED) return false; trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId), le64_to_cpu(shdr->SessionId), le16_to_cpu(shdr->Command), le64_to_cpu(shdr->MessageId)); cifs_dbg(FYI, "Session expired or deleted\n"); return true; } static bool smb2_is_status_io_timeout(char *buf) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; if (shdr->Status == STATUS_IO_TIMEOUT) return true; else return false; } static bool smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; struct TCP_Server_Info *pserver; struct cifs_ses *ses; struct cifs_tcon *tcon; if (shdr->Status != STATUS_NETWORK_NAME_DELETED) return false; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { spin_lock(&tcon->tc_lock); tcon->need_reconnect = true; spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); pr_warn_once("Server share %s deleted.\n", tcon->tree_name); return true; } } } spin_unlock(&cifs_tcp_ses_lock); return false; } static int smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode) { if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) return SMB2_lease_break(0, tcon, cinode->lease_key, smb2_get_lease_state(cinode)); return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid, CIFS_CACHE_READ(cinode) ? 1 : 0); } void smb2_set_related(struct smb_rqst *rqst) { struct smb2_hdr *shdr; shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base); if (shdr == NULL) { cifs_dbg(FYI, "shdr NULL in smb2_set_related\n"); return; } shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; } char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0}; void smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst) { struct smb2_hdr *shdr; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = ses->server; unsigned long len = smb_rqst_len(server, rqst); int i, num_padding; shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base); if (shdr == NULL) { cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n"); return; } /* SMB headers in a compound are 8 byte aligned. */ /* No padding needed */ if (!(len & 7)) goto finished; num_padding = 8 - (len & 7); if (!smb3_encryption_required(tcon)) { /* * If we do not have encryption then we can just add an extra * iov for the padding. 
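 *
 * Worked example (illustrative): a request with len == 75 (0x4B) needs
 * num_padding == 8 - (75 & 7) == 5 pad bytes, so len becomes 80 and
 * NextCommand ends up pointing at the 8-byte-aligned start of the next
 * header in the compound.
 *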
*/ rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding; rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding; rqst->rq_nvec++; len += num_padding; } else { /* * We cannot add a small padding iov for the encryption case * because the encryption framework cannot handle the padding * iovs. * We have to flatten this into a single buffer and add * the padding to it. */ for (i = 1; i < rqst->rq_nvec; i++) { memcpy(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len, rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len; } memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len, 0, num_padding); rqst->rq_iov[0].iov_len += num_padding; len += num_padding; rqst->rq_nvec = 1; } finished: shdr->NextCommand = cpu_to_le32(len); } /* * Passes the query info response back to the caller on success. * The caller needs to free this with free_rsp_buf(). */ int smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, const char *path, u32 desired_access, u32 class, u32 type, u32 output_len, struct kvec *rsp, int *buftype, struct cifs_sb_info *cifs_sb) { struct smb2_compound_vars *vars; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); int flags = CIFS_CP_CREATE_CLOSE_OP; struct smb_rqst *rqst; int resp_buftype[3]; struct kvec *rsp_iov; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; int rc; __le16 *utf16_path; struct cached_fid *cfid = NULL; if (!path) path = ""; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; vars = kzalloc(sizeof(*vars), GFP_KERNEL); if (!vars) { rc = -ENOMEM; goto out_free_path; } rqst = vars->rqst; rsp_iov = vars->rsp_iov; /* * We can only call this for things we know are directories.
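 *
 * Schematically, the compound sent below is (one round trip):
 *
 *	rqst[0] SMB2_CREATE     path          -> opens fid
 *	rqst[1] SMB2_QUERY_INFO COMPOUND_FID  -> reuses rqst[0]'s fid
 *	rqst[2] SMB2_CLOSE      COMPOUND_FID
 *
 * and when a cached directory handle (cfid) is available, only rqst[1]
 * is sent, against the already-open handle.
 *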
*/ if (!strcmp(path, "")) open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid); /* cfid null if open dir failed */ rqst[0].rq_iov = vars->open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = path, .desired_access = desired_access, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path); if (rc) goto qic_exit; smb2_set_next_command(tcon, &rqst[0]); rqst[1].rq_iov = &vars->qi_iov; rqst[1].rq_nvec = 1; if (cfid) { rc = SMB2_query_info_init(tcon, server, &rqst[1], cfid->fid.persistent_fid, cfid->fid.volatile_fid, class, type, 0, output_len, 0, NULL); } else { rc = SMB2_query_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, class, type, 0, output_len, 0, NULL); } if (rc) goto qic_exit; if (!cfid) { smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); } rqst[2].rq_iov = &vars->close_iov; rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) goto qic_exit; smb2_set_related(&rqst[2]); if (cfid) { rc = compound_send_recv(xid, ses, server, flags, 1, &rqst[1], &resp_buftype[1], &rsp_iov[1]); } else { rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); } if (rc) { free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); if (rc == -EREMCHG) { tcon->need_reconnect = true; pr_warn_once("server share %s deleted\n", tcon->tree_name); } goto qic_exit; } *rsp = rsp_iov[1]; *buftype = resp_buftype[1]; qic_exit: SMB2_open_free(&rqst[0]); SMB2_query_info_free(&rqst[1]); SMB2_close_free(&rqst[2]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); if (cfid) close_cached_dir(cfid); kfree(vars); out_free_path: kfree(utf16_path); return rc; } static int smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct kstatfs *buf) { struct smb2_query_info_rsp *rsp; struct smb2_fs_full_size_info *info = NULL; struct kvec rsp_iov = {NULL, 0}; int buftype = CIFS_NO_BUFFER; int rc; rc = smb2_query_info_compound(xid, tcon, "", FILE_READ_ATTRIBUTES, FS_FULL_SIZE_INFORMATION, SMB2_O_INFO_FILESYSTEM, sizeof(struct smb2_fs_full_size_info), &rsp_iov, &buftype, cifs_sb); if (rc) goto qfs_exit; rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; buf->f_type = SMB2_SUPER_MAGIC; info = (struct smb2_fs_full_size_info *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(struct smb2_fs_full_size_info)); if (!rc) smb2_copy_fs_info_to_kstatfs(info, buf); qfs_exit: trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc); free_rsp_buf(buftype, rsp_iov.iov_base); return rc; } static int smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct kstatfs *buf) { int rc; __le16 srch_path = 0; /* Null - open root of share */ u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; if (!tcon->posix_extensions) return smb2_queryfs(xid, tcon, cifs_sb, buf); oparms = (struct cifs_open_parms) { .tcon = tcon, .path = "", .desired_access = FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, 0), .fid = &fid, }; rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL, NULL); if (rc) return rc; rc = SMB311_posix_qfs_info(xid, tcon, 
fid.persistent_fid, fid.volatile_fid, buf); buf->f_type = SMB2_SUPER_MAGIC; SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); return rc; } static bool smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2) { return ob1->fid.persistent_fid == ob2->fid.persistent_fid && ob1->fid.volatile_fid == ob2->fid.volatile_fid; } static int smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u32 type, int lock, int unlock, bool wait) { if (unlock && !lock) type = SMB2_LOCKFLAG_UNLOCK; return SMB2_lock(xid, tlink_tcon(cfile->tlink), cfile->fid.persistent_fid, cfile->fid.volatile_fid, current->tgid, length, offset, type, wait); } static void smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid) { memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE); } static void smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid) { memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE); } static void smb2_new_lease_key(struct cifs_fid *fid) { generate_random_uuid(fid->lease_key); } static int smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, const char *search_name, struct dfs_info3_param **target_nodes, unsigned int *num_of_nodes, const struct nls_table *nls_codepage, int remap) { int rc; __le16 *utf16_path = NULL; int utf16_path_len = 0; struct cifs_tcon *tcon; struct fsctl_get_dfs_referral_req *dfs_req = NULL; struct get_dfs_referral_rsp *dfs_rsp = NULL; u32 dfs_req_size = 0, dfs_rsp_size = 0; int retry_count = 0; cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name); /* * Try to use the IPC tcon, otherwise just use any */ tcon = ses->tcon_ipc; if (tcon == NULL) { spin_lock(&cifs_tcp_ses_lock); tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon, tcon_list); if (tcon) tcon->tc_count++; spin_unlock(&cifs_tcp_ses_lock); } if (tcon == NULL) { cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", ses); rc = -ENOTCONN; goto out; } utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX, &utf16_path_len, nls_codepage, remap); if (!utf16_path) { rc = -ENOMEM; goto out; } dfs_req_size = sizeof(*dfs_req) + utf16_path_len; dfs_req = kzalloc(dfs_req_size, GFP_KERNEL); if (!dfs_req) { rc = -ENOMEM; goto out; } /* Highest DFS referral version understood */ dfs_req->MaxReferralLevel = DFS_VERSION; /* Path to resolve in an UTF-16 null-terminated string */ memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); do { rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_DFS_GET_REFERRALS, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char **)&dfs_rsp, &dfs_rsp_size); if (!is_retryable_error(rc)) break; usleep_range(512, 2048); } while (++retry_count < 5); if (rc) { if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP) cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc); goto out; } rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size, num_of_nodes, target_nodes, nls_codepage, remap, search_name, true /* is_unicode */); if (rc) { cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc); goto out; } out: if (tcon && !tcon->ipc) { /* ipc tcons are not refcounted */ spin_lock(&cifs_tcp_ses_lock); tcon->tc_count--; /* tc_count can never go negative */ WARN_ON(tcon->tc_count < 0); spin_unlock(&cifs_tcp_ses_lock); } kfree(utf16_path); kfree(dfs_req); kfree(dfs_rsp); return rc; } static int parse_reparse_posix(struct reparse_posix_data *symlink_buf, u32 plen, char **target_path, struct cifs_sb_info *cifs_sb) { unsigned int len; /* See 
MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */ len = le16_to_cpu(symlink_buf->ReparseDataLength); if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) { cifs_dbg(VFS, "%lld not a supported symlink type\n", le64_to_cpu(symlink_buf->InodeType)); return -EOPNOTSUPP; } *target_path = cifs_strndup_from_utf16( symlink_buf->PathBuffer, len, true, cifs_sb->local_nls); if (!(*target_path)) return -ENOMEM; convert_delimiter(*target_path, '/'); cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); return 0; } static int parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf, u32 plen, char **target_path, struct cifs_sb_info *cifs_sb) { unsigned int sub_len; unsigned int sub_offset; /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */ sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset); sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength); if (sub_offset + 20 > plen || sub_offset + sub_len + 20 > plen) { cifs_dbg(VFS, "srv returned malformed symlink buffer\n"); return -EIO; } *target_path = cifs_strndup_from_utf16( symlink_buf->PathBuffer + sub_offset, sub_len, true, cifs_sb->local_nls); if (!(*target_path)) return -ENOMEM; convert_delimiter(*target_path, '/'); cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); return 0; } static int parse_reparse_point(struct reparse_data_buffer *buf, u32 plen, char **target_path, struct cifs_sb_info *cifs_sb) { if (plen < sizeof(struct reparse_data_buffer)) { cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n", plen); return -EIO; } if (plen < le16_to_cpu(buf->ReparseDataLength) + sizeof(struct reparse_data_buffer)) { cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n", plen); return -EIO; } /* See MS-FSCC 2.1.2 */ switch (le32_to_cpu(buf->ReparseTag)) { case IO_REPARSE_TAG_NFS: return parse_reparse_posix( (struct reparse_posix_data *)buf, plen, target_path, cifs_sb); case IO_REPARSE_TAG_SYMLINK: return parse_reparse_symlink( (struct reparse_symlink_data_buffer *)buf, plen, target_path, cifs_sb); default: cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n", le32_to_cpu(buf->ReparseTag)); return -EOPNOTSUPP; } } static int smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, char **target_path, struct kvec *rsp_iov) { struct reparse_data_buffer *buf; struct smb2_ioctl_rsp *io = rsp_iov->iov_base; u32 plen = le32_to_cpu(io->OutputCount); cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); buf = (struct reparse_data_buffer *)((u8 *)io + le32_to_cpu(io->OutputOffset)); return parse_reparse_point(buf, plen, target_path, cifs_sb); } static int smb2_query_reparse_point(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u32 *tag, struct kvec *rsp, int *rsp_buftype) { struct smb2_compound_vars *vars; int rc; __le16 *utf16_path = NULL; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses); int flags = CIFS_CP_CREATE_CLOSE_OP; struct smb_rqst *rqst; int resp_buftype[3]; struct kvec *rsp_iov; struct smb2_ioctl_rsp *ioctl_rsp; struct reparse_data_buffer *reparse_buf; u32 plen; cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); if (!utf16_path) return -ENOMEM; resp_buftype[0] = resp_buftype[1] = 
resp_buftype[2] = CIFS_NO_BUFFER; vars = kzalloc(sizeof(*vars), GFP_KERNEL); if (!vars) { rc = -ENOMEM; goto out_free_path; } rqst = vars->rqst; rsp_iov = vars->rsp_iov; /* * setup smb2open - TODO add optimization to call cifs_get_readable_path * to see if there is a handle already open that we can use */ rqst[0].rq_iov = vars->open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = full_path, .desired_access = FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT), .fid = &fid, }; rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path); if (rc) goto query_rp_exit; smb2_set_next_command(tcon, &rqst[0]); /* IOCTL */ rqst[1].rq_iov = vars->io_iov; rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); if (rc) goto query_rp_exit; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); /* Close */ rqst[2].rq_iov = &vars->close_iov; rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) goto query_rp_exit; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, tcon->ses, server, flags, 3, rqst, resp_buftype, rsp_iov); ioctl_rsp = rsp_iov[1].iov_base; /* * Open was successful and we got an ioctl response. */ if (rc == 0) { /* See MS-FSCC 2.3.23 */ reparse_buf = (struct reparse_data_buffer *) ((char *)ioctl_rsp + le32_to_cpu(ioctl_rsp->OutputOffset)); plen = le32_to_cpu(ioctl_rsp->OutputCount); if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) > rsp_iov[1].iov_len) { cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n", plen); rc = -EIO; goto query_rp_exit; } *tag = le32_to_cpu(reparse_buf->ReparseTag); *rsp = rsp_iov[1]; *rsp_buftype = resp_buftype[1]; resp_buftype[1] = CIFS_NO_BUFFER; } query_rp_exit: SMB2_open_free(&rqst[0]); SMB2_ioctl_free(&rqst[1]); SMB2_close_free(&rqst[2]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); kfree(vars); out_free_path: kfree(utf16_path); return rc; } static struct cifs_ntsd * get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, u32 info) { struct cifs_ntsd *pntsd = NULL; unsigned int xid; int rc = -EOPNOTSUPP; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return ERR_CAST(tlink); xid = get_xid(); cifs_dbg(FYI, "trying to get acl\n"); rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid, cifsfid->volatile_fid, (void **)&pntsd, pacllen, info); free_xid(xid); cifs_put_tlink(tlink); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } static struct cifs_ntsd * get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, u32 *pacllen, u32 info) { struct cifs_ntsd *pntsd = NULL; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; unsigned int xid; int rc; struct cifs_tcon *tcon; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; __le16 *utf16_path; cifs_dbg(FYI, "get smb3 acl for path %s\n", path); if (IS_ERR(tlink)) return ERR_CAST(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) { rc = -ENOMEM; free_xid(xid); return ERR_PTR(rc); } oparms = 
(struct cifs_open_parms) { .tcon = tcon, .path = path, .desired_access = READ_CONTROL, .disposition = FILE_OPEN, /* * When querying an ACL, even if the file is a symlink * we want to open the source not the target, and so * the protocol requires that the client specify this * flag when opening a reparse point */ .create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT, .fid = &fid, }; if (info & SACL_SECINFO) oparms.desired_access |= SYSTEM_SECURITY; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); kfree(utf16_path); if (!rc) { rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid, fid.volatile_fid, (void **)&pntsd, pacllen, info); SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } cifs_put_tlink(tlink); free_xid(xid); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } static int set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen, struct inode *inode, const char *path, int aclflag) { u8 oplock = SMB2_OPLOCK_LEVEL_NONE; unsigned int xid; int rc, access_flags = 0; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; __le16 *utf16_path; cifs_dbg(FYI, "set smb3 acl for path %s\n", path); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP) access_flags |= WRITE_OWNER; if (aclflag & CIFS_ACL_SACL) access_flags |= SYSTEM_SECURITY; if (aclflag & CIFS_ACL_DACL) access_flags |= WRITE_DAC; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) { rc = -ENOMEM; free_xid(xid); return rc; } oparms = (struct cifs_open_parms) { .tcon = tcon, .desired_access = access_flags, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); kfree(utf16_path); if (!rc) { rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid, fid.volatile_fid, pnntsd, acllen, aclflag); SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } cifs_put_tlink(tlink); free_xid(xid); return rc; } /* Retrieve an ACL from the server */ static struct cifs_ntsd * get_smb2_acl(struct cifs_sb_info *cifs_sb, struct inode *inode, const char *path, u32 *pacllen, u32 info) { struct cifs_ntsd *pntsd = NULL; struct cifsFileInfo *open_file = NULL; if (inode && !(info & SACL_SECINFO)) open_file = find_readable_file(CIFS_I(inode), true); if (!open_file || (info & SACL_SECINFO)) return get_smb2_acl_by_path(cifs_sb, path, pacllen, info); pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info); cifsFileInfo_put(open_file); return pntsd; } static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len, unsigned int xid) { struct cifsFileInfo *cfile = file->private_data; struct file_zero_data_information fsctl_buf; cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, (char *)&fsctl_buf, sizeof(struct file_zero_data_information), 0, NULL, NULL); } static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len, bool keep_size) { struct cifs_ses *ses = tcon->ses; struct inode *inode = file_inode(file); 
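	/*
	 * A sketch of the user-space trigger for this path (illustrative;
	 * smb3_fallocate() below routes both variants here):
	 *
	 *	// zero [off, off + len) without changing i_size
	 *	fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, off, len);
	 *	// zero and, if needed, extend i_size to off + len
	 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);
	 *
	 * Either way the zeroing itself happens server-side through
	 * FSCTL_SET_ZERO_DATA (see smb3_zero_data() above).
	 */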
struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifsFileInfo *cfile = file->private_data; long rc; unsigned int xid; __le64 eof; xid = get_xid(); trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); inode_lock(inode); filemap_invalidate_lock(inode->i_mapping); /* * We zero the range through ioctl, so we need to remove the page caches * first, otherwise the data may be inconsistent with the server. */ truncate_pagecache_range(inode, offset, offset + len - 1); /* if file not oplocked can't be sure whether asking to extend size */ rc = -EOPNOTSUPP; if (keep_size == false && !CIFS_CACHE_READ(cifsi)) goto zero_range_exit; rc = smb3_zero_data(file, tcon, offset, len, xid); if (rc < 0) goto zero_range_exit; /* * do we also need to change the size of the file? */ if (keep_size == false && i_size_read(inode) < offset + len) { eof = cpu_to_le64(offset + len); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); } zero_range_exit: filemap_invalidate_unlock(inode->i_mapping); inode_unlock(inode); free_xid(xid); if (rc) trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len, rc); else trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); return rc; } static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; struct file_zero_data_information fsctl_buf; long rc; unsigned int xid; __u8 set_sparse = 1; xid = get_xid(); inode_lock(inode); /* Need to make file sparse, if not already, before freeing range. */ /* Consider adding equivalent for compressed since it could also work */ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { rc = -EOPNOTSUPP; goto out; } filemap_invalidate_lock(inode->i_mapping); /* * We implement the punch hole through ioctl, so we need to remove the page * caches first, otherwise the data may be inconsistent with the server.
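 *
 * (Reached from user space roughly as, illustratively:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * which smb3_fallocate() routes here; the hole itself is produced
 * server-side by the FSCTL_SET_ZERO_DATA call below, on a file that was
 * just made sparse.)
 *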
*/ truncate_pagecache_range(inode, offset, offset + len - 1); cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, (char *)&fsctl_buf, sizeof(struct file_zero_data_information), CIFSMaxBufSize, NULL, NULL); filemap_invalidate_unlock(inode->i_mapping); out: inode_unlock(inode); free_xid(xid); return rc; } static int smb3_simple_fallocate_write_range(unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, loff_t off, loff_t len, char *buf) { struct cifs_io_parms io_parms = {0}; int nbytes; int rc = 0; struct kvec iov[2]; io_parms.netfid = cfile->fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.persistent_fid = cfile->fid.persistent_fid; io_parms.volatile_fid = cfile->fid.volatile_fid; while (len) { io_parms.offset = off; io_parms.length = len; if (io_parms.length > SMB2_MAX_BUFFER_SIZE) io_parms.length = SMB2_MAX_BUFFER_SIZE; /* iov[0] is reserved for smb header */ iov[1].iov_base = buf; iov[1].iov_len = io_parms.length; rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1); if (rc) break; if (nbytes > len) return -EINVAL; buf += nbytes; off += nbytes; len -= nbytes; } return rc; } static int smb3_simple_fallocate_range(unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, loff_t off, loff_t len) { struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data; u32 out_data_len; char *buf = NULL; loff_t l; int rc; in_data.file_offset = cpu_to_le64(off); in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); if (rc) goto out; buf = kzalloc(1024 * 1024, GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; goto out; } tmp_data = out_data; while (len) { /* * The rest of the region is unmapped so write it all. */ if (out_data_len == 0) { rc = smb3_simple_fallocate_write_range(xid, tcon, cfile, off, len, buf); goto out; } if (out_data_len < sizeof(struct file_allocated_range_buffer)) { rc = -EINVAL; goto out; } if (off < le64_to_cpu(tmp_data->file_offset)) { /* * We are at a hole. Write until the end of the region * or until the next allocated data, * whichever comes next. */ l = le64_to_cpu(tmp_data->file_offset) - off; if (len < l) l = len; rc = smb3_simple_fallocate_write_range(xid, tcon, cfile, off, l, buf); if (rc) goto out; off = off + l; len = len - l; if (len == 0) goto out; } /* * We are at a section of allocated data, just skip forward * until the end of the data or the end of the region * we are supposed to fallocate, whichever comes first. 
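 *
 * Worked example (illustrative offsets): fallocating [0, 300K) over a
 * file whose allocated ranges are [100K, 150K) and [200K, 220K) writes
 * zeroes to [0, 100K), skips [100K, 150K), writes [150K, 200K), skips
 * [200K, 220K), then writes the tail [220K, 300K).
 *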
*/ l = le64_to_cpu(tmp_data->length); if (len < l) l = len; off += l; len -= l; tmp_data = &tmp_data[1]; out_data_len -= sizeof(struct file_allocated_range_buffer); } out: kfree(out_data); kfree(buf); return rc; } static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, loff_t off, loff_t len, bool keep_size) { struct inode *inode; struct cifsInodeInfo *cifsi; struct cifsFileInfo *cfile = file->private_data; long rc = -EOPNOTSUPP; unsigned int xid; __le64 eof; xid = get_xid(); inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, off, len); /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) if (keep_size == false) { trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, off, len, rc); free_xid(xid); return rc; } /* * Extending the file */ if ((keep_size == false) && i_size_read(inode) < off + len) { rc = inode_newsize_ok(inode, off + len); if (rc) goto out; if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) smb2_set_sparse(xid, tcon, cfile, inode, false); eof = cpu_to_le64(off + len); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); if (rc == 0) { cifsi->server_eof = off + len; cifs_setsize(inode, off + len); cifs_truncate_page(inode->i_mapping, inode->i_size); truncate_setsize(inode, off + len); } goto out; } /* * Files are non-sparse by default, so falloc may be a no-op. * Must check if the file is sparse. If not sparse, and since we are not * extending, there is no need to do anything since the file is already allocated */ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) { rc = 0; goto out; } if (keep_size == true) { /* * We cannot preallocate pages beyond the end of the file * in SMB2 */ if (off >= i_size_read(inode)) { rc = 0; goto out; } /* * For fallocates that are partially beyond the end of file, * clamp len so we only fallocate up to the end of file. */ if (off + len > i_size_read(inode)) { len = i_size_read(inode) - off; } } if ((keep_size == true) || (i_size_read(inode) >= off + len)) { /* * At this point, we are trying to fallocate internal * regions of a sparse file. Since smb2 does not have a * fallocate command we have two options for how to emulate this. * We can either turn the entire file non-sparse, * which we only do if the fallocate is for virtually * the whole file, or we can overwrite the region with zeroes * using SMB2_write, which could be prohibitively expensive * if len is large. */ /* * We are only trying to fallocate a small region so * just write it with zeroes. */ if (len <= 1024 * 1024) { rc = smb3_simple_fallocate_range(xid, tcon, cfile, off, len); goto out; } /* * Check if falloc starts within first few pages of file * and ends within a few pages of the end of file to * ensure that most of file is being forced to be * fallocated now. If so then setting the whole file non-sparse, * i.e. potentially making a few extra pages at the beginning * or end of the file non-sparse via set_sparse, is harmless.
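 *
 * E.g. (illustrative): on a 1 GiB sparse file, fallocating
 * [4096, 1 GiB - 4096) passes the check below and the whole file is
 * simply marked non-sparse, while fallocating just [0, 2 MiB) fails it
 * (the region ends nowhere near EOF) and returns -EOPNOTSUPP.
 *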
*/ if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) { rc = -EOPNOTSUPP; goto out; } } smb2_set_sparse(xid, tcon, cfile, inode, false); rc = 0; out: if (rc) trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, off, len, rc); else trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, off, len); free_xid(xid); return rc; } static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon, loff_t off, loff_t len) { int rc; unsigned int xid; struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; struct cifsInodeInfo *cifsi = CIFS_I(inode); __le64 eof; loff_t old_eof; xid = get_xid(); inode_lock(inode); old_eof = i_size_read(inode); if ((off >= old_eof) || off + len >= old_eof) { rc = -EINVAL; goto out; } filemap_invalidate_lock(inode->i_mapping); rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1); if (rc < 0) goto out_2; truncate_pagecache_range(inode, off, old_eof); rc = smb2_copychunk_range(xid, cfile, cfile, off + len, old_eof - off - len, off); if (rc < 0) goto out_2; eof = cpu_to_le64(old_eof - len); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); if (rc < 0) goto out_2; rc = 0; cifsi->server_eof = i_size_read(inode) - len; truncate_setsize(inode, cifsi->server_eof); fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof); out_2: filemap_invalidate_unlock(inode->i_mapping); out: inode_unlock(inode); free_xid(xid); return rc; } static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon, loff_t off, loff_t len) { int rc; unsigned int xid; struct cifsFileInfo *cfile = file->private_data; struct inode *inode = file_inode(file); __le64 eof; __u64 count, old_eof; xid = get_xid(); inode_lock(inode); old_eof = i_size_read(inode); if (off >= old_eof) { rc = -EINVAL; goto out; } count = old_eof - off; eof = cpu_to_le64(old_eof + len); filemap_invalidate_lock(inode->i_mapping); rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1); if (rc < 0) goto out_2; truncate_pagecache_range(inode, off, old_eof); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); if (rc < 0) goto out_2; rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len); if (rc < 0) goto out_2; rc = smb3_zero_data(file, tcon, off, len, xid); if (rc < 0) goto out_2; rc = 0; out_2: filemap_invalidate_unlock(inode->i_mapping); out: inode_unlock(inode); free_xid(xid); return rc; } static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence) { struct cifsFileInfo *wrcfile, *cfile = file->private_data; struct cifsInodeInfo *cifsi; struct inode *inode; int rc = 0; struct file_allocated_range_buffer in_data, *out_data = NULL; u32 out_data_len; unsigned int xid; if (whence != SEEK_HOLE && whence != SEEK_DATA) return generic_file_llseek(file, offset, whence); inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); if (offset < 0 || offset >= i_size_read(inode)) return -ENXIO; xid = get_xid(); /* * We need to be sure that all dirty pages are written as they * might fill holes on the server. * Note that we also MUST flush any written pages since at least * some servers (Windows2016) will not reflect recent writes in * QUERY_ALLOCATED_RANGES until SMB2_flush is called. 
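 *
 * User-space view (illustrative): after writing 4096 bytes at offset
 * 1048576 into an otherwise sparse file,
 *
 *	lseek(fd, 0, SEEK_DATA);	// expect 1048576 (start of data)
 *	lseek(fd, 1048576, SEEK_HOLE);	// expect ~1048576 + 4096
 *
 * the flush below is what makes the freshly written extent visible to
 * QUERY_ALLOCATED_RANGES (offsets may be rounded by the server).
 *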
*/ wrcfile = find_writable_file(cifsi, FIND_WR_ANY); if (wrcfile) { filemap_write_and_wait(inode->i_mapping); smb2_flush_file(xid, tcon, &wrcfile->fid); cifsFileInfo_put(wrcfile); } if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) { if (whence == SEEK_HOLE) offset = i_size_read(inode); goto lseek_exit; } in_data.file_offset = cpu_to_le64(offset); in_data.length = cpu_to_le64(i_size_read(inode)); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); if (rc == -E2BIG) rc = 0; if (rc) goto lseek_exit; if (whence == SEEK_HOLE && out_data_len == 0) goto lseek_exit; if (whence == SEEK_DATA && out_data_len == 0) { rc = -ENXIO; goto lseek_exit; } if (out_data_len < sizeof(struct file_allocated_range_buffer)) { rc = -EINVAL; goto lseek_exit; } if (whence == SEEK_DATA) { offset = le64_to_cpu(out_data->file_offset); goto lseek_exit; } if (offset < le64_to_cpu(out_data->file_offset)) goto lseek_exit; offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length); lseek_exit: free_xid(xid); kfree(out_data); if (!rc) return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); else return rc; } static int smb3_fiemap(struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct fiemap_extent_info *fei, u64 start, u64 len) { unsigned int xid; struct file_allocated_range_buffer in_data, *out_data; u32 out_data_len; int i, num, rc, flags, last_blob; u64 next; rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0); if (rc) return rc; xid = get_xid(); again: in_data.file_offset = cpu_to_le64(start); in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); if (rc == -E2BIG) { last_blob = 0; rc = 0; } else last_blob = 1; if (rc) goto out; if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) { rc = -EINVAL; goto out; } if (out_data_len % sizeof(struct file_allocated_range_buffer)) { rc = -EINVAL; goto out; } num = out_data_len / sizeof(struct file_allocated_range_buffer); for (i = 0; i < num; i++) { flags = 0; if (i == num - 1 && last_blob) flags |= FIEMAP_EXTENT_LAST; rc = fiemap_fill_next_extent(fei, le64_to_cpu(out_data[i].file_offset), le64_to_cpu(out_data[i].file_offset), le64_to_cpu(out_data[i].length), flags); if (rc < 0) goto out; if (rc == 1) { rc = 0; goto out; } } if (!last_blob) { next = le64_to_cpu(out_data[num - 1].file_offset) + le64_to_cpu(out_data[num - 1].length); len = len - (next - start); start = next; goto again; } out: free_xid(xid); kfree(out_data); return rc; } static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, loff_t off, loff_t len) { /* KEEP_SIZE already checked for by do_fallocate */ if (mode & FALLOC_FL_PUNCH_HOLE) return smb3_punch_hole(file, tcon, off, len); else if (mode & FALLOC_FL_ZERO_RANGE) { if (mode & FALLOC_FL_KEEP_SIZE) return smb3_zero_range(file, tcon, off, len, true); return smb3_zero_range(file, tcon, off, len, false); } else if (mode == FALLOC_FL_KEEP_SIZE) return smb3_simple_falloc(file, tcon, off, len, true); else if (mode == FALLOC_FL_COLLAPSE_RANGE) return smb3_collapse_range(file, tcon, off, len); else if (mode == FALLOC_FL_INSERT_RANGE) return smb3_insert_range(file, tcon, off, len); else if (mode == 0) return 
smb3_simple_falloc(file, tcon, off, len, false); return -EOPNOTSUPP; } static void smb2_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache); static void smb3_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { unsigned int old_state = cinode->oplock; unsigned int old_epoch = cinode->epoch; unsigned int new_state; if (epoch > old_epoch) { smb21_set_oplock_level(cinode, oplock, 0, NULL); cinode->epoch = epoch; } new_state = cinode->oplock; *purge_cache = false; if ((old_state & CIFS_CACHE_READ_FLG) != 0 && (new_state & CIFS_CACHE_READ_FLG) == 0) *purge_cache = true; else if (old_state == new_state && (epoch - old_epoch > 1)) *purge_cache = true; } static void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { oplock &= 0xFF; cinode->lease_granted = false; if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) return; if (oplock == SMB2_OPLOCK_LEVEL_BATCH) { cinode->oplock = CIFS_CACHE_RHW_FLG; cifs_dbg(FYI, "Batch Oplock granted on inode %p\n", &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_RW_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_II) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", &cinode->netfs.inode); } else cinode->oplock = 0; } static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { char message[5] = {0}; unsigned int new_oplock = 0; oplock &= 0xFF; cinode->lease_granted = true; if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) return; /* Check if the server granted an oplock rather than a lease */ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE) return smb2_set_oplock_level(cinode, oplock, epoch, purge_cache); if (oplock & SMB2_LEASE_READ_CACHING_HE) { new_oplock |= CIFS_CACHE_READ_FLG; strcat(message, "R"); } if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) { new_oplock |= CIFS_CACHE_HANDLE_FLG; strcat(message, "H"); } if (oplock & SMB2_LEASE_WRITE_CACHING_HE) { new_oplock |= CIFS_CACHE_WRITE_FLG; strcat(message, "W"); } if (!new_oplock) strncpy(message, "None", sizeof(message)); cinode->oplock = new_oplock; cifs_dbg(FYI, "%s Lease granted on inode %p\n", message, &cinode->netfs.inode); } static void smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { unsigned int old_oplock = cinode->oplock; smb21_set_oplock_level(cinode, oplock, epoch, purge_cache); if (purge_cache) { *purge_cache = false; if (old_oplock == CIFS_CACHE_READ_FLG) { if (cinode->oplock == CIFS_CACHE_READ_FLG && (epoch - cinode->epoch > 0)) *purge_cache = true; else if (cinode->oplock == CIFS_CACHE_RH_FLG && (epoch - cinode->epoch > 1)) *purge_cache = true; else if (cinode->oplock == CIFS_CACHE_RHW_FLG && (epoch - cinode->epoch > 1)) *purge_cache = true; else if (cinode->oplock == 0 && (epoch - cinode->epoch > 0)) *purge_cache = true; } else if (old_oplock == CIFS_CACHE_RH_FLG) { if (cinode->oplock == CIFS_CACHE_RH_FLG && (epoch - cinode->epoch > 0)) *purge_cache = true; else if (cinode->oplock == CIFS_CACHE_RHW_FLG && (epoch - cinode->epoch > 1)) *purge_cache = 
true; } cinode->epoch = epoch; } } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static bool smb2_is_read_op(__u32 oplock) { return oplock == SMB2_OPLOCK_LEVEL_II; } #endif /* CIFS_ALLOW_INSECURE_LEGACY */ static bool smb21_is_read_op(__u32 oplock) { return (oplock & SMB2_LEASE_READ_CACHING_HE) && !(oplock & SMB2_LEASE_WRITE_CACHING_HE); } static __le32 map_oplock_to_lease(u8 oplock) { if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_II) return SMB2_LEASE_READ_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_BATCH) return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_WRITE_CACHING_LE; return 0; } static char * smb2_create_lease_buf(u8 *lease_key, u8 oplock) { struct create_lease *buf; buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL); if (!buf) return NULL; memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_lease, lcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_lease, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */ buf->Name[0] = 'R'; buf->Name[1] = 'q'; buf->Name[2] = 'L'; buf->Name[3] = 's'; return (char *)buf; } static char * smb3_create_lease_buf(u8 *lease_key, u8 oplock) { struct create_lease_v2 *buf; buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL); if (!buf) return NULL; memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_lease_v2, lcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_lease_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */ buf->Name[0] = 'R'; buf->Name[1] = 'q'; buf->Name[2] = 'L'; buf->Name[3] = 's'; return (char *)buf; } static __u8 smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) { struct create_lease *lc = (struct create_lease *)buf; *epoch = 0; /* not used */ if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) return SMB2_OPLOCK_LEVEL_NOCHANGE; return le32_to_cpu(lc->lcontext.LeaseState); } static __u8 smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) { struct create_lease_v2 *lc = (struct create_lease_v2 *)buf; *epoch = le16_to_cpu(lc->lcontext.Epoch); if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) return SMB2_OPLOCK_LEVEL_NOCHANGE; if (lease_key) memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); return le32_to_cpu(lc->lcontext.LeaseState); } static unsigned int smb2_wp_retry_size(struct inode *inode) { return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize, SMB2_MAX_BUFFER_SIZE); } static bool smb2_dir_needs_close(struct cifsFileInfo *cfile) { return !cfile->invalidHandle; } static void fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, struct smb_rqst *old_rq, __le16 cipher_type) { struct smb2_hdr *shdr = (struct smb2_hdr *)old_rq->rq_iov[0].iov_base; memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr)); tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM; tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len); tr_hdr->Flags = cpu_to_le16(0x01); if ((cipher_type == 
SMB2_ENCRYPTION_AES128_GCM) || (cipher_type == SMB2_ENCRYPTION_AES256_GCM)) get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE); else get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE); memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8); } static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst, int num_rqst, const u8 *sig, u8 **iv, struct aead_request **req, struct sg_table *sgt, unsigned int *num_sgs, size_t *sensitive_size) { unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm); unsigned int iv_size = crypto_aead_ivsize(tfm); unsigned int len; u8 *p; *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig); if (IS_ERR_VALUE((long)(int)*num_sgs)) return ERR_PTR(*num_sgs); len = iv_size; len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); len += req_size; len = ALIGN(len, __alignof__(struct scatterlist)); len += array_size(*num_sgs, sizeof(struct scatterlist)); *sensitive_size = len; p = kvzalloc(len, GFP_NOFS); if (!p) return ERR_PTR(-ENOMEM); *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1); *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, crypto_tfm_ctx_alignment()); sgt->sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, __alignof__(struct scatterlist)); return p; } static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst, int num_rqst, const u8 *sig, u8 **iv, struct aead_request **req, struct scatterlist **sgl, size_t *sensitive_size) { struct sg_table sgtable = {}; unsigned int skip, num_sgs, i, j; ssize_t rc; void *p; p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs, sensitive_size); if (IS_ERR(p)) return ERR_CAST(p); sg_init_marker(sgtable.sgl, num_sgs); /* * The first rqst has a transform header where the * first 20 bytes are not part of the encrypted blob. */ skip = 20; for (i = 0; i < num_rqst; i++) { struct iov_iter *iter = &rqst[i].rq_iter; size_t count = iov_iter_count(iter); for (j = 0; j < rqst[i].rq_nvec; j++) { cifs_sg_set_buf(&sgtable, rqst[i].rq_iov[j].iov_base + skip, rqst[i].rq_iov[j].iov_len - skip); /* See the above comment on the 'skip' assignment */ skip = 0; } sgtable.orig_nents = sgtable.nents; rc = extract_iter_to_sg(iter, count, &sgtable, num_sgs - sgtable.nents, 0); iov_iter_revert(iter, rc); sgtable.orig_nents = sgtable.nents; } cifs_sg_set_buf(&sgtable, sig, SMB2_SIGNATURE_SIZE); sg_mark_end(&sgtable.sgl[sgtable.nents - 1]); *sgl = sgtable.sgl; return p; } static int smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) { struct TCP_Server_Info *pserver; struct cifs_ses *ses; u8 *ses_enc_key; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { if (ses->Suid == ses_id) { spin_lock(&ses->ses_lock); ses_enc_key = enc ? ses->smb3encryptionkey : ses->smb3decryptionkey; memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); spin_unlock(&ses->ses_lock); spin_unlock(&cifs_tcp_ses_lock); return 0; } } spin_unlock(&cifs_tcp_ses_lock); trace_smb3_ses_not_found(ses_id); return -EAGAIN; } /* * Encrypt or decrypt @rqst message. @rqst[0] has the following format: * iov[0] - transform header (associate data), * iov[1-N] - SMB2 header and pages - data to encrypt. * On success return encrypted data in iov[1-N] and pages, leave iov[0] * untouched. 
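 *
 * Transform header layout (52 bytes, per MS-SMB2 2.2.41):
 *	ProtocolId(4) Signature(16) Nonce(16) OriginalMessageSize(4)
 *	Reserved(2) Flags(2) SessionId(8)
 * Only the 32 bytes from Nonce onward are associated data for the AEAD;
 * the leading 20 bytes (ProtocolId + Signature) are excluded, which is
 * where the "- 20" in the assoc_data_len calculation below comes from.
 *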
*/ static int crypt_message(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst, int enc) { struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base; unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20; int rc = 0; struct scatterlist *sg; u8 sign[SMB2_SIGNATURE_SIZE] = {}; u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; u8 *iv; DECLARE_CRYPTO_WAIT(wait); struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); void *creq; size_t sensitive_size; rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key); if (rc) { cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__, enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId)); return rc; } rc = smb3_crypto_aead_allocate(server); if (rc) { cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__); return rc; } tfm = enc ? server->secmech.enc : server->secmech.dec; if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); else rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE); if (rc) { cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); return rc; } rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE); if (rc) { cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc); return rc; } creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg, &sensitive_size); if (IS_ERR(creq)) return PTR_ERR(creq); if (!enc) { memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); crypt_len += SMB2_SIGNATURE_SIZE; } if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE); else { iv[0] = 3; memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE); } aead_request_set_tfm(req, tfm); aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); kvfree_sensitive(creq, sensitive_size); return rc; } /* * Clear a read buffer, discarding the folios which have XA_MARK_0 set. */ static void cifs_clear_xarray_buffer(struct xarray *buffer) { struct folio *folio; XA_STATE(xas, buffer, 0); rcu_read_lock(); xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) { folio_put(folio); } rcu_read_unlock(); xa_destroy(buffer); } void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst) { int i; for (i = 0; i < num_rqst; i++) if (!xa_empty(&rqst[i].rq_buffer)) cifs_clear_xarray_buffer(&rqst[i].rq_buffer); } /* * This function will initialize new_rq and encrypt the content. * The first entry, new_rq[0], only contains a single iov which contains * a smb2_transform_hdr and is pre-allocated by the caller. * This function then populates new_rq[1+] with the content from old_rq[0+]. * * The end result is an array of smb_rqst structures where the first structure * only contains a single iov for the transform header which we then can pass * to crypt_message().
* * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests */ static int smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *new_rq, struct smb_rqst *old_rq) { struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base; struct page *page; unsigned int orig_len = 0; int i, j; int rc = -ENOMEM; for (i = 1; i < num_rqst; i++) { struct smb_rqst *old = &old_rq[i - 1]; struct smb_rqst *new = &new_rq[i]; struct xarray *buffer = &new->rq_buffer; size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0; orig_len += smb_rqst_len(server, old); new->rq_iov = old->rq_iov; new->rq_nvec = old->rq_nvec; xa_init(buffer); if (size > 0) { unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE); for (j = 0; j < npages; j++) { void *o; rc = -ENOMEM; page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); if (!page) goto err_free; page->index = j; o = xa_store(buffer, j, page, GFP_KERNEL); if (xa_is_err(o)) { rc = xa_err(o); put_page(page); goto err_free; } xa_set_mark(buffer, j, XA_MARK_0); seg = min_t(size_t, size - copied, PAGE_SIZE); if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) { rc = -EFAULT; goto err_free; } copied += seg; } iov_iter_xarray(&new->rq_iter, ITER_SOURCE, buffer, 0, size); new->rq_iter_size = size; } } /* fill the 1st iov with a transform header */ fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type); rc = crypt_message(server, num_rqst, new_rq, 1); cifs_dbg(FYI, "Encrypt message returned %d\n", rc); if (rc) goto err_free; return rc; err_free: smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]); return rc; } static int smb3_is_transform_hdr(void *buf) { struct smb2_transform_hdr *trhdr = buf; return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM; } static int decrypt_raw_data(struct TCP_Server_Info *server, char *buf, unsigned int buf_data_size, struct iov_iter *iter, bool is_offloaded) { struct kvec iov[2]; struct smb_rqst rqst = {NULL}; size_t iter_size = 0; int rc; iov[0].iov_base = buf; iov[0].iov_len = sizeof(struct smb2_transform_hdr); iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr); iov[1].iov_len = buf_data_size; rqst.rq_iov = iov; rqst.rq_nvec = 2; if (iter) { rqst.rq_iter = *iter; rqst.rq_iter_size = iov_iter_count(iter); iter_size = iov_iter_count(iter); } rc = crypt_message(server, 1, &rqst, 0); cifs_dbg(FYI, "Decrypt message returned %d\n", rc); if (rc) return rc; memmove(buf, iov[1].iov_base, buf_data_size); if (!is_offloaded) server->total_read = buf_data_size + iter_size; return rc; } static int cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size, unsigned int skip, struct iov_iter *iter) { struct page *page; unsigned long index; xa_for_each(pages, index, page) { size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size); n = copy_page_to_iter(page, skip, len, iter); if (n != len) { cifs_dbg(VFS, "%s: something went wrong\n", __func__); return -EIO; } data_size -= n; skip = 0; } return 0; } static int handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, char *buf, unsigned int buf_len, struct xarray *pages, unsigned int pages_len, bool is_offloaded) { unsigned int data_offset; unsigned int data_len; unsigned int cur_off; unsigned int cur_page_idx; unsigned int pad_len; struct cifs_readdata *rdata = mid->callback_data; struct smb2_hdr *shdr = (struct smb2_hdr *)buf; int length; bool use_rdma_mr = false; if (shdr->Command != SMB2_READ) { cifs_server_dbg(VFS, "only big read responses are 
supported\n"); return -EOPNOTSUPP; } if (server->ops->is_session_expired && server->ops->is_session_expired(buf)) { if (!is_offloaded) cifs_reconnect(server, true); return -1; } if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server)) return -1; /* set up first two iov to get credits */ rdata->iov[0].iov_base = buf; rdata->iov[0].iov_len = 0; rdata->iov[1].iov_base = buf; rdata->iov[1].iov_len = min_t(unsigned int, buf_len, server->vals->read_rsp_size); cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", rdata->iov[0].iov_base, rdata->iov[0].iov_len); cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", rdata->iov[1].iov_base, rdata->iov[1].iov_len); rdata->result = server->ops->map_error(buf, true); if (rdata->result != 0) { cifs_dbg(FYI, "%s: server returned error %d\n", __func__, rdata->result); /* normal error on read response */ if (is_offloaded) mid->mid_state = MID_RESPONSE_RECEIVED; else dequeue_mid(mid, false); return 0; } data_offset = server->ops->read_data_offset(buf); #ifdef CONFIG_CIFS_SMB_DIRECT use_rdma_mr = rdata->mr; #endif data_len = server->ops->read_data_length(buf, use_rdma_mr); if (data_offset < server->vals->read_rsp_size) { /* * win2k8 sometimes sends an offset of 0 when the read * is beyond the EOF. Treat it as if the data starts just after * the header. */ cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", __func__, data_offset); data_offset = server->vals->read_rsp_size; } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { /* data_offset is beyond the end of smallbuf */ cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", __func__, data_offset); rdata->result = -EIO; if (is_offloaded) mid->mid_state = MID_RESPONSE_MALFORMED; else dequeue_mid(mid, rdata->result); return 0; } pad_len = data_offset - server->vals->read_rsp_size; if (buf_len <= data_offset) { /* read response payload is in pages */ cur_page_idx = pad_len / PAGE_SIZE; cur_off = pad_len % PAGE_SIZE; if (cur_page_idx != 0) { /* data offset is beyond the 1st page of response */ cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n", __func__, data_offset); rdata->result = -EIO; if (is_offloaded) mid->mid_state = MID_RESPONSE_MALFORMED; else dequeue_mid(mid, rdata->result); return 0; } if (data_len > pages_len - pad_len) { /* data_len is corrupt -- discard frame */ rdata->result = -EIO; if (is_offloaded) mid->mid_state = MID_RESPONSE_MALFORMED; else dequeue_mid(mid, rdata->result); return 0; } /* Copy the data to the output I/O iterator. 
*/ rdata->result = cifs_copy_pages_to_iter(pages, pages_len, cur_off, &rdata->iter); if (rdata->result != 0) { if (is_offloaded) mid->mid_state = MID_RESPONSE_MALFORMED; else dequeue_mid(mid, rdata->result); return 0; } rdata->got_bytes = pages_len; } else if (buf_len >= data_offset + data_len) { /* read response payload is in buf */ WARN_ONCE(pages && !xa_empty(pages), "read data can be either in buf or in pages"); length = copy_to_iter(buf + data_offset, data_len, &rdata->iter); if (length < 0) return length; rdata->got_bytes = data_len; } else { /* read response payload cannot be in both buf and pages */ WARN_ONCE(1, "buf can not contain only a part of read data"); rdata->result = -EIO; if (is_offloaded) mid->mid_state = MID_RESPONSE_MALFORMED; else dequeue_mid(mid, rdata->result); return 0; } if (is_offloaded) mid->mid_state = MID_RESPONSE_RECEIVED; else dequeue_mid(mid, false); return 0; } struct smb2_decrypt_work { struct work_struct decrypt; struct TCP_Server_Info *server; struct xarray buffer; char *buf; unsigned int len; }; static void smb2_decrypt_offload(struct work_struct *work) { struct smb2_decrypt_work *dw = container_of(work, struct smb2_decrypt_work, decrypt); int rc; struct mid_q_entry *mid; struct iov_iter iter; iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len); rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size, &iter, true); if (rc) { cifs_dbg(VFS, "error decrypting rc=%d\n", rc); goto free_pages; } dw->server->lstrp = jiffies; mid = smb2_find_dequeue_mid(dw->server, dw->buf); if (mid == NULL) cifs_dbg(FYI, "mid not found\n"); else { mid->decrypted = true; rc = handle_read_data(dw->server, mid, dw->buf, dw->server->vals->read_rsp_size, &dw->buffer, dw->len, true); if (rc >= 0) { #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif if (dw->server->ops->is_network_name_deleted) dw->server->ops->is_network_name_deleted(dw->buf, dw->server); mid->callback(mid); } else { spin_lock(&dw->server->srv_lock); if (dw->server->tcpStatus == CifsNeedReconnect) { spin_lock(&dw->server->mid_lock); mid->mid_state = MID_RETRY_NEEDED; spin_unlock(&dw->server->mid_lock); spin_unlock(&dw->server->srv_lock); mid->callback(mid); } else { spin_lock(&dw->server->mid_lock); mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_flags &= ~(MID_DELETED); list_add_tail(&mid->qhead, &dw->server->pending_mid_q); spin_unlock(&dw->server->mid_lock); spin_unlock(&dw->server->srv_lock); } } release_mid(mid); } free_pages: cifs_clear_xarray_buffer(&dw->buffer); cifs_small_buf_release(dw->buf); kfree(dw); } static int receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid, int *num_mids) { struct page *page; char *buf = server->smallbuf; struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; struct iov_iter iter; unsigned int len, npages; unsigned int buflen = server->pdu_size; int rc; int i = 0; struct smb2_decrypt_work *dw; dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL); if (!dw) return -ENOMEM; xa_init(&dw->buffer); INIT_WORK(&dw->decrypt, smb2_decrypt_offload); dw->server = server; *num_mids = 1; len = min_t(unsigned int, buflen, server->vals->read_rsp_size + sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1; rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len); if (rc < 0) goto free_dw; server->total_read += rc; len = le32_to_cpu(tr_hdr->OriginalMessageSize) - server->vals->read_rsp_size; dw->len = len; npages = DIV_ROUND_UP(len, PAGE_SIZE); rc = -ENOMEM; for (; i < npages; i++) { 
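/* One page per PAGE_SIZE chunk of the decrypted payload: each page is stored in the xarray and tagged with XA_MARK_0 so cifs_clear_xarray_buffer() can later find and release it. */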
void *old; page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); if (!page) goto discard_data; page->index = i; old = xa_store(&dw->buffer, i, page, GFP_KERNEL); if (xa_is_err(old)) { rc = xa_err(old); put_page(page); goto discard_data; } xa_set_mark(&dw->buffer, i, XA_MARK_0); } iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE); /* Read the data into the buffer and clear excess bufferage. */ rc = cifs_read_iter_from_socket(server, &iter, dw->len); if (rc < 0) goto discard_data; server->total_read += rc; if (rc < npages * PAGE_SIZE) iov_iter_zero(npages * PAGE_SIZE - rc, &iter); iov_iter_revert(&iter, npages * PAGE_SIZE); iov_iter_truncate(&iter, dw->len); rc = cifs_discard_remaining_data(server); if (rc) goto free_pages; /* * For large reads, offload to different thread for better performance, * use more cores decrypting which can be expensive */ if ((server->min_offload) && (server->in_flight > 1) && (server->pdu_size >= server->min_offload)) { dw->buf = server->smallbuf; server->smallbuf = (char *)cifs_small_buf_get(); queue_work(decrypt_wq, &dw->decrypt); *num_mids = 0; /* worker thread takes care of finding mid */ return -1; } rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size, &iter, false); if (rc) goto free_pages; *mid = smb2_find_mid(server, buf); if (*mid == NULL) { cifs_dbg(FYI, "mid not found\n"); } else { cifs_dbg(FYI, "mid found\n"); (*mid)->decrypted = true; rc = handle_read_data(server, *mid, buf, server->vals->read_rsp_size, &dw->buffer, dw->len, false); if (rc >= 0) { if (server->ops->is_network_name_deleted) { server->ops->is_network_name_deleted(buf, server); } } } free_pages: cifs_clear_xarray_buffer(&dw->buffer); free_dw: kfree(dw); return rc; discard_data: cifs_discard_remaining_data(server); goto free_pages; } static int receive_encrypted_standard(struct TCP_Server_Info *server, struct mid_q_entry **mids, char **bufs, int *num_mids) { int ret, length; char *buf = server->smallbuf; struct smb2_hdr *shdr; unsigned int pdu_length = server->pdu_size; unsigned int buf_size; struct mid_q_entry *mid_entry; int next_is_large; char *next_buffer = NULL; *num_mids = 0; /* switch to large buffer if too big for a small one */ if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) { server->large_buf = true; memcpy(server->bigbuf, buf, server->total_read); buf = server->bigbuf; } /* now read the rest */ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, pdu_length - HEADER_SIZE(server) + 1); if (length < 0) return length; server->total_read += length; buf_size = pdu_length - sizeof(struct smb2_transform_hdr); length = decrypt_raw_data(server, buf, buf_size, NULL, false); if (length) return length; next_is_large = server->large_buf; one_more: shdr = (struct smb2_hdr *)buf; if (shdr->NextCommand) { if (next_is_large) next_buffer = (char *)cifs_buf_get(); else next_buffer = (char *)cifs_small_buf_get(); memcpy(next_buffer, buf + le32_to_cpu(shdr->NextCommand), pdu_length - le32_to_cpu(shdr->NextCommand)); } mid_entry = smb2_find_mid(server, buf); if (mid_entry == NULL) cifs_dbg(FYI, "mid not found\n"); else { cifs_dbg(FYI, "mid found\n"); mid_entry->decrypted = true; mid_entry->resp_buf_size = server->pdu_size; } if (*num_mids >= MAX_COMPOUND) { cifs_server_dbg(VFS, "too many PDUs in compound\n"); return -1; } bufs[*num_mids] = buf; mids[(*num_mids)++] = mid_entry; if (mid_entry && mid_entry->handle) ret = mid_entry->handle(server, mid_entry); else ret = cifs_handle_standard(server, mid_entry); if (ret == 0 && shdr->NextCommand) { pdu_length -= 
le32_to_cpu(shdr->NextCommand); server->large_buf = next_is_large; if (next_is_large) server->bigbuf = buf = next_buffer; else server->smallbuf = buf = next_buffer; goto one_more; } else if (ret != 0) { /* * ret != 0 here means that we didn't get to handle_mid() thus * server->smallbuf and server->bigbuf are still valid. We need * to free next_buffer because it is not going to be used * anywhere. */ if (next_is_large) free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer); else free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer); } return ret; } static int smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mids, char **bufs, int *num_mids) { char *buf = server->smallbuf; unsigned int pdu_length = server->pdu_size; struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize); if (pdu_length < sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) { cifs_server_dbg(VFS, "Transform message is too small (%u)\n", pdu_length); cifs_reconnect(server, true); return -ECONNABORTED; } if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) { cifs_server_dbg(VFS, "Transform message is broken\n"); cifs_reconnect(server, true); return -ECONNABORTED; } /* TODO: add support for compounds containing READ. */ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) { return receive_encrypted_read(server, &mids[0], num_mids); } return receive_encrypted_standard(server, mids, bufs, num_mids); } int smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid) { char *buf = server->large_buf ? server->bigbuf : server->smallbuf; return handle_read_data(server, mid, buf, server->pdu_size, NULL, 0, false); } static int smb2_next_header(char *buf) { struct smb2_hdr *hdr = (struct smb2_hdr *)buf; struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf; if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) return sizeof(struct smb2_transform_hdr) + le32_to_cpu(t_hdr->OriginalMessageSize); return le32_to_cpu(hdr->NextCommand); } static int smb2_make_node(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, umode_t mode, dev_t dev) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); int rc = -EPERM; struct cifs_open_info_data buf = {}; struct cifs_io_parms io_parms = {0}; __u32 oplock = 0; struct cifs_fid fid; struct cifs_open_parms oparms; unsigned int bytes_written; struct win_dev *pdev; struct kvec iov[2]; /* * Check if mounted with the 'sfu' mount parm. * SFU emulation should work with all servers, but only * supports block and char device (no socket & fifo), * and was used by default in earlier versions of Windows */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) return rc; /* * TODO: Add ability to create instead via reparse point. Windows (e.g. 
* their current NFS server) uses this approach to expose special files * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions */ if (!S_ISCHR(mode) && !S_ISBLK(mode)) return rc; cifs_dbg(FYI, "sfu compat create special file\n"); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_WRITE, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL), .disposition = FILE_CREATE, .path = full_path, .fid = &fid, }; if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf); if (rc) return rc; /* * BB Do not bother to decode buf since no local inode yet to put * timestamps in, but we can reuse it safely. */ pdev = (struct win_dev *)&buf.fi; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = sizeof(struct win_dev); iov[1].iov_base = &buf.fi; iov[1].iov_len = sizeof(struct win_dev); if (S_ISCHR(mode)) { memcpy(pdev->type, "IntxCHR", 8); pdev->major = cpu_to_le64(MAJOR(dev)); pdev->minor = cpu_to_le64(MINOR(dev)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); } else if (S_ISBLK(mode)) { memcpy(pdev->type, "IntxBLK", 8); pdev->major = cpu_to_le64(MAJOR(dev)); pdev->minor = cpu_to_le64(MINOR(dev)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); } tcon->ses->server->ops->close(xid, tcon, &fid); d_drop(dentry); /* FIXME: add code here to set EAs */ cifs_free_open_info(&buf); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_version_operations smb20_operations = { .compare_fids = smb2_compare_fids, .setup_request = smb2_setup_request, .setup_async_request = smb2_setup_async_request, .check_receive = smb2_check_receive, .add_credits = smb2_add_credits, .set_credits = smb2_set_credits, .get_credits_field = smb2_get_credits_field, .get_credits = smb2_get_credits, .wait_mtu_credits = cifs_wait_mtu_credits, .get_next_mid = smb2_get_next_mid, .revert_current_mid = smb2_revert_current_mid, .read_data_offset = smb2_read_data_offset, .read_data_length = smb2_read_data_length, .map_error = map_smb2_to_linux_error, .find_mid = smb2_find_mid, .check_message = smb2_check_message, .dump_detail = smb2_dump_detail, .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, .negotiate_rsize = smb2_negotiate_rsize, .sess_setup = SMB2_sess_setup, .logoff = SMB2_logoff, .tree_connect = SMB2_tcon, .tree_disconnect = SMB2_tdis, .qfs_tcon = smb2_qfs_tcon, .is_path_accessible = smb2_is_path_accessible, .can_echo = smb2_can_echo, .echo = SMB2_echo, .query_path_info = smb2_query_path_info, .query_reparse_point = smb2_query_reparse_point, .get_srv_inum = smb2_get_srv_inum, .query_file_info = smb2_query_file_info, .set_path_size = smb2_set_path_size, .set_file_size = smb2_set_file_size, .set_file_info = smb2_set_file_info, .set_compression = smb2_set_compression, .mkdir = smb2_mkdir, .mkdir_setinfo = smb2_mkdir_setinfo, .rmdir = smb2_rmdir, .unlink = smb2_unlink, .rename = smb2_rename_path, .create_hardlink = smb2_create_hardlink, .query_symlink = smb2_query_symlink, .query_mf_symlink = smb3_query_mf_symlink, .create_mf_symlink = smb3_create_mf_symlink, .open = smb2_open_file, .set_fid = smb2_set_fid, 
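/* The open/close/read/write hooks below are the generic SMB2 implementations; the later dialect tables in this file reuse the same functions. */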
.close = smb2_close_file, .flush = smb2_flush_file, .async_readv = smb2_async_readv, .async_writev = smb2_async_writev, .sync_read = smb2_sync_read, .sync_write = smb2_sync_write, .query_dir_first = smb2_query_dir_first, .query_dir_next = smb2_query_dir_next, .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, .mand_unlock_range = smb2_unlock_range, .push_mand_locks = smb2_push_mandatory_locks, .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, .calc_signature = smb2_calc_signature, .is_read_op = smb2_is_read_op, .set_oplock_level = smb2_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .get_dfs_refer = smb2_get_dfs_refer, .select_sectype = smb2_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = smb2_query_eas, .set_EA = smb2_set_ea, #endif /* CIFS_XATTR */ .get_acl = get_smb2_acl, .get_acl_by_fid = get_smb2_acl_by_fid, .set_acl = set_smb2_acl, .next_header = smb2_next_header, .ioctl_query_info = smb2_ioctl_query_info, .make_node = smb2_make_node, .fiemap = smb3_fiemap, .llseek = smb3_llseek, .is_status_io_timeout = smb2_is_status_io_timeout, .is_network_name_deleted = smb2_is_network_name_deleted, }; #endif /* CIFS_ALLOW_INSECURE_LEGACY */ struct smb_version_operations smb21_operations = { .compare_fids = smb2_compare_fids, .setup_request = smb2_setup_request, .setup_async_request = smb2_setup_async_request, .check_receive = smb2_check_receive, .add_credits = smb2_add_credits, .set_credits = smb2_set_credits, .get_credits_field = smb2_get_credits_field, .get_credits = smb2_get_credits, .wait_mtu_credits = smb2_wait_mtu_credits, .adjust_credits = smb2_adjust_credits, .get_next_mid = smb2_get_next_mid, .revert_current_mid = smb2_revert_current_mid, .read_data_offset = smb2_read_data_offset, .read_data_length = smb2_read_data_length, .map_error = map_smb2_to_linux_error, .find_mid = smb2_find_mid, .check_message = smb2_check_message, .dump_detail = smb2_dump_detail, .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, .negotiate_rsize = smb2_negotiate_rsize, .sess_setup = SMB2_sess_setup, .logoff = SMB2_logoff, .tree_connect = SMB2_tcon, .tree_disconnect = SMB2_tdis, .qfs_tcon = smb2_qfs_tcon, .is_path_accessible = smb2_is_path_accessible, .can_echo = smb2_can_echo, .echo = SMB2_echo, .query_path_info = smb2_query_path_info, .query_reparse_point = smb2_query_reparse_point, .get_srv_inum = smb2_get_srv_inum, .query_file_info = smb2_query_file_info, .set_path_size = smb2_set_path_size, .set_file_size = smb2_set_file_size, .set_file_info = smb2_set_file_info, .set_compression = smb2_set_compression, .mkdir = smb2_mkdir, .mkdir_setinfo = smb2_mkdir_setinfo, .rmdir = smb2_rmdir, .unlink = smb2_unlink, .rename = smb2_rename_path, .create_hardlink = smb2_create_hardlink, .query_symlink = smb2_query_symlink, .query_mf_symlink = smb3_query_mf_symlink, .create_mf_symlink = smb3_create_mf_symlink, .open = smb2_open_file, .set_fid 
= smb2_set_fid, .close = smb2_close_file, .flush = smb2_flush_file, .async_readv = smb2_async_readv, .async_writev = smb2_async_writev, .sync_read = smb2_sync_read, .sync_write = smb2_sync_write, .query_dir_first = smb2_query_dir_first, .query_dir_next = smb2_query_dir_next, .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, .mand_unlock_range = smb2_unlock_range, .push_mand_locks = smb2_push_mandatory_locks, .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, .calc_signature = smb2_calc_signature, .is_read_op = smb21_is_read_op, .set_oplock_level = smb21_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .enum_snapshots = smb3_enum_snapshots, .notify = smb3_notify, .get_dfs_refer = smb2_get_dfs_refer, .select_sectype = smb2_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = smb2_query_eas, .set_EA = smb2_set_ea, #endif /* CIFS_XATTR */ .get_acl = get_smb2_acl, .get_acl_by_fid = get_smb2_acl_by_fid, .set_acl = set_smb2_acl, .next_header = smb2_next_header, .ioctl_query_info = smb2_ioctl_query_info, .make_node = smb2_make_node, .fiemap = smb3_fiemap, .llseek = smb3_llseek, .is_status_io_timeout = smb2_is_status_io_timeout, .is_network_name_deleted = smb2_is_network_name_deleted, }; struct smb_version_operations smb30_operations = { .compare_fids = smb2_compare_fids, .setup_request = smb2_setup_request, .setup_async_request = smb2_setup_async_request, .check_receive = smb2_check_receive, .add_credits = smb2_add_credits, .set_credits = smb2_set_credits, .get_credits_field = smb2_get_credits_field, .get_credits = smb2_get_credits, .wait_mtu_credits = smb2_wait_mtu_credits, .adjust_credits = smb2_adjust_credits, .get_next_mid = smb2_get_next_mid, .revert_current_mid = smb2_revert_current_mid, .read_data_offset = smb2_read_data_offset, .read_data_length = smb2_read_data_length, .map_error = map_smb2_to_linux_error, .find_mid = smb2_find_mid, .check_message = smb2_check_message, .dump_detail = smb2_dump_detail, .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, .negotiate_rsize = smb3_negotiate_rsize, .sess_setup = SMB2_sess_setup, .logoff = SMB2_logoff, .tree_connect = SMB2_tcon, .tree_disconnect = SMB2_tdis, .qfs_tcon = smb3_qfs_tcon, .is_path_accessible = smb2_is_path_accessible, .can_echo = smb2_can_echo, .echo = SMB2_echo, .query_path_info = smb2_query_path_info, /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */ .query_reparse_point = smb2_query_reparse_point, .get_srv_inum = smb2_get_srv_inum, .query_file_info = smb2_query_file_info, .set_path_size = smb2_set_path_size, .set_file_size = smb2_set_file_size, .set_file_info = smb2_set_file_info, .set_compression = smb2_set_compression, .mkdir = smb2_mkdir, .mkdir_setinfo = smb2_mkdir_setinfo, .rmdir = smb2_rmdir, .unlink = smb2_unlink, .rename = smb2_rename_path, .create_hardlink = smb2_create_hardlink, 
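/* Note: unlike the smb20/smb21 tables above, smb30_operations also wires up the SMB3 transform hooks (init_transform_rq, is_transform_hdr, receive_transform) further down, enabling encrypted traffic. */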
.query_symlink = smb2_query_symlink, .query_mf_symlink = smb3_query_mf_symlink, .create_mf_symlink = smb3_create_mf_symlink, .open = smb2_open_file, .set_fid = smb2_set_fid, .close = smb2_close_file, .close_getattr = smb2_close_getattr, .flush = smb2_flush_file, .async_readv = smb2_async_readv, .async_writev = smb2_async_writev, .sync_read = smb2_sync_read, .sync_write = smb2_sync_write, .query_dir_first = smb2_query_dir_first, .query_dir_next = smb2_query_dir_next, .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, .mand_unlock_range = smb2_unlock_range, .push_mand_locks = smb2_push_mandatory_locks, .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, .generate_signingkey = generate_smb30signingkey, .calc_signature = smb3_calc_signature, .set_integrity = smb3_set_integrity, .is_read_op = smb21_is_read_op, .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, .validate_negotiate = smb3_validate_negotiate, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .fallocate = smb3_fallocate, .enum_snapshots = smb3_enum_snapshots, .notify = smb3_notify, .init_transform_rq = smb3_init_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, .get_dfs_refer = smb2_get_dfs_refer, .select_sectype = smb2_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = smb2_query_eas, .set_EA = smb2_set_ea, #endif /* CIFS_XATTR */ .get_acl = get_smb2_acl, .get_acl_by_fid = get_smb2_acl_by_fid, .set_acl = set_smb2_acl, .next_header = smb2_next_header, .ioctl_query_info = smb2_ioctl_query_info, .make_node = smb2_make_node, .fiemap = smb3_fiemap, .llseek = smb3_llseek, .is_status_io_timeout = smb2_is_status_io_timeout, .is_network_name_deleted = smb2_is_network_name_deleted, }; struct smb_version_operations smb311_operations = { .compare_fids = smb2_compare_fids, .setup_request = smb2_setup_request, .setup_async_request = smb2_setup_async_request, .check_receive = smb2_check_receive, .add_credits = smb2_add_credits, .set_credits = smb2_set_credits, .get_credits_field = smb2_get_credits_field, .get_credits = smb2_get_credits, .wait_mtu_credits = smb2_wait_mtu_credits, .adjust_credits = smb2_adjust_credits, .get_next_mid = smb2_get_next_mid, .revert_current_mid = smb2_revert_current_mid, .read_data_offset = smb2_read_data_offset, .read_data_length = smb2_read_data_length, .map_error = map_smb2_to_linux_error, .find_mid = smb2_find_mid, .check_message = smb2_check_message, .dump_detail = smb2_dump_detail, .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, .negotiate_rsize = smb3_negotiate_rsize, .sess_setup = SMB2_sess_setup, .logoff = SMB2_logoff, .tree_connect = SMB2_tcon, .tree_disconnect = SMB2_tdis, .qfs_tcon = smb3_qfs_tcon, .is_path_accessible = smb2_is_path_accessible, .can_echo = smb2_can_echo, .echo = SMB2_echo, .query_path_info = 
smb2_query_path_info, .query_reparse_point = smb2_query_reparse_point, .get_srv_inum = smb2_get_srv_inum, .query_file_info = smb2_query_file_info, .set_path_size = smb2_set_path_size, .set_file_size = smb2_set_file_size, .set_file_info = smb2_set_file_info, .set_compression = smb2_set_compression, .mkdir = smb2_mkdir, .mkdir_setinfo = smb2_mkdir_setinfo, .posix_mkdir = smb311_posix_mkdir, .rmdir = smb2_rmdir, .unlink = smb2_unlink, .rename = smb2_rename_path, .create_hardlink = smb2_create_hardlink, .query_symlink = smb2_query_symlink, .query_mf_symlink = smb3_query_mf_symlink, .create_mf_symlink = smb3_create_mf_symlink, .open = smb2_open_file, .set_fid = smb2_set_fid, .close = smb2_close_file, .close_getattr = smb2_close_getattr, .flush = smb2_flush_file, .async_readv = smb2_async_readv, .async_writev = smb2_async_writev, .sync_read = smb2_sync_read, .sync_write = smb2_sync_write, .query_dir_first = smb2_query_dir_first, .query_dir_next = smb2_query_dir_next, .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb311_queryfs, .mand_lock = smb2_mand_lock, .mand_unlock_range = smb2_unlock_range, .push_mand_locks = smb2_push_mandatory_locks, .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, .generate_signingkey = generate_smb311signingkey, .calc_signature = smb3_calc_signature, .set_integrity = smb3_set_integrity, .is_read_op = smb21_is_read_op, .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .fallocate = smb3_fallocate, .enum_snapshots = smb3_enum_snapshots, .notify = smb3_notify, .init_transform_rq = smb3_init_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, .get_dfs_refer = smb2_get_dfs_refer, .select_sectype = smb2_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = smb2_query_eas, .set_EA = smb2_set_ea, #endif /* CIFS_XATTR */ .get_acl = get_smb2_acl, .get_acl_by_fid = get_smb2_acl_by_fid, .set_acl = set_smb2_acl, .next_header = smb2_next_header, .ioctl_query_info = smb2_ioctl_query_info, .make_node = smb2_make_node, .fiemap = smb3_fiemap, .llseek = smb3_llseek, .is_status_io_timeout = smb2_is_status_io_timeout, .is_network_name_deleted = smb2_is_network_name_deleted, }; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_version_values smb20_values = { .version_string = SMB20_VERSION_STRING, .protocol_id = SMB20_PROT_ID, .req_capabilities = 0, /* MBZ */ .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease), }; #endif /* ALLOW_INSECURE_LEGACY */ struct smb_version_values 
smb21_values = { .version_string = SMB21_VERSION_STRING, .protocol_id = SMB21_PROT_ID, .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */ .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease), }; struct smb_version_values smb3any_values = { .version_string = SMB3ANY_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease_v2), }; struct smb_version_values smbdefault_values = { .version_string = SMBDEFAULT_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease_v2), }; struct smb_version_values smb30_values = { .version_string = SMB30_VERSION_STRING, .protocol_id = SMB30_PROT_ID, .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, 
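/* SMB3 dialects negotiate the v2 lease create context, hence sizeof(struct create_lease_v2) below; the smb20/smb21 tables above use plain struct create_lease. */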
.create_lease_size = sizeof(struct create_lease_v2), }; struct smb_version_values smb302_values = { .version_string = SMB302_VERSION_STRING, .protocol_id = SMB302_PROT_ID, .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease_v2), }; struct smb_version_values smb311_values = { .version_string = SMB311_VERSION_STRING, .protocol_id = SMB311_PROT_ID, .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, .max_header_size = MAX_SMB2_HDR_SIZE, .read_rsp_size = sizeof(struct smb2_read_rsp), .lock_cmd = SMB2_LOCK, .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease_v2), };
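/*
 * Illustrative sketch only -- not part of smb2ops.c. It shows how the dialect
 * tables defined above could be paired and selected by version string. The
 * smb_version_pair type and the pick_smb_version() helper are hypothetical
 * names invented for this example; the real mapping between a mount's "vers="
 * option and an operations/values pair is done elsewhere, in the
 * mount/fs_context code (note that SMB3.02 reuses the 3.0 ops table).
 * Guarded by #if 0 so it is never compiled.
 */
#if 0
struct smb_version_pair {
	const char *version_string;
	struct smb_version_operations *ops;
	struct smb_version_values *vals;
};

static const struct smb_version_pair smb_version_table[] = {
	{ SMB21_VERSION_STRING,  &smb21_operations, &smb21_values },
	{ SMB30_VERSION_STRING,  &smb30_operations, &smb30_values },
	{ SMB302_VERSION_STRING, &smb30_operations, &smb302_values },
	{ SMB311_VERSION_STRING, &smb311_operations, &smb311_values },
};

static const struct smb_version_pair *pick_smb_version(const char *vers)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(smb_version_table); i++)
		if (strcmp(vers, smb_version_table[i].version_string) == 0)
			return &smb_version_table[i];
	return NULL; /* unrecognized dialect string */
}
#endif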
linux-master
fs/smb/client/smb2ops.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Unix SMB/Netbios implementation. * Version 1.9. * RPC Pipe client / server routines * Copyright (C) Luke Kenneth Casson Leighton 1997-2001. */ /* NT error codes - see nterr.h */ #include <linux/types.h> #include <linux/fs.h> #include "nterr.h" const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_OK", NT_STATUS_OK}, {"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL}, {"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED}, {"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS}, {"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH}, {"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION}, {"NT_STATUS_BUFFER_OVERFLOW", NT_STATUS_BUFFER_OVERFLOW}, {"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR}, {"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA}, {"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE}, {"NT_STATUS_BAD_INITIAL_STACK", NT_STATUS_BAD_INITIAL_STACK}, {"NT_STATUS_BAD_INITIAL_PC", NT_STATUS_BAD_INITIAL_PC}, {"NT_STATUS_INVALID_CID", NT_STATUS_INVALID_CID}, {"NT_STATUS_TIMER_NOT_CANCELED", NT_STATUS_TIMER_NOT_CANCELED}, {"NT_STATUS_INVALID_PARAMETER", NT_STATUS_INVALID_PARAMETER}, {"NT_STATUS_NO_SUCH_DEVICE", NT_STATUS_NO_SUCH_DEVICE}, {"NT_STATUS_NO_SUCH_FILE", NT_STATUS_NO_SUCH_FILE}, {"NT_STATUS_INVALID_DEVICE_REQUEST", NT_STATUS_INVALID_DEVICE_REQUEST}, {"NT_STATUS_END_OF_FILE", NT_STATUS_END_OF_FILE}, {"NT_STATUS_WRONG_VOLUME", NT_STATUS_WRONG_VOLUME}, {"NT_STATUS_NO_MEDIA_IN_DEVICE", NT_STATUS_NO_MEDIA_IN_DEVICE}, {"NT_STATUS_UNRECOGNIZED_MEDIA", NT_STATUS_UNRECOGNIZED_MEDIA}, {"NT_STATUS_NONEXISTENT_SECTOR", NT_STATUS_NONEXISTENT_SECTOR}, {"NT_STATUS_MORE_PROCESSING_REQUIRED", NT_STATUS_MORE_PROCESSING_REQUIRED}, {"NT_STATUS_NO_MEMORY", NT_STATUS_NO_MEMORY}, {"NT_STATUS_CONFLICTING_ADDRESSES", NT_STATUS_CONFLICTING_ADDRESSES}, {"NT_STATUS_NOT_MAPPED_VIEW", NT_STATUS_NOT_MAPPED_VIEW}, {"NT_STATUS_UNABLE_TO_FREE_VM", NT_STATUS_UNABLE_TO_FREE_VM}, {"NT_STATUS_UNABLE_TO_DELETE_SECTION", NT_STATUS_UNABLE_TO_DELETE_SECTION}, {"NT_STATUS_INVALID_SYSTEM_SERVICE", NT_STATUS_INVALID_SYSTEM_SERVICE}, {"NT_STATUS_ILLEGAL_INSTRUCTION", NT_STATUS_ILLEGAL_INSTRUCTION}, {"NT_STATUS_INVALID_LOCK_SEQUENCE", NT_STATUS_INVALID_LOCK_SEQUENCE}, {"NT_STATUS_INVALID_VIEW_SIZE", NT_STATUS_INVALID_VIEW_SIZE}, {"NT_STATUS_INVALID_FILE_FOR_SECTION", NT_STATUS_INVALID_FILE_FOR_SECTION}, {"NT_STATUS_ALREADY_COMMITTED", NT_STATUS_ALREADY_COMMITTED}, {"NT_STATUS_ACCESS_DENIED", NT_STATUS_ACCESS_DENIED}, {"NT_STATUS_BUFFER_TOO_SMALL", NT_STATUS_BUFFER_TOO_SMALL}, {"NT_STATUS_OBJECT_TYPE_MISMATCH", NT_STATUS_OBJECT_TYPE_MISMATCH}, {"NT_STATUS_NONCONTINUABLE_EXCEPTION", NT_STATUS_NONCONTINUABLE_EXCEPTION}, {"NT_STATUS_INVALID_DISPOSITION", NT_STATUS_INVALID_DISPOSITION}, {"NT_STATUS_UNWIND", NT_STATUS_UNWIND}, {"NT_STATUS_BAD_STACK", NT_STATUS_BAD_STACK}, {"NT_STATUS_INVALID_UNWIND_TARGET", NT_STATUS_INVALID_UNWIND_TARGET}, {"NT_STATUS_NOT_LOCKED", NT_STATUS_NOT_LOCKED}, {"NT_STATUS_PARITY_ERROR", NT_STATUS_PARITY_ERROR}, {"NT_STATUS_UNABLE_TO_DECOMMIT_VM", NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {"NT_STATUS_NOT_COMMITTED", NT_STATUS_NOT_COMMITTED}, {"NT_STATUS_INVALID_PORT_ATTRIBUTES", NT_STATUS_INVALID_PORT_ATTRIBUTES}, {"NT_STATUS_PORT_MESSAGE_TOO_LONG", NT_STATUS_PORT_MESSAGE_TOO_LONG}, {"NT_STATUS_INVALID_PARAMETER_MIX", NT_STATUS_INVALID_PARAMETER_MIX}, {"NT_STATUS_INVALID_QUOTA_LOWER", NT_STATUS_INVALID_QUOTA_LOWER}, {"NT_STATUS_DISK_CORRUPT_ERROR", NT_STATUS_DISK_CORRUPT_ERROR}, {"NT_STATUS_OBJECT_NAME_INVALID", 
NT_STATUS_OBJECT_NAME_INVALID}, {"NT_STATUS_OBJECT_NAME_NOT_FOUND", NT_STATUS_OBJECT_NAME_NOT_FOUND}, {"NT_STATUS_OBJECT_NAME_COLLISION", NT_STATUS_OBJECT_NAME_COLLISION}, {"NT_STATUS_HANDLE_NOT_WAITABLE", NT_STATUS_HANDLE_NOT_WAITABLE}, {"NT_STATUS_PORT_DISCONNECTED", NT_STATUS_PORT_DISCONNECTED}, {"NT_STATUS_DEVICE_ALREADY_ATTACHED", NT_STATUS_DEVICE_ALREADY_ATTACHED}, {"NT_STATUS_OBJECT_PATH_INVALID", NT_STATUS_OBJECT_PATH_INVALID}, {"NT_STATUS_OBJECT_PATH_NOT_FOUND", NT_STATUS_OBJECT_PATH_NOT_FOUND}, {"NT_STATUS_OBJECT_PATH_SYNTAX_BAD", NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {"NT_STATUS_DATA_OVERRUN", NT_STATUS_DATA_OVERRUN}, {"NT_STATUS_DATA_LATE_ERROR", NT_STATUS_DATA_LATE_ERROR}, {"NT_STATUS_DATA_ERROR", NT_STATUS_DATA_ERROR}, {"NT_STATUS_CRC_ERROR", NT_STATUS_CRC_ERROR}, {"NT_STATUS_SECTION_TOO_BIG", NT_STATUS_SECTION_TOO_BIG}, {"NT_STATUS_PORT_CONNECTION_REFUSED", NT_STATUS_PORT_CONNECTION_REFUSED}, {"NT_STATUS_INVALID_PORT_HANDLE", NT_STATUS_INVALID_PORT_HANDLE}, {"NT_STATUS_SHARING_VIOLATION", NT_STATUS_SHARING_VIOLATION}, {"NT_STATUS_QUOTA_EXCEEDED", NT_STATUS_QUOTA_EXCEEDED}, {"NT_STATUS_INVALID_PAGE_PROTECTION", NT_STATUS_INVALID_PAGE_PROTECTION}, {"NT_STATUS_MUTANT_NOT_OWNED", NT_STATUS_MUTANT_NOT_OWNED}, {"NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED", NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {"NT_STATUS_PORT_ALREADY_SET", NT_STATUS_PORT_ALREADY_SET}, {"NT_STATUS_SECTION_NOT_IMAGE", NT_STATUS_SECTION_NOT_IMAGE}, {"NT_STATUS_SUSPEND_COUNT_EXCEEDED", NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {"NT_STATUS_THREAD_IS_TERMINATING", NT_STATUS_THREAD_IS_TERMINATING}, {"NT_STATUS_BAD_WORKING_SET_LIMIT", NT_STATUS_BAD_WORKING_SET_LIMIT}, {"NT_STATUS_INCOMPATIBLE_FILE_MAP", NT_STATUS_INCOMPATIBLE_FILE_MAP}, {"NT_STATUS_SECTION_PROTECTION", NT_STATUS_SECTION_PROTECTION}, {"NT_STATUS_EAS_NOT_SUPPORTED", NT_STATUS_EAS_NOT_SUPPORTED}, {"NT_STATUS_EA_TOO_LARGE", NT_STATUS_EA_TOO_LARGE}, {"NT_STATUS_NONEXISTENT_EA_ENTRY", NT_STATUS_NONEXISTENT_EA_ENTRY}, {"NT_STATUS_NO_EAS_ON_FILE", NT_STATUS_NO_EAS_ON_FILE}, {"NT_STATUS_EA_CORRUPT_ERROR", NT_STATUS_EA_CORRUPT_ERROR}, {"NT_STATUS_FILE_LOCK_CONFLICT", NT_STATUS_FILE_LOCK_CONFLICT}, {"NT_STATUS_LOCK_NOT_GRANTED", NT_STATUS_LOCK_NOT_GRANTED}, {"NT_STATUS_DELETE_PENDING", NT_STATUS_DELETE_PENDING}, {"NT_STATUS_CTL_FILE_NOT_SUPPORTED", NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {"NT_STATUS_UNKNOWN_REVISION", NT_STATUS_UNKNOWN_REVISION}, {"NT_STATUS_REVISION_MISMATCH", NT_STATUS_REVISION_MISMATCH}, {"NT_STATUS_INVALID_OWNER", NT_STATUS_INVALID_OWNER}, {"NT_STATUS_INVALID_PRIMARY_GROUP", NT_STATUS_INVALID_PRIMARY_GROUP}, {"NT_STATUS_NO_IMPERSONATION_TOKEN", NT_STATUS_NO_IMPERSONATION_TOKEN}, {"NT_STATUS_CANT_DISABLE_MANDATORY", NT_STATUS_CANT_DISABLE_MANDATORY}, {"NT_STATUS_NO_LOGON_SERVERS", NT_STATUS_NO_LOGON_SERVERS}, {"NT_STATUS_NO_SUCH_LOGON_SESSION", NT_STATUS_NO_SUCH_LOGON_SESSION}, {"NT_STATUS_NO_SUCH_PRIVILEGE", NT_STATUS_NO_SUCH_PRIVILEGE}, {"NT_STATUS_PRIVILEGE_NOT_HELD", NT_STATUS_PRIVILEGE_NOT_HELD}, {"NT_STATUS_INVALID_ACCOUNT_NAME", NT_STATUS_INVALID_ACCOUNT_NAME}, {"NT_STATUS_USER_EXISTS", NT_STATUS_USER_EXISTS}, {"NT_STATUS_NO_SUCH_USER", NT_STATUS_NO_SUCH_USER}, {"NT_STATUS_GROUP_EXISTS", NT_STATUS_GROUP_EXISTS}, {"NT_STATUS_NO_SUCH_GROUP", NT_STATUS_NO_SUCH_GROUP}, {"NT_STATUS_MEMBER_IN_GROUP", NT_STATUS_MEMBER_IN_GROUP}, {"NT_STATUS_MEMBER_NOT_IN_GROUP", NT_STATUS_MEMBER_NOT_IN_GROUP}, {"NT_STATUS_LAST_ADMIN", NT_STATUS_LAST_ADMIN}, {"NT_STATUS_WRONG_PASSWORD", NT_STATUS_WRONG_PASSWORD}, {"NT_STATUS_ILL_FORMED_PASSWORD", NT_STATUS_ILL_FORMED_PASSWORD}, 
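/* each entry pairs an NT_STATUS_* code from nterr.h with its printable name, for use when reporting server-returned status codes */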
{"NT_STATUS_PASSWORD_RESTRICTION", NT_STATUS_PASSWORD_RESTRICTION}, {"NT_STATUS_LOGON_FAILURE", NT_STATUS_LOGON_FAILURE}, {"NT_STATUS_ACCOUNT_RESTRICTION", NT_STATUS_ACCOUNT_RESTRICTION}, {"NT_STATUS_INVALID_LOGON_HOURS", NT_STATUS_INVALID_LOGON_HOURS}, {"NT_STATUS_INVALID_WORKSTATION", NT_STATUS_INVALID_WORKSTATION}, {"NT_STATUS_PASSWORD_EXPIRED", NT_STATUS_PASSWORD_EXPIRED}, {"NT_STATUS_ACCOUNT_DISABLED", NT_STATUS_ACCOUNT_DISABLED}, {"NT_STATUS_NONE_MAPPED", NT_STATUS_NONE_MAPPED}, {"NT_STATUS_TOO_MANY_LUIDS_REQUESTED", NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {"NT_STATUS_LUIDS_EXHAUSTED", NT_STATUS_LUIDS_EXHAUSTED}, {"NT_STATUS_INVALID_SUB_AUTHORITY", NT_STATUS_INVALID_SUB_AUTHORITY}, {"NT_STATUS_INVALID_ACL", NT_STATUS_INVALID_ACL}, {"NT_STATUS_INVALID_SID", NT_STATUS_INVALID_SID}, {"NT_STATUS_INVALID_SECURITY_DESCR", NT_STATUS_INVALID_SECURITY_DESCR}, {"NT_STATUS_PROCEDURE_NOT_FOUND", NT_STATUS_PROCEDURE_NOT_FOUND}, {"NT_STATUS_INVALID_IMAGE_FORMAT", NT_STATUS_INVALID_IMAGE_FORMAT}, {"NT_STATUS_NO_TOKEN", NT_STATUS_NO_TOKEN}, {"NT_STATUS_BAD_INHERITANCE_ACL", NT_STATUS_BAD_INHERITANCE_ACL}, {"NT_STATUS_RANGE_NOT_LOCKED", NT_STATUS_RANGE_NOT_LOCKED}, {"NT_STATUS_DISK_FULL", NT_STATUS_DISK_FULL}, {"NT_STATUS_SERVER_DISABLED", NT_STATUS_SERVER_DISABLED}, {"NT_STATUS_SERVER_NOT_DISABLED", NT_STATUS_SERVER_NOT_DISABLED}, {"NT_STATUS_TOO_MANY_GUIDS_REQUESTED", NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {"NT_STATUS_GUIDS_EXHAUSTED", NT_STATUS_GUIDS_EXHAUSTED}, {"NT_STATUS_INVALID_ID_AUTHORITY", NT_STATUS_INVALID_ID_AUTHORITY}, {"NT_STATUS_AGENTS_EXHAUSTED", NT_STATUS_AGENTS_EXHAUSTED}, {"NT_STATUS_INVALID_VOLUME_LABEL", NT_STATUS_INVALID_VOLUME_LABEL}, {"NT_STATUS_SECTION_NOT_EXTENDED", NT_STATUS_SECTION_NOT_EXTENDED}, {"NT_STATUS_NOT_MAPPED_DATA", NT_STATUS_NOT_MAPPED_DATA}, {"NT_STATUS_RESOURCE_DATA_NOT_FOUND", NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {"NT_STATUS_RESOURCE_TYPE_NOT_FOUND", NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {"NT_STATUS_RESOURCE_NAME_NOT_FOUND", NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {"NT_STATUS_ARRAY_BOUNDS_EXCEEDED", NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {"NT_STATUS_FLOAT_DENORMAL_OPERAND", NT_STATUS_FLOAT_DENORMAL_OPERAND}, {"NT_STATUS_FLOAT_DIVIDE_BY_ZERO", NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {"NT_STATUS_FLOAT_INEXACT_RESULT", NT_STATUS_FLOAT_INEXACT_RESULT}, {"NT_STATUS_FLOAT_INVALID_OPERATION", NT_STATUS_FLOAT_INVALID_OPERATION}, {"NT_STATUS_FLOAT_OVERFLOW", NT_STATUS_FLOAT_OVERFLOW}, {"NT_STATUS_FLOAT_STACK_CHECK", NT_STATUS_FLOAT_STACK_CHECK}, {"NT_STATUS_FLOAT_UNDERFLOW", NT_STATUS_FLOAT_UNDERFLOW}, {"NT_STATUS_INTEGER_DIVIDE_BY_ZERO", NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {"NT_STATUS_INTEGER_OVERFLOW", NT_STATUS_INTEGER_OVERFLOW}, {"NT_STATUS_PRIVILEGED_INSTRUCTION", NT_STATUS_PRIVILEGED_INSTRUCTION}, {"NT_STATUS_TOO_MANY_PAGING_FILES", NT_STATUS_TOO_MANY_PAGING_FILES}, {"NT_STATUS_FILE_INVALID", NT_STATUS_FILE_INVALID}, {"NT_STATUS_ALLOTTED_SPACE_EXCEEDED", NT_STATUS_ALLOTTED_SPACE_EXCEEDED}, {"NT_STATUS_INSUFFICIENT_RESOURCES", NT_STATUS_INSUFFICIENT_RESOURCES}, {"NT_STATUS_DFS_EXIT_PATH_FOUND", NT_STATUS_DFS_EXIT_PATH_FOUND}, {"NT_STATUS_DEVICE_DATA_ERROR", NT_STATUS_DEVICE_DATA_ERROR}, {"NT_STATUS_DEVICE_NOT_CONNECTED", NT_STATUS_DEVICE_NOT_CONNECTED}, {"NT_STATUS_DEVICE_POWER_FAILURE", NT_STATUS_DEVICE_POWER_FAILURE}, {"NT_STATUS_FREE_VM_NOT_AT_BASE", NT_STATUS_FREE_VM_NOT_AT_BASE}, {"NT_STATUS_MEMORY_NOT_ALLOCATED", NT_STATUS_MEMORY_NOT_ALLOCATED}, {"NT_STATUS_WORKING_SET_QUOTA", NT_STATUS_WORKING_SET_QUOTA}, {"NT_STATUS_MEDIA_WRITE_PROTECTED", NT_STATUS_MEDIA_WRITE_PROTECTED}, 
{"NT_STATUS_DEVICE_NOT_READY", NT_STATUS_DEVICE_NOT_READY}, {"NT_STATUS_INVALID_GROUP_ATTRIBUTES", NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {"NT_STATUS_BAD_IMPERSONATION_LEVEL", NT_STATUS_BAD_IMPERSONATION_LEVEL}, {"NT_STATUS_CANT_OPEN_ANONYMOUS", NT_STATUS_CANT_OPEN_ANONYMOUS}, {"NT_STATUS_BAD_VALIDATION_CLASS", NT_STATUS_BAD_VALIDATION_CLASS}, {"NT_STATUS_BAD_TOKEN_TYPE", NT_STATUS_BAD_TOKEN_TYPE}, {"NT_STATUS_BAD_MASTER_BOOT_RECORD", NT_STATUS_BAD_MASTER_BOOT_RECORD}, {"NT_STATUS_INSTRUCTION_MISALIGNMENT", NT_STATUS_INSTRUCTION_MISALIGNMENT}, {"NT_STATUS_INSTANCE_NOT_AVAILABLE", NT_STATUS_INSTANCE_NOT_AVAILABLE}, {"NT_STATUS_PIPE_NOT_AVAILABLE", NT_STATUS_PIPE_NOT_AVAILABLE}, {"NT_STATUS_INVALID_PIPE_STATE", NT_STATUS_INVALID_PIPE_STATE}, {"NT_STATUS_PIPE_BUSY", NT_STATUS_PIPE_BUSY}, {"NT_STATUS_ILLEGAL_FUNCTION", NT_STATUS_ILLEGAL_FUNCTION}, {"NT_STATUS_PIPE_DISCONNECTED", NT_STATUS_PIPE_DISCONNECTED}, {"NT_STATUS_PIPE_CLOSING", NT_STATUS_PIPE_CLOSING}, {"NT_STATUS_PIPE_CONNECTED", NT_STATUS_PIPE_CONNECTED}, {"NT_STATUS_PIPE_LISTENING", NT_STATUS_PIPE_LISTENING}, {"NT_STATUS_INVALID_READ_MODE", NT_STATUS_INVALID_READ_MODE}, {"NT_STATUS_IO_TIMEOUT", NT_STATUS_IO_TIMEOUT}, {"NT_STATUS_FILE_FORCED_CLOSED", NT_STATUS_FILE_FORCED_CLOSED}, {"NT_STATUS_PROFILING_NOT_STARTED", NT_STATUS_PROFILING_NOT_STARTED}, {"NT_STATUS_PROFILING_NOT_STOPPED", NT_STATUS_PROFILING_NOT_STOPPED}, {"NT_STATUS_COULD_NOT_INTERPRET", NT_STATUS_COULD_NOT_INTERPRET}, {"NT_STATUS_FILE_IS_A_DIRECTORY", NT_STATUS_FILE_IS_A_DIRECTORY}, {"NT_STATUS_NOT_SUPPORTED", NT_STATUS_NOT_SUPPORTED}, {"NT_STATUS_REMOTE_NOT_LISTENING", NT_STATUS_REMOTE_NOT_LISTENING}, {"NT_STATUS_DUPLICATE_NAME", NT_STATUS_DUPLICATE_NAME}, {"NT_STATUS_BAD_NETWORK_PATH", NT_STATUS_BAD_NETWORK_PATH}, {"NT_STATUS_NETWORK_BUSY", NT_STATUS_NETWORK_BUSY}, {"NT_STATUS_DEVICE_DOES_NOT_EXIST", NT_STATUS_DEVICE_DOES_NOT_EXIST}, {"NT_STATUS_TOO_MANY_COMMANDS", NT_STATUS_TOO_MANY_COMMANDS}, {"NT_STATUS_ADAPTER_HARDWARE_ERROR", NT_STATUS_ADAPTER_HARDWARE_ERROR}, {"NT_STATUS_INVALID_NETWORK_RESPONSE", NT_STATUS_INVALID_NETWORK_RESPONSE}, {"NT_STATUS_UNEXPECTED_NETWORK_ERROR", NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {"NT_STATUS_BAD_REMOTE_ADAPTER", NT_STATUS_BAD_REMOTE_ADAPTER}, {"NT_STATUS_PRINT_QUEUE_FULL", NT_STATUS_PRINT_QUEUE_FULL}, {"NT_STATUS_NO_SPOOL_SPACE", NT_STATUS_NO_SPOOL_SPACE}, {"NT_STATUS_PRINT_CANCELLED", NT_STATUS_PRINT_CANCELLED}, {"NT_STATUS_NETWORK_NAME_DELETED", NT_STATUS_NETWORK_NAME_DELETED}, {"NT_STATUS_NETWORK_ACCESS_DENIED", NT_STATUS_NETWORK_ACCESS_DENIED}, {"NT_STATUS_BAD_DEVICE_TYPE", NT_STATUS_BAD_DEVICE_TYPE}, {"NT_STATUS_BAD_NETWORK_NAME", NT_STATUS_BAD_NETWORK_NAME}, {"NT_STATUS_TOO_MANY_NAMES", NT_STATUS_TOO_MANY_NAMES}, {"NT_STATUS_TOO_MANY_SESSIONS", NT_STATUS_TOO_MANY_SESSIONS}, {"NT_STATUS_SHARING_PAUSED", NT_STATUS_SHARING_PAUSED}, {"NT_STATUS_REQUEST_NOT_ACCEPTED", NT_STATUS_REQUEST_NOT_ACCEPTED}, {"NT_STATUS_REDIRECTOR_PAUSED", NT_STATUS_REDIRECTOR_PAUSED}, {"NT_STATUS_NET_WRITE_FAULT", NT_STATUS_NET_WRITE_FAULT}, {"NT_STATUS_PROFILING_AT_LIMIT", NT_STATUS_PROFILING_AT_LIMIT}, {"NT_STATUS_NOT_SAME_DEVICE", NT_STATUS_NOT_SAME_DEVICE}, {"NT_STATUS_FILE_RENAMED", NT_STATUS_FILE_RENAMED}, {"NT_STATUS_VIRTUAL_CIRCUIT_CLOSED", NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {"NT_STATUS_NO_SECURITY_ON_OBJECT", NT_STATUS_NO_SECURITY_ON_OBJECT}, {"NT_STATUS_CANT_WAIT", NT_STATUS_CANT_WAIT}, {"NT_STATUS_PIPE_EMPTY", NT_STATUS_PIPE_EMPTY}, {"NT_STATUS_CANT_ACCESS_DOMAIN_INFO", NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {"NT_STATUS_CANT_TERMINATE_SELF", 
NT_STATUS_CANT_TERMINATE_SELF}, {"NT_STATUS_INVALID_SERVER_STATE", NT_STATUS_INVALID_SERVER_STATE}, {"NT_STATUS_INVALID_DOMAIN_STATE", NT_STATUS_INVALID_DOMAIN_STATE}, {"NT_STATUS_INVALID_DOMAIN_ROLE", NT_STATUS_INVALID_DOMAIN_ROLE}, {"NT_STATUS_NO_SUCH_DOMAIN", NT_STATUS_NO_SUCH_DOMAIN}, {"NT_STATUS_DOMAIN_EXISTS", NT_STATUS_DOMAIN_EXISTS}, {"NT_STATUS_DOMAIN_LIMIT_EXCEEDED", NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {"NT_STATUS_OPLOCK_NOT_GRANTED", NT_STATUS_OPLOCK_NOT_GRANTED}, {"NT_STATUS_INVALID_OPLOCK_PROTOCOL", NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {"NT_STATUS_INTERNAL_DB_CORRUPTION", NT_STATUS_INTERNAL_DB_CORRUPTION}, {"NT_STATUS_INTERNAL_ERROR", NT_STATUS_INTERNAL_ERROR}, {"NT_STATUS_GENERIC_NOT_MAPPED", NT_STATUS_GENERIC_NOT_MAPPED}, {"NT_STATUS_BAD_DESCRIPTOR_FORMAT", NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {"NT_STATUS_INVALID_USER_BUFFER", NT_STATUS_INVALID_USER_BUFFER}, {"NT_STATUS_UNEXPECTED_IO_ERROR", NT_STATUS_UNEXPECTED_IO_ERROR}, {"NT_STATUS_UNEXPECTED_MM_CREATE_ERR", NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, {"NT_STATUS_UNEXPECTED_MM_MAP_ERROR", NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {"NT_STATUS_UNEXPECTED_MM_EXTEND_ERR", NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {"NT_STATUS_NOT_LOGON_PROCESS", NT_STATUS_NOT_LOGON_PROCESS}, {"NT_STATUS_LOGON_SESSION_EXISTS", NT_STATUS_LOGON_SESSION_EXISTS}, {"NT_STATUS_INVALID_PARAMETER_1", NT_STATUS_INVALID_PARAMETER_1}, {"NT_STATUS_INVALID_PARAMETER_2", NT_STATUS_INVALID_PARAMETER_2}, {"NT_STATUS_INVALID_PARAMETER_3", NT_STATUS_INVALID_PARAMETER_3}, {"NT_STATUS_INVALID_PARAMETER_4", NT_STATUS_INVALID_PARAMETER_4}, {"NT_STATUS_INVALID_PARAMETER_5", NT_STATUS_INVALID_PARAMETER_5}, {"NT_STATUS_INVALID_PARAMETER_6", NT_STATUS_INVALID_PARAMETER_6}, {"NT_STATUS_INVALID_PARAMETER_7", NT_STATUS_INVALID_PARAMETER_7}, {"NT_STATUS_INVALID_PARAMETER_8", NT_STATUS_INVALID_PARAMETER_8}, {"NT_STATUS_INVALID_PARAMETER_9", NT_STATUS_INVALID_PARAMETER_9}, {"NT_STATUS_INVALID_PARAMETER_10", NT_STATUS_INVALID_PARAMETER_10}, {"NT_STATUS_INVALID_PARAMETER_11", NT_STATUS_INVALID_PARAMETER_11}, {"NT_STATUS_INVALID_PARAMETER_12", NT_STATUS_INVALID_PARAMETER_12}, {"NT_STATUS_REDIRECTOR_NOT_STARTED", NT_STATUS_REDIRECTOR_NOT_STARTED}, {"NT_STATUS_REDIRECTOR_STARTED", NT_STATUS_REDIRECTOR_STARTED}, {"NT_STATUS_STACK_OVERFLOW", NT_STATUS_STACK_OVERFLOW}, {"NT_STATUS_NO_SUCH_PACKAGE", NT_STATUS_NO_SUCH_PACKAGE}, {"NT_STATUS_BAD_FUNCTION_TABLE", NT_STATUS_BAD_FUNCTION_TABLE}, {"NT_STATUS_DIRECTORY_NOT_EMPTY", NT_STATUS_DIRECTORY_NOT_EMPTY}, {"NT_STATUS_FILE_CORRUPT_ERROR", NT_STATUS_FILE_CORRUPT_ERROR}, {"NT_STATUS_NOT_A_DIRECTORY", NT_STATUS_NOT_A_DIRECTORY}, {"NT_STATUS_BAD_LOGON_SESSION_STATE", NT_STATUS_BAD_LOGON_SESSION_STATE}, {"NT_STATUS_LOGON_SESSION_COLLISION", NT_STATUS_LOGON_SESSION_COLLISION}, {"NT_STATUS_NAME_TOO_LONG", NT_STATUS_NAME_TOO_LONG}, {"NT_STATUS_FILES_OPEN", NT_STATUS_FILES_OPEN}, {"NT_STATUS_CONNECTION_IN_USE", NT_STATUS_CONNECTION_IN_USE}, {"NT_STATUS_MESSAGE_NOT_FOUND", NT_STATUS_MESSAGE_NOT_FOUND}, {"NT_STATUS_PROCESS_IS_TERMINATING", NT_STATUS_PROCESS_IS_TERMINATING}, {"NT_STATUS_INVALID_LOGON_TYPE", NT_STATUS_INVALID_LOGON_TYPE}, {"NT_STATUS_NO_GUID_TRANSLATION", NT_STATUS_NO_GUID_TRANSLATION}, {"NT_STATUS_CANNOT_IMPERSONATE", NT_STATUS_CANNOT_IMPERSONATE}, {"NT_STATUS_IMAGE_ALREADY_LOADED", NT_STATUS_IMAGE_ALREADY_LOADED}, {"NT_STATUS_ABIOS_NOT_PRESENT", NT_STATUS_ABIOS_NOT_PRESENT}, {"NT_STATUS_ABIOS_LID_NOT_EXIST", NT_STATUS_ABIOS_LID_NOT_EXIST}, {"NT_STATUS_ABIOS_LID_ALREADY_OWNED", NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {"NT_STATUS_ABIOS_NOT_LID_OWNER", 
NT_STATUS_ABIOS_NOT_LID_OWNER}, {"NT_STATUS_ABIOS_INVALID_COMMAND", NT_STATUS_ABIOS_INVALID_COMMAND}, {"NT_STATUS_ABIOS_INVALID_LID", NT_STATUS_ABIOS_INVALID_LID}, {"NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE", NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {"NT_STATUS_ABIOS_INVALID_SELECTOR", NT_STATUS_ABIOS_INVALID_SELECTOR}, {"NT_STATUS_NO_LDT", NT_STATUS_NO_LDT}, {"NT_STATUS_INVALID_LDT_SIZE", NT_STATUS_INVALID_LDT_SIZE}, {"NT_STATUS_INVALID_LDT_OFFSET", NT_STATUS_INVALID_LDT_OFFSET}, {"NT_STATUS_INVALID_LDT_DESCRIPTOR", NT_STATUS_INVALID_LDT_DESCRIPTOR}, {"NT_STATUS_INVALID_IMAGE_NE_FORMAT", NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {"NT_STATUS_RXACT_INVALID_STATE", NT_STATUS_RXACT_INVALID_STATE}, {"NT_STATUS_RXACT_COMMIT_FAILURE", NT_STATUS_RXACT_COMMIT_FAILURE}, {"NT_STATUS_MAPPED_FILE_SIZE_ZERO", NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {"NT_STATUS_TOO_MANY_OPENED_FILES", NT_STATUS_TOO_MANY_OPENED_FILES}, {"NT_STATUS_CANCELLED", NT_STATUS_CANCELLED}, {"NT_STATUS_CANNOT_DELETE", NT_STATUS_CANNOT_DELETE}, {"NT_STATUS_INVALID_COMPUTER_NAME", NT_STATUS_INVALID_COMPUTER_NAME}, {"NT_STATUS_FILE_DELETED", NT_STATUS_FILE_DELETED}, {"NT_STATUS_SPECIAL_ACCOUNT", NT_STATUS_SPECIAL_ACCOUNT}, {"NT_STATUS_SPECIAL_GROUP", NT_STATUS_SPECIAL_GROUP}, {"NT_STATUS_SPECIAL_USER", NT_STATUS_SPECIAL_USER}, {"NT_STATUS_MEMBERS_PRIMARY_GROUP", NT_STATUS_MEMBERS_PRIMARY_GROUP}, {"NT_STATUS_FILE_CLOSED", NT_STATUS_FILE_CLOSED}, {"NT_STATUS_TOO_MANY_THREADS", NT_STATUS_TOO_MANY_THREADS}, {"NT_STATUS_THREAD_NOT_IN_PROCESS", NT_STATUS_THREAD_NOT_IN_PROCESS}, {"NT_STATUS_TOKEN_ALREADY_IN_USE", NT_STATUS_TOKEN_ALREADY_IN_USE}, {"NT_STATUS_PAGEFILE_QUOTA_EXCEEDED", NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {"NT_STATUS_COMMITMENT_LIMIT", NT_STATUS_COMMITMENT_LIMIT}, {"NT_STATUS_INVALID_IMAGE_LE_FORMAT", NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {"NT_STATUS_INVALID_IMAGE_NOT_MZ", NT_STATUS_INVALID_IMAGE_NOT_MZ}, {"NT_STATUS_INVALID_IMAGE_PROTECT", NT_STATUS_INVALID_IMAGE_PROTECT}, {"NT_STATUS_INVALID_IMAGE_WIN_16", NT_STATUS_INVALID_IMAGE_WIN_16}, {"NT_STATUS_LOGON_SERVER_CONFLICT", NT_STATUS_LOGON_SERVER_CONFLICT}, {"NT_STATUS_TIME_DIFFERENCE_AT_DC", NT_STATUS_TIME_DIFFERENCE_AT_DC}, {"NT_STATUS_SYNCHRONIZATION_REQUIRED", NT_STATUS_SYNCHRONIZATION_REQUIRED}, {"NT_STATUS_DLL_NOT_FOUND", NT_STATUS_DLL_NOT_FOUND}, {"NT_STATUS_OPEN_FAILED", NT_STATUS_OPEN_FAILED}, {"NT_STATUS_IO_PRIVILEGE_FAILED", NT_STATUS_IO_PRIVILEGE_FAILED}, {"NT_STATUS_ORDINAL_NOT_FOUND", NT_STATUS_ORDINAL_NOT_FOUND}, {"NT_STATUS_ENTRYPOINT_NOT_FOUND", NT_STATUS_ENTRYPOINT_NOT_FOUND}, {"NT_STATUS_CONTROL_C_EXIT", NT_STATUS_CONTROL_C_EXIT}, {"NT_STATUS_LOCAL_DISCONNECT", NT_STATUS_LOCAL_DISCONNECT}, {"NT_STATUS_REMOTE_DISCONNECT", NT_STATUS_REMOTE_DISCONNECT}, {"NT_STATUS_REMOTE_RESOURCES", NT_STATUS_REMOTE_RESOURCES}, {"NT_STATUS_LINK_FAILED", NT_STATUS_LINK_FAILED}, {"NT_STATUS_LINK_TIMEOUT", NT_STATUS_LINK_TIMEOUT}, {"NT_STATUS_INVALID_CONNECTION", NT_STATUS_INVALID_CONNECTION}, {"NT_STATUS_INVALID_ADDRESS", NT_STATUS_INVALID_ADDRESS}, {"NT_STATUS_DLL_INIT_FAILED", NT_STATUS_DLL_INIT_FAILED}, {"NT_STATUS_MISSING_SYSTEMFILE", NT_STATUS_MISSING_SYSTEMFILE}, {"NT_STATUS_UNHANDLED_EXCEPTION", NT_STATUS_UNHANDLED_EXCEPTION}, {"NT_STATUS_APP_INIT_FAILURE", NT_STATUS_APP_INIT_FAILURE}, {"NT_STATUS_PAGEFILE_CREATE_FAILED", NT_STATUS_PAGEFILE_CREATE_FAILED}, {"NT_STATUS_NO_PAGEFILE", NT_STATUS_NO_PAGEFILE}, {"NT_STATUS_INVALID_LEVEL", NT_STATUS_INVALID_LEVEL}, {"NT_STATUS_WRONG_PASSWORD_CORE", NT_STATUS_WRONG_PASSWORD_CORE}, {"NT_STATUS_ILLEGAL_FLOAT_CONTEXT", 
NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {"NT_STATUS_PIPE_BROKEN", NT_STATUS_PIPE_BROKEN}, {"NT_STATUS_REGISTRY_CORRUPT", NT_STATUS_REGISTRY_CORRUPT}, {"NT_STATUS_REGISTRY_IO_FAILED", NT_STATUS_REGISTRY_IO_FAILED}, {"NT_STATUS_NO_EVENT_PAIR", NT_STATUS_NO_EVENT_PAIR}, {"NT_STATUS_UNRECOGNIZED_VOLUME", NT_STATUS_UNRECOGNIZED_VOLUME}, {"NT_STATUS_SERIAL_NO_DEVICE_INITED", NT_STATUS_SERIAL_NO_DEVICE_INITED}, {"NT_STATUS_NO_SUCH_ALIAS", NT_STATUS_NO_SUCH_ALIAS}, {"NT_STATUS_MEMBER_NOT_IN_ALIAS", NT_STATUS_MEMBER_NOT_IN_ALIAS}, {"NT_STATUS_MEMBER_IN_ALIAS", NT_STATUS_MEMBER_IN_ALIAS}, {"NT_STATUS_ALIAS_EXISTS", NT_STATUS_ALIAS_EXISTS}, {"NT_STATUS_LOGON_NOT_GRANTED", NT_STATUS_LOGON_NOT_GRANTED}, {"NT_STATUS_TOO_MANY_SECRETS", NT_STATUS_TOO_MANY_SECRETS}, {"NT_STATUS_SECRET_TOO_LONG", NT_STATUS_SECRET_TOO_LONG}, {"NT_STATUS_INTERNAL_DB_ERROR", NT_STATUS_INTERNAL_DB_ERROR}, {"NT_STATUS_FULLSCREEN_MODE", NT_STATUS_FULLSCREEN_MODE}, {"NT_STATUS_TOO_MANY_CONTEXT_IDS", NT_STATUS_TOO_MANY_CONTEXT_IDS}, {"NT_STATUS_LOGON_TYPE_NOT_GRANTED", NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {"NT_STATUS_NOT_REGISTRY_FILE", NT_STATUS_NOT_REGISTRY_FILE}, {"NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED", NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {"NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR", NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {"NT_STATUS_FT_MISSING_MEMBER", NT_STATUS_FT_MISSING_MEMBER}, {"NT_STATUS_ILL_FORMED_SERVICE_ENTRY", NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {"NT_STATUS_ILLEGAL_CHARACTER", NT_STATUS_ILLEGAL_CHARACTER}, {"NT_STATUS_UNMAPPABLE_CHARACTER", NT_STATUS_UNMAPPABLE_CHARACTER}, {"NT_STATUS_UNDEFINED_CHARACTER", NT_STATUS_UNDEFINED_CHARACTER}, {"NT_STATUS_FLOPPY_VOLUME", NT_STATUS_FLOPPY_VOLUME}, {"NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND", NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {"NT_STATUS_FLOPPY_WRONG_CYLINDER", NT_STATUS_FLOPPY_WRONG_CYLINDER}, {"NT_STATUS_FLOPPY_UNKNOWN_ERROR", NT_STATUS_FLOPPY_UNKNOWN_ERROR}, {"NT_STATUS_FLOPPY_BAD_REGISTERS", NT_STATUS_FLOPPY_BAD_REGISTERS}, {"NT_STATUS_DISK_RECALIBRATE_FAILED", NT_STATUS_DISK_RECALIBRATE_FAILED}, {"NT_STATUS_DISK_OPERATION_FAILED", NT_STATUS_DISK_OPERATION_FAILED}, {"NT_STATUS_DISK_RESET_FAILED", NT_STATUS_DISK_RESET_FAILED}, {"NT_STATUS_SHARED_IRQ_BUSY", NT_STATUS_SHARED_IRQ_BUSY}, {"NT_STATUS_FT_ORPHANING", NT_STATUS_FT_ORPHANING}, {"NT_STATUS_PARTITION_FAILURE", NT_STATUS_PARTITION_FAILURE}, {"NT_STATUS_INVALID_BLOCK_LENGTH", NT_STATUS_INVALID_BLOCK_LENGTH}, {"NT_STATUS_DEVICE_NOT_PARTITIONED", NT_STATUS_DEVICE_NOT_PARTITIONED}, {"NT_STATUS_UNABLE_TO_LOCK_MEDIA", NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {"NT_STATUS_UNABLE_TO_UNLOAD_MEDIA", NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {"NT_STATUS_EOM_OVERFLOW", NT_STATUS_EOM_OVERFLOW}, {"NT_STATUS_NO_MEDIA", NT_STATUS_NO_MEDIA}, {"NT_STATUS_NO_SUCH_MEMBER", NT_STATUS_NO_SUCH_MEMBER}, {"NT_STATUS_INVALID_MEMBER", NT_STATUS_INVALID_MEMBER}, {"NT_STATUS_KEY_DELETED", NT_STATUS_KEY_DELETED}, {"NT_STATUS_NO_LOG_SPACE", NT_STATUS_NO_LOG_SPACE}, {"NT_STATUS_TOO_MANY_SIDS", NT_STATUS_TOO_MANY_SIDS}, {"NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED", NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {"NT_STATUS_KEY_HAS_CHILDREN", NT_STATUS_KEY_HAS_CHILDREN}, {"NT_STATUS_CHILD_MUST_BE_VOLATILE", NT_STATUS_CHILD_MUST_BE_VOLATILE}, {"NT_STATUS_DEVICE_CONFIGURATION_ERROR", NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {"NT_STATUS_DRIVER_INTERNAL_ERROR", NT_STATUS_DRIVER_INTERNAL_ERROR}, {"NT_STATUS_INVALID_DEVICE_STATE", NT_STATUS_INVALID_DEVICE_STATE}, {"NT_STATUS_IO_DEVICE_ERROR", NT_STATUS_IO_DEVICE_ERROR}, {"NT_STATUS_DEVICE_PROTOCOL_ERROR", NT_STATUS_DEVICE_PROTOCOL_ERROR}, 
{"NT_STATUS_BACKUP_CONTROLLER", NT_STATUS_BACKUP_CONTROLLER}, {"NT_STATUS_LOG_FILE_FULL", NT_STATUS_LOG_FILE_FULL}, {"NT_STATUS_TOO_LATE", NT_STATUS_TOO_LATE}, {"NT_STATUS_NO_TRUST_LSA_SECRET", NT_STATUS_NO_TRUST_LSA_SECRET}, {"NT_STATUS_NO_TRUST_SAM_ACCOUNT", NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {"NT_STATUS_TRUSTED_DOMAIN_FAILURE", NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {"NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE", NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {"NT_STATUS_EVENTLOG_FILE_CORRUPT", NT_STATUS_EVENTLOG_FILE_CORRUPT}, {"NT_STATUS_EVENTLOG_CANT_START", NT_STATUS_EVENTLOG_CANT_START}, {"NT_STATUS_TRUST_FAILURE", NT_STATUS_TRUST_FAILURE}, {"NT_STATUS_MUTANT_LIMIT_EXCEEDED", NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {"NT_STATUS_NETLOGON_NOT_STARTED", NT_STATUS_NETLOGON_NOT_STARTED}, {"NT_STATUS_ACCOUNT_EXPIRED", NT_STATUS_ACCOUNT_EXPIRED}, {"NT_STATUS_POSSIBLE_DEADLOCK", NT_STATUS_POSSIBLE_DEADLOCK}, {"NT_STATUS_NETWORK_CREDENTIAL_CONFLICT", NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {"NT_STATUS_REMOTE_SESSION_LIMIT", NT_STATUS_REMOTE_SESSION_LIMIT}, {"NT_STATUS_EVENTLOG_FILE_CHANGED", NT_STATUS_EVENTLOG_FILE_CHANGED}, {"NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT", NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {"NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT", NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {"NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT", NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT}, {"NT_STATUS_DOMAIN_TRUST_INCONSISTENT", NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED}, {"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY}, {"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED}, {"NT_STATUS_RESOURCE_LANG_NOT_FOUND", NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {"NT_STATUS_INSUFF_SERVER_RESOURCES", NT_STATUS_INSUFF_SERVER_RESOURCES}, {"NT_STATUS_INVALID_BUFFER_SIZE", NT_STATUS_INVALID_BUFFER_SIZE}, {"NT_STATUS_INVALID_ADDRESS_COMPONENT", NT_STATUS_INVALID_ADDRESS_COMPONENT}, {"NT_STATUS_INVALID_ADDRESS_WILDCARD", NT_STATUS_INVALID_ADDRESS_WILDCARD}, {"NT_STATUS_TOO_MANY_ADDRESSES", NT_STATUS_TOO_MANY_ADDRESSES}, {"NT_STATUS_ADDRESS_ALREADY_EXISTS", NT_STATUS_ADDRESS_ALREADY_EXISTS}, {"NT_STATUS_ADDRESS_CLOSED", NT_STATUS_ADDRESS_CLOSED}, {"NT_STATUS_CONNECTION_DISCONNECTED", NT_STATUS_CONNECTION_DISCONNECTED}, {"NT_STATUS_CONNECTION_RESET", NT_STATUS_CONNECTION_RESET}, {"NT_STATUS_TOO_MANY_NODES", NT_STATUS_TOO_MANY_NODES}, {"NT_STATUS_TRANSACTION_ABORTED", NT_STATUS_TRANSACTION_ABORTED}, {"NT_STATUS_TRANSACTION_TIMED_OUT", NT_STATUS_TRANSACTION_TIMED_OUT}, {"NT_STATUS_TRANSACTION_NO_RELEASE", NT_STATUS_TRANSACTION_NO_RELEASE}, {"NT_STATUS_TRANSACTION_NO_MATCH", NT_STATUS_TRANSACTION_NO_MATCH}, {"NT_STATUS_TRANSACTION_RESPONDED", NT_STATUS_TRANSACTION_RESPONDED}, {"NT_STATUS_TRANSACTION_INVALID_ID", NT_STATUS_TRANSACTION_INVALID_ID}, {"NT_STATUS_TRANSACTION_INVALID_TYPE", NT_STATUS_TRANSACTION_INVALID_TYPE}, {"NT_STATUS_NOT_SERVER_SESSION", NT_STATUS_NOT_SERVER_SESSION}, {"NT_STATUS_NOT_CLIENT_SESSION", NT_STATUS_NOT_CLIENT_SESSION}, {"NT_STATUS_CANNOT_LOAD_REGISTRY_FILE", NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {"NT_STATUS_DEBUG_ATTACH_FAILED", NT_STATUS_DEBUG_ATTACH_FAILED}, {"NT_STATUS_SYSTEM_PROCESS_TERMINATED", NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {"NT_STATUS_DATA_NOT_ACCEPTED", NT_STATUS_DATA_NOT_ACCEPTED}, {"NT_STATUS_NO_BROWSER_SERVERS_FOUND", NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {"NT_STATUS_VDM_HARD_ERROR", NT_STATUS_VDM_HARD_ERROR}, {"NT_STATUS_DRIVER_CANCEL_TIMEOUT", NT_STATUS_DRIVER_CANCEL_TIMEOUT}, 
{"NT_STATUS_REPLY_MESSAGE_MISMATCH", NT_STATUS_REPLY_MESSAGE_MISMATCH}, {"NT_STATUS_MAPPED_ALIGNMENT", NT_STATUS_MAPPED_ALIGNMENT}, {"NT_STATUS_IMAGE_CHECKSUM_MISMATCH", NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {"NT_STATUS_LOST_WRITEBEHIND_DATA", NT_STATUS_LOST_WRITEBEHIND_DATA}, {"NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID", NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {"NT_STATUS_PASSWORD_MUST_CHANGE", NT_STATUS_PASSWORD_MUST_CHANGE}, {"NT_STATUS_NOT_FOUND", NT_STATUS_NOT_FOUND}, {"NT_STATUS_NOT_TINY_STREAM", NT_STATUS_NOT_TINY_STREAM}, {"NT_STATUS_RECOVERY_FAILURE", NT_STATUS_RECOVERY_FAILURE}, {"NT_STATUS_STACK_OVERFLOW_READ", NT_STATUS_STACK_OVERFLOW_READ}, {"NT_STATUS_FAIL_CHECK", NT_STATUS_FAIL_CHECK}, {"NT_STATUS_DUPLICATE_OBJECTID", NT_STATUS_DUPLICATE_OBJECTID}, {"NT_STATUS_OBJECTID_EXISTS", NT_STATUS_OBJECTID_EXISTS}, {"NT_STATUS_CONVERT_TO_LARGE", NT_STATUS_CONVERT_TO_LARGE}, {"NT_STATUS_RETRY", NT_STATUS_RETRY}, {"NT_STATUS_FOUND_OUT_OF_SCOPE", NT_STATUS_FOUND_OUT_OF_SCOPE}, {"NT_STATUS_ALLOCATE_BUCKET", NT_STATUS_ALLOCATE_BUCKET}, {"NT_STATUS_PROPSET_NOT_FOUND", NT_STATUS_PROPSET_NOT_FOUND}, {"NT_STATUS_MARSHALL_OVERFLOW", NT_STATUS_MARSHALL_OVERFLOW}, {"NT_STATUS_INVALID_VARIANT", NT_STATUS_INVALID_VARIANT}, {"NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND", NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {"NT_STATUS_ACCOUNT_LOCKED_OUT", NT_STATUS_ACCOUNT_LOCKED_OUT}, {"NT_STATUS_HANDLE_NOT_CLOSABLE", NT_STATUS_HANDLE_NOT_CLOSABLE}, {"NT_STATUS_CONNECTION_REFUSED", NT_STATUS_CONNECTION_REFUSED}, {"NT_STATUS_GRACEFUL_DISCONNECT", NT_STATUS_GRACEFUL_DISCONNECT}, {"NT_STATUS_ADDRESS_ALREADY_ASSOCIATED", NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {"NT_STATUS_ADDRESS_NOT_ASSOCIATED", NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {"NT_STATUS_CONNECTION_INVALID", NT_STATUS_CONNECTION_INVALID}, {"NT_STATUS_CONNECTION_ACTIVE", NT_STATUS_CONNECTION_ACTIVE}, {"NT_STATUS_NETWORK_UNREACHABLE", NT_STATUS_NETWORK_UNREACHABLE}, {"NT_STATUS_HOST_UNREACHABLE", NT_STATUS_HOST_UNREACHABLE}, {"NT_STATUS_PROTOCOL_UNREACHABLE", NT_STATUS_PROTOCOL_UNREACHABLE}, {"NT_STATUS_PORT_UNREACHABLE", NT_STATUS_PORT_UNREACHABLE}, {"NT_STATUS_REQUEST_ABORTED", NT_STATUS_REQUEST_ABORTED}, {"NT_STATUS_CONNECTION_ABORTED", NT_STATUS_CONNECTION_ABORTED}, {"NT_STATUS_BAD_COMPRESSION_BUFFER", NT_STATUS_BAD_COMPRESSION_BUFFER}, {"NT_STATUS_USER_MAPPED_FILE", NT_STATUS_USER_MAPPED_FILE}, {"NT_STATUS_AUDIT_FAILED", NT_STATUS_AUDIT_FAILED}, {"NT_STATUS_TIMER_RESOLUTION_NOT_SET", NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {"NT_STATUS_CONNECTION_COUNT_LIMIT", NT_STATUS_CONNECTION_COUNT_LIMIT}, {"NT_STATUS_LOGIN_TIME_RESTRICTION", NT_STATUS_LOGIN_TIME_RESTRICTION}, {"NT_STATUS_LOGIN_WKSTA_RESTRICTION", NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {"NT_STATUS_IMAGE_MP_UP_MISMATCH", NT_STATUS_IMAGE_MP_UP_MISMATCH}, {"NT_STATUS_INSUFFICIENT_LOGON_INFO", NT_STATUS_INSUFFICIENT_LOGON_INFO}, {"NT_STATUS_BAD_DLL_ENTRYPOINT", NT_STATUS_BAD_DLL_ENTRYPOINT}, {"NT_STATUS_BAD_SERVICE_ENTRYPOINT", NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {"NT_STATUS_LPC_REPLY_LOST", NT_STATUS_LPC_REPLY_LOST}, {"NT_STATUS_IP_ADDRESS_CONFLICT1", NT_STATUS_IP_ADDRESS_CONFLICT1}, {"NT_STATUS_IP_ADDRESS_CONFLICT2", NT_STATUS_IP_ADDRESS_CONFLICT2}, {"NT_STATUS_REGISTRY_QUOTA_LIMIT", NT_STATUS_REGISTRY_QUOTA_LIMIT}, {"NT_STATUS_PATH_NOT_COVERED", NT_STATUS_PATH_NOT_COVERED}, {"NT_STATUS_NO_CALLBACK_ACTIVE", NT_STATUS_NO_CALLBACK_ACTIVE}, {"NT_STATUS_LICENSE_QUOTA_EXCEEDED", NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {"NT_STATUS_PWD_TOO_SHORT", NT_STATUS_PWD_TOO_SHORT}, {"NT_STATUS_PWD_TOO_RECENT", 
NT_STATUS_PWD_TOO_RECENT}, {"NT_STATUS_PWD_HISTORY_CONFLICT", NT_STATUS_PWD_HISTORY_CONFLICT}, {"NT_STATUS_PLUGPLAY_NO_DEVICE", NT_STATUS_PLUGPLAY_NO_DEVICE}, {"NT_STATUS_UNSUPPORTED_COMPRESSION", NT_STATUS_UNSUPPORTED_COMPRESSION}, {"NT_STATUS_INVALID_HW_PROFILE", NT_STATUS_INVALID_HW_PROFILE}, {"NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH", NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {"NT_STATUS_DRIVER_ORDINAL_NOT_FOUND", NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {"NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND", NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {"NT_STATUS_RESOURCE_NOT_OWNED", NT_STATUS_RESOURCE_NOT_OWNED}, {"NT_STATUS_TOO_MANY_LINKS", NT_STATUS_TOO_MANY_LINKS}, {"NT_STATUS_QUOTA_LIST_INCONSISTENT", NT_STATUS_QUOTA_LIST_INCONSISTENT}, {"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE}, {"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES}, {"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES}, {"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED}, {NULL, 0} };
linux-master
fs/smb/client/nterr.c
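The nt_errs table above ends with a {NULL, 0} sentinel rather than carrying an explicit length, so consumers walk it linearly until they hit the NULL name. A minimal userspace sketch of that access pattern with an illustrative two-entry table (the helper name is hypothetical and the kernel's own lookup code is not part of this excerpt; the two status values are the well-known MS-ERREF codes):

#include <stdio.h>
#include <string.h>

/* Illustrative mirror of the nt_errs layout: name/value pairs closed
 * by a {NULL, 0} sentinel entry. */
struct demo_err_code {
        const char *nt_errstr;
        unsigned int nt_errcode;
};

static const struct demo_err_code demo_errs[] = {
        {"NT_STATUS_PIPE_BUSY", 0xC00000AE},
        {"NT_STATUS_NOT_A_DIRECTORY", 0xC0000103},
        {NULL, 0}       /* sentinel terminates the scan */
};

/* Linear scan until the sentinel, as the table layout implies. */
static unsigned int demo_name_to_code(const char *name)
{
        const struct demo_err_code *e;

        for (e = demo_errs; e->nt_errstr; e++)
                if (strcmp(e->nt_errstr, name) == 0)
                        return e->nt_errcode;
        return 0;       /* not found */
}

int main(void)
{
        printf("0x%08X\n", demo_name_to_code("NT_STATUS_PIPE_BUSY"));
        return 0;
}

The sentinel keeps the table self-describing: entries can be added or removed without maintaining a separate element count.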
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (c) 2007 Igor Mammedov
 * Author(s): Igor Mammedov ([email protected])
 *            Steve French ([email protected])
 *            Wang Lei ([email protected])
 *            David Howells ([email protected])
 *
 * Contains the CIFS DFS upcall routines used for hostname to
 * IP address translation.
 *
 */

#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/dns_resolver.h>
#include "dns_resolve.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

/**
 * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
 * @unc: UNC path specifying the server (with '/' as delimiter)
 * @ip_addr: Where to return the IP address.
 * @expiry: Where to return the expiry time for the dns record.
 *
 * Returns zero on success, -ve on error.
 */
int dns_resolve_server_name_to_ip(const char *unc, struct sockaddr *ip_addr,
                                  time64_t *expiry)
{
        const char *hostname, *sep;
        char *ip;
        int len, rc;

        if (!ip_addr || !unc)
                return -EINVAL;

        len = strlen(unc);
        if (len < 3) {
                cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc);
                return -EINVAL;
        }

        /* Discount leading slashes for cifs */
        len -= 2;
        hostname = unc + 2;

        /* Search for server name delimiter */
        sep = memchr(hostname, '/', len);
        if (sep)
                len = sep - hostname;
        else
                cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n",
                         __func__, unc);

        /* Try to interpret hostname as an IPv4 or IPv6 address */
        rc = cifs_convert_address(ip_addr, hostname, len);
        if (rc > 0) {
                cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %*.*s\n",
                         __func__, len, len, hostname);
                return 0;
        }

        /* Perform the upcall */
        rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
                       NULL, &ip, expiry, false);
        if (rc < 0) {
                cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
                         __func__, len, len, hostname);
        } else {
                cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
                         __func__, len, len, hostname, ip,
                         expiry ? (*expiry) : 0);

                rc = cifs_convert_address(ip_addr, ip, strlen(ip));
                kfree(ip);

                if (!rc) {
                        cifs_dbg(FYI, "%s: unable to determine ip address\n",
                                 __func__);
                        rc = -EHOSTUNREACH;
                } else
                        rc = 0;
        }
        return rc;
}
linux-master
fs/smb/client/dns_resolve.c
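Before deciding between direct address parsing and a DNS upcall, dns_resolve_server_name_to_ip() above slices the hostname out of the UNC path: skip the two leading separators, then cut at the next '/' if a share component follows. A self-contained userspace sketch of just that slicing step (the function name and buffer handling here are my own, and error handling is reduced to a single return code):

#include <stdio.h>
#include <string.h>

/* Extract the server name from a '/'-delimited UNC such as
 * "//server/share/dir", mirroring the length arithmetic used by
 * dns_resolve_server_name_to_ip(). */
static int unc_extract_hostname(const char *unc, char *out, size_t outlen)
{
        const char *host, *sep;
        size_t len;

        len = strlen(unc);
        if (len < 3)
                return -1;              /* too short to hold "//h" */

        len -= 2;                       /* discount the leading "//" */
        host = unc + 2;

        sep = memchr(host, '/', len);   /* share component, if any */
        if (sep)
                len = (size_t)(sep - host);

        if (len + 1 > outlen)
                return -1;
        memcpy(out, host, len);
        out[len] = '\0';
        return 0;
}

int main(void)
{
        char host[64];

        if (unc_extract_hostname("//srv1.example.com/share/dir",
                                 host, sizeof(host)) == 0)
                printf("host: %s\n", host);     /* srv1.example.com */
        return 0;
}

If no '/' follows the server name, the whole remainder is treated as the hostname, which matches the "server name is whole unc" debug path above.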
// SPDX-License-Identifier: GPL-2.0 /* * Functions to handle the cached directory entries * * Copyright (c) 2022, Ronnie Sahlberg <[email protected]> */ #include <linux/namei.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "smb2proto.h" #include "cached_dir.h" static struct cached_fid *init_cached_dir(const char *path); static void free_cached_dir(struct cached_fid *cfid); static void smb2_close_cached_fid(struct kref *ref); static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, const char *path, bool lookup_only, __u32 max_cached_dirs) { struct cached_fid *cfid; spin_lock(&cfids->cfid_list_lock); list_for_each_entry(cfid, &cfids->entries, entry) { if (!strcmp(cfid->path, path)) { /* * If it doesn't have a lease it is either not yet * fully cached or it may be in the process of * being deleted due to a lease break. */ if (!cfid->has_lease) { spin_unlock(&cfids->cfid_list_lock); return NULL; } kref_get(&cfid->refcount); spin_unlock(&cfids->cfid_list_lock); return cfid; } } if (lookup_only) { spin_unlock(&cfids->cfid_list_lock); return NULL; } if (cfids->num_entries >= max_cached_dirs) { spin_unlock(&cfids->cfid_list_lock); return NULL; } cfid = init_cached_dir(path); if (cfid == NULL) { spin_unlock(&cfids->cfid_list_lock); return NULL; } cfid->cfids = cfids; cfids->num_entries++; list_add(&cfid->entry, &cfids->entries); cfid->on_list = true; kref_get(&cfid->refcount); spin_unlock(&cfids->cfid_list_lock); return cfid; } static struct dentry * path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path) { struct dentry *dentry; const char *s, *p; char sep; sep = CIFS_DIR_SEP(cifs_sb); dentry = dget(cifs_sb->root); s = path; do { struct inode *dir = d_inode(dentry); struct dentry *child; if (!S_ISDIR(dir->i_mode)) { dput(dentry); dentry = ERR_PTR(-ENOTDIR); break; } /* skip separators */ while (*s == sep) s++; if (!*s) break; p = s++; /* next separator */ while (*s && *s != sep) s++; child = lookup_positive_unlocked(p, dentry, s - p); dput(dentry); dentry = child; } while (!IS_ERR(dentry)); return dentry; } static const char *path_no_prefix(struct cifs_sb_info *cifs_sb, const char *path) { size_t len = 0; if (!*path) return path; if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && cifs_sb->prepath) { len = strlen(cifs_sb->prepath) + 1; if (unlikely(len > strlen(path))) return ERR_PTR(-EINVAL); } return path + len; } /* * Open and cache a directory handle. * On error, *cfid is not initialized.
*/ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, const char *path, struct cifs_sb_info *cifs_sb, bool lookup_only, struct cached_fid **ret_cfid) { struct cifs_ses *ses; struct TCP_Server_Info *server; struct cifs_open_parms oparms; struct smb2_create_rsp *o_rsp = NULL; struct smb2_query_info_rsp *qi_rsp = NULL; int resp_buftype[2]; struct smb_rqst rqst[2]; struct kvec rsp_iov[2]; struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; struct kvec qi_iov[1]; int rc, flags = 0; __le16 *utf16_path = NULL; u8 oplock = SMB2_OPLOCK_LEVEL_II; struct cifs_fid *pfid; struct dentry *dentry = NULL; struct cached_fid *cfid; struct cached_fids *cfids; const char *npath; if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache || is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0)) return -EOPNOTSUPP; ses = tcon->ses; server = ses->server; cfids = tcon->cfids; if (!server->ops->new_lease_key) return -EIO; if (cifs_sb->root == NULL) return -ENOENT; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM; cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs); if (cfid == NULL) { kfree(utf16_path); return -ENOENT; } /* * At this point we either already have a lease, in which case we can * just return it, or we are guaranteed to be the only thread accessing * this cfid. */ if (cfid->has_lease) { *ret_cfid = cfid; kfree(utf16_path); return 0; } /* * Skip any prefix paths in @path as lookup_positive_unlocked() ends up * calling ->lookup() which already adds those through * build_path_from_dentry(). Also, do it earlier as we might reconnect * below when trying to send a compounded request and then potentially * having a different prefix path (e.g. after DFS failover). */ npath = path_no_prefix(cifs_sb, path); if (IS_ERR(npath)) { rc = PTR_ERR(npath); kfree(utf16_path); return rc; } /* * We do not hold the lock for the open because SMB2_open * may need to reconnect. * This is safe because no other thread will be able to get a ref * to the cfid until we have finished opening the file and (possibly) * acquired a lease.
*/ if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; pfid = &cfid->fid; server->ops->new_lease_key(pfid); memset(rqst, 0, sizeof(rqst)); resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); /* Open */ memset(&open_iov, 0, sizeof(open_iov)); rqst[0].rq_iov = open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; oparms = (struct cifs_open_parms) { .tcon = tcon, .path = path, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE), .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES, .disposition = FILE_OPEN, .fid = pfid, }; rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path); if (rc) goto oshr_free; smb2_set_next_command(tcon, &rqst[0]); memset(&qi_iov, 0, sizeof(qi_iov)); rqst[1].rq_iov = qi_iov; rqst[1].rq_nvec = 1; rc = SMB2_query_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 0, NULL); if (rc) goto oshr_free; smb2_set_related(&rqst[1]); rc = compound_send_recv(xid, ses, server, flags, 2, rqst, resp_buftype, rsp_iov); if (rc) { if (rc == -EREMCHG) { tcon->need_reconnect = true; pr_warn_once("server share %s deleted\n", tcon->tree_name); } goto oshr_free; } cfid->tcon = tcon; cfid->is_open = true; o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; oparms.fid->persistent_fid = o_rsp->PersistentFileId; oparms.fid->volatile_fid = o_rsp->VolatileFileId; #ifdef CONFIG_CIFS_DEBUG2 oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); #endif /* CIFS_DEBUG2 */ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) goto oshr_free; smb2_parse_contexts(server, o_rsp, &oparms.fid->epoch, oparms.fid->lease_key, &oplock, NULL, NULL); if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) goto oshr_free; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) goto oshr_free; if (!smb2_validate_and_copy_iov( le16_to_cpu(qi_rsp->OutputBufferOffset), sizeof(struct smb2_file_all_info), &rsp_iov[1], sizeof(struct smb2_file_all_info), (char *)&cfid->file_all_info)) cfid->file_all_info_is_valid = true; if (!npath[0]) dentry = dget(cifs_sb->root); else { dentry = path_to_dentry(cifs_sb, npath); if (IS_ERR(dentry)) { rc = -ENOENT; goto oshr_free; } } cfid->dentry = dentry; cfid->time = jiffies; cfid->has_lease = true; oshr_free: kfree(utf16_path); SMB2_open_free(&rqst[0]); SMB2_query_info_free(&rqst[1]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); spin_lock(&cfids->cfid_list_lock); if (rc && !cfid->has_lease) { if (cfid->on_list) { list_del(&cfid->entry); cfid->on_list = false; cfids->num_entries--; } rc = -ENOENT; } spin_unlock(&cfids->cfid_list_lock); if (!rc && !cfid->has_lease) { /* * We are guaranteed to have two references at this point. * One for the caller and one for a potential lease. * Release the Lease-ref so that the directory will be closed * when the caller closes the cached handle. 
*/ kref_put(&cfid->refcount, smb2_close_cached_fid); } if (rc) { if (cfid->is_open) SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, cfid->fid.volatile_fid); free_cached_dir(cfid); cfid = NULL; } if (rc == 0) { *ret_cfid = cfid; atomic_inc(&tcon->num_remote_opens); } return rc; } int open_cached_dir_by_dentry(struct cifs_tcon *tcon, struct dentry *dentry, struct cached_fid **ret_cfid) { struct cached_fid *cfid; struct cached_fids *cfids = tcon->cfids; if (cfids == NULL) return -ENOENT; spin_lock(&cfids->cfid_list_lock); list_for_each_entry(cfid, &cfids->entries, entry) { if (dentry && cfid->dentry == dentry) { cifs_dbg(FYI, "found a cached root file handle by dentry\n"); kref_get(&cfid->refcount); *ret_cfid = cfid; spin_unlock(&cfids->cfid_list_lock); return 0; } } spin_unlock(&cfids->cfid_list_lock); return -ENOENT; } static void smb2_close_cached_fid(struct kref *ref) { struct cached_fid *cfid = container_of(ref, struct cached_fid, refcount); spin_lock(&cfid->cfids->cfid_list_lock); if (cfid->on_list) { list_del(&cfid->entry); cfid->on_list = false; cfid->cfids->num_entries--; } spin_unlock(&cfid->cfids->cfid_list_lock); dput(cfid->dentry); cfid->dentry = NULL; if (cfid->is_open) { SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, cfid->fid.volatile_fid); atomic_dec(&cfid->tcon->num_remote_opens); } free_cached_dir(cfid); } void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { struct cached_fid *cfid = NULL; int rc; rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid); if (rc) { cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name); return; } spin_lock(&cfid->cfids->cfid_list_lock); if (cfid->has_lease) { cfid->has_lease = false; kref_put(&cfid->refcount, smb2_close_cached_fid); } spin_unlock(&cfid->cfids->cfid_list_lock); close_cached_dir(cfid); } void close_cached_dir(struct cached_fid *cfid) { kref_put(&cfid->refcount, smb2_close_cached_fid); } /* * Called from cifs_kill_sb when we unmount a share */ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb) { struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; struct cached_fid *cfid; struct cifs_tcon *tcon; struct tcon_link *tlink; struct cached_fids *cfids; for (node = rb_first(root); node; node = rb_next(node)) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); tcon = tlink_tcon(tlink); if (IS_ERR(tcon)) continue; cfids = tcon->cfids; if (cfids == NULL) continue; list_for_each_entry(cfid, &cfids->entries, entry) { dput(cfid->dentry); cfid->dentry = NULL; } } } /* * Invalidate all cached dirs when a TCON has been reset * due to a session loss. */ void invalidate_all_cached_dirs(struct cifs_tcon *tcon) { struct cached_fids *cfids = tcon->cfids; struct cached_fid *cfid, *q; LIST_HEAD(entry); if (cfids == NULL) return; spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { list_move(&cfid->entry, &entry); cfids->num_entries--; cfid->is_open = false; cfid->on_list = false; /* To prevent race with smb2_cached_lease_break() */ kref_get(&cfid->refcount); } spin_unlock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &entry, entry) { list_del(&cfid->entry); cancel_work_sync(&cfid->lease_break); if (cfid->has_lease) { /* * The lease was never cancelled from the server so we * need to drop the reference.
*/ spin_lock(&cfids->cfid_list_lock); cfid->has_lease = false; spin_unlock(&cfids->cfid_list_lock); kref_put(&cfid->refcount, smb2_close_cached_fid); } /* Drop the extra reference opened above */ kref_put(&cfid->refcount, smb2_close_cached_fid); } } static void smb2_cached_lease_break(struct work_struct *work) { struct cached_fid *cfid = container_of(work, struct cached_fid, lease_break); spin_lock(&cfid->cfids->cfid_list_lock); cfid->has_lease = false; spin_unlock(&cfid->cfids->cfid_list_lock); kref_put(&cfid->refcount, smb2_close_cached_fid); } int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]) { struct cached_fids *cfids = tcon->cfids; struct cached_fid *cfid; if (cfids == NULL) return false; spin_lock(&cfids->cfid_list_lock); list_for_each_entry(cfid, &cfids->entries, entry) { if (cfid->has_lease && !memcmp(lease_key, cfid->fid.lease_key, SMB2_LEASE_KEY_SIZE)) { cfid->time = 0; /* * We found a lease; remove it from the list * so no threads can access it. */ list_del(&cfid->entry); cfid->on_list = false; cfids->num_entries--; queue_work(cifsiod_wq, &cfid->lease_break); spin_unlock(&cfids->cfid_list_lock); return true; } } spin_unlock(&cfids->cfid_list_lock); return false; } static struct cached_fid *init_cached_dir(const char *path) { struct cached_fid *cfid; cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC); if (!cfid) return NULL; cfid->path = kstrdup(path, GFP_ATOMIC); if (!cfid->path) { kfree(cfid); return NULL; } INIT_WORK(&cfid->lease_break, smb2_cached_lease_break); INIT_LIST_HEAD(&cfid->entry); INIT_LIST_HEAD(&cfid->dirents.entries); mutex_init(&cfid->dirents.de_mutex); spin_lock_init(&cfid->fid_lock); kref_init(&cfid->refcount); return cfid; } static void free_cached_dir(struct cached_fid *cfid) { struct cached_dirent *dirent, *q; dput(cfid->dentry); cfid->dentry = NULL; /* * Delete all cached dirent names */ list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { list_del(&dirent->entry); kfree(dirent->name); kfree(dirent); } kfree(cfid->path); cfid->path = NULL; kfree(cfid); } static int cifs_cfids_laundromat_thread(void *p) { struct cached_fids *cfids = p; struct cached_fid *cfid, *q; struct list_head entry; while (!kthread_should_stop()) { ssleep(1); INIT_LIST_HEAD(&entry); if (kthread_should_stop()) return 0; spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) { list_del(&cfid->entry); list_add(&cfid->entry, &entry); cfids->num_entries--; } } spin_unlock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &entry, entry) { cfid->on_list = false; list_del(&cfid->entry); /* * Cancel, and wait for the work to finish in * case we are racing with it. */ cancel_work_sync(&cfid->lease_break); if (cfid->has_lease) { /* * The lease has not yet been cancelled from * the server so we need to drop the reference. */ spin_lock(&cfids->cfid_list_lock); cfid->has_lease = false; spin_unlock(&cfids->cfid_list_lock); kref_put(&cfid->refcount, smb2_close_cached_fid); } } } return 0; } struct cached_fids *init_cached_dirs(void) { struct cached_fids *cfids; cfids = kzalloc(sizeof(*cfids), GFP_KERNEL); if (!cfids) return NULL; spin_lock_init(&cfids->cfid_list_lock); INIT_LIST_HEAD(&cfids->entries); /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get().
*/ __module_get(THIS_MODULE); cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread, cfids, "cifsd-cfid-laundromat"); if (IS_ERR(cfids->laundromat)) { cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n"); kfree(cfids); module_put(THIS_MODULE); return NULL; } return cfids; } /* * Called from tconInfoFree when we are tearing down the tcon. * There are no active users or open files/directories at this point. */ void free_cached_dirs(struct cached_fids *cfids) { struct cached_fid *cfid, *q; LIST_HEAD(entry); if (cfids == NULL) return; if (cfids->laundromat) { kthread_stop(cfids->laundromat); cfids->laundromat = NULL; module_put(THIS_MODULE); } spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { cfid->on_list = false; cfid->is_open = false; list_move(&cfid->entry, &entry); } spin_unlock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &entry, entry) { list_del(&cfid->entry); free_cached_dir(cfid); } kfree(cfids); }
linux-master
fs/smb/client/cached_dir.c
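One invariant in cached_dir.c above is easy to miss: after a successful open_cached_dir(), the cached_fid deliberately holds two references, one owned by the caller (dropped via close_cached_dir()) and one owned by the lease (dropped when the lease is broken, invalidated, or expired by the laundromat thread). A toy userspace model of that lifetime rule (a plain int stands in for the kref and all locking is omitted; purely illustrative):

#include <stdio.h>

/* Toy stand-in for cached_fid: one reference per caller plus one
 * reference held on behalf of the lease. */
struct toy_cfid {
        int refcount;
};

static void toy_put(struct toy_cfid *c, const char *who)
{
        if (--c->refcount == 0)
                printf("%s dropped the last ref: close handle, free entry\n",
                       who);
}

int main(void)
{
        struct toy_cfid c = { .refcount = 1 };  /* lease reference */

        c.refcount++;           /* caller ref from open_cached_dir() */

        toy_put(&c, "close_cached_dir");        /* caller is done */
        toy_put(&c, "lease break");             /* lease goes away last */
        return 0;
}

Because the handle stays open until both owners are done, a lease break never pulls the directory handle out from under an active caller, and an early close by the caller does not defeat the cache while the lease still stands.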
// SPDX-License-Identifier: LGPL-2.1 /* * * vfs operations that deal with dentries * * Copyright (C) International Business Machines Corp., 2002,2009 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/file.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "fs_context.h" #include "cifs_ioctl.h" #include "fscache.h" static void renew_parental_timestamps(struct dentry *direntry) { /* BB check if there is a way to get the kernel to do this or if we really need this */ do { cifs_set_time(direntry, jiffies); direntry = direntry->d_parent; } while (!IS_ROOT(direntry)); } char * cifs_build_path_to_root(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, int add_treename) { int pplen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0; int dfsplen; char *full_path = NULL; /* if no prefix path, simply set path to the root of share to "" */ if (pplen == 0) { full_path = kzalloc(1, GFP_KERNEL); return full_path; } if (add_treename) dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1); else dfsplen = 0; full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL); if (full_path == NULL) return full_path; if (dfsplen) memcpy(full_path, tcon->tree_name, dfsplen); full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); memcpy(full_path + dfsplen + 1, ctx->prepath, pplen); convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); return full_path; } /* Note: caller must free return buffer */ const char * build_path_from_dentry(struct dentry *direntry, void *page) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); bool prefix = tcon->Flags & SMB_SHARE_IS_IN_DFS; return build_path_from_dentry_optional_prefix(direntry, page, prefix); } char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, const char *tree, int tree_len, bool prefix) { int dfsplen; int pplen = 0; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); char dirsep = CIFS_DIR_SEP(cifs_sb); char *s; if (unlikely(!page)) return ERR_PTR(-ENOMEM); if (prefix) dfsplen = strnlen(tree, tree_len + 1); else dfsplen = 0; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; s = dentry_path_raw(direntry, page, PATH_MAX); if (IS_ERR(s)) return s; if (!s[1]) // for root we want "", not "/" s++; if (s < (char *)page + pplen + dfsplen) return ERR_PTR(-ENAMETOOLONG); if (pplen) { cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); s -= pplen; memcpy(s + 1, cifs_sb->prepath, pplen - 1); *s = '/'; } if (dirsep != '/') { /* BB test paths to Windows with '/' in the midst of prepath */ char *p; for (p = s; *p; p++) if (*p == '/') *p = dirsep; } if (dfsplen) { s -= dfsplen; memcpy(s, tree, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < dfsplen; i++) { if (s[i] == '\\') s[i] = '/'; } } } return s; } char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, bool prefix) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); return __build_path_from_dentry_optional_prefix(direntry, page, tcon->tree_name, MAX_TREE_SIZE, prefix); } /* * Don't allow path components longer than the server max. 
* Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. */ static int check_name(struct dentry *direntry, struct cifs_tcon *tcon) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int i; if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength && direntry->d_name.len > le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) return -ENAMETOOLONG; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { for (i = 0; i < direntry->d_name.len; i++) { if (direntry->d_name.name[i] == '\\') { cifs_dbg(FYI, "Invalid file name\n"); return -EINVAL; } } } return 0; } /* Inode operations in similar order to how they appear in Linux file fs.h */ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, struct tcon_link *tlink, unsigned int oflags, umode_t mode, __u32 *oplock, struct cifs_fid *fid, struct cifs_open_info_data *buf) { int rc = -ENOENT; int create_options = CREATE_NOT_DIR; int desired_access; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = tlink_tcon(tlink); const char *full_path; void *page = alloc_dentry_path(); struct inode *newinode = NULL; int disposition; struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; *oplock = 0; if (tcon->ses->server->oplocks) *oplock = REQ_OPLOCK; full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { free_dentry_path(page); return PTR_ERR(full_path); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode, oflags, oplock, &fid->netfid, xid); switch (rc) { case 0: if (newinode == NULL) { /* query inode info */ goto cifs_create_get_file_info; } if (S_ISDIR(newinode->i_mode)) { CIFSSMBClose(xid, tcon, fid->netfid); iput(newinode); rc = -EISDIR; goto out; } if (!S_ISREG(newinode->i_mode)) { /* * The server may allow us to open things like * FIFOs, but the client isn't set up to deal * with that. If it's not a regular file, just * close it and proceed as if it were a normal * lookup. */ CIFSSMBClose(xid, tcon, fid->netfid); goto cifs_create_get_file_info; } /* success, no need to query */ goto cifs_create_set_dentry; case -ENOENT: goto cifs_create_get_file_info; case -EIO: case -EINVAL: /* * EIO could indicate that (posix open) operation is not * supported, despite what server claimed in capability * negotiation. * * POSIX open in samba versions 3.3.1 and earlier could * incorrectly fail with invalid parameter. */ tcon->broken_posix_open = true; break; case -EREMOTE: case -EOPNOTSUPP: /* * EREMOTE indicates DFS junction, which is not handled * in posix open. If either that or op not supported * returned, follow the normal lookup. */ break; default: goto out; } /* * fallthrough to retry, using older open call, this is case * where server does not support this SMB level, and falsely * claims capability (also get here for DFS case which should be * rare for path not covered on files) */ } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = 0; if (OPEN_FMODE(oflags) & FMODE_READ) desired_access |= GENERIC_READ; /* is this too little? 
*/ if (OPEN_FMODE(oflags) & FMODE_WRITE) desired_access |= GENERIC_WRITE; disposition = FILE_OVERWRITE_IF; if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) disposition = FILE_CREATE; else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) disposition = FILE_OVERWRITE_IF; else if ((oflags & O_CREAT) == O_CREAT) disposition = FILE_OPEN_IF; else cifs_dbg(FYI, "Create flag not set in create function\n"); /* * BB add processing to set equivalent of mode - e.g. via CreateX with * ACLs */ if (!server->ops->open) { rc = -ENOSYS; goto out; } /* * if we're not using unix extensions, see if we need to set * ATTR_READONLY on the create call */ if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = desired_access, .create_options = cifs_create_options(cifs_sb, create_options), .disposition = disposition, .path = full_path, .fid = fid, .mode = mode, }; rc = server->ops->open(xid, &oparms, oplock, buf); if (rc) { cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc); goto out; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * If Open reported that we actually created a file then we now have to * set the mode if possible. */ if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = inode->i_gid; else args.gid = current_fsgid(); } else { args.uid = INVALID_UID; /* no change */ args.gid = INVALID_GID; /* no change */ } CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid->netfid, current->tgid); } else { /* * BB implement mode setting via Windows security * descriptors e.g. 
*/ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/ /* Could set r/o dos attribute if mode & 0222 == 0 */ } cifs_create_get_file_info: /* server might mask mode so we have to query for it */ if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { #else { #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* TODO: Add support for calling POSIX query info here, but passing in fid */ rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid); if (newinode) { if (server->ops->set_lease_key) server->ops->set_lease_key(newinode, fid); if ((*oplock & CIFS_CREATE_ACTION) && S_ISREG(newinode->i_mode)) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) newinode->i_mode = mode; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) newinode->i_gid = inode->i_gid; else newinode->i_gid = current_fsgid(); } } } } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY cifs_create_set_dentry: #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (rc != 0) { cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", rc); goto out_err; } if (newinode) if (S_ISDIR(newinode->i_mode)) { rc = -EISDIR; goto out_err; } d_drop(direntry); d_add(direntry, newinode); out: free_dentry_path(page); return rc; out_err: if (server->ops->close) server->ops->close(xid, tcon, fid); if (newinode) iput(newinode); goto out; } int cifs_atomic_open(struct inode *inode, struct dentry *direntry, struct file *file, unsigned oflags, umode_t mode) { int rc; unsigned int xid; struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifs_fid fid = {}; struct cifs_pending_open open; __u32 oplock; struct cifsFileInfo *file_info; struct cifs_open_info_data buf = {}; if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) return -EIO; /* * Posix open is only called (at lookup time) for file create now. For * opens (rather than creates), because we do not know if it is a file * or directory yet, and current Samba no longer allows us to do posix * open on dirs, we could end up wasting an open call on what turns out * to be a dir. For file opens, we wait to call posix open till * cifs_open. It could be added to atomic_open in the future but the * performance tradeoff of the extra network request when EISDIR or * EACCES is returned would have to be weighed against the 50% reduction * in network traffic in the other paths. */ if (!(oflags & O_CREAT)) { struct dentry *res; /* * Check for hashed negative dentry. We have already revalidated * the dentry and it is fine. No need to perform another lookup. 
*/ if (!d_in_lookup(direntry)) return -ENOENT; res = cifs_lookup(inode, direntry, 0); if (IS_ERR(res)) return PTR_ERR(res); return finish_no_open(file, res); } xid = get_xid(); cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", inode, direntry, direntry); tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto out_free_xid; } tcon = tlink_tcon(tlink); rc = check_name(direntry, tcon); if (rc) goto out; server = tcon->ses->server; if (server->ops->new_lease_key) server->ops->new_lease_key(&fid); cifs_add_pending_open(&fid, tlink, &open); rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, &oplock, &fid, &buf); if (rc) { cifs_del_pending_open(&open); goto out; } if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) file->f_mode |= FMODE_CREATED; rc = finish_open(file, direntry, generic_file_open); if (rc) { if (server->ops->close) server->ops->close(xid, tcon, &fid); cifs_del_pending_open(&open); goto out; } if (file->f_flags & O_DIRECT && CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) file->f_op = &cifs_file_direct_nobrl_ops; else file->f_op = &cifs_file_direct_ops; } file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target); if (file_info == NULL) { if (server->ops->close) server->ops->close(xid, tcon, &fid); cifs_del_pending_open(&open); rc = -ENOMEM; goto out; } fscache_use_cookie(cifs_inode_cookie(file_inode(file)), file->f_mode & FMODE_WRITE); out: cifs_put_tlink(tlink); out_free_xid: free_xid(xid); cifs_free_open_info(&buf); return rc; } int cifs_create(struct mnt_idmap *idmap, struct inode *inode, struct dentry *direntry, umode_t mode, bool excl) { int rc; unsigned int xid = get_xid(); /* * BB below access is probably too much for mknod to request * but we have to do query and setpathinfo so requesting * less could fail (unless we want to request getatr and setatr * permissions (only). At least for POSIX we do not have to * request so much. 
*/ unsigned oflags = O_EXCL | O_CREAT | O_RDWR; struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifs_fid fid; __u32 oplock; struct cifs_open_info_data buf = {}; cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n", inode, direntry, direntry); if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) { rc = -EIO; goto out_free_xid; } tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); rc = PTR_ERR(tlink); if (IS_ERR(tlink)) goto out_free_xid; tcon = tlink_tcon(tlink); server = tcon->ses->server; if (server->ops->new_lease_key) server->ops->new_lease_key(&fid); rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, &oplock, &fid, &buf); if (!rc && server->ops->close) server->ops->close(xid, tcon, &fid); cifs_free_open_info(&buf); cifs_put_tlink(tlink); out_free_xid: free_xid(xid); return rc; } int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode, struct dentry *direntry, umode_t mode, dev_t device_number) { int rc = -EPERM; unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; const char *full_path; void *page; if (!old_valid_dev(device_number)) return -EINVAL; cifs_sb = CIFS_SB(inode->i_sb); if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); page = alloc_dentry_path(); tcon = tlink_tcon(tlink); xid = get_xid(); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto mknod_out; } rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon, full_path, mode, device_number); mknod_out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; } struct dentry * cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, unsigned int flags) { unsigned int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; struct inode *newInode = NULL; const char *full_path; void *page; int retry_count = 0; xid = get_xid(); cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", parent_dir_inode, direntry, direntry); /* check whether path exists */ cifs_sb = CIFS_SB(parent_dir_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { free_xid(xid); return ERR_CAST(tlink); } pTcon = tlink_tcon(tlink); rc = check_name(direntry, pTcon); if (unlikely(rc)) { cifs_put_tlink(tlink); free_xid(xid); return ERR_PTR(rc); } /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) in which we already have the sb rename sem */ page = alloc_dentry_path(); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { cifs_put_tlink(tlink); free_xid(xid); free_dentry_path(page); return ERR_CAST(full_path); } if (d_really_is_positive(direntry)) { cifs_dbg(FYI, "non-NULL inode in lookup\n"); } else { cifs_dbg(FYI, "NULL inode in lookup\n"); } cifs_dbg(FYI, "Full path: %s inode = 0x%p\n", full_path, d_inode(direntry)); again: if (pTcon->posix_extensions) rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid); else if (pTcon->unix_ext) { rc = cifs_get_inode_info_unix(&newInode, full_path, parent_dir_inode->i_sb, xid); } else { rc = cifs_get_inode_info(&newInode, full_path, NULL, parent_dir_inode->i_sb, xid, NULL); } if (rc == 0) { /* since paths are not looked up by component - the parent directories are presumed to be good here */ 
renew_parental_timestamps(direntry); } else if (rc == -EAGAIN && retry_count++ < 10) { goto again; } else if (rc == -ENOENT) { cifs_set_time(direntry, jiffies); newInode = NULL; } else { if (rc != -EACCES) { cifs_dbg(FYI, "Unexpected lookup error %d\n", rc); /* We special case check for Access Denied - since that is a common return code */ } newInode = ERR_PTR(rc); } free_dentry_path(page); cifs_put_tlink(tlink); free_xid(xid); return d_splice_alias(newInode, direntry); } static int cifs_d_revalidate(struct dentry *direntry, unsigned int flags) { struct inode *inode; int rc; if (flags & LOOKUP_RCU) return -ECHILD; if (d_really_is_positive(direntry)) { inode = d_inode(direntry); if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) CIFS_I(inode)->time = 0; /* force reval */ rc = cifs_revalidate_dentry(direntry); if (rc) { cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); switch (rc) { case -ENOENT: case -ESTALE: /* * Those errors mean the dentry is invalid * (file was deleted or recreated) */ return 0; default: /* * Otherwise some unexpected error happened * report it as-is to VFS layer */ return rc; } } else { /* * If the inode wasn't known to be a dfs entry when * the dentry was instantiated, such as when created * via ->readdir(), it needs to be set now since the * attributes will have been updated by * cifs_revalidate_dentry(). */ if (IS_AUTOMOUNT(inode) && !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) { spin_lock(&direntry->d_lock); direntry->d_flags |= DCACHE_NEED_AUTOMOUNT; spin_unlock(&direntry->d_lock); } return 1; } } /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!flags) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; if (time_after(jiffies, cifs_get_time(direntry) + HZ) || !lookupCacheEnabled) return 0; return 1; } /* static int cifs_d_delete(struct dentry *direntry) { int rc = 0; cifs_dbg(FYI, "In cifs d_delete, name = %pd\n", direntry); return rc; } */ const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_automount = cifs_d_automount, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; unsigned long hash; wchar_t c; int i, charlen; hash = init_name_hash(dentry); for (i = 0; i < q->len; i += charlen) { charlen = codepage->char2uni(&q->name[i], q->len - i, &c); /* error out if we can't convert the character */ if (unlikely(charlen < 0)) return charlen; hash = partial_name_hash(cifs_toupper(c), hash); } q->hash = end_name_hash(hash); return 0; } static int cifs_ci_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; wchar_t c1, c2; int i, l1, l2; /* * We make the assumption here that uppercase characters in the local * codepage are always the same length as their lowercase counterparts. * * If that's ever not the case, then this will fail to match it. */ if (name->len != len) return 1; for (i = 0; i < len; i += l1) { /* Convert characters in both strings to UTF-16. 
*/ l1 = codepage->char2uni(&str[i], len - i, &c1); l2 = codepage->char2uni(&name->name[i], name->len - i, &c2); /* * If we can't convert either character, just declare it to * be 1 byte long and compare the original byte. */ if (unlikely(l1 < 0 && l2 < 0)) { if (str[i] != name->name[i]) return 1; l1 = 1; continue; } /* * Here, we again ass|u|me that upper/lowercase versions of * a character are the same length in the local NLS. */ if (l1 != l2) return 1; /* Now compare uppercase versions of these characters */ if (cifs_toupper(c1) != cifs_toupper(c2)) return 1; } return 0; } const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, .d_automount = cifs_d_automount, };
linux-master
fs/smb/client/dir.c
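One detail in cifs_do_create() above that is easy to misread: the POSIX O_* open flags are folded into a single SMB create disposition before the wire call. A compilable sketch of that mapping (the DEMO_ constants restate the MS-SMB2 2.2.13 disposition values; only the flag logic is taken from the code above):

#include <fcntl.h>
#include <stdio.h>

/* Create-disposition values per MS-SMB2: FILE_CREATE fails if the file
 * already exists, FILE_OVERWRITE_IF creates or truncates, FILE_OPEN_IF
 * creates or opens. */
enum {
        DEMO_FILE_CREATE       = 2,
        DEMO_FILE_OPEN_IF      = 3,
        DEMO_FILE_OVERWRITE_IF = 5,
};

static int demo_disposition(int oflags)
{
        if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return DEMO_FILE_CREATE;        /* exclusive create */
        if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return DEMO_FILE_OVERWRITE_IF;  /* create or truncate */
        if (oflags & O_CREAT)
                return DEMO_FILE_OPEN_IF;       /* create or open */
        return DEMO_FILE_OVERWRITE_IF;          /* default in cifs_do_create() */
}

int main(void)
{
        printf("%d\n", demo_disposition(O_CREAT | O_EXCL));    /* 2 */
        return 0;
}

Note that cifs_do_create() starts from FILE_OVERWRITE_IF and only logs a debug message when no create flag is set, so the fallback here mirrors that default.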
// SPDX-License-Identifier: LGPL-2.1 /* * * Directory search handling * * Copyright (C) International Business Machines Corp., 2004, 2008 * Copyright (C) Red Hat, Inc., 2011 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/stat.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifsfs.h" #include "smb2proto.h" #include "fs_context.h" #include "cached_dir.h" /* * To be safe - for UCS to UTF-8 with strings loaded with the rare long * characters alloc more to account for such multibyte target UTF-8 * characters. */ #define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2) #ifdef CONFIG_CIFS_DEBUG2 static void dump_cifs_file_struct(struct file *file, char *label) { struct cifsFileInfo *cf; if (file) { cf = file->private_data; if (cf == NULL) { cifs_dbg(FYI, "empty cifs private file data\n"); return; } if (cf->invalidHandle) cifs_dbg(FYI, "Invalid handle\n"); if (cf->srch_inf.endOfSearch) cifs_dbg(FYI, "end of search\n"); if (cf->srch_inf.emptyDir) cifs_dbg(FYI, "empty dir\n"); } } #else static inline void dump_cifs_file_struct(struct file *file, char *label) { } #endif /* DEBUG2 */ /* * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT * * Find the dentry that matches "name". If there isn't one, create one. If it's * a negative dentry or the uniqueid or filetype(mode) changed, * then drop it and recreate it. */ static void cifs_prime_dcache(struct dentry *parent, struct qstr *name, struct cifs_fattr *fattr) { struct dentry *dentry, *alias; struct inode *inode; struct super_block *sb = parent->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); dentry = d_hash_and_lookup(parent, name); if (!dentry) { /* * If we know that the inode will need to be revalidated * immediately, then don't create a new dentry for it. * We'll end up doing an on the wire call either way and * this spares us an invalidation. */ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) return; retry: dentry = d_alloc_parallel(parent, name, &wq); } if (IS_ERR(dentry)) return; if (!d_in_lookup(dentry)) { inode = d_inode(dentry); if (inode) { if (d_mountpoint(dentry)) { dput(dentry); return; } /* * If we're generating inode numbers, then we don't * want to clobber the existing one with the one that * the readdir code created. */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; /* update inode in place * if both i_ino and i_mode didn't change */ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid && cifs_fattr_to_inode(inode, fattr) == 0) { dput(dentry); return; } } d_invalidate(dentry); dput(dentry); goto retry; } else { inode = cifs_iget(sb, fattr); if (!inode) inode = ERR_PTR(-ENOMEM); alias = d_splice_alias(inode, dentry); d_lookup_done(dentry); if (alias && !IS_ERR(alias)) dput(alias); } dput(dentry); } static bool reparse_file_needs_reval(const struct cifs_fattr *fattr) { if (!(fattr->cf_cifsattrs & ATTR_REPARSE)) return false; /* * The DFS tags should be only intepreted by server side as per * MS-FSCC 2.1.2.1, but let's include them anyway. * * Besides, if cf_cifstag is unset (0), then we still need it to be * revalidated to know exactly what reparse point it is. 
*/ switch (fattr->cf_cifstag) { case IO_REPARSE_TAG_DFS: case IO_REPARSE_TAG_DFSR: case IO_REPARSE_TAG_SYMLINK: case IO_REPARSE_TAG_NFS: case IO_REPARSE_TAG_MOUNT_POINT: case 0: return true; } return false; } static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { fattr->cf_uid = cifs_sb->ctx->linux_uid; fattr->cf_gid = cifs_sb->ctx->linux_gid; /* * The IO_REPARSE_TAG_LX_ tags originally were used by WSL but they * are preferred by the Linux client in some cases since, unlike * the NFS reparse tag (or EAs), they don't require an extra query * to determine which type of special file they represent. * TODO: go through all documented reparse tags to see if we can * reasonably map some of them to directories vs. files vs. symlinks */ if ((fattr->cf_cifsattrs & ATTR_REPARSE) && cifs_reparse_point_to_fattr(cifs_sb, fattr, fattr->cf_cifstag)) goto out_reparse; if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode; fattr->cf_dtype = DT_DIR; } else { fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_REG; } out_reparse: /* * We need to revalidate it further to make a decision about whether it * is a symbolic link, DFS referral or a reparse point with a direct * access like junctions, deduplicated files, NFS symlinks. */ if (reparse_file_needs_reval(fattr)) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; /* non-unix readdir doesn't provide nlink */ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK; if (fattr->cf_cifsattrs & ATTR_READONLY) fattr->cf_mode &= ~S_IWUGO; /* * We of course don't get ACL info in FIND_FIRST/NEXT results, so * mark it for revalidation so that "ls -l" will look right. It might * be super-slow, but if we don't do this then the ownership of files * may look wrong since the inodes may not have timed out by the time * "ls" does a stat() call on them. */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL && fattr->cf_cifsattrs & ATTR_SYSTEM) { if (fattr->cf_eof == 0) { fattr->cf_mode &= ~S_IFMT; fattr->cf_mode |= S_IFIFO; fattr->cf_dtype = DT_FIFO; } else { /* * trying to get the type and mode via SFU can be slow, * so just call those regular files for now, and mark * for reval */ fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; } } } /* Fill a cifs_fattr struct with info from SMB_FIND_FILE_POSIX_INFO. */ static void cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info, struct cifs_sb_info *cifs_sb) { struct smb2_posix_info_parsed parsed; posix_info_parse(info, NULL, &parsed); memset(fattr, 0, sizeof(*fattr)); fattr->cf_uniqueid = le64_to_cpu(info->Inode); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); fattr->cf_ctime = cifs_NTtimeToUnix(info->CreationTime); fattr->cf_nlink = le32_to_cpu(info->HardLinks); fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes); /* * Since we set the inode type below we need to mask off * to avoid strange results if bits set above. * XXX: why not make server&client use the type bits? 
*/ fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT; cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n", le32_to_cpu(info->DeviceId), le32_to_cpu(info->ReparseTag), le32_to_cpu(info->Mode)); if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode |= S_IFDIR; fattr->cf_dtype = DT_DIR; } else { /* * mark anything that is not a dir as regular * file. special files should have the REPARSE * attribute and will be marked as needing revalidation */ fattr->cf_mode |= S_IFREG; fattr->cf_dtype = DT_REG; } if (reparse_file_needs_reval(fattr)) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER); sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP); } static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info) { const FILE_DIRECTORY_INFO *fi = info; memset(fattr, 0, sizeof(*fattr)); fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes); fattr->cf_eof = le64_to_cpu(fi->EndOfFile); fattr->cf_bytes = le64_to_cpu(fi->AllocationSize); fattr->cf_createtime = le64_to_cpu(fi->CreationTime); fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime); fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime); } void cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, struct cifs_sb_info *cifs_sb) { __dir_info_to_fattr(fattr, info); cifs_fill_common_info(fattr, cifs_sb); } static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr, SEARCH_ID_FULL_DIR_INFO *info, struct cifs_sb_info *cifs_sb) { __dir_info_to_fattr(fattr, info); /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */ if (fattr->cf_cifsattrs & ATTR_REPARSE) fattr->cf_cifstag = le32_to_cpu(info->EaSize); cifs_fill_common_info(fattr, cifs_sb); } static void cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, struct cifs_sb_info *cifs_sb) { int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj; memset(fattr, 0, sizeof(*fattr)); fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate, info->LastAccessTime, offset); fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_cifsattrs = le16_to_cpu(info->Attributes); fattr->cf_bytes = le32_to_cpu(info->AllocationSize); fattr->cf_eof = le32_to_cpu(info->DataSize); cifs_fill_common_info(fattr, cifs_sb); } /* BB eventually need to add the following helper function to resolve NT_STATUS_STOPPED_ON_SYMLINK return code when we try to do FindFirst on (NTFS) directory symlinks */ /* int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, unsigned int xid) { __u16 fid; int len; int oplock = 0; int rc; struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb); char *tmpbuffer; rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, OPEN_REPARSE_POINT, &fid, &oplock, NULL, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (!rc) { tmpbuffer = kmalloc(maxpath, GFP_KERNEL); rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path, tmpbuffer, maxpath -1, fid, cifs_sb->local_nls); if (CIFSSMBClose(xid, ptcon, fid)) { cifs_dbg(FYI, "Error closing temporary reparsepoint open\n"); } } } */ static int _initiate_cifs_search(const unsigned int xid, struct file *file, const char *full_path) { __u16 search_flags; int rc = 0; struct cifsFileInfo *cifsFile; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; struct TCP_Server_Info *server; if (file->private_data == NULL) { tlink =
cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (cifsFile == NULL) { rc = -ENOMEM; goto error_exit; } spin_lock_init(&cifsFile->file_info_lock); file->private_data = cifsFile; cifsFile->tlink = cifs_get_tlink(tlink); tcon = tlink_tcon(tlink); } else { cifsFile = file->private_data; tcon = tlink_tcon(cifsFile->tlink); } server = tcon->ses->server; if (!server->ops->query_dir_first) { rc = -ENOSYS; goto error_exit; } cifsFile->invalidHandle = true; cifsFile->srch_inf.endOfSearch = false; cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos); ffirst_retry: /* test for Unix extensions */ /* but now check for them on the share/mount not on the SMB session */ /* if (cap_unix(tcon->ses) { */ if (tcon->unix_ext) cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; else if (tcon->posix_extensions) cifsFile->srch_inf.info_level = SMB_FIND_FILE_POSIX_INFO; else if ((tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find) == 0) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; } else /* not srvinos - BB fixme add check for backlevel? */ { cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO; } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; rc = server->ops->query_dir_first(xid, tcon, full_path, cifs_sb, &cifsFile->fid, search_flags, &cifsFile->srch_inf); if (rc == 0) cifsFile->invalidHandle = false; /* BB add following call to handle readdir on new NTFS symlink errors else if STATUS_STOPPED_ON_SYMLINK call get_symlink_reparse_path and retry with new path */ else if ((rc == -EOPNOTSUPP) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; goto ffirst_retry; } error_exit: cifs_put_tlink(tlink); return rc; } static int initiate_cifs_search(const unsigned int xid, struct file *file, const char *full_path) { int rc, retry_count = 0; do { rc = _initiate_cifs_search(xid, file, full_path); /* * If we don't have enough credits to start reading the * directory just try again after short wait. 
*/ if (rc != -EDEADLK) break; usleep_range(512, 2048); } while (retry_count++ < 5); return rc; } /* return length of unicode string in bytes */ static int cifs_unicode_bytelen(const char *str) { int len; const __le16 *ustr = (const __le16 *)str; for (len = 0; len <= PATH_MAX; len++) { if (ustr[len] == 0) return len << 1; } cifs_dbg(FYI, "Unicode string longer than PATH_MAX found\n"); return len << 1; } static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) { char *new_entry; FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; if (level == SMB_FIND_FILE_INFO_STANDARD) { FIND_FILE_STANDARD_INFO *pfData; pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 + pfData->FileNameLength; } else { u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset); if (old_entry + next_offset < old_entry) { cifs_dbg(VFS, "Invalid offset %u\n", next_offset); return NULL; } new_entry = old_entry + next_offset; } cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { cifs_dbg(VFS, "search entry %p began after end of SMB %p old entry %p\n", new_entry, end_of_smb, old_entry); return NULL; } else if (((level == SMB_FIND_FILE_INFO_STANDARD) && (new_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 > end_of_smb)) || ((level != SMB_FIND_FILE_INFO_STANDARD) && (new_entry + sizeof(FILE_DIRECTORY_INFO) + 1 > end_of_smb))) { cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n", new_entry, end_of_smb); return NULL; } else return new_entry; } struct cifs_dirent { const char *name; size_t namelen; u32 resume_key; u64 ino; }; static void cifs_fill_dirent_posix(struct cifs_dirent *de, const struct smb2_posix_info *info) { struct smb2_posix_info_parsed parsed; /* payload should have already been checked at this point */ if (posix_info_parse(info, NULL, &parsed) < 0) { cifs_dbg(VFS, "Invalid POSIX info payload\n"); return; } de->name = parsed.name; de->namelen = parsed.name_len; de->resume_key = info->Ignored; de->ino = le64_to_cpu(info->Inode); } static void cifs_fill_dirent_unix(struct cifs_dirent *de, const FILE_UNIX_INFO *info, bool is_unicode) { de->name = &info->FileName[0]; if (is_unicode) de->namelen = cifs_unicode_bytelen(de->name); else de->namelen = strnlen(de->name, PATH_MAX); de->resume_key = info->ResumeKey; de->ino = le64_to_cpu(info->basic.UniqueId); } static void cifs_fill_dirent_dir(struct cifs_dirent *de, const FILE_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_full(struct cifs_dirent *de, const FILE_FULL_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_search(struct cifs_dirent *de, const SEARCH_ID_FULL_DIR_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; de->ino = le64_to_cpu(info->UniqueId); } static void cifs_fill_dirent_both(struct cifs_dirent *de, const FILE_BOTH_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_std(struct cifs_dirent *de, const FIND_FILE_STANDARD_INFO *info) { de->name = &info->FileName[0]; /* one byte length, no endianness conversion */ de->namelen = info->FileNameLength;
de->resume_key = info->ResumeKey; } static int cifs_fill_dirent(struct cifs_dirent *de, const void *info, u16 level, bool is_unicode) { memset(de, 0, sizeof(*de)); switch (level) { case SMB_FIND_FILE_POSIX_INFO: cifs_fill_dirent_posix(de, info); break; case SMB_FIND_FILE_UNIX: cifs_fill_dirent_unix(de, info, is_unicode); break; case SMB_FIND_FILE_DIRECTORY_INFO: cifs_fill_dirent_dir(de, info); break; case SMB_FIND_FILE_FULL_DIRECTORY_INFO: cifs_fill_dirent_full(de, info); break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: cifs_fill_dirent_search(de, info); break; case SMB_FIND_FILE_BOTH_DIRECTORY_INFO: cifs_fill_dirent_both(de, info); break; case SMB_FIND_FILE_INFO_STANDARD: cifs_fill_dirent_std(de, info); break; default: cifs_dbg(FYI, "Unknown findfirst level %d\n", level); return -EINVAL; } return 0; } #define UNICODE_DOT cpu_to_le16(0x2e) /* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode) { int rc = 0; if (!de->name) return 0; if (is_unicode) { __le16 *ufilename = (__le16 *)de->name; if (de->namelen == 2) { /* check for . */ if (ufilename[0] == UNICODE_DOT) rc = 1; } else if (de->namelen == 4) { /* check for .. */ if (ufilename[0] == UNICODE_DOT && ufilename[1] == UNICODE_DOT) rc = 2; } } else /* ASCII */ { if (de->namelen == 1) { if (de->name[0] == '.') rc = 1; } else if (de->namelen == 2) { if (de->name[0] == '.' && de->name[1] == '.') rc = 2; } } return rc; } /* Check if directory that we are searching has changed so we can decide whether we can use the cached search results from the previous search */ static int is_dir_changed(struct file *file) { struct inode *inode = file_inode(file); struct cifsInodeInfo *cifsInfo = CIFS_I(inode); if (cifsInfo->time == 0) return 1; /* directory was changed, perhaps due to unlink */ else return 0; } static int cifs_save_resume_key(const char *current_entry, struct cifsFileInfo *file_info) { struct cifs_dirent de; int rc; rc = cifs_fill_dirent(&de, current_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (!rc) { file_info->srch_inf.presume_name = de.name; file_info->srch_inf.resume_name_len = de.namelen; file_info->srch_inf.resume_key = de.resume_key; } return rc; } /* * Find the corresponding entry in the search. Note that the SMB server returns * search entries for . and .. which complicates logic here if we choose to * parse for them and we do not assume that they are located in the findfirst * return buffer. We start counting in the buffer with entry 2 and increment for * every entry (do not increment for . or .. entry). */ static int find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, struct file *file, const char *full_path, char **current_entry, int *num_to_ret) { __u16 search_flags; int rc = 0; int pos_in_buf = 0; loff_t first_entry_in_buffer; loff_t index_to_find = pos; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); struct TCP_Server_Info *server = tcon->ses->server; /* check if index in the buffer */ if (!server->ops->query_dir_first || !server->ops->query_dir_next) return -ENOSYS; if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL)) return -ENOENT; *current_entry = NULL; first_entry_in_buffer = cfile->srch_inf.index_of_last_entry - cfile->srch_inf.entries_in_buffer; /* * If first entry in buf is zero then is first buffer * in search response data which means it is likely . and .. 
* will be in this buffer, although some servers do not return * . and .. for the root of a drive and for those we need * to start two entries earlier. */ dump_cifs_file_struct(file, "In fce "); if (((index_to_find < cfile->srch_inf.index_of_last_entry) && is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) { /* close and restart search */ cifs_dbg(FYI, "search backing up - close and restart search\n"); spin_lock(&cfile->file_info_lock); if (server->ops->dir_needs_close(cfile)) { cfile->invalidHandle = true; spin_unlock(&cfile->file_info_lock); if (server->ops->close_dir) server->ops->close_dir(xid, tcon, &cfile->fid); } else spin_unlock(&cfile->file_info_lock); if (cfile->srch_inf.ntwrk_buf_start) { cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n"); if (cfile->srch_inf.smallBuf) cifs_small_buf_release(cfile->srch_inf. ntwrk_buf_start); else cifs_buf_release(cfile->srch_inf. ntwrk_buf_start); cfile->srch_inf.ntwrk_buf_start = NULL; } rc = initiate_cifs_search(xid, file, full_path); if (rc) { cifs_dbg(FYI, "error %d reinitiating a search on rewind\n", rc); return rc; } /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; while ((index_to_find >= cfile->srch_inf.index_of_last_entry) && (rc == 0) && !cfile->srch_inf.endOfSearch) { cifs_dbg(FYI, "calling findnext2\n"); rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, search_flags, &cfile->srch_inf); /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); if (rc) return -ENOENT; } if (index_to_find < cfile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ /* scan and find it */ int i; char *cur_ent; char *end_of_smb; if (cfile->srch_inf.ntwrk_buf_start == NULL) { cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n"); return -EIO; } end_of_smb = cfile->srch_inf.ntwrk_buf_start + server->ops->calc_smb_size( cfile->srch_inf.ntwrk_buf_start); cur_ent = cfile->srch_inf.srch_entries_start; first_entry_in_buffer = cfile->srch_inf.index_of_last_entry - cfile->srch_inf.entries_in_buffer; pos_in_buf = index_to_find - first_entry_in_buffer; cifs_dbg(FYI, "found entry - pos_in_buf %d\n", pos_in_buf); for (i = 0; (i < (pos_in_buf)) && (cur_ent != NULL); i++) { /* go entry by entry figuring out which is first */ cur_ent = nxt_dir_entry(cur_ent, end_of_smb, cfile->srch_inf.info_level); } if ((cur_ent == NULL) && (i < pos_in_buf)) { /* BB fixme - check if we should flag this error */ cifs_dbg(VFS, "reached end of buf searching for pos in buf %d index to find %lld rc %d\n", pos_in_buf, index_to_find, rc); } rc = 0; *current_entry = cur_ent; } else { cifs_dbg(FYI, "index not in buffer - could not findnext into it\n"); return 0; } if (pos_in_buf >= cfile->srch_inf.entries_in_buffer) { cifs_dbg(FYI, "can not return entries pos_in_buf beyond last\n"); *num_to_ret = 0; } else *num_to_ret = cfile->srch_inf.entries_in_buffer - pos_in_buf; return rc; } static bool emit_cached_dirents(struct cached_dirents *cde, struct dir_context *ctx) { struct cached_dirent *dirent; bool rc; list_for_each_entry(dirent, &cde->entries, entry) { /* * Skip all early entries prior to the current lseek() * position. 
*/ if (ctx->pos > dirent->pos) continue; /* * We recorded the current ->pos value for the dirent * when we stored it in the cache. * However, this sequence of ->pos values may have holes * in it, for example dot-dirs returned from the server * are suppressed. * Handle this by forcing ctx->pos to be the same as the * ->pos of the current dirent we emit from the cache. * This means that when we emit these entries from the cache * we now emit them with the same ->pos value as in the * initial scan. */ ctx->pos = dirent->pos; rc = dir_emit(ctx, dirent->name, dirent->namelen, dirent->fattr.cf_uniqueid, dirent->fattr.cf_dtype); if (!rc) return rc; ctx->pos++; } return true; } static void update_cached_dirents_count(struct cached_dirents *cde, struct dir_context *ctx) { if (cde->ctx != ctx) return; if (cde->is_valid || cde->is_failed) return; cde->pos++; } static void finished_cached_dirents_count(struct cached_dirents *cde, struct dir_context *ctx) { if (cde->ctx != ctx) return; if (cde->is_valid || cde->is_failed) return; if (ctx->pos != cde->pos) return; cde->is_valid = 1; } static void add_cached_dirent(struct cached_dirents *cde, struct dir_context *ctx, const char *name, int namelen, struct cifs_fattr *fattr) { struct cached_dirent *de; if (cde->ctx != ctx) return; if (cde->is_valid || cde->is_failed) return; if (ctx->pos != cde->pos) { cde->is_failed = 1; return; } de = kzalloc(sizeof(*de), GFP_ATOMIC); if (de == NULL) { cde->is_failed = 1; return; } de->namelen = namelen; de->name = kstrndup(name, namelen, GFP_ATOMIC); if (de->name == NULL) { kfree(de); cde->is_failed = 1; return; } de->pos = ctx->pos; memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr)); list_add_tail(&de->entry, &cde->entries); } static bool cifs_dir_emit(struct dir_context *ctx, const char *name, int namelen, struct cifs_fattr *fattr, struct cached_fid *cfid) { bool rc; ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid); rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype); if (!rc) return rc; if (cfid) { mutex_lock(&cfid->dirents.de_mutex); add_cached_dirent(&cfid->dirents, ctx, name, namelen, fattr); mutex_unlock(&cfid->dirents.de_mutex); } return rc; } static int cifs_filldir(char *find_entry, struct file *file, struct dir_context *ctx, char *scratch_buf, unsigned int max_len, struct cached_fid *cfid) { struct cifsFileInfo *file_info = file->private_data; struct super_block *sb = file_inode(file)->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_dirent de = { NULL, }; struct cifs_fattr fattr; struct qstr name; int rc = 0; rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (rc) return rc; if (de.namelen > max_len) { cifs_dbg(VFS, "bad search response length %zd past smb end\n", de.namelen); return -EINVAL; } /* skip . and ..
since we added them first */ if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode)) return 0; if (file_info->srch_inf.unicode) { struct nls_table *nlt = cifs_sb->local_nls; int map_type; map_type = cifs_remap(cifs_sb); name.name = scratch_buf; name.len = cifs_from_utf16((char *)name.name, (__le16 *)de.name, UNICODE_NAME_MAX, min_t(size_t, de.namelen, (size_t)max_len), nlt, map_type); name.len -= nls_nullsize(nlt); } else { name.name = de.name; name.len = de.namelen; } switch (file_info->srch_inf.info_level) { case SMB_FIND_FILE_POSIX_INFO: cifs_posix_to_fattr(&fattr, (struct smb2_posix_info *)find_entry, cifs_sb); break; case SMB_FIND_FILE_UNIX: cifs_unix_basic_to_fattr(&fattr, &((FILE_UNIX_INFO *)find_entry)->basic, cifs_sb); if (S_ISLNK(fattr.cf_mode)) fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; break; case SMB_FIND_FILE_INFO_STANDARD: cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)find_entry, cifs_sb); break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: cifs_fulldir_info_to_fattr(&fattr, (SEARCH_ID_FULL_DIR_INFO *)find_entry, cifs_sb); break; default: cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)find_entry, cifs_sb); break; } if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { fattr.cf_uniqueid = de.ino; } else { fattr.cf_uniqueid = iunique(sb, ROOT_I); cifs_autodisable_serverino(cifs_sb); } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) && couldbe_mf_symlink(&fattr)) /* * trying to get the type and mode can be slow, * so just call those regular files for now, and mark * for reval */ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; cifs_prime_dcache(file_dentry(file), &name, &fattr); return !cifs_dir_emit(ctx, name.name, name.len, &fattr, cfid); } int cifs_readdir(struct file *file, struct dir_context *ctx) { int rc = 0; unsigned int xid; int i; struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; struct cifsFileInfo *cifsFile; char *current_entry; int num_to_fill = 0; char *tmp_buf = NULL; char *end_of_smb; unsigned int max_len; const char *full_path; void *page = alloc_dentry_path(); struct cached_fid *cfid = NULL; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); xid = get_xid(); full_path = build_path_from_dentry(file_dentry(file), page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto rddir2_exit; } if (file->private_data == NULL) { tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) goto cache_not_found; tcon = tlink_tcon(tlink); } else { cifsFile = file->private_data; tcon = tlink_tcon(cifsFile->tlink); } rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); cifs_put_tlink(tlink); if (rc) goto cache_not_found; mutex_lock(&cfid->dirents.de_mutex); /* * If this was reading from the start of the directory * we need to initialize scanning and storing the * directory content. */ if (ctx->pos == 0 && cfid->dirents.ctx == NULL) { cfid->dirents.ctx = ctx; cfid->dirents.pos = 2; } /* * If we already have the entire directory cached then * we can just serve the cache. */ if (cfid->dirents.is_valid) { if (!dir_emit_dots(file, ctx)) { mutex_unlock(&cfid->dirents.de_mutex); goto rddir2_exit; } emit_cached_dirents(&cfid->dirents, ctx); mutex_unlock(&cfid->dirents.de_mutex); goto rddir2_exit; } mutex_unlock(&cfid->dirents.de_mutex); /* Drop the cache while calling initiate_cifs_search and * find_cifs_entry in case there will be reconnects during * query_directory. */ close_cached_dir(cfid); cfid = NULL; cache_not_found: /* * Ensure FindFirst doesn't fail before doing filldir() for '.' and * '..'. 
Otherwise we won't be able to notify VFS in case of failure. */ if (file->private_data == NULL) { rc = initiate_cifs_search(xid, file, full_path); cifs_dbg(FYI, "initiate cifs search rc %d\n", rc); if (rc) goto rddir2_exit; } if (!dir_emit_dots(file, ctx)) goto rddir2_exit; /* 1) If search is active, is in current search buffer? if it before then restart search if after then keep searching till find it */ cifsFile = file->private_data; if (cifsFile->srch_inf.endOfSearch) { if (cifsFile->srch_inf.emptyDir) { cifs_dbg(FYI, "End of search, empty dir\n"); rc = 0; goto rddir2_exit; } } /* else { cifsFile->invalidHandle = true; tcon->ses->server->close(xid, tcon, &cifsFile->fid); } */ tcon = tlink_tcon(cifsFile->tlink); rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path, &current_entry, &num_to_fill); open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); if (rc) { cifs_dbg(FYI, "fce error %d\n", rc); goto rddir2_exit; } else if (current_entry != NULL) { cifs_dbg(FYI, "entry %lld found\n", ctx->pos); } else { if (cfid) { mutex_lock(&cfid->dirents.de_mutex); finished_cached_dirents_count(&cfid->dirents, ctx); mutex_unlock(&cfid->dirents.de_mutex); } cifs_dbg(FYI, "Could not find entry\n"); goto rddir2_exit; } cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); max_len = tcon->ses->server->ops->calc_smb_size( cifsFile->srch_inf.ntwrk_buf_start); end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); if (tmp_buf == NULL) { rc = -ENOMEM; goto rddir2_exit; } for (i = 0; i < num_to_fill; i++) { if (current_entry == NULL) { /* evaluate whether this case is an error */ cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", num_to_fill, i); break; } /* * if buggy server returns . and .. late do we want to * check for that here? */ *tmp_buf = 0; rc = cifs_filldir(current_entry, file, ctx, tmp_buf, max_len, cfid); if (rc) { if (rc > 0) rc = 0; break; } ctx->pos++; if (cfid) { mutex_lock(&cfid->dirents.de_mutex); update_cached_dirents_count(&cfid->dirents, ctx); mutex_unlock(&cfid->dirents.de_mutex); } if (ctx->pos == cifsFile->srch_inf.index_of_last_entry) { cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", ctx->pos, tmp_buf); cifs_save_resume_key(current_entry, cifsFile); break; } current_entry = nxt_dir_entry(current_entry, end_of_smb, cifsFile->srch_inf.info_level); } kfree(tmp_buf); rddir2_exit: if (cfid) close_cached_dir(cfid); free_dentry_path(page); free_xid(xid); return rc; }
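/*
 * Editor's note: a small userspace model of the position handling in
 * emit_cached_dirents() above. Cached entries carry the ->pos they were
 * originally emitted at; on replay we skip entries before the caller's
 * position, then force the caller's position to each entry's recorded
 * pos so holes (e.g. suppressed dot entries) replay identically. All
 * names here (toy_dirent, toy_emit, replay_cache) are hypothetical
 * stand-ins for the kernel structures, shown only as an illustration.
 */
#include <stdio.h>

struct toy_dirent {
	long long pos;		/* ctx->pos recorded when the entry was cached */
	const char *name;
};

/* stand-in for dir_emit(); returns 0 to ask the caller to stop */
static int toy_emit(long long pos, const char *name)
{
	printf("%lld %s\n", pos, name);
	return 1;
}

static void replay_cache(const struct toy_dirent *cache, int n, long long *ctx_pos)
{
	int i;

	for (i = 0; i < n; i++) {
		if (*ctx_pos > cache[i].pos)
			continue;		/* already consumed by an earlier read */
		*ctx_pos = cache[i].pos;	/* re-sync across holes in the sequence */
		if (!toy_emit(*ctx_pos, cache[i].name))
			return;
		(*ctx_pos)++;
	}
}

int main(void)
{
	/* entries at pos 2, 3 and 5, with a hole at 4 (e.g. a suppressed dot-dir) */
	const struct toy_dirent cache[] = {
		{ 2, "alpha" }, { 3, "beta" }, { 5, "gamma" },
	};
	long long pos = 3;	/* resume mid-directory */

	replay_cache(cache, 3, &pos);	/* prints "3 beta" then "5 gamma" */
	return 0;
}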
linux-master
fs/smb/client/readdir.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (c) International Business Machines Corp., 2002,2008 * Author(s): Steve French ([email protected]) * * Error mapping routines from Samba libsmb/errormap.c * Copyright (C) Andrew Tridgell 2001 */ #include <linux/net.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ctype.h> #include <linux/fs.h> #include <asm/div64.h> #include <asm/byteorder.h> #include <linux/inet.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smberr.h" #include "cifs_debug.h" #include "nterr.h" struct smb_to_posix_error { __u16 smb_err; int posix_code; }; static const struct smb_to_posix_error mapping_table_ERRDOS[] = { {ERRbadfunc, -EINVAL}, {ERRbadfile, -ENOENT}, {ERRbadpath, -ENOTDIR}, {ERRnofids, -EMFILE}, {ERRnoaccess, -EACCES}, {ERRbadfid, -EBADF}, {ERRbadmcb, -EIO}, {ERRnomem, -EREMOTEIO}, {ERRbadmem, -EFAULT}, {ERRbadenv, -EFAULT}, {ERRbadformat, -EINVAL}, {ERRbadaccess, -EACCES}, {ERRbaddata, -EIO}, {ERRbaddrive, -ENXIO}, {ERRremcd, -EACCES}, {ERRdiffdevice, -EXDEV}, {ERRnofiles, -ENOENT}, {ERRwriteprot, -EROFS}, {ERRbadshare, -EBUSY}, {ERRlock, -EACCES}, {ERRunsup, -EINVAL}, {ERRnosuchshare, -ENXIO}, {ERRfilexists, -EEXIST}, {ERRinvparm, -EINVAL}, {ERRdiskfull, -ENOSPC}, {ERRinvname, -ENOENT}, {ERRinvlevel, -EOPNOTSUPP}, {ERRdirnotempty, -ENOTEMPTY}, {ERRnotlocked, -ENOLCK}, {ERRcancelviolation, -ENOLCK}, {ERRalreadyexists, -EEXIST}, {ERRmoredata, -EOVERFLOW}, {ERReasnotsupported, -EOPNOTSUPP}, {ErrQuota, -EDQUOT}, {ErrNotALink, -ENOLINK}, {ERRnetlogonNotStarted, -ENOPROTOOPT}, {ERRsymlink, -EOPNOTSUPP}, {ErrTooManyLinks, -EMLINK}, {0, 0} }; static const struct smb_to_posix_error mapping_table_ERRSRV[] = { {ERRerror, -EIO}, {ERRbadpw, -EACCES}, /* was EPERM */ {ERRbadtype, -EREMOTE}, {ERRaccess, -EACCES}, {ERRinvtid, -ENXIO}, {ERRinvnetname, -ENXIO}, {ERRinvdevice, -ENXIO}, {ERRqfull, -ENOSPC}, {ERRqtoobig, -ENOSPC}, {ERRqeof, -EIO}, {ERRinvpfid, -EBADF}, {ERRsmbcmd, -EBADRQC}, {ERRsrverror, -EIO}, {ERRbadBID, -EIO}, {ERRfilespecs, -EINVAL}, {ERRbadLink, -EIO}, {ERRbadpermits, -EINVAL}, {ERRbadPID, -ESRCH}, {ERRsetattrmode, -EINVAL}, {ERRpaused, -EHOSTDOWN}, {ERRmsgoff, -EHOSTDOWN}, {ERRnoroom, -ENOSPC}, {ERRrmuns, -EUSERS}, {ERRtimeout, -ETIME}, {ERRnoresource, -EREMOTEIO}, {ERRtoomanyuids, -EUSERS}, {ERRbaduid, -EACCES}, {ERRusempx, -EIO}, {ERRusestd, -EIO}, {ERR_NOTIFY_ENUM_DIR, -ENOBUFS}, {ERRnoSuchUser, -EACCES}, /* {ERRaccountexpired, -EACCES}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EACCES},*/ {ERRaccountexpired, -EKEYEXPIRED}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EKEYEXPIRED}, {ERRnosupport, -EINVAL}, {0, 0} }; /* * Convert a string containing text IPv4 or IPv6 address to binary form. * * Returns 0 on failure. */ static int cifs_inet_pton(const int address_family, const char *cp, int len, void *dst) { int ret = 0; /* calculate length by finding first slash or NULL */ if (address_family == AF_INET) ret = in4_pton(cp, len, dst, '\\', NULL); else if (address_family == AF_INET6) ret = in6_pton(cp, len, dst , '\\', NULL); cifs_dbg(NOISY, "address conversion returned %d for %*.*s\n", ret, len, len, cp); if (ret > 0) ret = 1; return ret; } /* * Try to convert a string to an IPv4 address and then attempt to convert * it to an IPv6 address if that fails. Set the family field if either * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to * treat the part following it as a numeric sin6_scope_id. 
* * Returns 0 on failure. */ int cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; /* IPv4 address */ if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) { s4->sin_family = AF_INET; return 1; } /* attempt to exclude the scope ID from the address part */ pct = memchr(src, '%', len); alen = pct ? pct - src : len; rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr); if (!rc) return rc; s6->sin6_family = AF_INET6; if (pct) { /* grab the scope ID */ slen = len - (alen + 1); if (slen <= 0 || slen > 12) return 0; memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; rc = kstrtouint(scope_id, 0, &s6->sin6_scope_id); rc = (rc == 0) ? 1 : 0; } return rc; } void cifs_set_port(struct sockaddr *addr, const unsigned short int port) { switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *)addr)->sin_port = htons(port); break; case AF_INET6: ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); break; } } /***************************************************************************** convert a NT status code to a dos class/code *****************************************************************************/ /* NT status -> dos error map */ static const struct { __u8 dos_class; __u16 dos_code; __u32 ntstatus; } ntstatus_to_dos_map[] = { { ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, { ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, { ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, { ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, { ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, { ERRDOS, 87, NT_STATUS_INVALID_CID}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, { ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, { ERRDOS, 38, NT_STATUS_END_OF_FILE}, { ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, { ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, { ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR}, /* { This NT error code was 'sqashed' from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK during the session setup } */ { ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, { ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, { ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, { ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, { ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED}, /* { This NT error code was 'sqashed' from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, { ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, { ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, { ERRHRD, ERRgeneral, 
NT_STATUS_UNWIND}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, { ERRDOS, 158, NT_STATUS_NOT_LOCKED}, { ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, { ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, { ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, { /* mapping changed since shell does lookup on * expects FileNotFound */ ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, { ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, { ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, { ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, { ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, { ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, { ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, { ERRDOS, 23, NT_STATUS_DATA_ERROR}, { ERRDOS, 23, NT_STATUS_CRC_ERROR}, { ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, { ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, { ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, { ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, { ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, { ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, { ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, { ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, { ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, { ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, { ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, { ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, { ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, { ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, { ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, { ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, { ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, { ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, { ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, { ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, { ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, { ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, { ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, { ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */ ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, { ERRHRD, ERRgeneral, 
NT_STATUS_LAST_ADMIN}, /* { This NT error code was 'sqashed' from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, { ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, { ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, { ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, { ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, { ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, { ERRDOS, 112, NT_STATUS_DISK_FULL}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, { ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, { ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, { ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, { ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, { ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, { ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, { ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED}, /* { This NT error code was 'sqashed' from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */ { ERRDOS, ERRnoresource, NT_STATUS_INSUFFICIENT_RESOURCES}, { ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, { ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, { ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, { ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, { ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, { ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, { 
ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, { ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, { ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, { ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, { ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, { ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, { ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, { ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, { ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, { ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, { ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, { ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, { ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, { ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, { ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, { ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, { ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, { ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, { ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, { ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, { ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, { ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, { ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, { ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, { ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, { ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, { ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, { ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, { ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, { ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, { ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, { ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, { ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, { ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, { ERRDOS, 87, 
NT_STATUS_INVALID_PARAMETER_2}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, { ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, { ERRDOS, 203, 0xc0000100}, { ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, { ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, { ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, { ERRDOS, 2401, NT_STATUS_FILES_OPEN}, { ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, { ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, { ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, { ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, { ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, { ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, { ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, { ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, { ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, { ERRHRD, 
ERRgeneral, NT_STATUS_OPEN_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, { ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, { ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, { ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, { ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, { ERRDOS, 59, NT_STATUS_LINK_FAILED}, { ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, { ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, { ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, { ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, { ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, { ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, { ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, { ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, { ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, { ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, { ERRHRD, ERRgeneral, 0xc000016e}, { ERRHRD, ERRgeneral, 0xc000016f}, { ERRHRD, ERRgeneral, 0xc0000170}, { ERRHRD, ERRgeneral, 0xc0000171}, { ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, { ERRHRD, ERRgeneral, 0xc0000179}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, { ERRHRD, ERRgeneral, 
NT_STATUS_NO_LOG_SPACE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, { ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, { ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, { ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, { ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, { ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, { ERRDOS, 19, NT_STATUS_TOO_LATE}, { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, { ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, { ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT}, /* { This NT error code was 'sqashed' from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, { ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, { ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, { ERRDOS, ERRnoresource, NT_STATUS_INSUFF_SERVER_RESOURCES}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, { ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, { ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, { ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, { ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, { ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, { ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, { ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, { ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, { ERRHRD, ERRgeneral, 
NT_STATUS_REPLY_MESSAGE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, { ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, { ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, { ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, { ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_RETRY}, { ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, { ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, { ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, { ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, { ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, { ERRHRD, ERRgeneral, 0xc000024a}, { ERRHRD, ERRgeneral, 0xc000024b}, { ERRHRD, ERRgeneral, 0xc000024c}, { ERRHRD, ERRgeneral, 0xc000024d}, { ERRHRD, ERRgeneral, 0xc000024e}, { ERRHRD, ERRgeneral, 0xc000024f}, { ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, { ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, { ERRHRD, ERRgeneral, 0xc000025d}, { ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, { ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, { ERRDOS, 288, 
NT_STATUS_RESOURCE_NOT_OWNED}, { ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, { ERRDOS, 21, 0xc000026e}, { ERRDOS, 161, 0xc0000281}, { ERRDOS, ERRnoaccess, 0xc000028a}, { ERRDOS, ERRnoaccess, 0xc000028b}, { ERRHRD, ERRgeneral, 0xc000028c}, { ERRDOS, ERRnoaccess, 0xc000028d}, { ERRDOS, ERRnoaccess, 0xc000028e}, { ERRDOS, ERRnoaccess, 0xc000028f}, { ERRDOS, ERRnoaccess, 0xc0000290}, { ERRDOS, ERRbadfunc, 0xc000029c}, { ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, { ERRDOS, ERRinvlevel, 0x007c0001}, { 0, 0, 0 } }; /***************************************************************************** Print an error message from the status code *****************************************************************************/ static void cifs_print_status(__u32 status_code) { int idx = 0; while (nt_errs[idx].nt_errstr != NULL) { if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) == (status_code & 0xFFFFFF)) { pr_notice("Status code returned 0x%08x %s\n", status_code, nt_errs[idx].nt_errstr); } idx++; } return; } static void ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode) { int i; if (ntstatus == 0) { *eclass = 0; *ecode = 0; return; } for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) { if (ntstatus == ntstatus_to_dos_map[i].ntstatus) { *eclass = ntstatus_to_dos_map[i].dos_class; *ecode = ntstatus_to_dos_map[i].dos_code; return; } } *eclass = ERRHRD; *ecode = ERRgeneral; } int map_smb_to_linux_error(char *buf, bool logErr) { struct smb_hdr *smb = (struct smb_hdr *)buf; unsigned int i; int rc = -EIO; /* if transport error smb error may not be set */ __u8 smberrclass; __u16 smberrcode; /* BB if NT Status codes - map NT BB */ /* old style smb error codes */ if (smb->Status.CifsError == 0) return 0; if (smb->Flags2 & SMBFLG2_ERR_STATUS) { /* translate the newer STATUS codes to old style SMB errors * and then to POSIX errors */ __u32 err = le32_to_cpu(smb->Status.CifsError); if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED))) cifs_print_status(err); else if (cifsFYI & CIFS_RC) cifs_print_status(err); ntstatus_to_dos(err, &smberrclass, &smberrcode); } else { smberrclass = smb->Status.DosError.ErrorClass; smberrcode = le16_to_cpu(smb->Status.DosError.Error); } /* old style errors */ /* DOS class smb error codes - map DOS */ if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */ for (i = 0; i < sizeof(mapping_table_ERRDOS) / sizeof(struct smb_to_posix_error); i++) { if (mapping_table_ERRDOS[i].smb_err == 0) break; else if (mapping_table_ERRDOS[i].smb_err == smberrcode) { rc = mapping_table_ERRDOS[i].posix_code; break; } /* else try next error mapping one to see if match */ } } else if (smberrclass == ERRSRV) { /* server class of error codes */ for (i = 0; i < sizeof(mapping_table_ERRSRV) / sizeof(struct smb_to_posix_error); i++) { if (mapping_table_ERRSRV[i].smb_err == 0) break; else if (mapping_table_ERRSRV[i].smb_err == smberrcode) { rc = mapping_table_ERRSRV[i].posix_code; break; } /* else try next error mapping to see if match */ } } /* else ERRHRD class errors or junk - return EIO */ cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n", le32_to_cpu(smb->Status.CifsError), rc); /* generic corrective action e.g. 
reconnect SMB session on * ERRbaduid could be added */ return rc; } int map_and_check_smb_error(struct mid_q_entry *mid, bool logErr) { int rc; struct smb_hdr *smb = (struct smb_hdr *)mid->resp_buf; rc = map_smb_to_linux_error((char *)smb, logErr); if (rc == -EACCES && !(smb->Flags2 & SMBFLG2_ERR_STATUS)) { /* possible ERRBaduid */ __u8 class = smb->Status.DosError.ErrorClass; __u16 code = le16_to_cpu(smb->Status.DosError.Error); /* switch can be used to handle different errors */ if (class == ERRSRV && code == ERRbaduid) { cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n", code); cifs_signal_cifsd_for_reconnect(mid->server, false); } } return rc; } /* * calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message */ unsigned int smbCalcSize(void *buf) { struct smb_hdr *ptr = buf; return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 2 /* size of the bcc field */ + get_bcc(ptr)); } /* The following are taken from fs/ntfs/util.c */ #define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000) /* * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units) * into Unix UTC (based 1970-01-01, in seconds). */ struct timespec64 cifs_NTtimeToUnix(__le64 ntutc) { struct timespec64 ts; /* BB what about the timezone? BB */ /* Subtract the NTFS time offset, then convert to 1s intervals. */ s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; u64 abs_t; /* * Unfortunately can not use normal 64 bit division on 32 bit arch, but * the alternative, do_div, does not work with negative numbers so have * to special case them */ if (t < 0) { abs_t = -t; ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100); ts.tv_nsec = -ts.tv_nsec; ts.tv_sec = -abs_t; } else { abs_t = t; ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100; ts.tv_sec = abs_t; } return ts; } /* Convert the Unix UTC into NT UTC. */ u64 cifs_UnixTimeToNT(struct timespec64 t) { /* Convert to 100ns intervals and then add the NTFS time offset. 
*/ return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET; } static const int total_days_of_prev_months[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) { struct timespec64 ts; time64_t sec, days; int min, day, month, year; u16 date = le16_to_cpu(le_date); u16 time = le16_to_cpu(le_time); SMB_TIME *st = (SMB_TIME *)&time; SMB_DATE *sd = (SMB_DATE *)&date; cifs_dbg(FYI, "date %d time %d\n", date, time); sec = 2 * st->TwoSeconds; min = st->Minutes; if ((sec > 59) || (min > 59)) cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec); sec += (min * 60); sec += 60 * 60 * st->Hours; if (st->Hours > 24) cifs_dbg(VFS, "Invalid hours %d\n", st->Hours); day = sd->Day; month = sd->Month; if (day < 1 || day > 31 || month < 1 || month > 12) { cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day); day = clamp(day, 1, 31); month = clamp(month, 1, 12); } month -= 1; days = day + total_days_of_prev_months[month]; days += 3652; /* account for difference in days between 1980 and 1970 */ year = sd->Year; days += year * 365; days += (year/4); /* leap year */ /* generalized leap year calculation is more complex, ie no leap year for years/100 except for years/400, but since the maximum number for DOS year is 2**7, the last year is 1980+127, which means we need only consider 2 special case years, ie the years 2000 and 2100, and only adjust for the lack of leap year for the year 2100, as 2000 was a leap year (divisible by 400) */ if (year >= 120) /* the year 2100 */ days = days - 1; /* do not count leap year for the year 2100 */ /* adjust for leap year where we are still before leap day */ if (year != 120) days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0); sec += 24 * 60 * 60 * days; ts.tv_sec = sec + offset; /* cifs_dbg(FYI, "sec after cnvrt dos to unix time %d\n",sec); */ ts.tv_nsec = 0; return ts; }
linux-master
fs/smb/client/netmisc.c
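The NT-to-Unix epoch conversion in netmisc.c above is easier to see in isolation. Below is a minimal userspace sketch (not kernel code; it assumes ordinary 64-bit division is available, so the do_div() workaround for 32-bit kernels is unnecessary) mirroring cifs_NTtimeToUnix() and cifs_UnixTimeToNT() with the same epoch offset constant.

#include <stdint.h>
#include <stdio.h>

/* 100ns intervals between 1601-01-01 and 1970-01-01, as in NTFS_TIME_OFFSET */
#define NT_EPOCH_OFFSET ((uint64_t)(369 * 365 + 89) * 24 * 3600 * 10000000)

/* NT UTC (100ns units since 1601) -> Unix seconds since 1970 */
static int64_t nt_to_unix_sec(uint64_t ntutc)
{
	return ((int64_t)ntutc - (int64_t)NT_EPOCH_OFFSET) / 10000000;
}

/* Unix seconds/nanoseconds -> NT UTC, as in cifs_UnixTimeToNT() */
static uint64_t unix_to_nt(int64_t sec, long nsec)
{
	return (uint64_t)sec * 10000000 + (uint64_t)(nsec / 100) + NT_EPOCH_OFFSET;
}

int main(void)
{
	uint64_t nt = unix_to_nt(0, 0);		/* the Unix epoch */

	printf("Unix epoch as NT time: %llu\n", (unsigned long long)nt);
	printf("round trip: %lld s\n", (long long)nt_to_unix_sec(nt));
	return 0;
}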
// SPDX-License-Identifier: GPL-2.0 /* * SMB root file system support * * Copyright (c) 2019 Paulo Alcantara <[email protected]> */ #include <linux/init.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/root_dev.h> #include <linux/kernel.h> #include <linux/in.h> #include <linux/inet.h> #include <net/ipconfig.h> #define DEFAULT_MNT_OPTS \ "vers=1.0,cifsacl,mfsymlinks,rsize=1048576,wsize=65536,uid=0,gid=0," \ "hard,rootfs" static char root_dev[2048] __initdata = ""; static char root_opts[1024] __initdata = DEFAULT_MNT_OPTS; static __be32 __init parse_srvaddr(char *start, char *end) { /* TODO: ipv6 support */ char addr[sizeof("aaa.bbb.ccc.ddd")]; int i = 0; while (start < end && i < sizeof(addr) - 1) { if (isdigit(*start) || *start == '.') addr[i++] = *start; start++; } addr[i] = '\0'; return in_aton(addr); } /* cifsroot=//<server-ip>/<share>[,options] */ static int __init cifs_root_setup(char *line) { char *s; int len; __be32 srvaddr = htonl(INADDR_NONE); ROOT_DEV = Root_CIFS; if (strlen(line) > 3 && line[0] == '/' && line[1] == '/') { s = strchr(&line[2], '/'); if (!s || s[1] == '\0') return 1; /* make s point to ',' or '\0' at end of line */ s = strchrnul(s, ','); /* len is strlen(unc) + '\0' */ len = s - line + 1; if (len > sizeof(root_dev)) { pr_err("Root-CIFS: UNC path too long\n"); return 1; } strscpy(root_dev, line, len); srvaddr = parse_srvaddr(&line[2], s); if (*s) { int n = snprintf(root_opts, sizeof(root_opts), "%s,%s", DEFAULT_MNT_OPTS, s + 1); if (n >= sizeof(root_opts)) { pr_err("Root-CIFS: mount options string too long\n"); root_opts[sizeof(root_opts)-1] = '\0'; return 1; } } } root_server_addr = srvaddr; return 1; } __setup("cifsroot=", cifs_root_setup); int __init cifs_root_data(char **dev, char **opts) { if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) { pr_err("Root-CIFS: no SMB server address\n"); return -1; } *dev = root_dev; *opts = root_opts; return 0; }
linux-master
fs/smb/client/cifsroot.c
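To illustrate cifs_root_setup() in cifsroot.c above: for a kernel command line such as cifsroot=//192.168.1.10/srv/rootfs,username=foo, everything up to the first comma after the share becomes the UNC device path and the remainder is appended to DEFAULT_MNT_OPTS. A minimal userspace sketch of that split follows (illustrative only, with a hypothetical example value; the real code uses strchrnul() and validates the //server/share prefix and buffer lengths).

#include <stdio.h>
#include <string.h>

#define DEFAULT_MNT_OPTS \
	"vers=1.0,cifsacl,mfsymlinks,rsize=1048576,wsize=65536,uid=0,gid=0," \
	"hard,rootfs"

int main(void)
{
	/* hypothetical cifsroot= value for illustration */
	const char *line = "//192.168.1.10/srv/rootfs,username=foo,password=bar";
	const char *comma = strchr(line, ',');	/* real code: strchrnul() */
	size_t devlen = comma ? (size_t)(comma - line) : strlen(line);
	char dev[2048], opts[1024];

	snprintf(dev, sizeof(dev), "%.*s", (int)devlen, line);
	if (comma)	/* extra options are appended after the defaults */
		snprintf(opts, sizeof(opts), "%s,%s", DEFAULT_MNT_OPTS, comma + 1);
	else
		snprintf(opts, sizeof(opts), "%s", DEFAULT_MNT_OPTS);

	printf("dev : %s\nopts: %s\n", dev, opts);
	return 0;
}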
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002, 2011 * Etersoft, 2012 * Author(s): Steve French ([email protected]) * Jeremy Allison ([email protected]) 2006 * Pavel Shilovsky ([email protected]) 2012 * */ #include <linux/fs.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/net.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <linux/mempool.h> #include <linux/highmem.h> #include <crypto/aead.h> #include "cifsglob.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_debug.h" #include "smb2status.h" #include "smb2glob.h" static int smb3_crypto_shash_allocate(struct TCP_Server_Info *server) { struct cifs_secmech *p = &server->secmech; int rc; rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256); if (rc) goto err; rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac); if (rc) goto err; return 0; err: cifs_free_hash(&p->hmacsha256); return rc; } int smb311_crypto_shash_allocate(struct TCP_Server_Info *server) { struct cifs_secmech *p = &server->secmech; int rc = 0; rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256); if (rc) return rc; rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac); if (rc) goto err; rc = cifs_alloc_hash("sha512", &p->sha512); if (rc) goto err; return 0; err: cifs_free_hash(&p->aes_cmac); cifs_free_hash(&p->hmacsha256); return rc; } static int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key) { struct cifs_chan *chan; struct TCP_Server_Info *pserver; struct cifs_ses *ses = NULL; int i; int rc = 0; bool is_binding = false; spin_lock(&cifs_tcp_ses_lock); /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { if (ses->Suid == ses_id) goto found; } trace_smb3_ses_not_found(ses_id); cifs_server_dbg(FYI, "%s: Could not find session 0x%llx\n", __func__, ses_id); rc = -ENOENT; goto out; found: spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); is_binding = (cifs_chan_needs_reconnect(ses, server) && ses->ses_status == SES_GOOD); if (is_binding) { /* * If we are in the process of binding a new channel * to an existing session, use the master connection * session key */ memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE); spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); goto out; } /* * Otherwise, use the channel key. */ for (i = 0; i < ses->chan_count; i++) { chan = ses->chans + i; if (chan->server == server) { memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE); spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); goto out; } } spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); cifs_dbg(VFS, "%s: Could not find channel signing key for session 0x%llx\n", __func__, ses_id); rc = -ENOENT; out: spin_unlock(&cifs_tcp_ses_lock); return rc; } static struct cifs_ses * smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id) { struct TCP_Server_Info *pserver; struct cifs_ses *ses; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { if (ses->Suid != ses_id) continue; spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { spin_unlock(&ses->ses_lock); continue; } cifs_smb_ses_inc_refcount(ses); spin_unlock(&ses->ses_lock); return ses; } return NULL; } struct cifs_ses * smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) { struct cifs_ses *ses; spin_lock(&cifs_tcp_ses_lock); ses = smb2_find_smb_ses_unlocked(server, ses_id); spin_unlock(&cifs_tcp_ses_lock); return ses; } static struct cifs_tcon * smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid) { struct cifs_tcon *tcon; list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid != tid) continue; ++tcon->tc_count; return tcon; } return NULL; } /* * Obtain tcon corresponding to the tid in the given * cifs_ses */ struct cifs_tcon * smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) { struct cifs_ses *ses; struct cifs_tcon *tcon; spin_lock(&cifs_tcp_ses_lock); ses = smb2_find_smb_ses_unlocked(server, ses_id); if (!ses) { spin_unlock(&cifs_tcp_ses_lock); return NULL; } tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); if (!tcon) { cifs_put_smb_ses(ses); spin_unlock(&cifs_tcp_ses_lock); return NULL; } spin_unlock(&cifs_tcp_ses_lock); /* tcon already has a ref to ses, so we don't need ses anymore */ cifs_put_smb_ses(ses); return tcon; } int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, bool allocate_crypto) { int rc; unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; unsigned char *sigptr = smb2_signature; struct kvec *iov = rqst->rq_iov; struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base; struct cifs_ses *ses; struct shash_desc *shash = NULL; struct smb_rqst drqst; ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId)); if (unlikely(!ses)) { cifs_server_dbg(VFS, "%s: Could not find session\n", __func__); return -ENOENT; } memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE); memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); if (allocate_crypto) { rc = cifs_alloc_hash("hmac(sha256)", &shash); if (rc) { cifs_server_dbg(VFS, "%s: sha256 alloc failed\n", __func__); goto out; } } else { shash = server->secmech.hmacsha256; } rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with response\n", __func__); goto out; } rc = crypto_shash_init(shash); if (rc) { cifs_server_dbg(VFS, "%s: Could not init sha256", __func__); goto out; } /* * For SMB2+, __cifs_calc_signature() expects to sign only the actual * data, that is, iov[0] should not contain a rfc1002 length. * * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to * __cifs_calc_signature(). 
*/ drqst = *rqst; if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { rc = crypto_shash_update(shash, iov[0].iov_base, iov[0].iov_len); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with payload\n", __func__); goto out; } drqst.rq_iov++; drqst.rq_nvec--; } rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); out: if (allocate_crypto) cifs_free_hash(&shash); if (ses) cifs_put_smb_ses(ses); return rc; } static int generate_key(struct cifs_ses *ses, struct kvec label, struct kvec context, __u8 *key, unsigned int key_size) { unsigned char zero = 0x0; __u8 i[4] = {0, 0, 0, 1}; __u8 L128[4] = {0, 0, 0, 128}; __u8 L256[4] = {0, 0, 1, 0}; int rc = 0; unsigned char prfhash[SMB2_HMACSHA256_SIZE]; unsigned char *hashptr = prfhash; struct TCP_Server_Info *server = ses->server; memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE); memset(key, 0x0, key_size); rc = smb3_crypto_shash_allocate(server); if (rc) { cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_setkey(server->secmech.hmacsha256->tfm, ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) { cifs_server_dbg(VFS, "%s: Could not set with session key\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_init(server->secmech.hmacsha256); if (rc) { cifs_server_dbg(VFS, "%s: Could not init sign hmac\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_update(server->secmech.hmacsha256, i, 4); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with n\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_update(server->secmech.hmacsha256, label.iov_base, label.iov_len); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with label\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_update(server->secmech.hmacsha256, &zero, 1); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with zero\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_update(server->secmech.hmacsha256, context.iov_base, context.iov_len); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with context\n", __func__); goto smb3signkey_ret; } if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) { rc = crypto_shash_update(server->secmech.hmacsha256, L256, 4); } else { rc = crypto_shash_update(server->secmech.hmacsha256, L128, 4); } if (rc) { cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__); goto smb3signkey_ret; } rc = crypto_shash_final(server->secmech.hmacsha256, hashptr); if (rc) { cifs_server_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__); goto smb3signkey_ret; } memcpy(key, hashptr, key_size); smb3signkey_ret: return rc; } struct derivation { struct kvec label; struct kvec context; }; struct derivation_triplet { struct derivation signing; struct derivation encryption; struct derivation decryption; }; static int generate_smb3signingkey(struct cifs_ses *ses, struct TCP_Server_Info *server, const struct derivation_triplet *ptriplet) { int rc; bool is_binding = false; int chan_index = 0; spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); is_binding = (cifs_chan_needs_reconnect(ses, server) && ses->ses_status == SES_GOOD); chan_index = cifs_ses_get_chan_index(ses, server); /* TODO: introduce ref counting for channels when they can be freed */ spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); /* * All channels use the same encryption/decryption keys but * they have their own signing key.
* * When we generate the keys, check if it is for a new channel * (binding) in which case we only need to generate a signing * key and store it in the channel as to not overwrite the * master connection signing key stored in the session */ if (is_binding) { rc = generate_key(ses, ptriplet->signing.label, ptriplet->signing.context, ses->chans[chan_index].signkey, SMB3_SIGN_KEY_SIZE); if (rc) return rc; } else { rc = generate_key(ses, ptriplet->signing.label, ptriplet->signing.context, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE); if (rc) return rc; /* safe to access primary channel, since it will never go away */ spin_lock(&ses->chan_lock); memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE); spin_unlock(&ses->chan_lock); rc = generate_key(ses, ptriplet->encryption.label, ptriplet->encryption.context, ses->smb3encryptionkey, SMB3_ENC_DEC_KEY_SIZE); rc = generate_key(ses, ptriplet->decryption.label, ptriplet->decryption.context, ses->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE); if (rc) return rc; } if (rc) return rc; #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__); /* * The session id is opaque in terms of endianness, so we can't * print it as a long long. we dump it as we got it on the wire */ cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), &ses->Suid); cifs_dbg(VFS, "Cipher type %d\n", server->cipher_type); cifs_dbg(VFS, "Session Key %*ph\n", SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); cifs_dbg(VFS, "Signing Key %*ph\n", SMB3_SIGN_KEY_SIZE, ses->smb3signingkey); if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) { cifs_dbg(VFS, "ServerIn Key %*ph\n", SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey); cifs_dbg(VFS, "ServerOut Key %*ph\n", SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey); } else { cifs_dbg(VFS, "ServerIn Key %*ph\n", SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey); cifs_dbg(VFS, "ServerOut Key %*ph\n", SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey); } #endif return rc; } int generate_smb30signingkey(struct cifs_ses *ses, struct TCP_Server_Info *server) { struct derivation_triplet triplet; struct derivation *d; d = &triplet.signing; d->label.iov_base = "SMB2AESCMAC"; d->label.iov_len = 12; d->context.iov_base = "SmbSign"; d->context.iov_len = 8; d = &triplet.encryption; d->label.iov_base = "SMB2AESCCM"; d->label.iov_len = 11; d->context.iov_base = "ServerIn "; d->context.iov_len = 10; d = &triplet.decryption; d->label.iov_base = "SMB2AESCCM"; d->label.iov_len = 11; d->context.iov_base = "ServerOut"; d->context.iov_len = 10; return generate_smb3signingkey(ses, server, &triplet); } int generate_smb311signingkey(struct cifs_ses *ses, struct TCP_Server_Info *server) { struct derivation_triplet triplet; struct derivation *d; d = &triplet.signing; d->label.iov_base = "SMBSigningKey"; d->label.iov_len = 14; d->context.iov_base = ses->preauth_sha_hash; d->context.iov_len = 64; d = &triplet.encryption; d->label.iov_base = "SMBC2SCipherKey"; d->label.iov_len = 16; d->context.iov_base = ses->preauth_sha_hash; d->context.iov_len = 64; d = &triplet.decryption; d->label.iov_base = "SMBS2CCipherKey"; d->label.iov_len = 16; d->context.iov_base = ses->preauth_sha_hash; d->context.iov_len = 64; return generate_smb3signingkey(ses, server, &triplet); } int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, bool allocate_crypto) { int rc; unsigned char smb3_signature[SMB2_CMACAES_SIZE]; unsigned char *sigptr = 
smb3_signature; struct kvec *iov = rqst->rq_iov; struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base; struct shash_desc *shash = NULL; struct smb_rqst drqst; u8 key[SMB3_SIGN_KEY_SIZE]; rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key); if (unlikely(rc)) { cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__); return rc; } if (allocate_crypto) { rc = cifs_alloc_hash("cmac(aes)", &shash); if (rc) return rc; } else { shash = server->secmech.aes_cmac; } memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE); memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); rc = crypto_shash_setkey(shash->tfm, key, SMB2_CMACAES_SIZE); if (rc) { cifs_server_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); goto out; } /* * we already allocate aes_cmac when we init smb3 signing key, * so unlike smb2 case we do not have to check here if secmech are * initialized */ rc = crypto_shash_init(shash); if (rc) { cifs_server_dbg(VFS, "%s: Could not init cmac aes\n", __func__); goto out; } /* * For SMB2+, __cifs_calc_signature() expects to sign only the actual * data, that is, iov[0] should not contain a rfc1002 length. * * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to * __cifs_calc_signature(). */ drqst = *rqst; if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { rc = crypto_shash_update(shash, iov[0].iov_base, iov[0].iov_len); if (rc) { cifs_server_dbg(VFS, "%s: Could not update with payload\n", __func__); goto out; } drqst.rq_iov++; drqst.rq_nvec--; } rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); out: if (allocate_crypto) cifs_free_hash(&shash); return rc; } /* must be called with server->srv_mutex held */ static int smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) { int rc = 0; struct smb2_hdr *shdr; struct smb2_sess_setup_req *ssr; bool is_binding; bool is_signed; shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; ssr = (struct smb2_sess_setup_req *)shdr; is_binding = shdr->Command == SMB2_SESSION_SETUP && (ssr->Flags & SMB2_SESSION_REQ_FLAG_BINDING); is_signed = shdr->Flags & SMB2_FLAGS_SIGNED; if (!is_signed) return 0; spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { spin_unlock(&server->srv_lock); return 0; } spin_unlock(&server->srv_lock); if (!is_binding && !server->session_estab) { strncpy(shdr->Signature, "BSRSPYL", 8); return 0; } rc = server->ops->calc_signature(rqst, server, false); return rc; } int smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { unsigned int rc; char server_response_sig[SMB2_SIGNATURE_SIZE]; struct smb2_hdr *shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; if ((shdr->Command == SMB2_NEGOTIATE) || (shdr->Command == SMB2_SESSION_SETUP) || (shdr->Command == SMB2_OPLOCK_BREAK) || server->ignore_signature || (!server->session_estab)) return 0; /* * BB what if signatures are supposed to be on for session but * server does not send one? BB */ /* Do not need to verify session setups with signature "BSRSPYL " */ if (memcmp(shdr->Signature, "BSRSPYL ", 8) == 0) cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n", shdr->Command); /* * Save off the original signature so we can modify the smb and check * our calculated signature against what the server sent.
*/ memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE); memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE); rc = server->ops->calc_signature(rqst, server, true); if (rc) return rc; if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) { cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n", shdr->Command, shdr->MessageId); return -EACCES; } else return 0; } /* * Set message id for the request. Should be called after wait_for_free_request * and when srv_mutex is held. */ static inline void smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *shdr) { unsigned int i, num = le16_to_cpu(shdr->CreditCharge); shdr->MessageId = get_next_mid64(server); /* skip message numbers according to CreditCharge field */ for (i = 1; i < num; i++) get_next_mid(server); } static struct mid_q_entry * smb2_mid_entry_alloc(const struct smb2_hdr *shdr, struct TCP_Server_Info *server) { struct mid_q_entry *temp; unsigned int credits = le16_to_cpu(shdr->CreditCharge); if (server == NULL) { cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n"); return NULL; } temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); kref_init(&temp->refcount); temp->mid = le64_to_cpu(shdr->MessageId); temp->credits = credits > 0 ? credits : 1; temp->pid = current->pid; temp->command = shdr->Command; /* Always LE */ temp->when_alloc = jiffies; temp->server = server; /* * The default is for the mid to be synchronous, so the * default callback just wakes up the current task. */ get_task_struct(current); temp->creator = current; temp->callback = cifs_wake_up_task; temp->callback_data = current; atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId), le64_to_cpu(shdr->SessionId), le16_to_cpu(shdr->Command), temp->mid); return temp; } static int smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, struct smb2_hdr *shdr, struct mid_q_entry **mid) { spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ENOENT; } if (server->tcpStatus == CifsNeedReconnect) { spin_unlock(&server->srv_lock); cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); return -EAGAIN; } if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { spin_unlock(&server->srv_lock); return -EAGAIN; } spin_unlock(&server->srv_lock); spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((shdr->Command != SMB2_SESSION_SETUP) && (shdr->Command != SMB2_NEGOTIATE)) { spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are setting up session */ } if (ses->ses_status == SES_EXITING) { if (shdr->Command != SMB2_LOGOFF) { spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down the session */ } spin_unlock(&ses->ses_lock); *mid = smb2_mid_entry_alloc(shdr, server); if (*mid == NULL) return -ENOMEM; spin_lock(&server->mid_lock); list_add_tail(&(*mid)->qhead, &server->pending_mid_q); spin_unlock(&server->mid_lock); return 0; } int smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, bool log_error) { unsigned int len = mid->resp_buf_size; struct kvec iov[1]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 1 }; iov[0].iov_base = (char *)mid->resp_buf; iov[0].iov_len = len; dump_smb(mid->resp_buf, min_t(u32, 80, len)); /* convert the length into a more usable form */ if (len > 24 && server->sign && !mid->decrypted) { int rc; rc = smb2_verify_signature(&rqst, 
server); if (rc) cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } return map_smb2_to_linux_error(mid->resp_buf, log_error); } struct mid_q_entry * smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server, struct smb_rqst *rqst) { int rc; struct smb2_hdr *shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; smb2_seq_num_into_buf(server, shdr); rc = smb2_get_mid_entry(ses, server, shdr, &mid); if (rc) { revert_current_mid_from_hdr(server, shdr); return ERR_PTR(rc); } rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); delete_mid(mid); return ERR_PTR(rc); } return mid; } struct mid_q_entry * smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) { int rc; struct smb2_hdr *shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { spin_unlock(&server->srv_lock); return ERR_PTR(-EAGAIN); } spin_unlock(&server->srv_lock); smb2_seq_num_into_buf(server, shdr); mid = smb2_mid_entry_alloc(shdr, server); if (mid == NULL) { revert_current_mid_from_hdr(server, shdr); return ERR_PTR(-ENOMEM); } rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); release_mid(mid); return ERR_PTR(rc); } return mid; } int smb3_crypto_aead_allocate(struct TCP_Server_Info *server) { struct crypto_aead *tfm; if (!server->secmech.enc) { if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) tfm = crypto_alloc_aead("gcm(aes)", 0, 0); else tfm = crypto_alloc_aead("ccm(aes)", 0, 0); if (IS_ERR(tfm)) { cifs_server_dbg(VFS, "%s: Failed alloc encrypt aead\n", __func__); return PTR_ERR(tfm); } server->secmech.enc = tfm; } if (!server->secmech.dec) { if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) tfm = crypto_alloc_aead("gcm(aes)", 0, 0); else tfm = crypto_alloc_aead("ccm(aes)", 0, 0); if (IS_ERR(tfm)) { crypto_free_aead(server->secmech.enc); server->secmech.enc = NULL; cifs_server_dbg(VFS, "%s: Failed to alloc decrypt aead\n", __func__); return PTR_ERR(tfm); } server->secmech.dec = tfm; } return 0; }
linux-master
fs/smb/client/smb2transport.c
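generate_key() in smb2transport.c above is an SP800-108 counter-mode KDF: the derived key is HMAC-SHA256(session_key, i || label || 0x00 || context || L) with counter i = 1 and L = 128 bits (256 for the AES-256 ciphers). Below is a minimal userspace sketch of the same construction using OpenSSL (an assumption for illustration; the kernel uses its own hmac(sha256) shash), shown for the SMB 3.0 signing key, where the label and context lengths include the trailing NUL exactly as the kvec lengths in generate_smb30signingkey() do.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

/* Derive a 16-byte key: HMAC-SHA256(key, i || label || 0x00 || context || L),
 * mirroring the crypto_shash_update() sequence in generate_key() above.
 * label_len and ctx_len include the trailing NUL, as the kernel kvecs do. */
static void smb3_kdf(const uint8_t *key, size_t keylen,
		     const void *label, size_t label_len,
		     const void *context, size_t ctx_len, uint8_t out[16])
{
	const uint8_t i[4] = { 0, 0, 0, 1 };	/* counter, always 1 here */
	const uint8_t L[4] = { 0, 0, 0, 128 };	/* 128 output bits */
	uint8_t buf[256], digest[32];
	unsigned int dlen = sizeof(digest);
	size_t off = 0;

	memcpy(buf + off, i, 4); off += 4;
	memcpy(buf + off, label, label_len); off += label_len;
	buf[off++] = 0;				/* separator byte */
	memcpy(buf + off, context, ctx_len); off += ctx_len;
	memcpy(buf + off, L, 4); off += 4;

	HMAC(EVP_sha256(), key, (int)keylen, buf, off, digest, &dlen);
	memcpy(out, digest, 16);		/* truncate to the key size */
}

int main(void)
{
	uint8_t session_key[16] = { 0 };	/* dummy NTLMv2 session key */
	uint8_t signkey[16];
	int n;

	/* SMB 3.0 signing key: label "SMB2AESCMAC" (12 bytes with NUL),
	 * context "SmbSign" (8 bytes with NUL) */
	smb3_kdf(session_key, sizeof(session_key), "SMB2AESCMAC", 12,
		 "SmbSign", 8, signkey);
	for (n = 0; n < 16; n++)
		printf("%02x", signkey[n]);
	printf("\n");
	return 0;
}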
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/freezer.h> #include <linux/sched/signal.h> #include <linux/wait_bit.h> #include <linux/fiemap.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "fscache.h" #include "fs_context.h" #include "cifs_ioctl.h" #include "cached_dir.h" static void cifs_set_ops(struct inode *inode) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &cifs_file_inode_ops; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) inode->i_fop = &cifs_file_direct_nobrl_ops; else inode->i_fop = &cifs_file_direct_ops; } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) inode->i_fop = &cifs_file_strict_nobrl_ops; else inode->i_fop = &cifs_file_strict_ops; } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) inode->i_fop = &cifs_file_nobrl_ops; else { /* not direct, send byte range locks */ inode->i_fop = &cifs_file_ops; } /* check if server can support readahead */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; else inode->i_data.a_ops = &cifs_addr_ops; break; case S_IFDIR: if (IS_AUTOMOUNT(inode)) { inode->i_op = &cifs_namespace_inode_operations; } else { inode->i_op = &cifs_dir_inode_ops; inode->i_fop = &cifs_dir_ops; } break; case S_IFLNK: inode->i_op = &cifs_symlink_inode_ops; break; default: init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } } /* check inode attributes against fattr. If they don't match, tag the * inode for cache invalidation */ static void cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr) { struct cifs_fscache_inode_coherency_data cd; struct cifsInodeInfo *cifs_i = CIFS_I(inode); cifs_dbg(FYI, "%s: revalidating inode %llu\n", __func__, cifs_i->uniqueid); if (inode->i_state & I_NEW) { cifs_dbg(FYI, "%s: inode %llu is new\n", __func__, cifs_i->uniqueid); return; } /* don't bother with revalidation if we have an oplock */ if (CIFS_CACHE_READ(cifs_i)) { cifs_dbg(FYI, "%s: inode %llu is oplocked\n", __func__, cifs_i->uniqueid); return; } /* revalidate if mtime or size have changed */ fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode); if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) && cifs_i->server_eof == fattr->cf_eof) { cifs_dbg(FYI, "%s: inode %llu is unchanged\n", __func__, cifs_i->uniqueid); return; } cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n", __func__, cifs_i->uniqueid); set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags); /* Invalidate fscache cookie */ cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd); fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0); } /* * copy nlink to the inode, unless it wasn't provided. 
Provide * sane values if we don't have an existing one and none was provided */ static void cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { /* * if we're in a situation where we can't trust what we * got from the server (readdir, some non-unix cases) * fake reasonable values */ if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) { /* only provide fake values on a new inode */ if (inode->i_state & I_NEW) { if (fattr->cf_cifsattrs & ATTR_DIRECTORY) set_nlink(inode, 2); else set_nlink(inode, 1); } return; } /* we trust the server, so update it */ set_nlink(inode, fattr->cf_nlink); } /* populate an inode with info from a cifs_fattr struct */ int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); if (!(inode->i_state & I_NEW) && unlikely(inode_wrong_type(inode, fattr->cf_mode))) { CIFS_I(inode)->time = 0; /* force reval */ return -ESTALE; } cifs_revalidate_cache(inode, fattr); spin_lock(&inode->i_lock); fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode); fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode); fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode); /* we do not want atime to be less than mtime, it broke some apps */ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0) inode->i_atime = fattr->cf_mtime; else inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; inode_set_ctime_to_ts(inode, fattr->cf_ctime); inode->i_rdev = fattr->cf_rdev; cifs_nlink_fattr_to_inode(inode, fattr); inode->i_uid = fattr->cf_uid; inode->i_gid = fattr->cf_gid; /* if dynperm is set, don't clobber existing mode */ if (inode->i_state & I_NEW || !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) inode->i_mode = fattr->cf_mode; cifs_i->cifsAttrs = fattr->cf_cifsattrs; if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) cifs_i->time = 0; else cifs_i->time = jiffies; if (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING) set_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags); else clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags); cifs_i->server_eof = fattr->cf_eof; /* * Can't safely change the file size here if the client is writing to * it due to potential races. */ if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { i_size_write(inode, fattr->cf_eof); /* * i_blocks is not related to (i_size / i_blksize), * but instead 512 byte (2**9) size is required for * calculating num blocks. */ inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9; } if (S_ISLNK(fattr->cf_mode)) { kfree(cifs_i->symlink_target); cifs_i->symlink_target = fattr->cf_symlink_target; fattr->cf_symlink_target = NULL; } spin_unlock(&inode->i_lock); if (fattr->cf_flags & CIFS_FATTR_JUNCTION) inode->i_flags |= S_AUTOMOUNT; if (inode->i_state & I_NEW) cifs_set_ops(inode); return 0; } void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) return; fattr->cf_uniqueid = iunique(sb, ROOT_I); } /* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. 
*/ void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, struct cifs_sb_info *cifs_sb) { memset(fattr, 0, sizeof(*fattr)); fattr->cf_uniqueid = le64_to_cpu(info->UniqueId); fattr->cf_bytes = le64_to_cpu(info->NumOfBytes); fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime); fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange); /* old POSIX extensions don't get create time */ fattr->cf_mode = le64_to_cpu(info->Permissions); /* * Since we set the inode type below we need to mask off * to avoid strange results if bits set above. */ fattr->cf_mode &= ~S_IFMT; switch (le32_to_cpu(info->Type)) { case UNIX_FILE: fattr->cf_mode |= S_IFREG; fattr->cf_dtype = DT_REG; break; case UNIX_SYMLINK: fattr->cf_mode |= S_IFLNK; fattr->cf_dtype = DT_LNK; break; case UNIX_DIR: fattr->cf_mode |= S_IFDIR; fattr->cf_dtype = DT_DIR; break; case UNIX_CHARDEV: fattr->cf_mode |= S_IFCHR; fattr->cf_dtype = DT_CHR; fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor), le64_to_cpu(info->DevMinor) & MINORMASK); break; case UNIX_BLOCKDEV: fattr->cf_mode |= S_IFBLK; fattr->cf_dtype = DT_BLK; fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor), le64_to_cpu(info->DevMinor) & MINORMASK); break; case UNIX_FIFO: fattr->cf_mode |= S_IFIFO; fattr->cf_dtype = DT_FIFO; break; case UNIX_SOCKET: fattr->cf_mode |= S_IFSOCK; fattr->cf_dtype = DT_SOCK; break; default: /* safest to call it a file if we do not know */ fattr->cf_mode |= S_IFREG; fattr->cf_dtype = DT_REG; cifs_dbg(FYI, "unknown type %d\n", le32_to_cpu(info->Type)); break; } fattr->cf_uid = cifs_sb->ctx->linux_uid; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) { u64 id = le64_to_cpu(info->Uid); if (id < ((uid_t)-1)) { kuid_t uid = make_kuid(&init_user_ns, id); if (uid_valid(uid)) fattr->cf_uid = uid; } } fattr->cf_gid = cifs_sb->ctx->linux_gid; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) { u64 id = le64_to_cpu(info->Gid); if (id < ((gid_t)-1)) { kgid_t gid = make_kgid(&init_user_ns, id); if (gid_valid(gid)) fattr->cf_gid = gid; } } fattr->cf_nlink = le64_to_cpu(info->Nlinks); } /* * Fill a cifs_fattr struct with fake inode info. * * Needed to setup cifs_fattr data for the directory which is the * junction to the new submount (ie to setup the fake directory * which represents a DFS referral or reparse mount point). */ static void cifs_create_junction_fattr(struct cifs_fattr *fattr, struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); cifs_dbg(FYI, "%s: creating fake fattr\n", __func__); memset(fattr, 0, sizeof(*fattr)); fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU; fattr->cf_uid = cifs_sb->ctx->linux_uid; fattr->cf_gid = cifs_sb->ctx->linux_gid; ktime_get_coarse_real_ts64(&fattr->cf_mtime); fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime; fattr->cf_nlink = 2; fattr->cf_flags = CIFS_FATTR_JUNCTION; } /* Update inode with final fattr data */ static int update_inode_info(struct super_block *sb, struct cifs_fattr *fattr, struct inode **inode) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); int rc = 0; if (!*inode) { *inode = cifs_iget(sb, fattr); if (!*inode) rc = -ENOMEM; return rc; } /* We already have inode, update it. * * If file type or uniqueid is different, return error. 
*/ if (unlikely((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && CIFS_I(*inode)->uniqueid != fattr->cf_uniqueid)) { CIFS_I(*inode)->time = 0; /* force reval */ return -ESTALE; } return cifs_fattr_to_inode(*inode, fattr); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_get_file_info_unix(struct file *filp) { int rc; unsigned int xid; FILE_UNIX_BASIC_INFO find_data; struct cifs_fattr fattr = {}; struct inode *inode = file_inode(filp); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsFileInfo *cfile = filp->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); xid = get_xid(); if (cfile->symlink_target) { fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!fattr.cf_symlink_target) { rc = -ENOMEM; goto cifs_gfiunix_out; } } rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data); if (!rc) { cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb); } else if (rc == -EREMOTE) { cifs_create_junction_fattr(&fattr, inode->i_sb); rc = 0; } else goto cifs_gfiunix_out; rc = cifs_fattr_to_inode(inode, &fattr); cifs_gfiunix_out: free_xid(xid); return rc; } static int cifs_get_unix_fattr(const unsigned char *full_path, struct super_block *sb, struct cifs_fattr *fattr, struct inode **pinode, const unsigned int xid) { struct TCP_Server_Info *server; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); FILE_UNIX_BASIC_INFO find_data; struct cifs_tcon *tcon; struct tcon_link *tlink; int rc, tmprc; cifs_dbg(FYI, "Getting info on %s\n", full_path); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); server = tcon->ses->server; /* could have done a find first instead but this returns more info */ rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_dbg(FYI, "%s: query path info: rc = %d\n", __func__, rc); cifs_put_tlink(tlink); if (!rc) { cifs_unix_basic_to_fattr(fattr, &find_data, cifs_sb); } else if (rc == -EREMOTE) { cifs_create_junction_fattr(fattr, sb); rc = 0; } else { return rc; } if (!*pinode) cifs_fill_uniqueid(sb, fattr); /* check for Minshall+French symlinks */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); } if (S_ISLNK(fattr->cf_mode) && !fattr->cf_symlink_target) { if (!server->ops->query_symlink) return -EOPNOTSUPP; rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path, &fattr->cf_symlink_target, NULL); cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc); } return rc; } int cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *full_path, struct super_block *sb, unsigned int xid) { struct cifs_fattr fattr = {}; int rc; rc = cifs_get_unix_fattr(full_path, sb, &fattr, pinode, xid); if (rc) goto out; rc = update_inode_info(sb, &fattr, pinode); out: kfree(fattr.cf_symlink_target); return rc; } #else static inline int cifs_get_unix_fattr(const unsigned char *full_path, struct super_block *sb, struct cifs_fattr *fattr, struct inode **pinode, const unsigned int xid) { return -EOPNOTSUPP; } int cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *full_path, struct super_block *sb, unsigned int xid) { return -EOPNOTSUPP; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_sfu_type(struct cifs_fattr *fattr, const char *path, struct cifs_sb_info *cifs_sb, unsigned int xid) { int rc; __u32 oplock; struct tcon_link *tlink; struct cifs_tcon *tcon; struct cifs_fid fid; struct 
cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; char buf[24]; unsigned int bytes_read; char *pbuf; int buf_type = CIFS_NO_BUFFER; pbuf = buf; fattr->cf_mode &= ~S_IFMT; if (fattr->cf_eof == 0) { fattr->cf_mode |= S_IFIFO; fattr->cf_dtype = DT_FIFO; return 0; } else if (fattr->cf_eof < 8) { fattr->cf_mode |= S_IFREG; fattr->cf_dtype = DT_REG; return -EINVAL; /* EOPNOTSUPP? */ } tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_READ, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL); if (rc) { cifs_dbg(FYI, "check sfu type of %s, open rc = %d\n", path, rc); cifs_put_tlink(tlink); return rc; } /* Read header */ io_parms.netfid = fid.netfid; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = 24; rc = tcon->ses->server->ops->sync_read(xid, &fid, &io_parms, &bytes_read, &pbuf, &buf_type); if ((rc == 0) && (bytes_read >= 8)) { if (memcmp("IntxBLK", pbuf, 8) == 0) { cifs_dbg(FYI, "Block device\n"); fattr->cf_mode |= S_IFBLK; fattr->cf_dtype = DT_BLK; if (bytes_read == 24) { /* we have enough to decode dev num */ __u64 mjr; /* major */ __u64 mnr; /* minor */ mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); fattr->cf_rdev = MKDEV(mjr, mnr); } } else if (memcmp("IntxCHR", pbuf, 8) == 0) { cifs_dbg(FYI, "Char device\n"); fattr->cf_mode |= S_IFCHR; fattr->cf_dtype = DT_CHR; if (bytes_read == 24) { /* we have enough to decode dev num */ __u64 mjr; /* major */ __u64 mnr; /* minor */ mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); fattr->cf_rdev = MKDEV(mjr, mnr); } } else if (memcmp("IntxLNK", pbuf, 7) == 0) { cifs_dbg(FYI, "Symlink\n"); fattr->cf_mode |= S_IFLNK; fattr->cf_dtype = DT_LNK; } else { fattr->cf_mode |= S_IFREG; /* file? */ fattr->cf_dtype = DT_REG; rc = -EOPNOTSUPP; } } else { fattr->cf_mode |= S_IFREG; /* then it is a file */ fattr->cf_dtype = DT_REG; rc = -EOPNOTSUPP; /* or some unknown SFU type */ } tcon->ses->server->ops->close(xid, tcon, &fid); cifs_put_tlink(tlink); return rc; } #define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */ /* * Fetch mode bits as provided by SFU. * * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ? 
*/ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, struct cifs_sb_info *cifs_sb, unsigned int xid) { #ifdef CONFIG_CIFS_XATTR ssize_t rc; char ea_value[4]; __u32 mode; struct tcon_link *tlink; struct cifs_tcon *tcon; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); if (tcon->ses->server->ops->query_all_EAs == NULL) { cifs_put_tlink(tlink); return -EOPNOTSUPP; } rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path, "SETFILEBITS", ea_value, 4 /* size of buf */, cifs_sb); cifs_put_tlink(tlink); if (rc < 0) return (int)rc; else if (rc > 3) { mode = le32_to_cpu(*((__le32 *)ea_value)); fattr->cf_mode &= ~SFBITS_MASK; cifs_dbg(FYI, "special bits 0%o org mode 0%o\n", mode, fattr->cf_mode); fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode; cifs_dbg(FYI, "special mode bits 0%o\n", mode); } return 0; #else return -EOPNOTSUPP; #endif } /* Fill a cifs_fattr struct with info from POSIX info struct */ static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data, struct cifs_sid *owner, struct cifs_sid *group, struct super_block *sb) { struct smb311_posix_qinfo *info = &data->posix_fi; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); memset(fattr, 0, sizeof(*fattr)); /* no fattr->flags to set */ fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes); fattr->cf_uniqueid = le64_to_cpu(info->Inode); if (info->LastAccessTime) fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); else ktime_get_coarse_real_ts64(&fattr->cf_atime); fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); if (data->adjust_tz) { fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj; fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj; } fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); fattr->cf_createtime = le64_to_cpu(info->CreationTime); fattr->cf_nlink = le32_to_cpu(info->HardLinks); fattr->cf_mode = (umode_t) le32_to_cpu(info->Mode); /* The srv fs device id is overridden on network mount so setting rdev isn't needed here */ /* fattr->cf_rdev = le32_to_cpu(info->DeviceId); */ if (data->symlink) { fattr->cf_mode |= S_IFLNK; fattr->cf_dtype = DT_LNK; fattr->cf_symlink_target = data->symlink_target; data->symlink_target = NULL; } else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode |= S_IFDIR; fattr->cf_dtype = DT_DIR; } else { /* file */ fattr->cf_mode |= S_IFREG; fattr->cf_dtype = DT_REG; } /* else if reparse point ... 
TODO: add support for FIFO and blk dev; special file types */ sid_to_id(cifs_sb, owner, fattr, SIDOWNER); sid_to_id(cifs_sb, group, fattr, SIDGROUP); cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n", fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink); } bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, u32 tag) { switch (tag) { case IO_REPARSE_TAG_LX_SYMLINK: fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_LNK; break; case IO_REPARSE_TAG_LX_FIFO: fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_FIFO; break; case IO_REPARSE_TAG_AF_UNIX: fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_SOCK; break; case IO_REPARSE_TAG_LX_CHR: fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_CHR; break; case IO_REPARSE_TAG_LX_BLK: fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_BLK; break; case 0: /* SMB1 symlink */ case IO_REPARSE_TAG_SYMLINK: case IO_REPARSE_TAG_NFS: fattr->cf_mode = S_IFLNK; fattr->cf_dtype = DT_LNK; break; default: return false; } return true; } static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data, struct super_block *sb) { struct smb2_file_all_info *info = &data->fi; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); memset(fattr, 0, sizeof(*fattr)); fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); if (info->DeletePending) fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING; if (info->LastAccessTime) fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); else ktime_get_coarse_real_ts64(&fattr->cf_atime); fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); if (data->adjust_tz) { fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj; fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj; } fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); fattr->cf_createtime = le64_to_cpu(info->CreationTime); fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); if (cifs_open_data_reparse(data) && cifs_reparse_point_to_fattr(cifs_sb, fattr, data->reparse_tag)) goto out_reparse; if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode; fattr->cf_dtype = DT_DIR; /* * Server can return wrong NumberOfLinks value for directories * when Unix extensions are disabled - fake it. */ if (!tcon->unix_ext) fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK; } else { fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode; fattr->cf_dtype = DT_REG; /* clear write bits if ATTR_READONLY is set */ if (fattr->cf_cifsattrs & ATTR_READONLY) fattr->cf_mode &= ~(S_IWUGO); /* * Don't accept zero nlink from non-unix servers unless * delete is pending. Instead mark it as unknown. 
*/ if ((fattr->cf_nlink < 1) && !tcon->unix_ext && !info->DeletePending) { cifs_dbg(VFS, "bogus file nlink value %u\n", fattr->cf_nlink); fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK; } } out_reparse: if (S_ISLNK(fattr->cf_mode)) { fattr->cf_symlink_target = data->symlink_target; data->symlink_target = NULL; } fattr->cf_uid = cifs_sb->ctx->linux_uid; fattr->cf_gid = cifs_sb->ctx->linux_gid; } static int cifs_get_file_info(struct file *filp) { int rc; unsigned int xid; struct cifs_open_info_data data = {}; struct cifs_fattr fattr; struct inode *inode = file_inode(filp); struct cifsFileInfo *cfile = filp->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; if (!server->ops->query_file_info) return -ENOSYS; xid = get_xid(); rc = server->ops->query_file_info(xid, tcon, cfile, &data); switch (rc) { case 0: /* TODO: add support to query reparse tag */ data.adjust_tz = false; if (data.symlink_target) { data.symlink = true; data.reparse_tag = IO_REPARSE_TAG_SYMLINK; } cifs_open_info_to_fattr(&fattr, &data, inode->i_sb); break; case -EREMOTE: cifs_create_junction_fattr(&fattr, inode->i_sb); rc = 0; break; case -EOPNOTSUPP: case -EINVAL: /* * FIXME: legacy server -- fall back to path-based call? * for now, just skip revalidating and mark inode for * immediate reval. */ rc = 0; CIFS_I(inode)->time = 0; goto cgfi_exit; default: goto cgfi_exit; } /* * don't bother with SFU junk here -- just mark inode as needing * revalidation. */ fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; /* if filetype is different, return error */ rc = cifs_fattr_to_inode(inode, &fattr); cgfi_exit: cifs_free_open_info(&data); free_xid(xid); return rc; } /* Simple function to return a 64 bit hash of string. Rarely called */ static __u64 simple_hashstr(const char *str) { const __u64 hash_mult = 1125899906842597ULL; /* a big enough prime */ __u64 hash = 0; while (*str) hash = (hash + (__u64) *str++) * hash_mult; return hash; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /** * cifs_backup_query_path_info - SMB1 fallback code to get ino * * Fallback code to get file metadata when we don't have access to * full_path (EACCES) and have backup creds. 
* * @xid: transaction id used to identify original request in logs * @tcon: information about the server share we have mounted * @sb: the superblock stores info such as disk space available * @full_path: name of the file we are getting the metadata for * @resp_buf: will be set to cifs resp buf and needs to be freed with * cifs_buf_release() when done with @data * @data: will be set to search info result buffer */ static int cifs_backup_query_path_info(int xid, struct cifs_tcon *tcon, struct super_block *sb, const char *full_path, void **resp_buf, FILE_ALL_INFO **data) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_search_info info = {0}; u16 flags; int rc; *resp_buf = NULL; info.endOfSearch = false; if (tcon->unix_ext) info.info_level = SMB_FIND_FILE_UNIX; else if ((tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find) == 0) info.info_level = SMB_FIND_FILE_INFO_STANDARD; else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; else /* no srvino useful for fallback to some netapp */ info.info_level = SMB_FIND_FILE_DIRECTORY_INFO; flags = CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_BACKUP_SEARCH; rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL, flags, &info, false); if (rc) return rc; *resp_buf = (void *)info.ntwrk_buf_start; *data = (FILE_ALL_INFO *)info.srch_entries_start; return 0; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static void cifs_set_fattr_ino(int xid, struct cifs_tcon *tcon, struct super_block *sb, struct inode **inode, const char *full_path, struct cifs_open_info_data *data, struct cifs_fattr *fattr) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct TCP_Server_Info *server = tcon->ses->server; int rc; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { if (*inode) fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid; else fattr->cf_uniqueid = iunique(sb, ROOT_I); return; } /* * If we have an inode pass a NULL tcon to ensure we don't * make a round trip to the server. This only works for SMB2+. */ rc = server->ops->get_srv_inum(xid, *inode ? 
NULL : tcon, cifs_sb, full_path, &fattr->cf_uniqueid, data); if (rc) { /* * If that fails reuse existing ino or generate one * and disable server ones */ if (*inode) fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid; else { fattr->cf_uniqueid = iunique(sb, ROOT_I); cifs_autodisable_serverino(cifs_sb); } return; } /* If no errors, check for zero root inode (invalid) */ if (fattr->cf_uniqueid == 0 && strlen(full_path) == 0) { cifs_dbg(FYI, "Invalid (0) inodenum\n"); if (*inode) { /* reuse */ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid; } else { /* make an ino by hashing the UNC */ fattr->cf_flags |= CIFS_FATTR_FAKE_ROOT_INO; fattr->cf_uniqueid = simple_hashstr(tcon->tree_name); } } } static inline bool is_inode_cache_good(struct inode *ino) { return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0; } static int reparse_info_to_fattr(struct cifs_open_info_data *data, struct super_block *sb, const unsigned int xid, struct cifs_tcon *tcon, const char *full_path, struct cifs_fattr *fattr) { struct TCP_Server_Info *server = tcon->ses->server; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct kvec rsp_iov, *iov = NULL; int rsp_buftype = CIFS_NO_BUFFER; u32 tag = data->reparse_tag; int rc = 0; if (!tag && server->ops->query_reparse_point) { rc = server->ops->query_reparse_point(xid, tcon, cifs_sb, full_path, &tag, &rsp_iov, &rsp_buftype); if (!rc) iov = &rsp_iov; } switch ((data->reparse_tag = tag)) { case 0: /* SMB1 symlink */ iov = NULL; fallthrough; case IO_REPARSE_TAG_NFS: case IO_REPARSE_TAG_SYMLINK: if (!data->symlink_target && server->ops->query_symlink) { rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path, &data->symlink_target, iov); } break; case IO_REPARSE_TAG_MOUNT_POINT: cifs_create_junction_fattr(fattr, sb); goto out; } cifs_open_info_to_fattr(fattr, data, sb); out: free_rsp_buf(rsp_buftype, rsp_iov.iov_base); return rc; } static int cifs_get_fattr(struct cifs_open_info_data *data, struct super_block *sb, int xid, const struct cifs_fid *fid, struct cifs_fattr *fattr, struct inode **inode, const char *full_path) { struct cifs_open_info_data tmp_data = {}; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct tcon_link *tlink; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); void *smb1_backup_rsp_buf = NULL; int rc = 0; int tmprc = 0; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); server = tcon->ses->server; /* * 1. Fetch file metadata if not provided (data) */ if (!data) { rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path, &tmp_data); data = &tmp_data; } /* * 2. Convert it to internal cifs metadata (fattr) */ switch (rc) { case 0: /* * If the file is a reparse point, it is more complicated * since we have to check if its reparse tag matches a known * special file type e.g. symlink or fifo or char etc. 
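* The tag-to-mode translation for those special types is handled by * cifs_reparse_point_to_fattr() above; unknown tags fall through to the * regular file and directory handling.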
*/ if (cifs_open_data_reparse(data)) { rc = reparse_info_to_fattr(data, sb, xid, tcon, full_path, fattr); } else { cifs_open_info_to_fattr(fattr, data, sb); } break; case -EREMOTE: /* DFS link, no metadata available on this server */ cifs_create_junction_fattr(fattr, sb); rc = 0; break; case -EACCES: #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * perm errors, try again with backup flags if possible * * For SMB2 and later the backup intent flag * is already sent if needed on open and there * is no path based FindFirst operation to use * to retry with */ if (backup_cred(cifs_sb) && is_smb1_server(server)) { /* for easier reading */ FILE_ALL_INFO *fi; FILE_DIRECTORY_INFO *fdi; SEARCH_ID_FULL_DIR_INFO *si; rc = cifs_backup_query_path_info(xid, tcon, sb, full_path, &smb1_backup_rsp_buf, &fi); if (rc) goto out; move_cifs_info_to_smb2(&data->fi, fi); fdi = (FILE_DIRECTORY_INFO *)fi; si = (SEARCH_ID_FULL_DIR_INFO *)fi; cifs_dir_info_to_fattr(fattr, fdi, cifs_sb); fattr->cf_uniqueid = le64_to_cpu(si->UniqueId); /* uniqueid set, skip get inum step */ goto handle_mnt_opt; } else { /* nothing we can do, bail out */ goto out; } #else goto out; #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ break; default: cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc); goto out; } /* * 3. Get or update inode number (fattr->cf_uniqueid) */ cifs_set_fattr_ino(xid, tcon, sb, inode, full_path, data, fattr); /* * 4. Tweak fattr based on mount options */ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY handle_mnt_opt: #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* query for SFU type info if supported and needed */ if ((fattr->cf_cifsattrs & ATTR_SYSTEM) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { tmprc = cifs_sfu_type(fattr, full_path, cifs_sb, xid); if (tmprc) cifs_dbg(FYI, "cifs_sfu_type failed: %d\n", tmprc); } /* fill in 0777 bits from ACL */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode, true, full_path, fid); if (rc == -EREMOTE) rc = 0; if (rc) { cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n", __func__, rc); goto out; } } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode, false, full_path, fid); if (rc == -EREMOTE) rc = 0; if (rc) { cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n", __func__, rc); goto out; } } /* fill in remaining high mode bits e.g. 
SUID, VTX */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) cifs_sfu_mode(fattr, full_path, cifs_sb, xid); /* check for Minshall+French symlinks */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); } out: cifs_buf_release(smb1_backup_rsp_buf); cifs_put_tlink(tlink); cifs_free_open_info(&tmp_data); return rc; } int cifs_get_inode_info(struct inode **inode, const char *full_path, struct cifs_open_info_data *data, struct super_block *sb, int xid, const struct cifs_fid *fid) { struct cifs_fattr fattr = {}; int rc; if (is_inode_cache_good(*inode)) { cifs_dbg(FYI, "No need to revalidate cached inode sizes\n"); return 0; } rc = cifs_get_fattr(data, sb, xid, fid, &fattr, inode, full_path); if (rc) goto out; rc = update_inode_info(sb, &fattr, inode); out: kfree(fattr.cf_symlink_target); return rc; } static int smb311_posix_get_fattr(struct cifs_fattr *fattr, const char *full_path, struct super_block *sb, const unsigned int xid) { struct cifs_open_info_data data = {}; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon; struct tcon_link *tlink; struct cifs_sid owner, group; int tmprc; int rc; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); /* * 1. Fetch file metadata */ rc = smb311_posix_query_path_info(xid, tcon, cifs_sb, full_path, &data, &owner, &group); /* * 2. Convert it to internal cifs metadata (fattr) */ switch (rc) { case 0: smb311_posix_info_to_fattr(fattr, &data, &owner, &group, sb); break; case -EREMOTE: /* DFS link, no metadata available on this server */ cifs_create_junction_fattr(fattr, sb); rc = 0; break; case -EACCES: /* * For SMB2 and later the backup intent flag * is already sent if needed on open and there * is no path based FindFirst operation to use * to retry with so nothing we can do, bail out */ goto out; default: cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc); goto out; } /* * 3. 
Tweak fattr based on mount options */ /* check for Minshall+French symlinks */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); } out: cifs_put_tlink(tlink); cifs_free_open_info(&data); return rc; } int smb311_posix_get_inode_info(struct inode **inode, const char *full_path, struct super_block *sb, const unsigned int xid) { struct cifs_fattr fattr = {}; int rc; if (is_inode_cache_good(*inode)) { cifs_dbg(FYI, "No need to revalidate cached inode sizes\n"); return 0; } rc = smb311_posix_get_fattr(&fattr, full_path, sb, xid); if (rc) goto out; rc = update_inode_info(sb, &fattr, inode); out: kfree(fattr.cf_symlink_target); return rc; } static const struct inode_operations cifs_ipc_inode_ops = { .lookup = cifs_lookup, }; static int cifs_find_inode(struct inode *inode, void *opaque) { struct cifs_fattr *fattr = opaque; /* don't match inode with different uniqueid */ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) return 0; /* use createtime like an i_generation field */ if (CIFS_I(inode)->createtime != fattr->cf_createtime) return 0; /* don't match inode of different type */ if (inode_wrong_type(inode, fattr->cf_mode)) return 0; /* if it's not a directory or has no dentries, then flag it */ if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) fattr->cf_flags |= CIFS_FATTR_INO_COLLISION; return 1; } static int cifs_init_inode(struct inode *inode, void *opaque) { struct cifs_fattr *fattr = opaque; CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; CIFS_I(inode)->createtime = fattr->cf_createtime; return 0; } /* * walk dentry list for an inode and report whether it has aliases that * are hashed. We use this to determine if a directory inode can actually * be used. */ static bool inode_has_hashed_dentries(struct inode *inode) { struct dentry *dentry; spin_lock(&inode->i_lock); hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { if (!d_unhashed(dentry) || IS_ROOT(dentry)) { spin_unlock(&inode->i_lock); return true; } } spin_unlock(&inode->i_lock); return false; } /* Given fattrs, get a corresponding inode */ struct inode * cifs_iget(struct super_block *sb, struct cifs_fattr *fattr) { unsigned long hash; struct inode *inode; retry_iget5_locked: cifs_dbg(FYI, "looking for uniqueid=%llu\n", fattr->cf_uniqueid); /* hash down to 32-bits on 32-bit arch */ hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid); inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr); if (inode) { /* was there a potentially problematic inode collision? 
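* (i.e. iget5_locked() matched a directory inode that already has * dentries attached; if any of them are still hashed we stop trusting * server inode numbers, generate a locally unique one with iunique() * and retry)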
*/ if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) { fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION; if (inode_has_hashed_dentries(inode)) { cifs_autodisable_serverino(CIFS_SB(sb)); iput(inode); fattr->cf_uniqueid = iunique(sb, ROOT_I); goto retry_iget5_locked; } } /* can't fail - see cifs_find_inode() */ cifs_fattr_to_inode(inode, fattr); if (sb->s_flags & SB_NOATIME) inode->i_flags |= S_NOATIME | S_NOCMTIME; if (inode->i_state & I_NEW) { inode->i_ino = hash; cifs_fscache_get_inode_cookie(inode); unlock_new_inode(inode); } } return inode; } /* gets root inode */ struct inode *cifs_root_iget(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fattr fattr = {}; struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct inode *inode = NULL; unsigned int xid; char *path = NULL; int len; int rc; if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && cifs_sb->prepath) { len = strlen(cifs_sb->prepath); path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); if (path == NULL) return ERR_PTR(-ENOMEM); path[0] = '/'; memcpy(path+1, cifs_sb->prepath, len); } else { path = kstrdup("", GFP_KERNEL); if (path == NULL) return ERR_PTR(-ENOMEM); } xid = get_xid(); if (tcon->unix_ext) { rc = cifs_get_unix_fattr(path, sb, &fattr, &inode, xid); /* some servers mistakenly claim POSIX support */ if (rc != -EOPNOTSUPP) goto iget_root; cifs_dbg(VFS, "server does not support POSIX extensions\n"); tcon->unix_ext = false; } convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); if (tcon->posix_extensions) rc = smb311_posix_get_fattr(&fattr, path, sb, xid); else rc = cifs_get_fattr(NULL, sb, xid, NULL, &fattr, &inode, path); iget_root: if (!rc) { if (fattr.cf_flags & CIFS_FATTR_JUNCTION) { fattr.cf_flags &= ~CIFS_FATTR_JUNCTION; cifs_autodisable_serverino(cifs_sb); } inode = cifs_iget(sb, &fattr); } if (!inode) { inode = ERR_PTR(rc); goto out; } if (rc && tcon->pipe) { cifs_dbg(FYI, "ipc connection - fake read inode\n"); spin_lock(&inode->i_lock); inode->i_mode |= S_IFDIR; set_nlink(inode, 2); inode->i_op = &cifs_ipc_inode_ops; inode->i_fop = &simple_dir_operations; inode->i_uid = cifs_sb->ctx->linux_uid; inode->i_gid = cifs_sb->ctx->linux_gid; spin_unlock(&inode->i_lock); } else if (rc) { iget_failed(inode); inode = ERR_PTR(rc); } out: kfree(path); free_xid(xid); kfree(fattr.cf_symlink_target); return inode; } int cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, const char *full_path, __u32 dosattr) { bool set_time = false; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct TCP_Server_Info *server; FILE_BASIC_INFO info_buf; if (attrs == NULL) return -EINVAL; server = cifs_sb_master_tcon(cifs_sb)->ses->server; if (!server->ops->set_file_info) return -ENOSYS; info_buf.Pad = 0; if (attrs->ia_valid & ATTR_ATIME) { set_time = true; info_buf.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime)); } else info_buf.LastAccessTime = 0; if (attrs->ia_valid & ATTR_MTIME) { set_time = true; info_buf.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime)); } else info_buf.LastWriteTime = 0; /* * Samba throws this field away, but windows may actually use it. * Do not set ctime unless other time stamps are changed explicitly * (i.e. by utimes()) since we would then have a mix of client and * server times. 
*/ if (set_time && (attrs->ia_valid & ATTR_CTIME)) { cifs_dbg(FYI, "CIFS - CTIME changed\n"); info_buf.ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); } else info_buf.ChangeTime = 0; info_buf.CreationTime = 0; /* don't change */ info_buf.Attributes = cpu_to_le32(dosattr); return server->ops->set_file_info(inode, full_path, &info_buf, xid); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit * and rename it to a random name that hopefully won't conflict with * anything else. */ int cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, const unsigned int xid) { int oplock = 0; int rc; struct cifs_fid fid; struct cifs_open_parms oparms; struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct cifs_tcon *tcon; __u32 dosattr, origattr; FILE_BASIC_INFO *info_buf = NULL; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); /* * We cannot rename the file if the server doesn't support * CAP_INFOLEVEL_PASSTHRU */ if (!(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)) { rc = -EBUSY; goto out; } oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = DELETE | FILE_WRITE_ATTRIBUTES, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .path = full_path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc != 0) goto out; origattr = cifsInode->cifsAttrs; if (origattr == 0) origattr |= ATTR_NORMAL; dosattr = origattr & ~ATTR_READONLY; if (dosattr == 0) dosattr |= ATTR_NORMAL; dosattr |= ATTR_HIDDEN; /* set ATTR_HIDDEN and clear ATTR_READONLY, but only if needed */ if (dosattr != origattr) { info_buf = kzalloc(sizeof(*info_buf), GFP_KERNEL); if (info_buf == NULL) { rc = -ENOMEM; goto out_close; } info_buf->Attributes = cpu_to_le32(dosattr); rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid, current->tgid); /* although we would like to mark the file hidden if that fails we will still try to rename it */ if (!rc) cifsInode->cifsAttrs = dosattr; else dosattr = origattr; /* since not able to change them */ } /* rename the file */ rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, NULL, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (rc != 0) { rc = -EBUSY; goto undo_setattr; } /* try to set DELETE_ON_CLOSE */ if (!test_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags)) { rc = CIFSSMBSetFileDisposition(xid, tcon, true, fid.netfid, current->tgid); /* * some samba versions return -ENOENT when we try to set the * file disposition here. Likely a samba bug, but work around * it for now. This means that some cifsXXX files may hang * around after they shouldn't. * * BB: remove this hack after more servers have the fix */ if (rc == -ENOENT) rc = 0; else if (rc != 0) { rc = -EBUSY; goto undo_rename; } set_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags); } out_close: CIFSSMBClose(xid, tcon, fid.netfid); out: kfree(info_buf); cifs_put_tlink(tlink); return rc; /* * reset everything back to the original state. Don't bother * dealing with errors here since we can't do anything about * them anyway. 
*/ undo_rename: CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, dentry->d_name.name, cifs_sb->local_nls, cifs_remap(cifs_sb)); undo_setattr: if (dosattr != origattr) { info_buf->Attributes = cpu_to_le32(origattr); if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid, current->tgid)) cifsInode->cifsAttrs = origattr; } goto out_close; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* copied from fs/nfs/dir.c with small changes */ static void cifs_drop_nlink(struct inode *inode) { spin_lock(&inode->i_lock); if (inode->i_nlink > 0) drop_nlink(inode); spin_unlock(&inode->i_lock); } /* * If d_inode(dentry) is null (usually meaning the cached dentry * is a negative dentry) then we would attempt a standard SMB delete, but * if that fails we can not attempt the fall back mechanisms on EACCES * but will return the EACCES to the caller. Note that the VFS does not call * unlink on negative dentries currently. */ int cifs_unlink(struct inode *dir, struct dentry *dentry) { int rc = 0; unsigned int xid; const char *full_path; void *page; struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cifs_inode; struct super_block *sb = dir->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct iattr *attrs = NULL; __u32 dosattr = 0, origattr = 0; cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); server = tcon->ses->server; xid = get_xid(); page = alloc_dentry_path(); if (tcon->nodelete) { rc = -EACCES; goto unlink_out; } /* Unlink can be called from rename so we can not take the * sb->s_vfs_rename_mutex here */ full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto unlink_out; } cifs_close_deferred_file_under_dentry(tcon, full_path); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_dbg(FYI, "posix del rc %d\n", rc); if ((rc == 0) || (rc == -ENOENT)) goto psx_del_no_retry; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ retry_std_delete: if (!server->ops->unlink) { rc = -ENOSYS; goto psx_del_no_retry; } rc = server->ops->unlink(xid, tcon, full_path, cifs_sb); psx_del_no_retry: if (!rc) { if (inode) cifs_drop_nlink(inode); } else if (rc == -ENOENT) { d_drop(dentry); } else if (rc == -EBUSY) { if (server->ops->rename_pending_delete) { rc = server->ops->rename_pending_delete(full_path, dentry, xid); if (rc == 0) cifs_drop_nlink(inode); } } else if ((rc == -EACCES) && (dosattr == 0) && inode) { attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (attrs == NULL) { rc = -ENOMEM; goto out_reval; } /* try to reset dos attributes */ cifs_inode = CIFS_I(inode); origattr = cifs_inode->cifsAttrs; if (origattr == 0) origattr |= ATTR_NORMAL; dosattr = origattr & ~ATTR_READONLY; if (dosattr == 0) dosattr |= ATTR_NORMAL; dosattr |= ATTR_HIDDEN; rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr); if (rc != 0) goto out_reval; goto retry_std_delete; } /* undo the setattr if we errored out and it's needed */ if (rc != 0 && dosattr != 0) cifs_set_file_info(inode, attrs, xid, full_path, origattr); out_reval: if (inode) { cifs_inode = CIFS_I(inode); cifs_inode->time = 0; /* will force revalidate to get 
info when needed */ inode_set_ctime_current(inode); } dir->i_mtime = inode_set_ctime_current(dir); cifs_inode = CIFS_I(dir); CIFS_I(dir)->time = 0; /* force revalidate of dir as well */ unlink_out: free_dentry_path(page); kfree(attrs); free_xid(xid); cifs_put_tlink(tlink); return rc; } static int cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, const char *full_path, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, const unsigned int xid) { int rc = 0; struct inode *inode = NULL; if (tcon->posix_extensions) rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb, xid); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ else rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb, xid, NULL); if (rc) return rc; if (!S_ISDIR(inode->i_mode)) { /* * mkdir succeeded, but another client has managed to remove the * sucker and replace it with non-directory. Return success, * but don't leave the child in dcache. */ iput(inode); d_drop(dentry); return 0; } /* * setting nlink not necessary except in cases where we failed to get it * from the server or was set bogus. Also, since this is a brand new * inode, no need to grab the i_lock before setting the i_nlink. */ if (inode->i_nlink < 2) set_nlink(inode, 2); mode &= ~current_umask(); /* must turn on setgid bit if parent dir has it */ if (parent->i_mode & S_ISGID) mode |= S_ISGID; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = current_fsuid(); if (parent->i_mode & S_ISGID) args.gid = parent->i_gid; else args.gid = current_fsgid(); } else { args.uid = INVALID_UID; /* no change */ args.gid = INVALID_GID; /* no change */ } CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, cifs_remap(cifs_sb)); } else { #else { #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct TCP_Server_Info *server = tcon->ses->server; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) server->ops->mkdir_setinfo(inode, full_path, cifs_sb, tcon, xid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) inode->i_mode = (mode | S_IFDIR); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { inode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) inode->i_gid = parent->i_gid; else inode->i_gid = current_fsgid(); } } d_instantiate(dentry, inode); return 0; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, const char *full_path, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, const unsigned int xid) { int rc = 0; u32 oplock = 0; FILE_UNIX_BASIC_INFO *info = NULL; struct inode *newinode = NULL; struct cifs_fattr fattr; info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); if (info == NULL) { rc = -ENOMEM; goto posix_mkdir_out; } mode &= ~current_umask(); rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode, NULL /* netfid */, info, &oplock, full_path, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (rc == -EOPNOTSUPP) goto posix_mkdir_out; else if (rc) { cifs_dbg(FYI, "posix mkdir returned 0x%x\n", rc); d_drop(dentry); goto posix_mkdir_out; } if (info->Type == cpu_to_le32(-1)) /* no return info, go query for it */ goto 
posix_mkdir_get_info; /* * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if * need to set uid/gid. */ cifs_unix_basic_to_fattr(&fattr, info, cifs_sb); cifs_fill_uniqueid(inode->i_sb, &fattr); newinode = cifs_iget(inode->i_sb, &fattr); if (!newinode) goto posix_mkdir_get_info; d_instantiate(dentry, newinode); #ifdef CONFIG_CIFS_DEBUG2 cifs_dbg(FYI, "instantiated dentry %p %pd to inode %p\n", dentry, dentry, newinode); if (newinode->i_nlink != 2) cifs_dbg(FYI, "unexpected number of links %d\n", newinode->i_nlink); #endif posix_mkdir_out: kfree(info); return rc; posix_mkdir_get_info: rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon, xid); goto posix_mkdir_out; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int cifs_mkdir(struct mnt_idmap *idmap, struct inode *inode, struct dentry *direntry, umode_t mode) { int rc = 0; unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; const char *full_path; void *page; cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n", mode, inode); cifs_sb = CIFS_SB(inode->i_sb); if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto mkdir_out; } server = tcon->ses->server; if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) { rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path, cifs_sb); d_drop(direntry); /* for time being always refresh inode info */ goto mkdir_out; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, tcon, xid); if (rc != -EOPNOTSUPP) goto mkdir_out; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!server->ops->mkdir) { rc = -ENOSYS; goto mkdir_out; } /* BB add setting the equivalent of mode via CreateX w/ACLs */ rc = server->ops->mkdir(xid, inode, mode, tcon, full_path, cifs_sb); if (rc) { cifs_dbg(FYI, "cifs_mkdir returned 0x%x\n", rc); d_drop(direntry); goto mkdir_out; } /* TODO: skip this for smb2/smb3 */ rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon, xid); mkdir_out: /* * Force revalidate to get parent dir info when needed since cached * attributes are invalid now. 
*/ CIFS_I(inode)->time = 0; free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; } int cifs_rmdir(struct inode *inode, struct dentry *direntry) { int rc = 0; unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; const char *full_path; void *page = alloc_dentry_path(); struct cifsInodeInfo *cifsInode; cifs_dbg(FYI, "cifs_rmdir, inode = 0x%p\n", inode); xid = get_xid(); full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto rmdir_exit; } cifs_sb = CIFS_SB(inode->i_sb); if (unlikely(cifs_forced_shutdown(cifs_sb))) { rc = -EIO; goto rmdir_exit; } tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto rmdir_exit; } tcon = tlink_tcon(tlink); server = tcon->ses->server; if (!server->ops->rmdir) { rc = -ENOSYS; cifs_put_tlink(tlink); goto rmdir_exit; } if (tcon->nodelete) { rc = -EACCES; cifs_put_tlink(tlink); goto rmdir_exit; } rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb); cifs_put_tlink(tlink); if (!rc) { spin_lock(&d_inode(direntry)->i_lock); i_size_write(d_inode(direntry), 0); clear_nlink(d_inode(direntry)); spin_unlock(&d_inode(direntry)->i_lock); } cifsInode = CIFS_I(d_inode(direntry)); /* force revalidate to go get info when needed */ cifsInode->time = 0; cifsInode = CIFS_I(inode); /* * Force revalidate to get parent dir info when needed since cached * attributes are invalid now. */ cifsInode->time = 0; inode_set_ctime_current(d_inode(direntry)); inode->i_mtime = inode_set_ctime_current(inode); rmdir_exit: free_dentry_path(page); free_xid(xid); return rc; } static int cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, const char *from_path, struct dentry *to_dentry, const char *to_path) { struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb); struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_fid fid; struct cifs_open_parms oparms; int oplock; #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int rc; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); server = tcon->ses->server; if (!server->ops->rename) return -ENOSYS; /* try path-based rename first */ rc = server->ops->rename(xid, tcon, from_path, to_path, cifs_sb); /* * Don't bother with rename by filehandle unless file is busy and * source. Note that cross directory moves do not work with * rename by filehandle to various Windows servers. 
*/ if (rc == 0 || rc != -EBUSY) goto do_rename_exit; /* Don't fall back to using SMB on SMB 2+ mount */ if (server->vals->protocol_id != 0) goto do_rename_exit; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* open-file renames don't work across directories */ if (to_dentry->d_parent != from_dentry->d_parent) goto do_rename_exit; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, /* open the file to be renamed -- we need DELETE perms */ .desired_access = DELETE, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .path = from_path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc == 0) { rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, (const char *) to_dentry->d_name.name, cifs_sb->local_nls, cifs_remap(cifs_sb)); CIFSSMBClose(xid, tcon, fid.netfid); } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ do_rename_exit: if (rc == 0) d_move(from_dentry, to_dentry); cifs_put_tlink(tlink); return rc; } int cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, struct dentry *source_dentry, struct inode *target_dir, struct dentry *target_dentry, unsigned int flags) { const char *from_name, *to_name; void *page1, *page2; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; unsigned int xid; int rc, tmprc; int retry_count = 0; FILE_UNIX_BASIC_INFO *info_buf_source = NULL; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY FILE_UNIX_BASIC_INFO *info_buf_target; #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (flags & ~RENAME_NOREPLACE) return -EINVAL; cifs_sb = CIFS_SB(source_dir->i_sb); if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); page1 = alloc_dentry_path(); page2 = alloc_dentry_path(); xid = get_xid(); from_name = build_path_from_dentry(source_dentry, page1); if (IS_ERR(from_name)) { rc = PTR_ERR(from_name); goto cifs_rename_exit; } to_name = build_path_from_dentry(target_dentry, page2); if (IS_ERR(to_name)) { rc = PTR_ERR(to_name); goto cifs_rename_exit; } cifs_close_deferred_file_under_dentry(tcon, from_name); if (d_inode(target_dentry) != NULL) cifs_close_deferred_file_under_dentry(tcon, to_name); rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); if (rc == -EACCES) { while (retry_count < 3) { cifs_close_all_deferred_files(tcon); rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); if (rc != -EACCES) break; retry_count++; } } /* * No-replace is the natural behavior for CIFS, so skip unlink hacks. */ if (flags & RENAME_NOREPLACE) goto cifs_rename_exit; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (rc == -EEXIST && tcon->unix_ext) { /* * Are src and dst hardlinks of same inode? We can only tell * with unix extensions enabled. */ info_buf_source = kmalloc_array(2, sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); if (info_buf_source == NULL) { rc = -ENOMEM; goto cifs_rename_exit; } info_buf_target = info_buf_source + 1; tmprc = CIFSSMBUnixQPathInfo(xid, tcon, from_name, info_buf_source, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (tmprc != 0) goto unlink_target; tmprc = CIFSSMBUnixQPathInfo(xid, tcon, to_name, info_buf_target, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (tmprc == 0 && (info_buf_source->UniqueId == info_buf_target->UniqueId)) { /* same file, POSIX says that this is a noop */ rc = 0; goto cifs_rename_exit; } } /* * else ... 
BB we could add the same check for Windows by * checking the UniqueId via FILE_INTERNAL_INFO */ unlink_target: #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* Try unlinking the target dentry if it's not negative */ if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) tmprc = cifs_rmdir(target_dir, target_dentry); else tmprc = cifs_unlink(target_dir, target_dentry); if (tmprc) goto cifs_rename_exit; rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); } /* force revalidate to go get info when needed */ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir, inode_set_ctime_current(target_dir)); cifs_rename_exit: kfree(info_buf_source); free_dentry_path(page2); free_dentry_path(page1); free_xid(xid); cifs_put_tlink(tlink); return rc; } static bool cifs_dentry_needs_reval(struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct cached_fid *cfid = NULL; if (cifs_i->time == 0) return true; if (CIFS_CACHE_READ(cifs_i)) return false; if (!lookupCacheEnabled) return true; if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) { spin_lock(&cfid->fid_lock); if (cfid->time && cifs_i->time > cfid->time) { spin_unlock(&cfid->fid_lock); close_cached_dir(cfid); return false; } spin_unlock(&cfid->fid_lock); close_cached_dir(cfid); } /* * depending on inode type, check if attribute caching disabled for * files or directories */ if (S_ISDIR(inode->i_mode)) { if (!cifs_sb->ctx->acdirmax) return true; if (!time_in_range(jiffies, cifs_i->time, cifs_i->time + cifs_sb->ctx->acdirmax)) return true; } else { /* file */ if (!cifs_sb->ctx->acregmax) return true; if (!time_in_range(jiffies, cifs_i->time, cifs_i->time + cifs_sb->ctx->acregmax)) return true; } /* hardlinked files w/ noserverino get "special" treatment */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && S_ISREG(inode->i_mode) && inode->i_nlink != 1) return true; return false; } /* * Zap the cache. Called when invalid_mapping flag is set. 
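* Returns 0 on success or the error returned by * invalidate_inode_pages2().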
*/ int cifs_invalidate_mapping(struct inode *inode) { int rc = 0; if (inode->i_mapping && inode->i_mapping->nrpages != 0) { rc = invalidate_inode_pages2(inode->i_mapping); if (rc) cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n", __func__, inode, rc); } return rc; } /** * cifs_wait_bit_killable - helper for functions that are sleeping on bit locks * * @key: currently unused * @mode: the task state to sleep in */ static int cifs_wait_bit_killable(struct wait_bit_key *key, int mode) { schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; return 0; } int cifs_revalidate_mapping(struct inode *inode) { int rc; unsigned long *flags = &CIFS_I(inode)->flags; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); /* swapfiles are not supposed to be shared */ if (IS_SWAPFILE(inode)) return 0; rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); if (rc) return rc; if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) { /* for cache=singleclient, do not invalidate */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) goto skip_invalidate; rc = cifs_invalidate_mapping(inode); if (rc) set_bit(CIFS_INO_INVALID_MAPPING, flags); } skip_invalidate: clear_bit_unlock(CIFS_INO_LOCK, flags); smp_mb__after_atomic(); wake_up_bit(flags, CIFS_INO_LOCK); return rc; } int cifs_zap_mapping(struct inode *inode) { set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags); return cifs_revalidate_mapping(inode); } int cifs_revalidate_file_attr(struct file *filp) { int rc = 0; struct dentry *dentry = file_dentry(filp); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!cifs_dentry_needs_reval(dentry)) return rc; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tlink_tcon(cfile->tlink)->unix_ext) rc = cifs_get_file_info_unix(filp); else #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_get_file_info(filp); return rc; } int cifs_revalidate_dentry_attr(struct dentry *dentry) { unsigned int xid; int rc = 0; struct inode *inode = d_inode(dentry); struct super_block *sb = dentry->d_sb; const char *full_path; void *page; int count = 0; if (inode == NULL) return -ENOENT; if (!cifs_dentry_needs_reval(dentry)) return rc; xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } cifs_dbg(FYI, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld\n", full_path, inode, inode->i_count.counter, dentry, cifs_get_time(dentry), jiffies); again: if (cifs_sb_master_tcon(CIFS_SB(sb))->posix_extensions) rc = smb311_posix_get_inode_info(&inode, full_path, sb, xid); else if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); else rc = cifs_get_inode_info(&inode, full_path, NULL, sb, xid, NULL); if (rc == -EAGAIN && count++ < 10) goto again; out: free_dentry_path(page); free_xid(xid); return rc; } int cifs_revalidate_file(struct file *filp) { int rc; struct inode *inode = file_inode(filp); rc = cifs_revalidate_file_attr(filp); if (rc) return rc; return cifs_revalidate_mapping(inode); } /* revalidate a dentry's inode attributes */ int cifs_revalidate_dentry(struct dentry *dentry) { int rc; struct inode *inode = d_inode(dentry); rc = cifs_revalidate_dentry_attr(dentry); if (rc) return rc; return cifs_revalidate_mapping(inode); } int cifs_getattr(struct mnt_idmap *idmap, 
const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct dentry *dentry = path->dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct inode *inode = d_inode(dentry); int rc; if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) return -EIO; /* * We need to be sure that all dirty pages are written and the server * has actual ctime, mtime and file length. */ if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) && !CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping && inode->i_mapping->nrpages != 0) { rc = filemap_fdatawait(inode->i_mapping); if (rc) { mapping_set_error(inode->i_mapping, rc); return rc; } } if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_FORCE_SYNC) CIFS_I(inode)->time = 0; /* force revalidate */ /* * If the caller doesn't require syncing, only sync if * necessary (e.g. due to earlier truncate or setattr * invalidating the cached metadata) */ if (((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) || (CIFS_I(inode)->time == 0)) { rc = cifs_revalidate_dentry_attr(dentry); if (rc) return rc; } generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); stat->blksize = cifs_sb->ctx->bsize; stat->ino = CIFS_I(inode)->uniqueid; /* old CIFS Unix Extensions doesn't return create time */ if (CIFS_I(inode)->createtime) { stat->result_mask |= STATX_BTIME; stat->btime = cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime)); } stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED); if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED) stat->attributes |= STATX_ATTR_COMPRESSED; if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED) stat->attributes |= STATX_ATTR_ENCRYPTED; /* * If on a multiuser mount without unix extensions or cifsacl being * enabled, and the admin hasn't overridden them, set the ownership * to the fsuid/fsgid of the current process. */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) && !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && !tcon->unix_ext) { if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) stat->uid = current_fsuid(); if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) stat->gid = current_fsgid(); } return 0; } int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, u64 len) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct TCP_Server_Info *server = tcon->ses->server; struct cifsFileInfo *cfile; int rc; if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; /* * We need to be sure that all dirty pages are written as they * might fill holes on the server. 
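* (an extent map fetched before the flush could otherwise miss data * that so far exists only in the client's page cache)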
*/ if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping && inode->i_mapping->nrpages != 0) { rc = filemap_fdatawait(inode->i_mapping); if (rc) { mapping_set_error(inode->i_mapping, rc); return rc; } } cfile = find_readable_file(cifs_i, false); if (cfile == NULL) return -EINVAL; if (server->ops->fiemap) { rc = server->ops->fiemap(tcon, cfile, fei, start, len); cifsFileInfo_put(cfile); return rc; } cifsFileInfo_put(cfile); return -EOPNOTSUPP; } int cifs_truncate_page(struct address_space *mapping, loff_t from) { pgoff_t index = from >> PAGE_SHIFT; unsigned offset = from & (PAGE_SIZE - 1); struct page *page; int rc = 0; page = grab_cache_page(mapping, index); if (!page) return -ENOMEM; zero_user_segment(page, offset, PAGE_SIZE); unlock_page(page); put_page(page); return rc; } void cifs_setsize(struct inode *inode, loff_t offset) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); spin_lock(&inode->i_lock); i_size_write(inode, offset); spin_unlock(&inode->i_lock); /* Cached inode must be refreshed on truncate */ cifs_i->time = 0; truncate_pagecache(inode, offset); } static int cifs_set_file_size(struct inode *inode, struct iattr *attrs, unsigned int xid, const char *full_path) { int rc; struct cifsFileInfo *open_file; struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon = NULL; struct TCP_Server_Info *server; /* * To avoid spurious oplock breaks from the server, in the case of * inodes that we already have open, avoid doing path based * setting of file size if we can do it by handle. * This keeps our caching token (oplock) and avoids timeouts * when the local oplock break takes longer to flush * writebehind data than the SMB timeout for the SetPathInfo * request would allow */ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); if (open_file) { tcon = tlink_tcon(open_file->tlink); server = tcon->ses->server; if (server->ops->set_file_size) rc = server->ops->set_file_size(xid, tcon, open_file, attrs->ia_size, false); else rc = -ENOSYS; cifsFileInfo_put(open_file); cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); } else rc = -EINVAL; if (!rc) goto set_size_out; if (tcon == NULL) { tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); server = tcon->ses->server; } /* * Set file size by pathname rather than by handle either because no * valid, writeable file handle for it was found or because there was * an error setting it by handle. */ if (server->ops->set_path_size) rc = server->ops->set_path_size(xid, tcon, full_path, attrs->ia_size, cifs_sb, false); else rc = -ENOSYS; cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); if (tlink) cifs_put_tlink(tlink); set_size_out: if (rc == 0) { cifsInode->server_eof = attrs->ia_size; cifs_setsize(inode, attrs->ia_size); /* * i_blocks is not related to (i_size / i_blksize), but instead * 512 byte (2**9) size is required for calculating num blocks. * Until we can query the server for actual allocation size, * this is the best estimate we have for blocks allocated for a file. * Number of blocks must be rounded up so size 1 is not 0 blocks. */ inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9; /* * The man page of truncate says if the size changed, * then the st_ctime and st_mtime fields for the file * are updated. */
*/ attrs->ia_ctime = attrs->ia_mtime = current_time(inode); attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; cifs_truncate_page(inode->i_mapping, inode->i_size); } return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) { int rc; unsigned int xid; const char *full_path; void *page = alloc_dentry_path(); struct inode *inode = d_inode(direntry); struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; struct cifs_unix_set_info_args *args = NULL; struct cifsFileInfo *open_file; cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n", direntry, attrs->ia_valid); xid = get_xid(); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) attrs->ia_valid |= ATTR_FORCE; rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); if (rc < 0) goto out; full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } /* * Attempt to flush data before changing attributes. We need to do * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the * ownership or mode then we may also need to do this. Here, we take * the safe way out and just do the flush on all setattr requests. If * the flush returns error, store it to report later and continue. * * BB: This should be smarter. Why bother flushing pages that * will be truncated anyway? Also, should we error out here if * the flush returns error? */ rc = filemap_write_and_wait(inode->i_mapping); if (is_interrupt_error(rc)) { rc = -ERESTARTSYS; goto out; } mapping_set_error(inode->i_mapping, rc); rc = 0; if (attrs->ia_valid & ATTR_SIZE) { rc = cifs_set_file_size(inode, attrs, xid, full_path); if (rc != 0) goto out; } /* skip mode change if it's just for clearing setuid/setgid */ if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) attrs->ia_valid &= ~ATTR_MODE; args = kmalloc(sizeof(*args), GFP_KERNEL); if (args == NULL) { rc = -ENOMEM; goto out; } /* set up the struct */ if (attrs->ia_valid & ATTR_MODE) args->mode = attrs->ia_mode; else args->mode = NO_CHANGE_64; if (attrs->ia_valid & ATTR_UID) args->uid = attrs->ia_uid; else args->uid = INVALID_UID; /* no change */ if (attrs->ia_valid & ATTR_GID) args->gid = attrs->ia_gid; else args->gid = INVALID_GID; /* no change */ if (attrs->ia_valid & ATTR_ATIME) args->atime = cifs_UnixTimeToNT(attrs->ia_atime); else args->atime = NO_CHANGE_64; if (attrs->ia_valid & ATTR_MTIME) args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime); else args->mtime = NO_CHANGE_64; if (attrs->ia_valid & ATTR_CTIME) args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime); else args->ctime = NO_CHANGE_64; args->device = 0; open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); if (open_file) { u16 nfid = open_file->fid.netfid; u32 npid = open_file->pid; pTcon = tlink_tcon(open_file->tlink); rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid); cifsFileInfo_put(open_file); } else { tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto out; } pTcon = tlink_tcon(tlink); rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_put_tlink(tlink); } if (rc) goto out; if ((attrs->ia_valid & ATTR_SIZE) && attrs->ia_size != i_size_read(inode)) { truncate_setsize(inode, attrs->ia_size); fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size); } setattr_copy(&nop_mnt_idmap, inode, attrs); mark_inode_dirty(inode); /* force revalidate when any of these times 
are set since some of the fs types (e.g. ext3, fat) do not have fine enough time granularity to match protocol, and we do not have a way (yet) to query the server fs's time granularity (and whether it rounds times down). */ if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)) cifsInode->time = 0; out: kfree(args); free_dentry_path(page); free_xid(xid); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) { unsigned int xid; kuid_t uid = INVALID_UID; kgid_t gid = INVALID_GID; struct inode *inode = d_inode(direntry); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifsFileInfo *wfile; struct cifs_tcon *tcon; const char *full_path; void *page = alloc_dentry_path(); int rc = -EACCES; __u32 dosattr = 0; __u64 mode = NO_CHANGE_64; xid = get_xid(); cifs_dbg(FYI, "setattr on file %pd attrs->ia_valid 0x%x\n", direntry, attrs->ia_valid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) attrs->ia_valid |= ATTR_FORCE; rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); if (rc < 0) goto cifs_setattr_exit; full_path = build_path_from_dentry(direntry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto cifs_setattr_exit; } /* * Attempt to flush data before changing attributes. We need to do * this for ATTR_SIZE and ATTR_MTIME. If the flush of the data * returns error, store it to report later and continue. * * BB: This should be smarter. Why bother flushing pages that * will be truncated anyway? Also, should we error out here if * the flush returns error? Do we need to check for ATTR_MTIME_SET flag? */ if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) { rc = filemap_write_and_wait(inode->i_mapping); if (is_interrupt_error(rc)) { rc = -ERESTARTSYS; goto cifs_setattr_exit; } mapping_set_error(inode->i_mapping, rc); } rc = 0; if ((attrs->ia_valid & ATTR_MTIME) && !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile); if (!rc) { tcon = tlink_tcon(wfile->tlink); rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid); cifsFileInfo_put(wfile); if (rc) goto cifs_setattr_exit; } else if (rc != -EBADF) goto cifs_setattr_exit; else rc = 0; } if (attrs->ia_valid & ATTR_SIZE) { rc = cifs_set_file_size(inode, attrs, xid, full_path); if (rc != 0) goto cifs_setattr_exit; } if (attrs->ia_valid & ATTR_UID) uid = attrs->ia_uid; if (attrs->ia_valid & ATTR_GID) gid = attrs->ia_gid; if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) { if (uid_valid(uid) || gid_valid(gid)) { mode = NO_CHANGE_64; rc = id_mode_to_cifs_acl(inode, full_path, &mode, uid, gid); if (rc) { cifs_dbg(FYI, "%s: Setting id failed with error: %d\n", __func__, rc); goto cifs_setattr_exit; } } } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) attrs->ia_valid &= ~(ATTR_UID | ATTR_GID); /* skip mode change if it's just for clearing setuid/setgid */ if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) attrs->ia_valid &= ~ATTR_MODE; if (attrs->ia_valid & ATTR_MODE) { mode = attrs->ia_mode; rc = 0; if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) { rc = id_mode_to_cifs_acl(inode, full_path, &mode, INVALID_UID, INVALID_GID); if (rc) { cifs_dbg(FYI, "%s: Setting ACL failed with error: %d\n", __func__, rc); goto cifs_setattr_exit; } /* * In case of CIFS_MOUNT_CIFS_ACL, we cannot support all modes.
* Pick up the actual mode bits that were set. */ if (mode != attrs->ia_mode) attrs->ia_mode = mode; } else if (((mode & S_IWUGO) == 0) && (cifsInode->cifsAttrs & ATTR_READONLY) == 0) { dosattr = cifsInode->cifsAttrs | ATTR_READONLY; /* fix up mode if we're not using dynperm */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) attrs->ia_mode = inode->i_mode & ~S_IWUGO; } else if ((mode & S_IWUGO) && (cifsInode->cifsAttrs & ATTR_READONLY)) { dosattr = cifsInode->cifsAttrs & ~ATTR_READONLY; /* Attributes of 0 are ignored */ if (dosattr == 0) dosattr |= ATTR_NORMAL; /* reset local inode permissions to normal */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { attrs->ia_mode &= ~(S_IALLUGO); if (S_ISDIR(inode->i_mode)) attrs->ia_mode |= cifs_sb->ctx->dir_mode; else attrs->ia_mode |= cifs_sb->ctx->file_mode; } } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { /* ignore mode change - ATTR_READONLY hasn't changed */ attrs->ia_valid &= ~ATTR_MODE; } } if (attrs->ia_valid & (ATTR_MTIME|ATTR_ATIME|ATTR_CTIME) || ((attrs->ia_valid & ATTR_MODE) && dosattr)) { rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr); /* BB: check for rc = -EOPNOTSUPP and switch to legacy mode */ /* Even if error on time set, no sense failing the call if the server would set the time to a reasonable value anyway, and this check ensures that we are not being called from sys_utimes in which case we ought to fail the call back to the user when the server rejects the call */ if ((rc) && (attrs->ia_valid & (ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE))) rc = 0; } /* do not need local check to inode_check_ok since the server does that */ if (rc) goto cifs_setattr_exit; if ((attrs->ia_valid & ATTR_SIZE) && attrs->ia_size != i_size_read(inode)) { truncate_setsize(inode, attrs->ia_size); fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size); } setattr_copy(&nop_mnt_idmap, inode, attrs); mark_inode_dirty(inode); cifs_setattr_exit: free_xid(xid); free_dentry_path(page); return rc; } int cifs_setattr(struct mnt_idmap *idmap, struct dentry *direntry, struct iattr *attrs) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int rc, retries = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; do { #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (pTcon->unix_ext) rc = cifs_setattr_unix(direntry, attrs); else #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_setattr_nounix(direntry, attrs); retries++; } while (is_retryable_error(rc) && retries < 2); /* BB: add cifs_setattr_legacy for really old servers */ return rc; }
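/*
 * Illustrative userspace sketch (not part of this file; the mount path
 * below is made up): cifs_getattr() above treats AT_STATX_FORCE_SYNC as
 * a request to drop the cached attributes and revalidate against the
 * server, so a caller can force that round trip with statx(2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *
 *		// bypass the attribute cache and ask the server
 *		if (statx(AT_FDCWD, "/mnt/cifs/file", AT_STATX_FORCE_SYNC,
 *			  STATX_BASIC_STATS, &stx) != 0) {
 *			perror("statx");
 *			return 1;
 *		}
 *		printf("size=%llu nlink=%u\n",
 *		       (unsigned long long)stx.stx_size, stx.stx_nlink);
 *		return 0;
 *	}
 */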
linux-master
fs/smb/client/inode.c
// SPDX-License-Identifier: LGPL-2.1 /* * * vfs operations that deal with io control * * Copyright (C) International Business Machines Corp., 2005,2013 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/mm.h> #include <linux/pagemap.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifsfs.h" #include "cifs_ioctl.h" #include "smb2proto.h" #include "smb2glob.h" #include <linux/btrfs.h> static long cifs_ioctl_query_info(unsigned int xid, struct file *filep, unsigned long p) { struct inode *inode = file_inode(filep); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct dentry *dentry = filep->f_path.dentry; const unsigned char *path; void *page = alloc_dentry_path(); __le16 *utf16_path = NULL, root_path; int rc = 0; path = build_path_from_dentry(dentry, page); if (IS_ERR(path)) { free_dentry_path(page); return PTR_ERR(path); } cifs_dbg(FYI, "%s %s\n", __func__, path); if (!path[0]) { root_path = 0; utf16_path = &root_path; } else { utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb); if (!utf16_path) { rc = -ENOMEM; goto ici_exit; } } if (tcon->ses->server->ops->ioctl_query_info) rc = tcon->ses->server->ops->ioctl_query_info( xid, tcon, cifs_sb, utf16_path, filep->private_data ? 0 : 1, p); else rc = -EOPNOTSUPP; ici_exit: if (utf16_path != &root_path) kfree(utf16_path); free_dentry_path(page); return rc; } static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file, unsigned long srcfd) { int rc; struct fd src_file; struct inode *src_inode; cifs_dbg(FYI, "ioctl copychunk range\n"); /* the destination must be opened for writing */ if (!(dst_file->f_mode & FMODE_WRITE)) { cifs_dbg(FYI, "file target not open for write\n"); return -EINVAL; } /* check if target volume is readonly and take reference */ rc = mnt_want_write_file(dst_file); if (rc) { cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc); return rc; } src_file = fdget(srcfd); if (!src_file.file) { rc = -EBADF; goto out_drop_write; } if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { rc = -EBADF; cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); goto out_fput; } src_inode = file_inode(src_file.file); rc = -EINVAL; if (S_ISDIR(src_inode->i_mode)) goto out_fput; rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0, src_inode->i_size, 0); if (rc > 0) rc = 0; out_fput: fdput(src_file); out_drop_write: mnt_drop_write_file(dst_file); return rc; } static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon, void __user *arg) { int rc = 0; struct smb_mnt_fs_info *fsinf; fsinf = kzalloc(sizeof(struct smb_mnt_fs_info), GFP_KERNEL); if (fsinf == NULL) return -ENOMEM; fsinf->version = 1; fsinf->protocol_id = tcon->ses->server->vals->protocol_id; fsinf->device_characteristics = le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics); fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); fsinf->fs_attributes = le32_to_cpu(tcon->fsAttrInfo.Attributes); fsinf->max_path_component = le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength); fsinf->vol_serial_number = tcon->vol_serial_number; fsinf->vol_create_time = le64_to_cpu(tcon->vol_create_time); fsinf->share_flags = tcon->share_flags; fsinf->share_caps = le32_to_cpu(tcon->capabilities); fsinf->sector_flags = tcon->ss_flags; fsinf->optimal_sector_size = tcon->perf_sector_size; fsinf->max_bytes_chunk = tcon->max_bytes_chunk; 
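/* the remaining fields mirror state cached on the tcon at mount/tree connect time */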
fsinf->maximal_access = tcon->maximal_access; fsinf->cifs_posix_caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (copy_to_user(arg, fsinf, sizeof(struct smb_mnt_fs_info))) rc = -EFAULT; kfree(fsinf); return rc; } static int cifs_shutdown(struct super_block *sb, unsigned long arg) { struct cifs_sb_info *sbi = CIFS_SB(sb); __u32 flags; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(flags, (__u32 __user *)arg)) return -EFAULT; if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) return -EINVAL; if (cifs_forced_shutdown(sbi)) return 0; cifs_dbg(VFS, "shut down requested (%d)", flags); /* trace_cifs_shutdown(sb, flags);*/ /* * see: * https://man7.org/linux/man-pages/man2/ioctl_xfs_goingdown.2.html * for more information and description of original intent of the flags */ switch (flags) { /* * We could add support later for default flag which requires: * "Flush all dirty data and metadata to disk" * would need to call syncfs or equivalent to flush page cache for * the mount and then issue fsync to server (if nostrictsync not set) */ case CIFS_GOING_FLAGS_DEFAULT: cifs_dbg(FYI, "shutdown with default flag not supported\n"); return -EINVAL; /* * FLAGS_LOGFLUSH is easy since it asks to write out metadata (not * data) but metadata writes are not cached on the client, so can treat * it similarly to NOLOGFLUSH */ case CIFS_GOING_FLAGS_LOGFLUSH: case CIFS_GOING_FLAGS_NOLOGFLUSH: sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN; return 0; default: return -EINVAL; } return 0; } static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in) { struct smb3_full_key_debug_info out; struct cifs_ses *ses; int rc = 0; bool found = false; u8 __user *end; if (!smb3_encryption_required(tcon)) { rc = -EOPNOTSUPP; goto out; } /* copy user input into our output buffer */ if (copy_from_user(&out, in, sizeof(out))) { rc = -EINVAL; goto out; } if (!out.session_id) { /* if ses id is 0, use current user session */ ses = tcon->ses; } else { /* otherwise if a session id is given, look for it in all our sessions */ struct cifs_ses *ses_it = NULL; struct TCP_Server_Info *server_it = NULL; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) { if (ses_it->Suid == out.session_id) { ses = ses_it; /* * since we are using the session outside the crit * section, we need to make sure it won't be released * so increment its refcount */ cifs_smb_ses_inc_refcount(ses); found = true; goto search_end; } } } search_end: spin_unlock(&cifs_tcp_ses_lock); if (!found) { rc = -ENOENT; goto out; } } switch (ses->server->cipher_type) { case SMB2_ENCRYPTION_AES128_CCM: case SMB2_ENCRYPTION_AES128_GCM: out.session_key_length = CIFS_SESS_KEY_SIZE; out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE; break; case SMB2_ENCRYPTION_AES256_CCM: case SMB2_ENCRYPTION_AES256_GCM: out.session_key_length = CIFS_SESS_KEY_SIZE; out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE; break; default: rc = -EOPNOTSUPP; goto out; } /* check if user buffer is big enough to store all the keys */ if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length + out.server_out_key_length) { rc = -ENOBUFS; goto out; } out.session_id = ses->Suid; out.cipher_type = le16_to_cpu(ses->server->cipher_type); /* overwrite user input with our output */ if (copy_to_user(in, &out, sizeof(out))) { rc = -EINVAL; goto out; } /* append all the keys at the end of the user 
buffer */ end = in->data; if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) { rc = -EINVAL; goto out; } end += out.session_key_length; if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) { rc = -EINVAL; goto out; } end += out.server_in_key_length; if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) { rc = -EINVAL; goto out; } out: if (found) cifs_put_smb_ses(ses); return rc; } long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) { struct inode *inode = file_inode(filep); struct smb3_key_debug_info pkey_inf; int rc = -ENOTTY; /* strange error - but the precedent */ unsigned int xid; struct cifsFileInfo *pSMBFile = filep->private_data; struct cifs_tcon *tcon; struct tcon_link *tlink; struct cifs_sb_info *cifs_sb; __u64 ExtAttrBits = 0; #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY __u64 caps; #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ #endif /* CONFIG_CIFS_POSIX */ xid = get_xid(); cifs_dbg(FYI, "cifs ioctl 0x%x\n", command); switch (command) { case FS_IOC_GETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { __u64 ExtAttrMask = 0; rc = CIFSGetExtAttr(xid, tcon, pSMBFile->fid.netfid, &ExtAttrBits, &ExtAttrMask); if (rc == 0) rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); if (rc != -EOPNOTSUPP) break; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ #endif /* CONFIG_CIFS_POSIX */ rc = 0; if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) { /* add in the compressed bit */ ExtAttrBits = FS_COMPR_FL; rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); } break; case FS_IOC_SETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */ if (get_user(ExtAttrBits, (int __user *)arg)) { rc = -EFAULT; break; } /* * if (CIFS_UNIX_EXTATTR_CAP & caps) * rc = CIFSSetExtAttr(xid, tcon, * pSMBFile->fid.netfid, * extAttrBits, * &ExtAttrMask); * if (rc != -EOPNOTSUPP) * break; */ /* Currently only flag we can set is compressed flag */ if ((ExtAttrBits & FS_COMPR_FL) == 0) break; /* Try to set compress flag */ if (tcon->ses->server->ops->set_compression) { rc = tcon->ses->server->ops->set_compression( xid, tcon, pSMBFile); cifs_dbg(FYI, "set compress flag rc %d\n", rc); } break; case CIFS_IOC_COPYCHUNK_FILE: rc = cifs_ioctl_copychunk(xid, filep, arg); break; case CIFS_QUERY_INFO: rc = cifs_ioctl_query_info(xid, filep, arg); break; case CIFS_IOC_SET_INTEGRITY: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); if (tcon->ses->server->ops->set_integrity) rc = tcon->ses->server->ops->set_integrity(xid, tcon, pSMBFile); else rc = -EOPNOTSUPP; break; case CIFS_IOC_GET_MNT_INFO: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg); break; case CIFS_ENUMERATE_SNAPSHOTS: if (pSMBFile == NULL) break; if (arg == 0) { rc = -EINVAL; goto cifs_ioc_exit; } tcon = tlink_tcon(pSMBFile->tlink); if (tcon->ses->server->ops->enum_snapshots) rc = tcon->ses->server->ops->enum_snapshots(xid, tcon, pSMBFile, (void __user *)arg); else rc = -EOPNOTSUPP; break; case CIFS_DUMP_KEY: /* * Dump encryption keys. This is an old ioctl that only * handles AES-128-{CCM,GCM}. 
*/ if (!capable(CAP_SYS_ADMIN)) { rc = -EACCES; break; } cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); break; } tcon = tlink_tcon(tlink); if (!smb3_encryption_required(tcon)) { rc = -EOPNOTSUPP; cifs_put_tlink(tlink); break; } pkey_inf.cipher_type = le16_to_cpu(tcon->ses->server->cipher_type); pkey_inf.Suid = tcon->ses->Suid; memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response, 16 /* SMB2_NTLMV2_SESSKEY_SIZE */); memcpy(pkey_inf.smb3decryptionkey, tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE); memcpy(pkey_inf.smb3encryptionkey, tcon->ses->smb3encryptionkey, SMB3_SIGN_KEY_SIZE); if (copy_to_user((void __user *)arg, &pkey_inf, sizeof(struct smb3_key_debug_info))) rc = -EFAULT; else rc = 0; cifs_put_tlink(tlink); break; case CIFS_DUMP_FULL_KEY: /* * Dump encryption keys (handles any key sizes) */ if (pSMBFile == NULL) break; if (!capable(CAP_SYS_ADMIN)) { rc = -EACCES; break; } cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); break; } tcon = tlink_tcon(tlink); rc = cifs_dump_full_key(tcon, (void __user *)arg); cifs_put_tlink(tlink); break; case CIFS_IOC_NOTIFY: if (!S_ISDIR(inode->i_mode)) { /* Notify can only be done on directories */ rc = -EOPNOTSUPP; break; } cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); break; } tcon = tlink_tcon(tlink); if (tcon && tcon->ses->server->ops->notify) { rc = tcon->ses->server->ops->notify(xid, filep, (void __user *)arg, false /* no ret data */); cifs_dbg(FYI, "ioctl notify rc %d\n", rc); } else rc = -EOPNOTSUPP; cifs_put_tlink(tlink); break; case CIFS_IOC_NOTIFY_INFO: if (!S_ISDIR(inode->i_mode)) { /* Notify can only be done on directories */ rc = -EOPNOTSUPP; break; } cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); break; } tcon = tlink_tcon(tlink); if (tcon && tcon->ses->server->ops->notify) { rc = tcon->ses->server->ops->notify(xid, filep, (void __user *)arg, true /* return details */); cifs_dbg(FYI, "ioctl notify info rc %d\n", rc); } else rc = -EOPNOTSUPP; cifs_put_tlink(tlink); break; case CIFS_IOC_SHUTDOWN: rc = cifs_shutdown(inode->i_sb, arg); break; default: cifs_dbg(FYI, "unsupported ioctl\n"); break; } cifs_ioc_exit: free_xid(xid); return rc; }
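/*
 * Hedged usage sketch (not part of ioctl.c above): how a userspace tool
 * might drive the CIFS_IOC_COPYCHUNK_FILE path implemented by
 * cifs_ioctl_copychunk(). The ioctl is issued on the *destination* fd,
 * which must be open for writing, and takes the *source* fd as its
 * argument; the kernel side rejects directories, non-CIFS source fds
 * and read-only mounts. The request code is assumed to match
 * fs/smb/client/cifs_ioctl.h (it mirrors BTRFS_IOC_CLONE) and is
 * redefined here only because that header is not exported to UAPI.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef CIFS_IOC_COPYCHUNK_FILE
#define CIFS_IOC_COPYCHUNK_FILE _IOW(0x94, 9, int)	/* assumed value */
#endif

int main(int argc, char **argv)
{
	int src, dst;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <src-on-cifs> <dst-on-cifs>\n", argv[0]);
		return 1;
	}
	src = open(argv[1], O_RDONLY);
	dst = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}
	/* server-side copy: no file data flows through the client */
	if (ioctl(dst, CIFS_IOC_COPYCHUNK_FILE, src) < 0) {
		perror("CIFS_IOC_COPYCHUNK_FILE");
		return 1;
	}
	close(src);
	close(dst);
	return 0;
}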
linux-master
fs/smb/client/ioctl.c
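/*
 * A second hedged sketch for fs/smb/client/ioctl.c above: forcing the
 * shutdown path handled by cifs_shutdown(). The flag is passed by
 * pointer (the kernel reads it with get_user()); CAP_SYS_ADMIN is
 * required. The request code and the flag value 2
 * (CIFS_GOING_FLAGS_NOLOGFLUSH) are assumptions taken from
 * fs/smb/client/cifs_ioctl.h, which mirrors XFS_IOC_GOINGDOWN.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#ifndef CIFS_IOC_SHUTDOWN
#define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)	/* assumed value */
#endif

int main(int argc, char **argv)
{
	__u32 flags = 2;	/* assumed CIFS_GOING_FLAGS_NOLOGFLUSH */
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path-on-cifs-mount>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* after this succeeds, new I/O on the mount fails until unmount */
	if (ioctl(fd, CIFS_IOC_SHUTDOWN, &flags) < 0) {
		perror("CIFS_IOC_SHUTDOWN");
		return 1;
	}
	close(fd);
	return 0;
}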
// SPDX-License-Identifier: GPL-2.0-only /* * SMB1 (CIFS) version specific operations * * Copyright (c) 2012, Jeff Layton <[email protected]> */ #include <linux/pagemap.h> #include <linux/vfs.h> #include <uapi/linux/magic.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifspdu.h" #include "cifs_unicode.h" #include "fs_context.h" /* * An NT cancel request header looks just like the original request except: * * The Command is SMB_COM_NT_CANCEL * The WordCount is zeroed out * The ByteCount is zeroed out * * This function mangles an existing request buffer into a * SMB_COM_NT_CANCEL request and then sends it. */ static int send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst, struct mid_q_entry *mid) { int rc = 0; struct smb_hdr *in_buf = (struct smb_hdr *)rqst->rq_iov[0].iov_base; /* -4 for RFC1001 length and +2 for BCC field */ in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2); in_buf->Command = SMB_COM_NT_CANCEL; in_buf->WordCount = 0; put_bcc(0, in_buf); cifs_server_lock(server); rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); if (rc) { cifs_server_unlock(server); return rc; } /* * The response to this call was already factored into the sequence * number when the call went out, so we must adjust it back downward * after signing here. */ --server->sequence_number; rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); if (rc < 0) server->sequence_number--; cifs_server_unlock(server); cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n", get_mid(in_buf), rc); return rc; } static bool cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2) { return ob1->fid.netfid == ob2->fid.netfid; } static unsigned int cifs_read_data_offset(char *buf) { READ_RSP *rsp = (READ_RSP *)buf; return le16_to_cpu(rsp->DataOffset); } static unsigned int cifs_read_data_length(char *buf, bool in_remaining) { READ_RSP *rsp = (READ_RSP *)buf; /* It's a bug reading remaining data for SMB1 packets */ WARN_ON(in_remaining); return (le16_to_cpu(rsp->DataLengthHigh) << 16) + le16_to_cpu(rsp->DataLength); } static struct mid_q_entry * cifs_find_mid(struct TCP_Server_Info *server, char *buffer) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct mid_q_entry *mid; spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if (compare_mid(mid->mid, buf) && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { kref_get(&mid->refcount); spin_unlock(&server->mid_lock); return mid; } } spin_unlock(&server->mid_lock); return NULL; } static void cifs_add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits, const int optype) { spin_lock(&server->req_lock); server->credits += credits->value; server->in_flight--; spin_unlock(&server->req_lock); wake_up(&server->request_q); } static void cifs_set_credits(struct TCP_Server_Info *server, const int val) { spin_lock(&server->req_lock); server->credits = val; server->oplocks = val > 1 ? enable_oplocks : false; spin_unlock(&server->req_lock); } static int * cifs_get_credits_field(struct TCP_Server_Info *server, const int optype) { return &server->credits; } static unsigned int cifs_get_credits(struct mid_q_entry *mid) { return 1; } /* * Find a free multiplex id (SMB mid). Otherwise there could be * mid collisions which might cause problems, demultiplexing the * wrong response to this request. 
Multiplex ids could collide if * one of a series requests takes much longer than the others, or * if a very large number of long lived requests (byte range * locks or FindNotify requests) are pending. No more than * 64K-1 requests can be outstanding at one time. If no * mids are available, return zero. A future optimization * could make the combination of mids and uid the key we use * to demultiplex on (rather than mid alone). * In addition to the above check, the cifs demultiplex * code already used the command code as a secondary * check of the frame and if signing is negotiated the * response would be discarded if the mid were the same * but the signature was wrong. Since the mid is not put in the * pending queue until later (when it is about to be dispatched) * we do have to limit the number of outstanding requests * to somewhat less than 64K-1 although it is hard to imagine * so many threads being in the vfs at one time. */ static __u64 cifs_get_next_mid(struct TCP_Server_Info *server) { __u64 mid = 0; __u16 last_mid, cur_mid; bool collision, reconnect = false; spin_lock(&server->mid_lock); /* mid is 16 bit only for CIFS/SMB */ cur_mid = (__u16)((server->CurrentMid) & 0xffff); /* we do not want to loop forever */ last_mid = cur_mid; cur_mid++; /* avoid 0xFFFF MID */ if (cur_mid == 0xffff) cur_mid++; /* * This nested loop looks more expensive than it is. * In practice the list of pending requests is short, * fewer than 50, and the mids are likely to be unique * on the first pass through the loop unless some request * takes longer than the 64 thousand requests before it * (and it would also have to have been a request that * did not time out). */ while (cur_mid != last_mid) { struct mid_q_entry *mid_entry; unsigned int num_mids; collision = false; if (cur_mid == 0) cur_mid++; num_mids = 0; list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { ++num_mids; if (mid_entry->mid == cur_mid && mid_entry->mid_state == MID_REQUEST_SUBMITTED) { /* This mid is in use, try a different one */ collision = true; break; } } /* * if we have more than 32k mids in the list, then something * is very wrong. Possibly a local user is trying to DoS the * box by issuing long-running calls and SIGKILL'ing them. If * we get to 2^16 mids then we're in big trouble as this * function could loop forever. * * Go ahead and assign out the mid in this situation, but force * an eventual reconnect to clean out the pending_mid_q. 
*/ if (num_mids > 32768) reconnect = true; if (!collision) { mid = (__u64)cur_mid; server->CurrentMid = mid; break; } cur_mid++; } spin_unlock(&server->mid_lock); if (reconnect) { cifs_signal_cifsd_for_reconnect(server, false); } return mid; } /* return codes: 0 not a transact2, or all data present >0 transact2 with that much data missing -EINVAL invalid transact2 */ static int check2ndT2(char *buf) { struct smb_hdr *pSMB = (struct smb_hdr *)buf; struct smb_t2_rsp *pSMBt; int remaining; __u16 total_data_size, data_in_this_rsp; if (pSMB->Command != SMB_COM_TRANSACTION2) return 0; /* check for plausible wct, bcc and t2 data and parm sizes */ /* check for parm and data offset going beyond end of smb */ if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */ cifs_dbg(FYI, "Invalid transact2 word count\n"); return -EINVAL; } pSMBt = (struct smb_t2_rsp *)pSMB; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); if (total_data_size == data_in_this_rsp) return 0; else if (total_data_size < data_in_this_rsp) { cifs_dbg(FYI, "total data %d smaller than data in frame %d\n", total_data_size, data_in_this_rsp); return -EINVAL; } remaining = total_data_size - data_in_this_rsp; cifs_dbg(FYI, "missing %d bytes from transact2, check next response\n", remaining); if (total_data_size > CIFSMaxBufSize) { cifs_dbg(VFS, "TotalDataSize %d is over maximum buffer %d\n", total_data_size, CIFSMaxBufSize); return -EINVAL; } return remaining; } static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) { struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf; struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr; char *data_area_of_tgt; char *data_area_of_src; int remaining; unsigned int byte_count, total_in_tgt; __u16 tgt_total_cnt, src_total_cnt, total_in_src; src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount); tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); if (tgt_total_cnt != src_total_cnt) cifs_dbg(FYI, "total data count of primary and secondary t2 differ source=%hu target=%hu\n", src_total_cnt, tgt_total_cnt); total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); remaining = tgt_total_cnt - total_in_tgt; if (remaining < 0) { cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n", tgt_total_cnt, total_in_tgt); return -EPROTO; } if (remaining == 0) { /* nothing to do, ignore */ cifs_dbg(FYI, "no more data remains\n"); return 0; } total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount); if (remaining < total_in_src) cifs_dbg(FYI, "transact2 2nd response contains too much data\n"); /* find end of first SMB data area */ data_area_of_tgt = (char *)&pSMBt->hdr.Protocol + get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); /* validate target area */ data_area_of_src = (char *)&pSMBs->hdr.Protocol + get_unaligned_le16(&pSMBs->t2_rsp.DataOffset); data_area_of_tgt += total_in_tgt; total_in_tgt += total_in_src; /* is the result too big for the field? */ if (total_in_tgt > USHRT_MAX) { cifs_dbg(FYI, "coalesced DataCount too large (%u)\n", total_in_tgt); return -EPROTO; } put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount); /* fix up the BCC */ byte_count = get_bcc(target_hdr); byte_count += total_in_src; /* is the result too big for the field? 
*/ if (byte_count > USHRT_MAX) { cifs_dbg(FYI, "coalesced BCC too large (%u)\n", byte_count); return -EPROTO; } put_bcc(byte_count, target_hdr); byte_count = be32_to_cpu(target_hdr->smb_buf_length); byte_count += total_in_src; /* don't allow buffer to overflow */ if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { cifs_dbg(FYI, "coalesced BCC exceeds buffer size (%u)\n", byte_count); return -ENOBUFS; } target_hdr->smb_buf_length = cpu_to_be32(byte_count); /* copy second buffer into end of first buffer */ memcpy(data_area_of_tgt, data_area_of_src, total_in_src); if (remaining != total_in_src) { /* more responses to go */ cifs_dbg(FYI, "waiting for more secondary responses\n"); return 1; } /* we are done */ cifs_dbg(FYI, "found the last secondary response\n"); return 0; } static void cifs_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { cifs_set_oplock_level(cinode, oplock); } static bool cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) { if (malformed) return false; if (check2ndT2(buf) <= 0) return false; mid->multiRsp = true; if (mid->resp_buf) { /* merge response - fix up 1st*/ malformed = coalesce_t2(buf, mid->resp_buf); if (malformed > 0) return true; /* All parts received or packet is malformed. */ mid->multiEnd = true; dequeue_mid(mid, malformed); return true; } if (!server->large_buf) { /*FIXME: switch to already allocated largebuf?*/ cifs_dbg(VFS, "1st trans2 resp needs bigbuf\n"); } else { /* Have first buffer */ mid->resp_buf = buf; mid->large_buf = true; server->bigbuf = NULL; } return true; } static bool cifs_need_neg(struct TCP_Server_Info *server) { return server->maxBuf == 0; } static int cifs_negotiate(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { int rc; rc = CIFSSMBNegotiate(xid, ses, server); if (rc == -EAGAIN) { /* retry only once on 1st time connection */ set_credits(server, 1); rc = CIFSSMBNegotiate(xid, ses, server); if (rc == -EAGAIN) rc = -EHOSTDOWN; } return rc; } static unsigned int cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct TCP_Server_Info *server = tcon->ses->server; unsigned int wsize; /* start with specified wsize, or default */ if (ctx->wsize) wsize = ctx->wsize; else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) wsize = CIFS_DEFAULT_IOSIZE; else wsize = CIFS_DEFAULT_NON_POSIX_WSIZE; /* can server support 24-bit write sizes? (via UNIX extensions) */ if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE); /* * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set? * Limit it to max buffer offered by the server, minus the size of the * WRITEX header, not including the 4 byte RFC1001 length. */ if (!(server->capabilities & CAP_LARGE_WRITE_X) || (!(server->capabilities & CAP_UNIX) && server->sign)) wsize = min_t(unsigned int, wsize, server->maxBuf - sizeof(WRITE_REQ) + 4); /* hard limit of CIFS_MAX_WSIZE */ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); return wsize; } static unsigned int cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct TCP_Server_Info *server = tcon->ses->server; unsigned int rsize, defsize; /* * Set default value... * * HACK alert! Ancient servers have very small buffers. 
Even though * MS-CIFS indicates that servers are only limited by the client's * bufsize for reads, testing against win98se shows that it throws * INVALID_PARAMETER errors if you try to request too large a read. * OS/2 just sends back short reads. * * If the server doesn't advertise CAP_LARGE_READ_X, then assume that * it can't handle a read request larger than its MaxBufferSize either. */ if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP)) defsize = CIFS_DEFAULT_IOSIZE; else if (server->capabilities & CAP_LARGE_READ_X) defsize = CIFS_DEFAULT_NON_POSIX_RSIZE; else defsize = server->maxBuf - sizeof(READ_RSP); rsize = ctx->rsize ? ctx->rsize : defsize; /* * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to * the client's MaxBufferSize. */ if (!(server->capabilities & CAP_LARGE_READ_X)) rsize = min_t(unsigned int, CIFSMaxBufSize, rsize); /* hard limit of CIFS_MAX_RSIZE */ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE); return rsize; } static void cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb) { CIFSSMBQFSDeviceInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon); } static int cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { int rc; FILE_ALL_INFO *file_info; file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (file_info == NULL) return -ENOMEM; rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info, 0 /* not legacy */, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (rc == -EOPNOTSUPP || rc == -EINVAL) rc = SMBQueryInformation(xid, tcon, full_path, file_info, cifs_sb->local_nls, cifs_remap(cifs_sb)); kfree(file_info); return rc; } static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data) { int rc; FILE_ALL_INFO fi = {}; data->symlink = false; data->adjust_tz = false; /* could do find first instead but this returns more info */ rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls, cifs_remap(cifs_sb)); /* * BB optimize code so we do not make the above call when server claims * no NT SMB support and the above call failed at least once - set flag * in tcon or mount. */ if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls, cifs_remap(cifs_sb)); data->adjust_tz = true; } if (!rc) { int tmprc; int oplock = 0; struct cifs_fid fid; struct cifs_open_parms oparms; move_cifs_info_to_smb2(&data->fi, &fi); if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) return 0; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = FILE_READ_ATTRIBUTES, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = full_path, .fid = &fid, }; /* Need to check if this is a symbolic link or not */ tmprc = CIFS_open(xid, &oparms, &oplock, NULL); if (tmprc == -EOPNOTSUPP) data->symlink = true; else if (tmprc == 0) CIFSSMBClose(xid, tcon, fid.netfid); } return rc; } static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid, struct cifs_open_info_data *unused) { /* * We can not use the IndexNumber field by default from Windows or * Samba (in ALL_INFO buf) but we can request it explicitly. 
The SNIA * CIFS spec claims that this value is unique within the scope of a * share, and the windows docs hint that it's actually unique * per-machine. * * There may be higher info levels that work but are there Windows * server or network appliances for which IndexNumber field is not * guaranteed unique? */ return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid, cifs_sb->local_nls, cifs_remap(cifs_sb)); } static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct cifs_open_info_data *data) { int rc; FILE_ALL_INFO fi = {}; if (cfile->symlink_target) { data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!data->symlink_target) return -ENOMEM; } rc = CIFSSMBQFileInfo(xid, tcon, cfile->fid.netfid, &fi); if (!rc) move_cifs_info_to_smb2(&data->fi, &fi); return rc; } static void cifs_clear_stats(struct cifs_tcon *tcon) { atomic_set(&tcon->stats.cifs_stats.num_writes, 0); atomic_set(&tcon->stats.cifs_stats.num_reads, 0); atomic_set(&tcon->stats.cifs_stats.num_flushes, 0); atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0); atomic_set(&tcon->stats.cifs_stats.num_opens, 0); atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0); atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_closes, 0); atomic_set(&tcon->stats.cifs_stats.num_deletes, 0); atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_renames, 0); atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0); atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0); atomic_set(&tcon->stats.cifs_stats.num_fnext, 0); atomic_set(&tcon->stats.cifs_stats.num_fclose, 0); atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0); atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0); atomic_set(&tcon->stats.cifs_stats.num_locks, 0); atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0); atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0); } static void cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon) { seq_printf(m, " Oplocks breaks: %d", atomic_read(&tcon->stats.cifs_stats.num_oplock_brks)); seq_printf(m, "\nReads: %d Bytes: %llu", atomic_read(&tcon->stats.cifs_stats.num_reads), (long long)(tcon->bytes_read)); seq_printf(m, "\nWrites: %d Bytes: %llu", atomic_read(&tcon->stats.cifs_stats.num_writes), (long long)(tcon->bytes_written)); seq_printf(m, "\nFlushes: %d", atomic_read(&tcon->stats.cifs_stats.num_flushes)); seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d", atomic_read(&tcon->stats.cifs_stats.num_locks), atomic_read(&tcon->stats.cifs_stats.num_hardlinks), atomic_read(&tcon->stats.cifs_stats.num_symlinks)); seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d", atomic_read(&tcon->stats.cifs_stats.num_opens), atomic_read(&tcon->stats.cifs_stats.num_closes), atomic_read(&tcon->stats.cifs_stats.num_deletes)); seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d", atomic_read(&tcon->stats.cifs_stats.num_posixopens), atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs)); seq_printf(m, "\nMkdirs: %d Rmdirs: %d", atomic_read(&tcon->stats.cifs_stats.num_mkdirs), atomic_read(&tcon->stats.cifs_stats.num_rmdirs)); seq_printf(m, "\nRenames: %d T2 Renames %d", atomic_read(&tcon->stats.cifs_stats.num_renames), atomic_read(&tcon->stats.cifs_stats.num_t2renames)); seq_printf(m, "\nFindFirst: %d FNext %d FClose %d", atomic_read(&tcon->stats.cifs_stats.num_ffirst), atomic_read(&tcon->stats.cifs_stats.num_fnext), atomic_read(&tcon->stats.cifs_stats.num_fclose)); } 
static void cifs_mkdir_setinfo(struct inode *inode, const char *full_path, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, const unsigned int xid) { FILE_BASIC_INFO info; struct cifsInodeInfo *cifsInode; u32 dosattrs; int rc; memset(&info, 0, sizeof(info)); cifsInode = CIFS_I(inode); dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; info.Attributes = cpu_to_le32(dosattrs); rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, cifs_sb); if (rc == 0) cifsInode->cifsAttrs = dosattrs; } static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf) { struct cifs_open_info_data *data = buf; FILE_ALL_INFO fi = {}; int rc; if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS)) rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path, oparms->disposition, oparms->desired_access, oparms->create_options, &oparms->fid->netfid, oplock, &fi, oparms->cifs_sb->local_nls, cifs_remap(oparms->cifs_sb)); else rc = CIFS_open(xid, oparms, oplock, &fi); if (!rc && data) move_cifs_info_to_smb2(&data->fi, &fi); return rc; } static void cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); cfile->fid.netfid = fid->netfid; cifs_set_oplock_level(cinode, oplock); cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); } static void cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { CIFSSMBClose(xid, tcon, fid->netfid); } static int cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return CIFSSMBFlush(xid, tcon, fid->netfid); } static int cifs_sync_read(const unsigned int xid, struct cifs_fid *pfid, struct cifs_io_parms *parms, unsigned int *bytes_read, char **buf, int *buf_type) { parms->netfid = pfid->netfid; return CIFSSMBRead(xid, parms, bytes_read, buf, buf_type); } static int cifs_sync_write(const unsigned int xid, struct cifs_fid *pfid, struct cifs_io_parms *parms, unsigned int *written, struct kvec *iov, unsigned long nr_segs) { parms->netfid = pfid->netfid; return CIFSSMBWrite2(xid, parms, written, iov, nr_segs); } static int smb_set_file_info(struct inode *inode, const char *full_path, FILE_BASIC_INFO *buf, const unsigned int xid) { int oplock = 0; int rc; __u32 netpid; struct cifs_fid fid; struct cifs_open_parms oparms; struct cifsFileInfo *open_file; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; /* if the file is already open for write, just use that fileid */ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); if (open_file) { fid.netfid = open_file->fid.netfid; netpid = open_file->pid; tcon = tlink_tcon(open_file->tlink); goto set_via_filehandle; } tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); tlink = NULL; goto out; } tcon = tlink_tcon(tlink); rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, cifs_sb); if (rc == 0) { cinode->cifsAttrs = le32_to_cpu(buf->Attributes); goto out; } else if (rc != -EOPNOTSUPP && rc != -EINVAL) { goto out; } oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), .disposition = FILE_OPEN, .path = full_path, .fid = &fid, }; cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); rc = CIFS_open(xid, &oparms, &oplock, NULL); if 
(rc != 0) { if (rc == -EIO) rc = -EINVAL; goto out; } netpid = current->tgid; set_via_filehandle: rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid); if (!rc) cinode->cifsAttrs = le32_to_cpu(buf->Attributes); if (open_file == NULL) CIFSSMBClose(xid, tcon, fid.netfid); else cifsFileInfo_put(open_file); out: if (tlink != NULL) cifs_put_tlink(tlink); return rc; } static int cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile) { return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid); } static int cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, const char *path, struct cifs_sb_info *cifs_sb, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { int rc; rc = CIFSFindFirst(xid, tcon, path, cifs_sb, &fid->netfid, search_flags, srch_inf, true); if (rc) cifs_dbg(FYI, "find first failed=%d\n", rc); return rc; } static int cifs_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { return CIFSFindNext(xid, tcon, fid->netfid, search_flags, srch_inf); } static int cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return CIFSFindClose(xid, tcon, fid->netfid); } static int cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode) { return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0); } static int cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct kstatfs *buf) { int rc = -EOPNOTSUPP; buf->f_type = CIFS_SUPER_MAGIC; /* * We could add a second check for a QFS Unix capability bit */ if ((tcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability))) rc = CIFSSMBQFSPosixInfo(xid, tcon, buf); /* * Only need to call the old QFSInfo if failed on newer one, * e.g. by OS/2. 
**/ if (rc && (tcon->ses->capabilities & CAP_NT_SMBS)) rc = CIFSSMBQFSInfo(xid, tcon, buf); /* * Some old Windows servers also do not support level 103, retry with * older level one if old server failed the previous call or we * bypassed it because we detected that this was an older LANMAN sess */ if (rc) rc = SMBOldQFSInfo(xid, tcon, buf); return rc; } static int cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u32 type, int lock, int unlock, bool wait) { return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid, current->tgid, length, offset, unlock, lock, (__u8)type, wait, 0); } static int cifs_unix_dfs_readlink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **symlinkinfo, const struct nls_table *nls_codepage) { #ifdef CONFIG_CIFS_DFS_UPCALL int rc; struct dfs_info3_param referral = {0}; rc = get_dfs_path(xid, tcon->ses, searchName, nls_codepage, &referral, 0); if (!rc) { *symlinkinfo = kstrdup(referral.node_name, GFP_KERNEL); free_dfs_info_param(&referral); if (!*symlinkinfo) rc = -ENOMEM; } return rc; #else /* No DFS support */ return -EREMOTE; #endif } static int cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, char **target_path, struct kvec *rsp_iov) { int rc; int oplock = 0; bool is_reparse_point = !!rsp_iov; struct cifs_fid fid; struct cifs_open_parms oparms; cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); if (is_reparse_point) { cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n"); return -EOPNOTSUPP; } /* Check for unix extensions */ if (cap_unix(tcon->ses)) { rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (rc == -EREMOTE) rc = cifs_unix_dfs_readlink(xid, tcon, full_path, target_path, cifs_sb->local_nls); goto out; } oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = FILE_READ_ATTRIBUTES, .create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT), .disposition = FILE_OPEN, .path = full_path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc) goto out; rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path, cifs_sb->local_nls); if (rc) goto out_close; convert_delimiter(*target_path, '/'); out_close: CIFSSMBClose(xid, tcon, fid.netfid); out: if (!rc) cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); return rc; } static bool cifs_is_read_op(__u32 oplock) { return oplock == OPLOCK_READ; } static unsigned int cifs_wp_retry_size(struct inode *inode) { return CIFS_SB(inode->i_sb)->ctx->wsize; } static bool cifs_dir_needs_close(struct cifsFileInfo *cfile) { return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; } static bool cifs_can_echo(struct TCP_Server_Info *server) { if (server->tcpStatus == CifsGood) return true; return false; } static int cifs_make_node(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, umode_t mode, dev_t dev) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct inode *newinode = NULL; int rc = -EPERM; struct cifs_open_info_data buf = {}; struct cifs_io_parms io_parms; __u32 oplock = 0; struct cifs_fid fid; struct cifs_open_parms oparms; unsigned int bytes_written; struct win_dev *pdev; struct kvec iov[2]; if (tcon->unix_ext) { /* * SMB1 Unix Extensions: requires server support but * works with all special files */ struct cifs_unix_set_info_args args = { .mode = 
mode & ~current_umask(), .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = dev, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = current_fsuid(); args.gid = current_fsgid(); } else { args.uid = INVALID_UID; /* no change */ args.gid = INVALID_GID; /* no change */ } rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, cifs_remap(cifs_sb)); if (rc) return rc; rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); if (rc == 0) d_instantiate(dentry, newinode); return rc; } /* * SMB1 SFU emulation: should work with all servers, but only * support block and char device (no socket & fifo) */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) return rc; if (!S_ISCHR(mode) && !S_ISBLK(mode)) return rc; cifs_dbg(FYI, "sfu compat create special file\n"); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_WRITE, .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL), .disposition = FILE_CREATE, .path = full_path, .fid = &fid, }; if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf); if (rc) return rc; /* * BB Do not bother to decode buf since no local inode yet to put * timestamps in, but we can reuse it safely. */ pdev = (struct win_dev *)&buf.fi; io_parms.pid = current->tgid; io_parms.tcon = tcon; io_parms.offset = 0; io_parms.length = sizeof(struct win_dev); iov[1].iov_base = &buf.fi; iov[1].iov_len = sizeof(struct win_dev); if (S_ISCHR(mode)) { memcpy(pdev->type, "IntxCHR", 8); pdev->major = cpu_to_le64(MAJOR(dev)); pdev->minor = cpu_to_le64(MINOR(dev)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); } else if (S_ISBLK(mode)) { memcpy(pdev->type, "IntxBLK", 8); pdev->major = cpu_to_le64(MAJOR(dev)); pdev->minor = cpu_to_le64(MINOR(dev)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); } tcon->ses->server->ops->close(xid, tcon, &fid); d_drop(dentry); /* FIXME: add code here to set EAs */ cifs_free_open_info(&buf); return rc; } struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, .setup_request = cifs_setup_request, .setup_async_request = cifs_setup_async_request, .check_receive = cifs_check_receive, .add_credits = cifs_add_credits, .set_credits = cifs_set_credits, .get_credits_field = cifs_get_credits_field, .get_credits = cifs_get_credits, .wait_mtu_credits = cifs_wait_mtu_credits, .get_next_mid = cifs_get_next_mid, .read_data_offset = cifs_read_data_offset, .read_data_length = cifs_read_data_length, .map_error = map_smb_to_linux_error, .find_mid = cifs_find_mid, .check_message = checkSMB, .dump_detail = cifs_dump_detail, .clear_stats = cifs_clear_stats, .print_stats = cifs_print_stats, .is_oplock_break = is_valid_oplock_break, .downgrade_oplock = cifs_downgrade_oplock, .check_trans2 = cifs_check_trans2, .need_neg = cifs_need_neg, .negotiate = cifs_negotiate, .negotiate_wsize = cifs_negotiate_wsize, .negotiate_rsize = cifs_negotiate_rsize, .sess_setup = CIFS_SessSetup, .logoff = CIFSSMBLogoff, .tree_connect = CIFSTCon, .tree_disconnect = CIFSSMBTDis, .get_dfs_refer = CIFSGetDFSRefer, .qfs_tcon = cifs_qfs_tcon, .is_path_accessible = cifs_is_path_accessible, .can_echo = cifs_can_echo, .query_path_info = cifs_query_path_info, .query_file_info = cifs_query_file_info, .get_srv_inum = cifs_get_srv_inum, .set_path_size = 
CIFSSMBSetEOF, .set_file_size = CIFSSMBSetFileSize, .set_file_info = smb_set_file_info, .set_compression = cifs_set_compression, .echo = CIFSSMBEcho, .mkdir = CIFSSMBMkDir, .mkdir_setinfo = cifs_mkdir_setinfo, .rmdir = CIFSSMBRmDir, .unlink = CIFSSMBDelFile, .rename_pending_delete = cifs_rename_pending_delete, .rename = CIFSSMBRename, .create_hardlink = CIFSCreateHardLink, .query_symlink = cifs_query_symlink, .open = cifs_open_file, .set_fid = cifs_set_fid, .close = cifs_close_file, .flush = cifs_flush_file, .async_readv = cifs_async_readv, .async_writev = cifs_async_writev, .sync_read = cifs_sync_read, .sync_write = cifs_sync_write, .query_dir_first = cifs_query_dir_first, .query_dir_next = cifs_query_dir_next, .close_dir = cifs_close_dir, .calc_smb_size = smbCalcSize, .oplock_response = cifs_oplock_response, .queryfs = cifs_queryfs, .mand_lock = cifs_mand_lock, .mand_unlock_range = cifs_unlock_range, .push_mand_locks = cifs_push_mandatory_locks, .query_mf_symlink = cifs_query_mf_symlink, .create_mf_symlink = cifs_create_mf_symlink, .is_read_op = cifs_is_read_op, .wp_retry_size = cifs_wp_retry_size, .dir_needs_close = cifs_dir_needs_close, .select_sectype = cifs_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = CIFSSMBQAllEAs, .set_EA = CIFSSMBSetEA, #endif /* CIFS_XATTR */ .get_acl = get_cifs_acl, .get_acl_by_fid = get_cifs_acl_by_fid, .set_acl = set_cifs_acl, .make_node = cifs_make_node, }; struct smb_version_values smb1_values = { .version_string = SMB1_VERSION_STRING, .protocol_id = SMB10_PROT_ID, .large_lock_type = LOCKING_ANDX_LARGE_FILES, .exclusive_lock_type = 0, .shared_lock_type = LOCKING_ANDX_SHARED_LOCK, .unlock_lock_type = 0, .header_preamble_size = 4, .header_size = sizeof(struct smb_hdr), .max_header_size = MAX_CIFS_HDR_SIZE, .read_rsp_size = sizeof(READ_RSP), .lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX), .cap_unix = CAP_UNIX, .cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND, .cap_large_files = CAP_LARGE_FILES, .signing_enabled = SECMODE_SIGN_ENABLED, .signing_required = SECMODE_SIGN_REQUIRED, };
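/*
 * Illustrative sketch (not kernel code): the dispatch pattern behind
 * the smb1_operations table above. Each dialect fills in one ops
 * structure at negotiation time and the generic client code only ever
 * calls through the pointers, so SMB1 and SMB2/3 behavior diverge in
 * exactly one place. All names here are invented for the example.
 */
#include <stdio.h>

struct demo_server;

struct demo_version_ops {
	const char *version_string;
	int (*negotiate)(struct demo_server *srv);
	unsigned int (*get_credits)(struct demo_server *srv);
};

struct demo_server {
	const struct demo_version_ops *ops;
};

static int demo_smb1_negotiate(struct demo_server *srv)
{
	printf("negotiating dialect %s\n", srv->ops->version_string);
	return 0;
}

/* SMB1 has no crediting, so every request costs exactly one slot. */
static unsigned int demo_smb1_get_credits(struct demo_server *srv)
{
	(void)srv;
	return 1;
}

static const struct demo_version_ops demo_smb1_ops = {
	.version_string	= "1.0",
	.negotiate	= demo_smb1_negotiate,
	.get_credits	= demo_smb1_get_credits,
};

int main(void)
{
	struct demo_server srv = { .ops = &demo_smb1_ops };

	/* generic code never names the dialect; it calls through ops */
	srv.ops->negotiate(&srv);
	printf("credits per op: %u\n", srv.ops->get_credits(&srv));
	return 0;
}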
linux-master
fs/smb/client/smb1ops.c
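/*
 * Illustrative sketch (not kernel code): the fragment accounting behind
 * check2ndT2() and coalesce_t2() in fs/smb/client/smb1ops.c above. Each
 * transact2 response advertises the TotalDataCount of the whole reply
 * plus the DataCount carried in that frame; the client keeps appending
 * secondary responses until the running count reaches the total.
 */
#include <stdint.h>
#include <stdio.h>

/*
 * Returns 0 when the reply is complete, a positive byte count while
 * more fragments are expected, and -1 on a malformed frame.
 */
static int t2_missing(uint16_t total, uint16_t got_so_far)
{
	if (got_so_far > total)
		return -1;	/* server sent too much data */
	return total - got_so_far;
}

int main(void)
{
	uint16_t total = 100;		/* TotalDataCount */
	uint16_t in_first = 60;		/* DataCount of the primary response */
	uint16_t in_second = 40;	/* DataCount of the secondary response */

	printf("after 1st frame: %d bytes missing\n",
	       t2_missing(total, in_first));			/* 40 */
	printf("after 2nd frame: %d bytes missing\n",
	       t2_missing(total, in_first + in_second));	/* 0: done */
	return 0;
}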
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (c) International Business Machines Corp., 2000,2009 * Modified by Steve French ([email protected]) */ #include <linux/fs.h> #include <linux/slab.h> #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" int cifs_remap(struct cifs_sb_info *cifs_sb) { int map_type; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) map_type = SFM_MAP_UNI_RSVD; else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) map_type = SFU_MAP_UNI_RSVD; else map_type = NO_MAP_UNI_RSVD; return map_type; } /* Convert character using the SFU - "Services for Unix" remapping range */ static bool convert_sfu_char(const __u16 src_char, char *target) { /* * BB: Cannot handle remapping UNI_SLASH until all the calls to * build_path_from_dentry are modified, as they use slash as * separator. */ switch (src_char) { case UNI_COLON: *target = ':'; break; case UNI_ASTERISK: *target = '*'; break; case UNI_QUESTION: *target = '?'; break; case UNI_PIPE: *target = '|'; break; case UNI_GRTRTHAN: *target = '>'; break; case UNI_LESSTHAN: *target = '<'; break; default: return false; } return true; } /* Convert character using the SFM - "Services for Mac" remapping range */ static bool convert_sfm_char(const __u16 src_char, char *target) { if (src_char >= 0xF001 && src_char <= 0xF01F) { *target = src_char - 0xF000; return true; } switch (src_char) { case SFM_COLON: *target = ':'; break; case SFM_DOUBLEQUOTE: *target = '"'; break; case SFM_ASTERISK: *target = '*'; break; case SFM_QUESTION: *target = '?'; break; case SFM_PIPE: *target = '|'; break; case SFM_GRTRTHAN: *target = '>'; break; case SFM_LESSTHAN: *target = '<'; break; case SFM_SPACE: *target = ' '; break; case SFM_PERIOD: *target = '.'; break; default: return false; } return true; } /* * cifs_mapchar - convert a host-endian char to proper char in codepage * @target - where converted character should be copied * @src_char - 2 byte host-endian source character * @cp - codepage to which character should be converted * @map_type - How should the 7 NTFS/SMB reserved characters be mapped to UCS2? * * This function handles the conversion of a single character. It is the * responsibility of the caller to ensure that the target buffer is large * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). */ static int cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, int maptype) { int len = 1; __u16 src_char; src_char = *from; if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) return len; else if ((maptype == SFU_MAP_UNI_RSVD) && convert_sfu_char(src_char, target)) return len; /* if character not one of seven in special remap set */ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); if (len <= 0) goto surrogate_pair; return len; surrogate_pair: /* convert SURROGATE_PAIR and IVS */ if (strcmp(cp->charset, "utf8")) goto unknown; len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); if (len <= 0) goto unknown; return len; unknown: *target = '?'; len = 1; return len; } /* * cifs_from_utf16 - convert utf16le string to local charset * @to - destination buffer * @from - source buffer * @tolen - destination buffer size (in bytes) * @fromlen - source buffer size (in bytes) * @codepage - codepage to which characters should be converted * @mapchar - should characters be remapped according to the mapchars option? 
* * Convert a little-endian utf16le string (as sent by the server) to a string * in the provided codepage. The tolen and fromlen parameters are to ensure * that the code doesn't walk off of the end of the buffer (which is always * a danger if the alignment of the source buffer is off). The destination * string is always properly null terminated and fits in the destination * buffer. Returns the length of the destination string in bytes (including * null terminator). * * Note that some windows versions actually send multiword UTF-16 characters * instead of straight UTF16-2. The linux nls routines however aren't able to * deal with those characters properly. In the event that we get some of * those characters, they won't be translated properly. */ int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, const struct nls_table *codepage, int map_type) { int i, charlen, safelen; int outlen = 0; int nullsize = nls_nullsize(codepage); int fromwords = fromlen / 2; char tmp[NLS_MAX_CHARSET_SIZE]; __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */ /* * because the chars can be of varying widths, we need to take care * not to overflow the destination buffer when we get close to the * end of it. Until we get to this offset, we don't need to check * for overflow however. */ safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); for (i = 0; i < fromwords; i++) { ftmp[0] = get_unaligned_le16(&from[i]); if (ftmp[0] == 0) break; if (i + 1 < fromwords) ftmp[1] = get_unaligned_le16(&from[i + 1]); else ftmp[1] = 0; if (i + 2 < fromwords) ftmp[2] = get_unaligned_le16(&from[i + 2]); else ftmp[2] = 0; /* * check to see if converting this character might make the * conversion bleed into the null terminator */ if (outlen >= safelen) { charlen = cifs_mapchar(tmp, ftmp, codepage, map_type); if ((outlen + charlen) > (tolen - nullsize)) break; } /* put converted char into 'to' buffer */ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type); outlen += charlen; /* charlen (=bytes of UTF-8 for 1 character) * 4bytes UTF-8(surrogate pair) is charlen=4 * (4bytes UTF-16 code) * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4 * (2 UTF-8 pairs divided to 2 UTF-16 pairs) */ if (charlen == 4) i++; else if (charlen >= 5) /* 5-6bytes UTF-8 */ i += 2; } /* properly null-terminate string */ for (i = 0; i < nullsize; i++) to[outlen++] = 0; return outlen; } /* * NAME: cifs_strtoUTF16() * * FUNCTION: Convert character string to unicode string * */ int cifs_strtoUTF16(__le16 *to, const char *from, int len, const struct nls_table *codepage) { int charlen; int i; wchar_t wchar_to; /* needed to quiet sparse */ /* special case for utf8 to handle no plane0 chars */ if (!strcmp(codepage->charset, "utf8")) { /* * convert utf8 -> utf16, we assume we have enough space * as caller should have assumed conversion does not overflow * in destination len is length in wchar_t units (16bits) */ i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN, (wchar_t *) to, len); /* if success terminate and exit */ if (i >= 0) goto success; /* * if fails fall back to UCS encoding as this * function should not return negative values * currently can fail only if source contains * invalid encoded characters */ } for (i = 0; len && *from; i++, from += charlen, len -= charlen) { charlen = codepage->char2uni(from, len, &wchar_to); if (charlen < 1) { cifs_dbg(VFS, "strtoUTF16: char2uni of 0x%x returned %d\n", *from, charlen); /* A question mark */ wchar_to = 0x003f; charlen = 1; } put_unaligned_le16(wchar_to, &to[i]); } success: 
put_unaligned_le16(0, &to[i]); return i; } /* * cifs_utf16_bytes - how long will a string be after conversion? * @utf16 - pointer to input string * @maxbytes - don't go past this many bytes of input string * @codepage - destination codepage * * Walk a utf16le string and return the number of bytes that the string will * be after being converted to the given charset, not including any null * termination required. Don't walk past maxbytes in the source buffer. */ int cifs_utf16_bytes(const __le16 *from, int maxbytes, const struct nls_table *codepage) { int i; int charlen, outlen = 0; int maxwords = maxbytes / 2; char tmp[NLS_MAX_CHARSET_SIZE]; __u16 ftmp[3]; for (i = 0; i < maxwords; i++) { ftmp[0] = get_unaligned_le16(&from[i]); if (ftmp[0] == 0) break; if (i + 1 < maxwords) ftmp[1] = get_unaligned_le16(&from[i + 1]); else ftmp[1] = 0; if (i + 2 < maxwords) ftmp[2] = get_unaligned_le16(&from[i + 2]); else ftmp[2] = 0; charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD); outlen += charlen; } return outlen; } /* * cifs_strndup_from_utf16 - copy a string from wire format to the local * codepage * @src - source string * @maxlen - don't walk past this many bytes in the source string * @is_unicode - is this a unicode string? * @codepage - destination codepage * * Take a string given by the server, convert it to the local codepage and * put it in a new buffer. Returns a pointer to the new string or NULL on * error. */ char * cifs_strndup_from_utf16(const char *src, const int maxlen, const bool is_unicode, const struct nls_table *codepage) { int len; char *dst; if (is_unicode) { len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage); len += nls_nullsize(codepage); dst = kmalloc(len, GFP_KERNEL); if (!dst) return NULL; cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage, NO_MAP_UNI_RSVD); } else { dst = kstrndup(src, maxlen, GFP_KERNEL); } return dst; } static __le16 convert_to_sfu_char(char src_char) { __le16 dest_char; switch (src_char) { case ':': dest_char = cpu_to_le16(UNI_COLON); break; case '*': dest_char = cpu_to_le16(UNI_ASTERISK); break; case '?': dest_char = cpu_to_le16(UNI_QUESTION); break; case '<': dest_char = cpu_to_le16(UNI_LESSTHAN); break; case '>': dest_char = cpu_to_le16(UNI_GRTRTHAN); break; case '|': dest_char = cpu_to_le16(UNI_PIPE); break; default: dest_char = 0; } return dest_char; } static __le16 convert_to_sfm_char(char src_char, bool end_of_string) { __le16 dest_char; if (src_char >= 0x01 && src_char <= 0x1F) { dest_char = cpu_to_le16(src_char + 0xF000); return dest_char; } switch (src_char) { case ':': dest_char = cpu_to_le16(SFM_COLON); break; case '"': dest_char = cpu_to_le16(SFM_DOUBLEQUOTE); break; case '*': dest_char = cpu_to_le16(SFM_ASTERISK); break; case '?': dest_char = cpu_to_le16(SFM_QUESTION); break; case '<': dest_char = cpu_to_le16(SFM_LESSTHAN); break; case '>': dest_char = cpu_to_le16(SFM_GRTRTHAN); break; case '|': dest_char = cpu_to_le16(SFM_PIPE); break; case '.': if (end_of_string) dest_char = cpu_to_le16(SFM_PERIOD); else dest_char = 0; break; case ' ': if (end_of_string) dest_char = cpu_to_le16(SFM_SPACE); else dest_char = 0; break; default: dest_char = 0; } return dest_char; } /* * Convert 16 bit Unicode pathname to wire format from string in current code * page. Conversion may involve remapping up the six characters that are * only legal in POSIX-like OS (if they are present in the string). 
Path * names are little endian 16 bit Unicode on the wire */ int cifsConvertToUTF16(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int map_chars) { int i, charlen; int j = 0; char src_char; __le16 dst_char; wchar_t tmp; wchar_t *wchar_to; /* UTF-16 */ int ret; unicode_t u; if (map_chars == NO_MAP_UNI_RSVD) return cifs_strtoUTF16(target, source, PATH_MAX, cp); wchar_to = kzalloc(6, GFP_KERNEL); for (i = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; /* check if end of string */ if (src_char == 0) goto ctoUTF16_out; /* see if we must remap this char */ if (map_chars == SFU_MAP_UNI_RSVD) dst_char = convert_to_sfu_char(src_char); else if (map_chars == SFM_MAP_UNI_RSVD) { bool end_of_string; /** * Remap spaces and periods found at the end of every * component of the path. The special cases of '.' and * '..' do not need to be dealt with explicitly because * they are addressed in namei.c:link_path_walk(). **/ if ((i == srclen - 1) || (source[i+1] == '\\')) end_of_string = true; else end_of_string = false; dst_char = convert_to_sfm_char(src_char, end_of_string); } else dst_char = 0; /* * FIXME: We can not handle remapping backslash (UNI_SLASH) * until all the calls to build_path_from_dentry are modified, * as they use backslash as separator. */ if (dst_char == 0) { charlen = cp->char2uni(source + i, srclen - i, &tmp); dst_char = cpu_to_le16(tmp); /* * if no match, use question mark, which at least in * some cases serves as wild card */ if (charlen > 0) goto ctoUTF16; /* convert SURROGATE_PAIR */ if (strcmp(cp->charset, "utf8") || !wchar_to) goto unknown; if (*(source + i) & 0x80) { charlen = utf8_to_utf32(source + i, 6, &u); if (charlen < 0) goto unknown; } else goto unknown; ret = utf8s_to_utf16s(source + i, charlen, UTF16_LITTLE_ENDIAN, wchar_to, 6); if (ret < 0) goto unknown; i += charlen; dst_char = cpu_to_le16(*wchar_to); if (charlen <= 3) /* 1-3bytes UTF-8 to 2bytes UTF-16 */ put_unaligned(dst_char, &target[j]); else if (charlen == 4) { /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16 * 7-8bytes UTF-8(IVS) divided to 2 UTF-16 * (charlen=3+4 or 4+4) */ put_unaligned(dst_char, &target[j]); dst_char = cpu_to_le16(*(wchar_to + 1)); j++; put_unaligned(dst_char, &target[j]); } else if (charlen >= 5) { /* 5-6bytes UTF-8 to 6bytes UTF-16 */ put_unaligned(dst_char, &target[j]); dst_char = cpu_to_le16(*(wchar_to + 1)); j++; put_unaligned(dst_char, &target[j]); dst_char = cpu_to_le16(*(wchar_to + 2)); j++; put_unaligned(dst_char, &target[j]); } continue; unknown: dst_char = cpu_to_le16(0x003f); charlen = 1; } ctoUTF16: /* * character may take more than one byte in the source string, * but will take exactly two bytes in the target string */ i += charlen; put_unaligned(dst_char, &target[j]); } ctoUTF16_out: put_unaligned(0, &target[j]); /* Null terminate target unicode string */ kfree(wchar_to); return j; } /* * cifs_local_to_utf16_bytes - how long will a string be after conversion? * @from - pointer to input string * @maxbytes - don't go past this many bytes of input string * @codepage - source codepage * * Walk a string and return the number of bytes that the string will * be after being converted to the given charset, not including any null * termination required. Don't walk past maxbytes in the source buffer. 
/* * cifs_local_to_utf16_bytes - how long will a string be after conversion? * @from - pointer to input string * @len - don't go past this many bytes of input string * @codepage - source codepage * * Walk a string and return the number of bytes that the string will * be after being converted to the given charset, not including any null * termination required. Don't walk past len bytes in the source buffer. */ static int cifs_local_to_utf16_bytes(const char *from, int len, const struct nls_table *codepage) { int charlen; int i; wchar_t wchar_to; for (i = 0; len && *from; i++, from += charlen, len -= charlen) { charlen = codepage->char2uni(from, len, &wchar_to); /* Failed conversion defaults to a question mark */ if (charlen < 1) charlen = 1; } return 2 * i; /* UTF16 characters are two bytes */ } /* * cifs_strndup_to_utf16 - copy a string to wire format from the local codepage * @src - source string * @maxlen - don't walk past this many bytes in the source string * @utf16_len - the length of the allocated string in bytes (including null) * @cp - source codepage * @remap - map special chars * * Take a string, convert it from the local codepage to UTF16 and * put it in a new buffer. Returns a pointer to the new string or NULL on * error. */ __le16 * cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len, const struct nls_table *cp, int remap) { int len; __le16 *dst; len = cifs_local_to_utf16_bytes(src, maxlen, cp); len += 2; /* NULL */ dst = kmalloc(len, GFP_KERNEL); if (!dst) { *utf16_len = 0; return NULL; } cifsConvertToUTF16(dst, src, strlen(src), cp, remap); *utf16_len = len; return dst; }
linux-master
fs/smb/client/cifs_unicode.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2017, Microsoft Corporation. * * Author(s): Long Li <[email protected]> */ #include <linux/module.h> #include <linux/highmem.h> #include "smbdirect.h" #include "cifs_debug.h" #include "cifsproto.h" #include "smb2proto.h" static struct smbd_response *get_empty_queue_buffer( struct smbd_connection *info); static struct smbd_response *get_receive_buffer( struct smbd_connection *info); static void put_receive_buffer( struct smbd_connection *info, struct smbd_response *response); static int allocate_receive_buffers(struct smbd_connection *info, int num_buf); static void destroy_receive_buffers(struct smbd_connection *info); static void put_empty_packet( struct smbd_connection *info, struct smbd_response *response); static void enqueue_reassembly( struct smbd_connection *info, struct smbd_response *response, int data_length); static struct smbd_response *_get_first_reassembly( struct smbd_connection *info); static int smbd_post_recv( struct smbd_connection *info, struct smbd_response *response); static int smbd_post_send_empty(struct smbd_connection *info); static void destroy_mr_list(struct smbd_connection *info); static int allocate_mr_list(struct smbd_connection *info); struct smb_extract_to_rdma { struct ib_sge *sge; unsigned int nr_sge; unsigned int max_sge; struct ib_device *device; u32 local_dma_lkey; enum dma_data_direction direction; }; static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len, struct smb_extract_to_rdma *rdma); /* SMBD version number */ #define SMBD_V1 0x0100 /* Port numbers for SMBD transport */ #define SMB_PORT 445 #define SMBD_PORT 5445 /* Address lookup and resolve timeout in ms */ #define RDMA_RESOLVE_TIMEOUT 5000 /* SMBD negotiation timeout in seconds */ #define SMBD_NEGOTIATE_TIMEOUT 120 /* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */ #define SMBD_MIN_RECEIVE_SIZE 128 #define SMBD_MIN_FRAGMENTED_SIZE 131072 /* * Default maximum number of RDMA read/write outstanding on this connection * This value is possibly decreased during QP creation on hardware limit */ #define SMBD_CM_RESPONDER_RESOURCES 32 /* Maximum number of retries on data transfer operations */ #define SMBD_CM_RETRY 6 /* No need to retry on Receiver Not Ready since SMBD manages credits */ #define SMBD_CM_RNR_RETRY 0 /* * User configurable initial values per SMBD transport connection * as defined in [MS-SMBD] 3.1.1.1 * Those may change after a SMBD negotiation */ /* The local peer's maximum number of credits to grant to the peer */ int smbd_receive_credit_max = 255; /* The remote peer's credit request of local peer */ int smbd_send_credit_target = 255; /* The maximum single-message size that can be sent to the remote peer */ int smbd_max_send_size = 1364; /* The maximum fragmented upper-layer payload receive size supported */ int smbd_max_fragmented_recv_size = 1024 * 1024; /* The maximum single-message size which can be received */ int smbd_max_receive_size = 1364; /* The timeout to initiate send of a keepalive message on idle */ int smbd_keep_alive_interval = 120; /* * User configurable initial values for RDMA transport * The actual values used may be lower and are limited to hardware capabilities */ /* Default maximum number of pages in a single RDMA write/read */ int smbd_max_frmr_depth = 2048; /* If payload is less than this many bytes, use RDMA send/recv not read/write */ int rdma_readwrite_threshold = 4096;
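/*
 * Illustrative standalone userspace sketch: how the defaults above are
 * reconciled with the peer's negotiate response, mirroring the clamping
 * done in process_negotiation_response() further down in this file. The
 * struct and names are local to this sketch, not part of the driver.
 */
#include <stdio.h>

struct limits {
	int max_send_size;		/* starts at smbd_max_send_size */
	int max_receive_size;		/* starts at smbd_max_receive_size */
	int max_fragmented_send_size;
};

static int clamp_to_peer(struct limits *l, int peer_preferred_send_size,
			 int peer_max_receive_size, int peer_max_fragmented_size)
{
	/* the peer's preferred send size must fit our receive size */
	if (peer_preferred_send_size > l->max_receive_size)
		return -1;
	l->max_receive_size = peer_preferred_send_size;
	/* never send more in one message than the peer can receive */
	if (peer_max_receive_size < l->max_send_size)
		l->max_send_size = peer_max_receive_size;
	/* upper-layer payloads are fragmented to the peer's limit */
	l->max_fragmented_send_size = peer_max_fragmented_size;
	return 0;
}

int main(void)
{
	struct limits l = { 1364, 1364, 0 };	/* defaults above */

	clamp_to_peer(&l, 1364, 1024, 1048576);
	printf("send=%d recv=%d frag=%d\n", l.max_send_size,
	       l.max_receive_size, l.max_fragmented_send_size);
	return 0;
}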
/* * Transport logging functions * Logging classes are defined below. They can be OR'ed to define the actual * logging level via module parameter smbd_logging_class * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and * log_rdma_event() */ #define LOG_OUTGOING 0x1 #define LOG_INCOMING 0x2 #define LOG_READ 0x4 #define LOG_WRITE 0x8 #define LOG_RDMA_SEND 0x10 #define LOG_RDMA_RECV 0x20 #define LOG_KEEP_ALIVE 0x40 #define LOG_RDMA_EVENT 0x80 #define LOG_RDMA_MR 0x100 static unsigned int smbd_logging_class; module_param(smbd_logging_class, uint, 0644); MODULE_PARM_DESC(smbd_logging_class, "Logging class for SMBD transport 0x0 to 0x100"); #define ERR 0x0 #define INFO 0x1 static unsigned int smbd_logging_level = ERR; module_param(smbd_logging_level, uint, 0644); MODULE_PARM_DESC(smbd_logging_level, "Logging level for SMBD transport, 0 (default): error, 1: info"); #define log_rdma(level, class, fmt, args...) \ do { \ if (level <= smbd_logging_level || class & smbd_logging_class) \ cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\ } while (0) #define log_outgoing(level, fmt, args...) \ log_rdma(level, LOG_OUTGOING, fmt, ##args) #define log_incoming(level, fmt, args...) \ log_rdma(level, LOG_INCOMING, fmt, ##args) #define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args) #define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args) #define log_rdma_send(level, fmt, args...) \ log_rdma(level, LOG_RDMA_SEND, fmt, ##args) #define log_rdma_recv(level, fmt, args...) \ log_rdma(level, LOG_RDMA_RECV, fmt, ##args) #define log_keep_alive(level, fmt, args...) \ log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args) #define log_rdma_event(level, fmt, args...) \ log_rdma(level, LOG_RDMA_EVENT, fmt, ##args) #define log_rdma_mr(level, fmt, args...) \ log_rdma(level, LOG_RDMA_MR, fmt, ##args) static void smbd_disconnect_rdma_work(struct work_struct *work) { struct smbd_connection *info = container_of(work, struct smbd_connection, disconnect_work); if (info->transport_status == SMBD_CONNECTED) { info->transport_status = SMBD_DISCONNECTING; rdma_disconnect(info->id); } } static void smbd_disconnect_rdma_connection(struct smbd_connection *info) { queue_work(info->workqueue, &info->disconnect_work); } /* Upcall from RDMA CM */ static int smbd_conn_upcall( struct rdma_cm_id *id, struct rdma_cm_event *event) { struct smbd_connection *info = id->context; log_rdma_event(INFO, "event=%d status=%d\n", event->event, event->status); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: case RDMA_CM_EVENT_ROUTE_RESOLVED: info->ri_rc = 0; complete(&info->ri_done); break; case RDMA_CM_EVENT_ADDR_ERROR: info->ri_rc = -EHOSTUNREACH; complete(&info->ri_done); break; case RDMA_CM_EVENT_ROUTE_ERROR: info->ri_rc = -ENETUNREACH; complete(&info->ri_done); break; case RDMA_CM_EVENT_ESTABLISHED: log_rdma_event(INFO, "connected event=%d\n", event->event); info->transport_status = SMBD_CONNECTED; wake_up_interruptible(&info->conn_wait); break; case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_REJECTED: log_rdma_event(INFO, "connecting failed event=%d\n", event->event); info->transport_status = SMBD_DISCONNECTED; wake_up_interruptible(&info->conn_wait); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_DISCONNECTED: /* This happens when we fail the negotiation */ if (info->transport_status == SMBD_NEGOTIATE_FAILED) { info->transport_status = SMBD_DISCONNECTED; wake_up(&info->conn_wait); break; } info->transport_status = SMBD_DISCONNECTED; wake_up_interruptible(&info->disconn_wait);
wake_up_interruptible(&info->wait_reassembly_queue); wake_up_interruptible_all(&info->wait_send_queue); break; default: break; } return 0; } /* Upcall from RDMA QP */ static void smbd_qp_async_error_upcall(struct ib_event *event, void *context) { struct smbd_connection *info = context; log_rdma_event(ERR, "%s on device %s info %p\n", ib_event_msg(event->event), event->device->name, info); switch (event->event) { case IB_EVENT_CQ_ERR: case IB_EVENT_QP_FATAL: smbd_disconnect_rdma_connection(info); break; default: break; } } static inline void *smbd_request_payload(struct smbd_request *request) { return (void *)request->packet; } static inline void *smbd_response_payload(struct smbd_response *response) { return (void *)response->packet; } /* Called when a RDMA send is done */ static void send_done(struct ib_cq *cq, struct ib_wc *wc) { int i; struct smbd_request *request = container_of(wc->wr_cqe, struct smbd_request, cqe); log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", request, wc->status); if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", wc->status, wc->opcode); smbd_disconnect_rdma_connection(request->info); } for (i = 0; i < request->num_sge; i++) ib_dma_unmap_single(request->info->id->device, request->sge[i].addr, request->sge[i].length, DMA_TO_DEVICE); if (atomic_dec_and_test(&request->info->send_pending)) wake_up(&request->info->wait_send_pending); wake_up(&request->info->wait_post_send); mempool_free(request, request->info->request_mempool); } static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp) { log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n", resp->min_version, resp->max_version, resp->negotiated_version, resp->credits_requested, resp->credits_granted, resp->status, resp->max_readwrite_size, resp->preferred_send_size, resp->max_receive_size, resp->max_fragmented_size); } /* * Process a negotiation response message, according to [MS-SMBD]3.1.5.7 * response, packet_length: the negotiation response message * return value: true if negotiation is a success, false if failed */ static bool process_negotiation_response( struct smbd_response *response, int packet_length) { struct smbd_connection *info = response->info; struct smbd_negotiate_resp *packet = smbd_response_payload(response); if (packet_length < sizeof(struct smbd_negotiate_resp)) { log_rdma_event(ERR, "error: packet_length=%d\n", packet_length); return false; } if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { log_rdma_event(ERR, "error: negotiated_version=%x\n", le16_to_cpu(packet->negotiated_version)); return false; } info->protocol = le16_to_cpu(packet->negotiated_version); if (packet->credits_requested == 0) { log_rdma_event(ERR, "error: credits_requested==0\n"); return false; } info->receive_credit_target = le16_to_cpu(packet->credits_requested); if (packet->credits_granted == 0) { log_rdma_event(ERR, "error: credits_granted==0\n"); return false; } atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); atomic_set(&info->receive_credits, 0); if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { log_rdma_event(ERR, "error: preferred_send_size=%d\n", le32_to_cpu(packet->preferred_send_size)); return false; } info->max_receive_size = le32_to_cpu(packet->preferred_send_size); if 
(le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { log_rdma_event(ERR, "error: max_receive_size=%d\n", le32_to_cpu(packet->max_receive_size)); return false; } info->max_send_size = min_t(int, info->max_send_size, le32_to_cpu(packet->max_receive_size)); if (le32_to_cpu(packet->max_fragmented_size) < SMBD_MIN_FRAGMENTED_SIZE) { log_rdma_event(ERR, "error: max_fragmented_size=%d\n", le32_to_cpu(packet->max_fragmented_size)); return false; } info->max_fragmented_send_size = le32_to_cpu(packet->max_fragmented_size); info->rdma_readwrite_threshold = rdma_readwrite_threshold > info->max_fragmented_send_size ? info->max_fragmented_send_size : rdma_readwrite_threshold; info->max_readwrite_size = min_t(u32, le32_to_cpu(packet->max_readwrite_size), info->max_frmr_depth * PAGE_SIZE); info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; return true; } static void smbd_post_send_credits(struct work_struct *work) { int ret = 0; int use_receive_queue = 1; int rc; struct smbd_response *response; struct smbd_connection *info = container_of(work, struct smbd_connection, post_send_credits_work); if (info->transport_status != SMBD_CONNECTED) { wake_up(&info->wait_receive_queues); return; } if (info->receive_credit_target > atomic_read(&info->receive_credits)) { while (true) { if (use_receive_queue) response = get_receive_buffer(info); else response = get_empty_queue_buffer(info); if (!response) { /* now switch to empty packet queue */ if (use_receive_queue) { use_receive_queue = 0; continue; } else break; } response->type = SMBD_TRANSFER_DATA; response->first_segment = false; rc = smbd_post_recv(info, response); if (rc) { log_rdma_recv(ERR, "post_recv failed rc=%d\n", rc); put_receive_buffer(info, response); break; } ret++; } } spin_lock(&info->lock_new_credits_offered); info->new_credits_offered += ret; spin_unlock(&info->lock_new_credits_offered); /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ info->send_immediate = true; if (atomic_read(&info->receive_credits) < info->receive_credit_target - 1) { if (info->keep_alive_requested == KEEP_ALIVE_PENDING || info->send_immediate) { log_keep_alive(INFO, "send an empty message\n"); smbd_post_send_empty(info); } } } /* Called from softirq, when recv is done */ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct smbd_data_transfer *data_transfer; struct smbd_response *response = container_of(wc->wr_cqe, struct smbd_response, cqe); struct smbd_connection *info = response->info; int data_length = 0; log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", response, response->type, wc->status, wc->opcode, wc->byte_len, wc->pkey_index); if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", wc->status, wc->opcode); smbd_disconnect_rdma_connection(info); goto error; } ib_dma_sync_single_for_cpu( wc->qp->device, response->sge.addr, response->sge.length, DMA_FROM_DEVICE); switch (response->type) { /* SMBD negotiation response */ case SMBD_NEGOTIATE_RESP: dump_smbd_negotiate_resp(smbd_response_payload(response)); info->full_packet_received = true; info->negotiate_done = process_negotiation_response(response, wc->byte_len); complete(&info->negotiate_completion); break; /* SMBD data transfer packet */ case SMBD_TRANSFER_DATA: data_transfer = smbd_response_payload(response); data_length = le32_to_cpu(data_transfer->data_length); /* * If this is a packet with a data payload, place the data in * the reassembly queue and wake up
the reading thread */ if (data_length) { if (info->full_packet_received) response->first_segment = true; if (le32_to_cpu(data_transfer->remaining_data_length)) info->full_packet_received = false; else info->full_packet_received = true; enqueue_reassembly( info, response, data_length); } else put_empty_packet(info, response); if (data_length) wake_up_interruptible(&info->wait_reassembly_queue); atomic_dec(&info->receive_credits); info->receive_credit_target = le16_to_cpu(data_transfer->credits_requested); if (le16_to_cpu(data_transfer->credits_granted)) { atomic_add(le16_to_cpu(data_transfer->credits_granted), &info->send_credits); /* * We have new send credits granted from remote peer * If any sender is waiting for credits, unblock it */ wake_up_interruptible(&info->wait_send_queue); } log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n", le16_to_cpu(data_transfer->flags), le32_to_cpu(data_transfer->data_offset), le32_to_cpu(data_transfer->data_length), le32_to_cpu(data_transfer->remaining_data_length)); /* Send a KEEP_ALIVE response right away if requested */ info->keep_alive_requested = KEEP_ALIVE_NONE; if (le16_to_cpu(data_transfer->flags) & SMB_DIRECT_RESPONSE_REQUESTED) { info->keep_alive_requested = KEEP_ALIVE_PENDING; } return; default: log_rdma_recv(ERR, "unexpected response type=%d\n", response->type); } error: put_receive_buffer(info, response); } static struct rdma_cm_id *smbd_create_id( struct smbd_connection *info, struct sockaddr *dstaddr, int port) { struct rdma_cm_id *id; int rc; __be16 *sport; id = rdma_create_id(&init_net, smbd_conn_upcall, info, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(id)) { rc = PTR_ERR(id); log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc); return id; } if (dstaddr->sa_family == AF_INET6) sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port; else sport = &((struct sockaddr_in *)dstaddr)->sin_port; *sport = htons(port); init_completion(&info->ri_done); info->ri_rc = -ETIMEDOUT; rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr, RDMA_RESOLVE_TIMEOUT); if (rc) { log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc); goto out; } rc = wait_for_completion_interruptible_timeout( &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); /* e.g. if interrupted returns -ERESTARTSYS */ if (rc < 0) { log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc); goto out; } rc = info->ri_rc; if (rc) { log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc); goto out; } info->ri_rc = -ETIMEDOUT; rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT); if (rc) { log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc); goto out; } rc = wait_for_completion_interruptible_timeout( &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); /* e.g. 
if interrupted returns -ERESTARTSYS */ if (rc < 0) { log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc); goto out; } rc = info->ri_rc; if (rc) { log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc); goto out; } return id; out: rdma_destroy_id(id); return ERR_PTR(rc); } /* * Test if FRWR (Fast Registration Work Requests) is supported on the device * This implementation requires FRWR on RDMA read/write * return value: true if it is supported */ static bool frwr_is_supported(struct ib_device_attr *attrs) { if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) return false; if (attrs->max_fast_reg_page_list_len == 0) return false; return true; } static int smbd_ia_open( struct smbd_connection *info, struct sockaddr *dstaddr, int port) { int rc; info->id = smbd_create_id(info, dstaddr, port); if (IS_ERR(info->id)) { rc = PTR_ERR(info->id); goto out1; } if (!frwr_is_supported(&info->id->device->attrs)) { log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n"); log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n", info->id->device->attrs.device_cap_flags, info->id->device->attrs.max_fast_reg_page_list_len); rc = -EPROTONOSUPPORT; goto out2; } info->max_frmr_depth = min_t(int, smbd_max_frmr_depth, info->id->device->attrs.max_fast_reg_page_list_len); info->mr_type = IB_MR_TYPE_MEM_REG; if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) info->mr_type = IB_MR_TYPE_SG_GAPS; info->pd = ib_alloc_pd(info->id->device, 0); if (IS_ERR(info->pd)) { rc = PTR_ERR(info->pd); log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc); goto out2; } return 0; out2: rdma_destroy_id(info->id); info->id = NULL; out1: return rc; }
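/*
 * Illustrative standalone userspace sketch: little-endian packing of an
 * SMBD negotiate request, modeled on the fields that
 * smbd_post_send_negotiate_req() below fills in. The byte offsets used
 * here are assumptions for illustration only; the wire format is
 * specified in [MS-SMBD] and the real struct definition lives in
 * smbdirect.h.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	put_le16(p, v & 0xffff);
	put_le16(p + 2, v >> 16);
}

int main(void)
{
	uint8_t req[20];
	size_t i;

	memset(req, 0, sizeof(req));
	put_le16(req + 0, 0x0100);	/* min_version = SMBD_V1 */
	put_le16(req + 2, 0x0100);	/* max_version = SMBD_V1 */
	/* bytes 4-5: reserved, left zero */
	put_le16(req + 6, 255);		/* credits_requested */
	put_le32(req + 8, 1364);	/* preferred_send_size */
	put_le32(req + 12, 1364);	/* max_receive_size */
	put_le32(req + 16, 1048576);	/* max_fragmented_size */

	for (i = 0; i < sizeof(req); i++)
		printf("%02x%c", req[i], (i % 8 == 7) ? '\n' : ' ');
	printf("\n");
	return 0;
}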
/* * Send a negotiation request message to the peer * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3 * After negotiation, the transport is connected and ready for * carrying upper layer SMB payload */ static int smbd_post_send_negotiate_req(struct smbd_connection *info) { struct ib_send_wr send_wr; int rc = -ENOMEM; struct smbd_request *request; struct smbd_negotiate_req *packet; request = mempool_alloc(info->request_mempool, GFP_KERNEL); if (!request) return rc; request->info = info; packet = smbd_request_payload(request); packet->min_version = cpu_to_le16(SMBD_V1); packet->max_version = cpu_to_le16(SMBD_V1); packet->reserved = 0; packet->credits_requested = cpu_to_le16(info->send_credit_target); packet->preferred_send_size = cpu_to_le32(info->max_send_size); packet->max_receive_size = cpu_to_le32(info->max_receive_size); packet->max_fragmented_size = cpu_to_le32(info->max_fragmented_recv_size); request->num_sge = 1; request->sge[0].addr = ib_dma_map_single( info->id->device, (void *)packet, sizeof(*packet), DMA_TO_DEVICE); if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { rc = -EIO; goto dma_mapping_failed; } request->sge[0].length = sizeof(*packet); request->sge[0].lkey = info->pd->local_dma_lkey; ib_dma_sync_single_for_device( info->id->device, request->sge[0].addr, request->sge[0].length, DMA_TO_DEVICE); request->cqe.done = send_done; send_wr.next = NULL; send_wr.wr_cqe = &request->cqe; send_wr.sg_list = request->sge; send_wr.num_sge = request->num_sge; send_wr.opcode = IB_WR_SEND; send_wr.send_flags = IB_SEND_SIGNALED; log_rdma_send(INFO, "sge addr=0x%llx length=%u lkey=0x%x\n", request->sge[0].addr, request->sge[0].length, request->sge[0].lkey); atomic_inc(&info->send_pending); rc = ib_post_send(info->id->qp, &send_wr, NULL); if (!rc) return 0; /* if we reach here, post send failed */ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); atomic_dec(&info->send_pending); ib_dma_unmap_single(info->id->device, request->sge[0].addr, request->sge[0].length, DMA_TO_DEVICE); smbd_disconnect_rdma_connection(info); dma_mapping_failed: mempool_free(request, info->request_mempool); return rc; } /* * Extend the credits to remote peer * This implements [MS-SMBD] 3.1.5.9 * The idea is that we should extend credits to remote peer as quickly as * it's allowed, to maintain data flow. We allocate as many receive * buffers as possible, and extend the receive credits to remote peer * return value: the new credits being granted. */ static int manage_credits_prior_sending(struct smbd_connection *info) { int new_credits; spin_lock(&info->lock_new_credits_offered); new_credits = info->new_credits_offered; info->new_credits_offered = 0; spin_unlock(&info->lock_new_credits_offered); return new_credits; } /* * Check if we need to send a KEEP_ALIVE message * The idle connection timer triggers a KEEP_ALIVE message when it expires * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send * back a response. * return value: * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set * 0: otherwise */ static int manage_keep_alive_before_sending(struct smbd_connection *info) { if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { info->keep_alive_requested = KEEP_ALIVE_SENT; return 1; } return 0; } /* Post the send request */ static int smbd_post_send(struct smbd_connection *info, struct smbd_request *request) { struct ib_send_wr send_wr; int rc, i; for (i = 0; i < request->num_sge; i++) { log_rdma_send(INFO, "rdma_request sge[%d] addr=0x%llx length=%u\n", i, request->sge[i].addr, request->sge[i].length); ib_dma_sync_single_for_device( info->id->device, request->sge[i].addr, request->sge[i].length, DMA_TO_DEVICE); } request->cqe.done = send_done; send_wr.next = NULL; send_wr.wr_cqe = &request->cqe; send_wr.sg_list = request->sge; send_wr.num_sge = request->num_sge; send_wr.opcode = IB_WR_SEND; send_wr.send_flags = IB_SEND_SIGNALED; rc = ib_post_send(info->id->qp, &send_wr, NULL); if (rc) { log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); smbd_disconnect_rdma_connection(info); rc = -EAGAIN; } else /* Reset timer for idle connection after packet is sent */ mod_delayed_work(info->workqueue, &info->idle_timer_work, info->keep_alive_interval*HZ); return rc; } static int smbd_post_send_iter(struct smbd_connection *info, struct iov_iter *iter, int *_remaining_data_length) { int i, rc; int header_length; int data_length; struct smbd_request *request; struct smbd_data_transfer *packet; int new_credits = 0; wait_credit: /* Wait for send credits.
A SMBD packet needs one credit */ rc = wait_event_interruptible(info->wait_send_queue, atomic_read(&info->send_credits) > 0 || info->transport_status != SMBD_CONNECTED); if (rc) goto err_wait_credit; if (info->transport_status != SMBD_CONNECTED) { log_outgoing(ERR, "disconnected not sending on wait_credit\n"); rc = -EAGAIN; goto err_wait_credit; } if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { atomic_inc(&info->send_credits); goto wait_credit; } wait_send_queue: wait_event(info->wait_post_send, atomic_read(&info->send_pending) < info->send_credit_target || info->transport_status != SMBD_CONNECTED); if (info->transport_status != SMBD_CONNECTED) { log_outgoing(ERR, "disconnected not sending on wait_send_queue\n"); rc = -EAGAIN; goto err_wait_send_queue; } if (unlikely(atomic_inc_return(&info->send_pending) > info->send_credit_target)) { atomic_dec(&info->send_pending); goto wait_send_queue; } request = mempool_alloc(info->request_mempool, GFP_KERNEL); if (!request) { rc = -ENOMEM; goto err_alloc; } request->info = info; memset(request->sge, 0, sizeof(request->sge)); /* Fill in the data payload to find out how much data we can add */ if (iter) { struct smb_extract_to_rdma extract = { .nr_sge = 1, .max_sge = SMBDIRECT_MAX_SEND_SGE, .sge = request->sge, .device = info->id->device, .local_dma_lkey = info->pd->local_dma_lkey, .direction = DMA_TO_DEVICE, }; rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length, &extract); if (rc < 0) goto err_dma; data_length = rc; request->num_sge = extract.nr_sge; *_remaining_data_length -= data_length; } else { data_length = 0; request->num_sge = 1; } /* Fill in the packet header */ packet = smbd_request_payload(request); packet->credits_requested = cpu_to_le16(info->send_credit_target); new_credits = manage_credits_prior_sending(info); atomic_add(new_credits, &info->receive_credits); packet->credits_granted = cpu_to_le16(new_credits); info->send_immediate = false; packet->flags = 0; if (manage_keep_alive_before_sending(info)) packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); packet->reserved = 0; if (!data_length) packet->data_offset = 0; else packet->data_offset = cpu_to_le32(24); packet->data_length = cpu_to_le32(data_length); packet->remaining_data_length = cpu_to_le32(*_remaining_data_length); packet->padding = 0; log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n", le16_to_cpu(packet->credits_requested), le16_to_cpu(packet->credits_granted), le32_to_cpu(packet->data_offset), le32_to_cpu(packet->data_length), le32_to_cpu(packet->remaining_data_length)); /* Map the packet to DMA */ header_length = sizeof(struct smbd_data_transfer); /* If this is a packet without payload, don't send padding */ if (!data_length) header_length = offsetof(struct smbd_data_transfer, padding); request->sge[0].addr = ib_dma_map_single(info->id->device, (void *)packet, header_length, DMA_TO_DEVICE); if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { rc = -EIO; request->sge[0].addr = 0; goto err_dma; } request->sge[0].length = header_length; request->sge[0].lkey = info->pd->local_dma_lkey; rc = smbd_post_send(info, request); if (!rc) return 0; err_dma: for (i = 0; i < request->num_sge; i++) if (request->sge[i].addr) ib_dma_unmap_single(info->id->device, request->sge[i].addr, request->sge[i].length, DMA_TO_DEVICE); mempool_free(request, info->request_mempool); /* roll back receive credits and credits to be offered */ spin_lock(&info->lock_new_credits_offered); 
info->new_credits_offered += new_credits; spin_unlock(&info->lock_new_credits_offered); atomic_sub(new_credits, &info->receive_credits); err_alloc: if (atomic_dec_and_test(&info->send_pending)) wake_up(&info->wait_send_pending); err_wait_send_queue: /* roll back send credits and pending */ atomic_inc(&info->send_credits); err_wait_credit: return rc; } /* * Send an empty message * An empty message is used to extend credits to the peer for keepalive * while there is no upper layer payload to send at the time */ static int smbd_post_send_empty(struct smbd_connection *info) { int remaining_data_length = 0; info->count_send_empty++; return smbd_post_send_iter(info, NULL, &remaining_data_length); } /* * Post a receive request to the transport * The remote peer can only send data when a receive request is posted * The interaction is controlled by send/receive credit system */ static int smbd_post_recv( struct smbd_connection *info, struct smbd_response *response) { struct ib_recv_wr recv_wr; int rc = -EIO; response->sge.addr = ib_dma_map_single( info->id->device, response->packet, info->max_receive_size, DMA_FROM_DEVICE); if (ib_dma_mapping_error(info->id->device, response->sge.addr)) return rc; response->sge.length = info->max_receive_size; response->sge.lkey = info->pd->local_dma_lkey; response->cqe.done = recv_done; recv_wr.wr_cqe = &response->cqe; recv_wr.next = NULL; recv_wr.sg_list = &response->sge; recv_wr.num_sge = 1; rc = ib_post_recv(info->id->qp, &recv_wr, NULL); if (rc) { ib_dma_unmap_single(info->id->device, response->sge.addr, response->sge.length, DMA_FROM_DEVICE); smbd_disconnect_rdma_connection(info); log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc); } return rc; } /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */ static int smbd_negotiate(struct smbd_connection *info) { int rc; struct smbd_response *response = get_receive_buffer(info); response->type = SMBD_NEGOTIATE_RESP; rc = smbd_post_recv(info, response); log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", rc, response->sge.addr, response->sge.length, response->sge.lkey); if (rc) return rc; init_completion(&info->negotiate_completion); info->negotiate_done = false; rc = smbd_post_send_negotiate_req(info); if (rc) return rc; rc = wait_for_completion_interruptible_timeout( &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc); if (info->negotiate_done) return 0; if (rc == 0) rc = -ETIMEDOUT; else if (rc == -ERESTARTSYS) rc = -EINTR; else rc = -ENOTCONN; return rc; } static void put_empty_packet( struct smbd_connection *info, struct smbd_response *response) { spin_lock(&info->empty_packet_queue_lock); list_add_tail(&response->list, &info->empty_packet_queue); info->count_empty_packet_queue++; spin_unlock(&info->empty_packet_queue_lock); queue_work(info->workqueue, &info->post_send_credits_work); }
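/*
 * Illustrative standalone userspace sketch: the publish/consume ordering
 * used between enqueue_reassembly() below (virt_wmb() before bumping
 * reassembly_data_length) and the lock-free length check in
 * smbd_recv_buf() later in this file. Shown here with C11
 * release/acquire atomics, which give the same one-way ordering
 * guarantee.
 */
#include <stdatomic.h>
#include <stdio.h>

static int queue_length;		/* protected by the ordering below */
static _Atomic int data_length;

static void producer_enqueue(int bytes)
{
	queue_length++;			/* update the queue first */
	/* release: the queue update is visible before the new length */
	atomic_store_explicit(&data_length, bytes, memory_order_release);
}

static int consumer_ready(int need)
{
	/* acquire: if we see the length, we also see the queue update */
	if (atomic_load_explicit(&data_length, memory_order_acquire) >= need)
		return queue_length;	/* safe to read without a lock */
	return -1;
}

int main(void)
{
	producer_enqueue(4096);
	printf("queue_length=%d\n", consumer_ready(4096));
	return 0;
}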
/* * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1 * This is a queue for reassembling upper layer payload and presenting it to the * upper layer. All incoming payloads go to the reassembly queue, regardless of * whether reassembly is required. The upper layer code reads from the queue for * all incoming payloads. * Put a received packet to the reassembly queue * response: the packet received * data_length: the size of payload in this packet */ static void enqueue_reassembly( struct smbd_connection *info, struct smbd_response *response, int data_length) { spin_lock(&info->reassembly_queue_lock); list_add_tail(&response->list, &info->reassembly_queue); info->reassembly_queue_length++; /* * Make sure reassembly_data_length is updated after list and * reassembly_queue_length are updated. On the dequeue side * reassembly_data_length is checked without a lock to determine * if reassembly_queue_length and list is up to date */ virt_wmb(); info->reassembly_data_length += data_length; spin_unlock(&info->reassembly_queue_lock); info->count_reassembly_queue++; info->count_enqueue_reassembly_queue++; } /* * Get the first entry at the front of reassembly queue * Caller is responsible for locking * return value: the first entry if any, NULL if queue is empty */ static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) { struct smbd_response *ret = NULL; if (!list_empty(&info->reassembly_queue)) { ret = list_first_entry( &info->reassembly_queue, struct smbd_response, list); } return ret; } static struct smbd_response *get_empty_queue_buffer( struct smbd_connection *info) { struct smbd_response *ret = NULL; unsigned long flags; spin_lock_irqsave(&info->empty_packet_queue_lock, flags); if (!list_empty(&info->empty_packet_queue)) { ret = list_first_entry( &info->empty_packet_queue, struct smbd_response, list); list_del(&ret->list); info->count_empty_packet_queue--; } spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); return ret; } /* * Get a receive buffer * For each remote send, we need to post a receive. The receive buffers are * preallocated. * return value: the receive buffer, NULL if none is available */ static struct smbd_response *get_receive_buffer(struct smbd_connection *info) { struct smbd_response *ret = NULL; unsigned long flags; spin_lock_irqsave(&info->receive_queue_lock, flags); if (!list_empty(&info->receive_queue)) { ret = list_first_entry( &info->receive_queue, struct smbd_response, list); list_del(&ret->list); info->count_receive_queue--; info->count_get_receive_buffer++; } spin_unlock_irqrestore(&info->receive_queue_lock, flags); return ret; } /* * Return a receive buffer * When a receive buffer is returned, we can post a new receive and extend * more receive credits to the remote peer. This is done immediately after a * receive buffer is returned.
*/ static void put_receive_buffer( struct smbd_connection *info, struct smbd_response *response) { unsigned long flags; ib_dma_unmap_single(info->id->device, response->sge.addr, response->sge.length, DMA_FROM_DEVICE); spin_lock_irqsave(&info->receive_queue_lock, flags); list_add_tail(&response->list, &info->receive_queue); info->count_receive_queue++; info->count_put_receive_buffer++; spin_unlock_irqrestore(&info->receive_queue_lock, flags); queue_work(info->workqueue, &info->post_send_credits_work); } /* Preallocate all receive buffers on transport establishment */ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) { int i; struct smbd_response *response; INIT_LIST_HEAD(&info->reassembly_queue); spin_lock_init(&info->reassembly_queue_lock); info->reassembly_data_length = 0; info->reassembly_queue_length = 0; INIT_LIST_HEAD(&info->receive_queue); spin_lock_init(&info->receive_queue_lock); info->count_receive_queue = 0; INIT_LIST_HEAD(&info->empty_packet_queue); spin_lock_init(&info->empty_packet_queue_lock); info->count_empty_packet_queue = 0; init_waitqueue_head(&info->wait_receive_queues); for (i = 0; i < num_buf; i++) { response = mempool_alloc(info->response_mempool, GFP_KERNEL); if (!response) goto allocate_failed; response->info = info; list_add_tail(&response->list, &info->receive_queue); info->count_receive_queue++; } return 0; allocate_failed: while (!list_empty(&info->receive_queue)) { response = list_first_entry( &info->receive_queue, struct smbd_response, list); list_del(&response->list); info->count_receive_queue--; mempool_free(response, info->response_mempool); } return -ENOMEM; } static void destroy_receive_buffers(struct smbd_connection *info) { struct smbd_response *response; while ((response = get_receive_buffer(info))) mempool_free(response, info->response_mempool); while ((response = get_empty_queue_buffer(info))) mempool_free(response, info->response_mempool); } /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */ static void idle_connection_timer(struct work_struct *work) { struct smbd_connection *info = container_of( work, struct smbd_connection, idle_timer_work.work); if (info->keep_alive_requested != KEEP_ALIVE_NONE) { log_keep_alive(ERR, "error status info->keep_alive_requested=%d\n", info->keep_alive_requested); smbd_disconnect_rdma_connection(info); return; } log_keep_alive(INFO, "about to send an empty idle message\n"); smbd_post_send_empty(info); /* Setup the next idle timeout work */ queue_delayed_work(info->workqueue, &info->idle_timer_work, info->keep_alive_interval*HZ); }
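/*
 * Illustrative standalone userspace sketch: the receive-buffer accounting
 * implemented by get_receive_buffer()/put_receive_buffer() above, reduced
 * to two counters. Returning a buffer backs one more posted receive and
 * therefore lets one more credit be offered to the peer, which is why
 * put_receive_buffer() kicks post_send_credits_work. Names are local to
 * this sketch.
 */
#include <stdio.h>

struct pool {
	int free_buffers;	/* plays the role of count_receive_queue */
	int credits_offered;	/* plays the role of new_credits_offered */
};

static int pool_get(struct pool *p)
{
	if (p->free_buffers == 0)
		return -1;	/* no buffer, no receive can be posted */
	p->free_buffers--;
	return 0;
}

static void pool_put(struct pool *p)
{
	p->free_buffers++;
	/* a returned buffer backs one more receive: offer one more credit */
	p->credits_offered++;
}

int main(void)
{
	struct pool p = { 16, 0 };

	pool_get(&p);
	pool_put(&p);
	printf("free=%d offered=%d\n", p.free_buffers, p.credits_offered);
	return 0;
}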
finish\n"); wait_event(info->wait_send_pending, atomic_read(&info->send_pending) == 0); /* It's not possible for upper layer to get to reassembly */ log_rdma_event(INFO, "drain the reassembly queue\n"); do { spin_lock_irqsave(&info->reassembly_queue_lock, flags); response = _get_first_reassembly(info); if (response) { list_del(&response->list); spin_unlock_irqrestore( &info->reassembly_queue_lock, flags); put_receive_buffer(info, response); } else spin_unlock_irqrestore( &info->reassembly_queue_lock, flags); } while (response); info->reassembly_data_length = 0; log_rdma_event(INFO, "free receive buffers\n"); wait_event(info->wait_receive_queues, info->count_receive_queue + info->count_empty_packet_queue == info->receive_credit_max); destroy_receive_buffers(info); /* * For performance reasons, memory registration and deregistration * are not locked by srv_mutex. It is possible some processes are * blocked on transport srv_mutex while holding memory registration. * Release the transport srv_mutex to allow them to hit the failure * path when sending data, and then release memory registartions. */ log_rdma_event(INFO, "freeing mr list\n"); wake_up_interruptible_all(&info->wait_mr); while (atomic_read(&info->mr_used_count)) { cifs_server_unlock(server); msleep(1000); cifs_server_lock(server); } destroy_mr_list(info); ib_free_cq(info->send_cq); ib_free_cq(info->recv_cq); ib_dealloc_pd(info->pd); rdma_destroy_id(info->id); /* free mempools */ mempool_destroy(info->request_mempool); kmem_cache_destroy(info->request_cache); mempool_destroy(info->response_mempool); kmem_cache_destroy(info->response_cache); info->transport_status = SMBD_DESTROYED; destroy_workqueue(info->workqueue); log_rdma_event(INFO, "rdma session destroyed\n"); kfree(info); server->smbd_conn = NULL; } /* * Reconnect this SMBD connection, called from upper layer * return value: 0 on success, or actual error code */ int smbd_reconnect(struct TCP_Server_Info *server) { log_rdma_event(INFO, "reconnecting rdma session\n"); if (!server->smbd_conn) { log_rdma_event(INFO, "rdma session already destroyed\n"); goto create_conn; } /* * This is possible if transport is disconnected and we haven't received * notification from RDMA, but upper layer has detected timeout */ if (server->smbd_conn->transport_status == SMBD_CONNECTED) { log_rdma_event(INFO, "disconnecting transport\n"); smbd_destroy(server); } create_conn: log_rdma_event(INFO, "creating rdma session\n"); server->smbd_conn = smbd_get_connection( server, (struct sockaddr *) &server->dstaddr); if (server->smbd_conn) { cifs_dbg(VFS, "RDMA transport re-established\n"); trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr); return 0; } trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr); return -ENOENT; } static void destroy_caches_and_workqueue(struct smbd_connection *info) { destroy_receive_buffers(info); destroy_workqueue(info->workqueue); mempool_destroy(info->response_mempool); kmem_cache_destroy(info->response_cache); mempool_destroy(info->request_mempool); kmem_cache_destroy(info->request_cache); } #define MAX_NAME_LEN 80 static int allocate_caches_and_workqueue(struct smbd_connection *info) { char name[MAX_NAME_LEN]; int rc; scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info); info->request_cache = kmem_cache_create( name, sizeof(struct smbd_request) + sizeof(struct smbd_data_transfer), 0, SLAB_HWCACHE_ALIGN, NULL); if (!info->request_cache) return -ENOMEM; info->request_mempool = mempool_create(info->send_credit_target, 
mempool_alloc_slab, mempool_free_slab, info->request_cache); if (!info->request_mempool) goto out1; scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info); info->response_cache = kmem_cache_create( name, sizeof(struct smbd_response) + info->max_receive_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!info->response_cache) goto out2; info->response_mempool = mempool_create(info->receive_credit_max, mempool_alloc_slab, mempool_free_slab, info->response_cache); if (!info->response_mempool) goto out3; scnprintf(name, MAX_NAME_LEN, "smbd_%p", info); info->workqueue = create_workqueue(name); if (!info->workqueue) goto out4; rc = allocate_receive_buffers(info, info->receive_credit_max); if (rc) { log_rdma_event(ERR, "failed to allocate receive buffers\n"); goto out5; } return 0; out5: destroy_workqueue(info->workqueue); out4: mempool_destroy(info->response_mempool); out3: kmem_cache_destroy(info->response_cache); out2: mempool_destroy(info->request_mempool); out1: kmem_cache_destroy(info->request_cache); return -ENOMEM; } /* Create a SMBD connection, called by upper layer */ static struct smbd_connection *_smbd_get_connection( struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port) { int rc; struct smbd_connection *info; struct rdma_conn_param conn_param; struct ib_qp_init_attr qp_attr; struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; struct ib_port_immutable port_immutable; u32 ird_ord_hdr[2]; info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL); if (!info) return NULL; info->transport_status = SMBD_CONNECTING; rc = smbd_ia_open(info, dstaddr, port); if (rc) { log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc); goto create_id_failed; } if (smbd_send_credit_target > info->id->device->attrs.max_cqe || smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", smbd_send_credit_target, info->id->device->attrs.max_cqe, info->id->device->attrs.max_qp_wr); goto config_failed; } if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { log_rdma_event(ERR, "consider lowering receive_credit_max = %d. 
Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", smbd_receive_credit_max, info->id->device->attrs.max_cqe, info->id->device->attrs.max_qp_wr); goto config_failed; } info->receive_credit_max = smbd_receive_credit_max; info->send_credit_target = smbd_send_credit_target; info->max_send_size = smbd_max_send_size; info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; info->max_receive_size = smbd_max_receive_size; info->keep_alive_interval = smbd_keep_alive_interval; if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { log_rdma_event(ERR, "device %.*s max_send_sge/max_recv_sge = %d/%d too small\n", IB_DEVICE_NAME_MAX, info->id->device->name, info->id->device->attrs.max_send_sge, info->id->device->attrs.max_recv_sge); goto config_failed; } info->send_cq = NULL; info->recv_cq = NULL; info->send_cq = ib_alloc_cq_any(info->id->device, info, info->send_credit_target, IB_POLL_SOFTIRQ); if (IS_ERR(info->send_cq)) { info->send_cq = NULL; goto alloc_cq_failed; } info->recv_cq = ib_alloc_cq_any(info->id->device, info, info->receive_credit_max, IB_POLL_SOFTIRQ); if (IS_ERR(info->recv_cq)) { info->recv_cq = NULL; goto alloc_cq_failed; } memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = smbd_qp_async_error_upcall; qp_attr.qp_context = info; qp_attr.cap.max_send_wr = info->send_credit_target; qp_attr.cap.max_recv_wr = info->receive_credit_max; qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE; qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE; qp_attr.cap.max_inline_data = 0; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = info->send_cq; qp_attr.recv_cq = info->recv_cq; qp_attr.port_num = ~0; rc = rdma_create_qp(info->id, info->pd, &qp_attr); if (rc) { log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc); goto create_qp_failed; } memset(&conn_param, 0, sizeof(conn_param)); conn_param.initiator_depth = 0; conn_param.responder_resources = info->id->device->attrs.max_qp_rd_atom < SMBD_CM_RESPONDER_RESOURCES ? 
info->id->device->attrs.max_qp_rd_atom : SMBD_CM_RESPONDER_RESOURCES; info->responder_resources = conn_param.responder_resources; log_rdma_mr(INFO, "responder_resources=%d\n", info->responder_resources); /* Need to send IRD/ORD in private data for iWARP */ info->id->device->ops.get_port_immutable( info->id->device, info->id->port_num, &port_immutable); if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { ird_ord_hdr[0] = info->responder_resources; ird_ord_hdr[1] = 1; conn_param.private_data = ird_ord_hdr; conn_param.private_data_len = sizeof(ird_ord_hdr); } else { conn_param.private_data = NULL; conn_param.private_data_len = 0; } conn_param.retry_count = SMBD_CM_RETRY; conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY; conn_param.flow_control = 0; log_rdma_event(INFO, "connecting to IP %pI4 port %d\n", &addr_in->sin_addr, port); init_waitqueue_head(&info->conn_wait); init_waitqueue_head(&info->disconn_wait); init_waitqueue_head(&info->wait_reassembly_queue); rc = rdma_connect(info->id, &conn_param); if (rc) { log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc); goto rdma_connect_failed; } wait_event_interruptible( info->conn_wait, info->transport_status != SMBD_CONNECTING); if (info->transport_status != SMBD_CONNECTED) { log_rdma_event(ERR, "rdma_connect failed port=%d\n", port); goto rdma_connect_failed; } log_rdma_event(INFO, "rdma_connect connected\n"); rc = allocate_caches_and_workqueue(info); if (rc) { log_rdma_event(ERR, "cache allocation failed\n"); goto allocate_cache_failed; } init_waitqueue_head(&info->wait_send_queue); INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); queue_delayed_work(info->workqueue, &info->idle_timer_work, info->keep_alive_interval*HZ); init_waitqueue_head(&info->wait_send_pending); atomic_set(&info->send_pending, 0); init_waitqueue_head(&info->wait_post_send); INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); info->new_credits_offered = 0; spin_lock_init(&info->lock_new_credits_offered); rc = smbd_negotiate(info); if (rc) { log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc); goto negotiation_failed; } rc = allocate_mr_list(info); if (rc) { log_rdma_mr(ERR, "memory registration allocation failed\n"); goto allocate_mr_failed; } return info; allocate_mr_failed: /* At this point, need to do a full transport shutdown */ server->smbd_conn = info; smbd_destroy(server); return NULL; negotiation_failed: cancel_delayed_work_sync(&info->idle_timer_work); destroy_caches_and_workqueue(info); info->transport_status = SMBD_NEGOTIATE_FAILED; init_waitqueue_head(&info->conn_wait); rdma_disconnect(info->id); wait_event(info->conn_wait, info->transport_status == SMBD_DISCONNECTED); allocate_cache_failed: rdma_connect_failed: rdma_destroy_qp(info->id); create_qp_failed: alloc_cq_failed: if (info->send_cq) ib_free_cq(info->send_cq); if (info->recv_cq) ib_free_cq(info->recv_cq); config_failed: ib_dealloc_pd(info->pd); rdma_destroy_id(info->id); create_id_failed: kfree(info); return NULL; } struct smbd_connection *smbd_get_connection( struct TCP_Server_Info *server, struct sockaddr *dstaddr) { struct smbd_connection *ret; int port = SMBD_PORT; try_again: ret = _smbd_get_connection(server, dstaddr, port); /* Try SMB_PORT if SMBD_PORT doesn't work */ if (!ret && port == SMBD_PORT) { port = SMB_PORT; goto try_again; } return ret; }
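/*
 * Illustrative standalone userspace sketch: the fabricated RFC1002 length
 * prefix that smbd_recv_buf() below hands to the upper layer for the
 * first 4-byte read of a payload. The value is the total upper-layer
 * length, data_length + remaining_data_length, in big-endian byte order.
 */
#include <stdio.h>
#include <stdint.h>

static void rfc1002_prefix(uint32_t data_length,
			   uint32_t remaining_data_length, uint8_t buf[4])
{
	uint32_t total = data_length + remaining_data_length;

	buf[0] = total >> 24;	/* big endian on the wire */
	buf[1] = total >> 16;
	buf[2] = total >> 8;
	buf[3] = total;
}

int main(void)
{
	uint8_t buf[4];

	rfc1002_prefix(1364, 2732, buf);	/* 4096 bytes in total */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}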
/* * Receive data from the receive reassembly queue * All the incoming data packets are placed in the reassembly queue * buf: the buffer to read data into * size: the length of data to read * return value: actual data read * Note: this implementation copies the data from the reassembly queue to receive * buffers used by the upper layer. This is not the optimal code path. A better way * to do it is to not have the upper layer allocate its receive buffers but rather * borrow the buffer from the reassembly queue, and return it after the data is * consumed. But this will require more changes to upper layer code, and also * needs to consider packet boundaries while they are still being reassembled. */ static int smbd_recv_buf(struct smbd_connection *info, char *buf, unsigned int size) { struct smbd_response *response; struct smbd_data_transfer *data_transfer; int to_copy, to_read, data_read, offset; u32 data_length, remaining_data_length, data_offset; int rc; again: /* * No need to hold the reassembly queue lock all the time as we are * the only one reading from the front of the queue. The transport * may add more entries to the back of the queue at the same time */ log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, info->reassembly_data_length); if (info->reassembly_data_length >= size) { int queue_length; int queue_removed = 0; /* * Need to make sure reassembly_data_length is read before * reading reassembly_queue_length and calling * _get_first_reassembly. This call is lock free * as we never read at the end of the queue which is being * updated in SOFTIRQ as more data is received */ virt_rmb(); queue_length = info->reassembly_queue_length; data_read = 0; to_read = size; offset = info->first_entry_offset; while (data_read < size) { response = _get_first_reassembly(info); data_transfer = smbd_response_payload(response); data_length = le32_to_cpu(data_transfer->data_length); remaining_data_length = le32_to_cpu( data_transfer->remaining_data_length); data_offset = le32_to_cpu(data_transfer->data_offset); /* * The upper layer expects RFC1002 length at the * beginning of the payload. Return it to indicate * the total length of the packet. This minimizes the * change to upper layer packet processing logic. This * will eventually be removed when an intermediate * transport layer is added */ if (response->first_segment && size == 4) { unsigned int rfc1002_len = data_length + remaining_data_length; *((__be32 *)buf) = cpu_to_be32(rfc1002_len); data_read = 4; response->first_segment = false; log_read(INFO, "returning rfc1002 length %d\n", rfc1002_len); goto read_rfc1002_done; } to_copy = min_t(int, data_length - offset, to_read); memcpy( buf + data_read, (char *)data_transfer + data_offset + offset, to_copy); /* move on to the next buffer?
*/ if (to_copy == data_length - offset) { queue_length--; /* * No need to lock if we are not at the * end of the queue */ if (queue_length) list_del(&response->list); else { spin_lock_irq( &info->reassembly_queue_lock); list_del(&response->list); spin_unlock_irq( &info->reassembly_queue_lock); } queue_removed++; info->count_reassembly_queue--; info->count_dequeue_reassembly_queue++; put_receive_buffer(info, response); offset = 0; log_read(INFO, "put_receive_buffer offset=0\n"); } else offset += to_copy; to_read -= to_copy; data_read += to_copy; log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n", to_copy, data_length - offset, to_read, data_read, offset); } spin_lock_irq(&info->reassembly_queue_lock); info->reassembly_data_length -= data_read; info->reassembly_queue_length -= queue_removed; spin_unlock_irq(&info->reassembly_queue_lock); info->first_entry_offset = offset; log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n", data_read, info->reassembly_data_length, info->first_entry_offset); read_rfc1002_done: return data_read; } log_read(INFO, "wait_event on more data\n"); rc = wait_event_interruptible( info->wait_reassembly_queue, info->reassembly_data_length >= size || info->transport_status != SMBD_CONNECTED); /* Don't return any data if interrupted */ if (rc) return rc; if (info->transport_status != SMBD_CONNECTED) { log_read(ERR, "disconnected\n"); return -ECONNABORTED; } goto again; } /* * Receive a page from receive reassembly queue * page: the page to read data into * to_read: the length of data to read * return value: actual data read */ static int smbd_recv_page(struct smbd_connection *info, struct page *page, unsigned int page_offset, unsigned int to_read) { int ret; char *to_address; void *page_address; /* make sure we have the page ready for read */ ret = wait_event_interruptible( info->wait_reassembly_queue, info->reassembly_data_length >= to_read || info->transport_status != SMBD_CONNECTED); if (ret) return ret; /* now we can read from reassembly queue and not sleep */ page_address = kmap_atomic(page); to_address = (char *) page_address + page_offset; log_read(INFO, "reading from page=%p address=%p to_read=%d\n", page, to_address, to_read); ret = smbd_recv_buf(info, to_address, to_read); kunmap_atomic(page_address); return ret; } /* * Receive data from transport * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC * return: total bytes read, or 0. SMB Direct will not do partial read. 
*/ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) { char *buf; struct page *page; unsigned int to_read, page_offset; int rc; if (iov_iter_rw(&msg->msg_iter) == WRITE) { /* It's a bug in upper layer to get there */ cifs_dbg(VFS, "Invalid msg iter dir %u\n", iov_iter_rw(&msg->msg_iter)); rc = -EINVAL; goto out; } switch (iov_iter_type(&msg->msg_iter)) { case ITER_KVEC: buf = msg->msg_iter.kvec->iov_base; to_read = msg->msg_iter.kvec->iov_len; rc = smbd_recv_buf(info, buf, to_read); break; case ITER_BVEC: page = msg->msg_iter.bvec->bv_page; page_offset = msg->msg_iter.bvec->bv_offset; to_read = msg->msg_iter.bvec->bv_len; rc = smbd_recv_page(info, page, page_offset, to_read); break; default: /* It's a bug in upper layer to get there */ cifs_dbg(VFS, "Invalid msg type %d\n", iov_iter_type(&msg->msg_iter)); rc = -EINVAL; } out: /* SMBDirect will read it all or nothing */ if (rc > 0) msg->msg_iter.count = 0; return rc; } /* * Send data to transport * Each rqst is transported as a SMBDirect payload * rqst: the data to write * return value: 0 if successfully write, otherwise error code */ int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst_array) { struct smbd_connection *info = server->smbd_conn; struct smb_rqst *rqst; struct iov_iter iter; unsigned int remaining_data_length, klen; int rc, i, rqst_idx; if (info->transport_status != SMBD_CONNECTED) return -EAGAIN; /* * Add in the page array if there is one. The caller needs to set * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and * ends at page boundary */ remaining_data_length = 0; for (i = 0; i < num_rqst; i++) remaining_data_length += smb_rqst_len(server, &rqst_array[i]); if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { /* assertion: payload never exceeds negotiated maximum */ log_write(ERR, "payload size %d > max size %d\n", remaining_data_length, info->max_fragmented_send_size); return -EINVAL; } log_write(INFO, "num_rqst=%d total length=%u\n", num_rqst, remaining_data_length); rqst_idx = 0; do { rqst = &rqst_array[rqst_idx]; cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n", rqst_idx, smb_rqst_len(server, rqst)); for (i = 0; i < rqst->rq_nvec; i++) dump_smb(rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); log_write(INFO, "RDMA-WR[%u] nvec=%d len=%u iter=%zu rqlen=%lu\n", rqst_idx, rqst->rq_nvec, remaining_data_length, iov_iter_count(&rqst->rq_iter), smb_rqst_len(server, rqst)); /* Send the metadata pages. */ klen = 0; for (i = 0; i < rqst->rq_nvec; i++) klen += rqst->rq_iov[i].iov_len; iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen); rc = smbd_post_send_iter(info, &iter, &remaining_data_length); if (rc < 0) break; if (iov_iter_count(&rqst->rq_iter) > 0) { /* And then the data pages if there are any */ rc = smbd_post_send_iter(info, &rqst->rq_iter, &remaining_data_length); if (rc < 0) break; } } while (++rqst_idx < num_rqst); /* * As an optimization, we don't wait for individual I/O to finish * before sending the next one. 
* Send them all and wait for the pending send count to get to 0, * which means all the I/Os have completed and we are good to return */ wait_event(info->wait_send_pending, atomic_read(&info->send_pending) == 0); return rc; } static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) { struct smbd_mr *mr; struct ib_cqe *cqe; if (wc->status) { log_rdma_mr(ERR, "status=%d\n", wc->status); cqe = wc->wr_cqe; mr = container_of(cqe, struct smbd_mr, cqe); smbd_disconnect_rdma_connection(mr->conn); } } /* * The work queue function that recovers MRs * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used * again. Both calls are slow, so finish them in a workqueue. This will not * block the I/O path. * There is one workqueue that recovers MRs; there is no need to lock as the * I/O requests calling smbd_register_mr will never update the links in the * mr_list. */ static void smbd_mr_recovery_work(struct work_struct *work) { struct smbd_connection *info = container_of(work, struct smbd_connection, mr_recovery_work); struct smbd_mr *smbdirect_mr; int rc; list_for_each_entry(smbdirect_mr, &info->mr_list, list) { if (smbdirect_mr->state == MR_ERROR) { /* recover this MR entry */ rc = ib_dereg_mr(smbdirect_mr->mr); if (rc) { log_rdma_mr(ERR, "ib_dereg_mr failed rc=%x\n", rc); smbd_disconnect_rdma_connection(info); continue; } smbdirect_mr->mr = ib_alloc_mr( info->pd, info->mr_type, info->max_frmr_depth); if (IS_ERR(smbdirect_mr->mr)) { log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", info->mr_type, info->max_frmr_depth); smbd_disconnect_rdma_connection(info); continue; } } else /* This MR is being used, don't recover it */ continue; smbdirect_mr->state = MR_READY; /* smbdirect_mr->state is updated by this function * and is read and updated by I/O issuing CPUs trying * to get an MR; the call to atomic_inc_return * implies a memory barrier and guarantees this * value is updated before waking up any calls to * get_mr() from the I/O issuing CPUs */ if (atomic_inc_return(&info->mr_ready_count) == 1) wake_up_interruptible(&info->wait_mr); } } static void destroy_mr_list(struct smbd_connection *info) { struct smbd_mr *mr, *tmp; cancel_work_sync(&info->mr_recovery_work); list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { if (mr->state == MR_INVALIDATED) ib_dma_unmap_sg(info->id->device, mr->sgt.sgl, mr->sgt.nents, mr->dir); ib_dereg_mr(mr->mr); kfree(mr->sgt.sgl); kfree(mr); } } /* * Allocate MRs used for RDMA read/write * The number of MRs will not exceed the hardware capability in responder_resources * All MRs are kept in mr_list. The MR can be recovered after it's used. * Recovery is done in smbd_mr_recovery_work.
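* (Each connection keeps 2 * responder_resources MRs on this list; see
* allocate_mr_list() below.)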
The content of a list entry changes * as MRs are used and recovered for I/O, but the list links will not change */ static int allocate_mr_list(struct smbd_connection *info) { int i; struct smbd_mr *smbdirect_mr, *tmp; INIT_LIST_HEAD(&info->mr_list); init_waitqueue_head(&info->wait_mr); spin_lock_init(&info->mr_list_lock); atomic_set(&info->mr_ready_count, 0); atomic_set(&info->mr_used_count, 0); init_waitqueue_head(&info->wait_for_mr_cleanup); INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); /* Allocate more MRs (2x) than hardware responder_resources */ for (i = 0; i < info->responder_resources * 2; i++) { smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL); if (!smbdirect_mr) goto out; smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, info->max_frmr_depth); if (IS_ERR(smbdirect_mr->mr)) { log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", info->mr_type, info->max_frmr_depth); goto out; } smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth, sizeof(struct scatterlist), GFP_KERNEL); if (!smbdirect_mr->sgt.sgl) { log_rdma_mr(ERR, "failed to allocate sgl\n"); ib_dereg_mr(smbdirect_mr->mr); goto out; } smbdirect_mr->state = MR_READY; smbdirect_mr->conn = info; list_add_tail(&smbdirect_mr->list, &info->mr_list); atomic_inc(&info->mr_ready_count); } return 0; out: kfree(smbdirect_mr); list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { list_del(&smbdirect_mr->list); ib_dereg_mr(smbdirect_mr->mr); kfree(smbdirect_mr->sgt.sgl); kfree(smbdirect_mr); } return -ENOMEM; } /* * Get an MR from mr_list. This function waits until there is at least one * MR available in the list. It may access the list while * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock * as they never modify the same places. However, there may be several CPUs * issuing I/O trying to get an MR at the same time; mr_list_lock is used to * protect against this. */ static struct smbd_mr *get_mr(struct smbd_connection *info) { struct smbd_mr *ret; int rc; again: rc = wait_event_interruptible(info->wait_mr, atomic_read(&info->mr_ready_count) || info->transport_status != SMBD_CONNECTED); if (rc) { log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc); return NULL; } if (info->transport_status != SMBD_CONNECTED) { log_rdma_mr(ERR, "info->transport_status=%x\n", info->transport_status); return NULL; } spin_lock(&info->mr_list_lock); list_for_each_entry(ret, &info->mr_list, list) { if (ret->state == MR_READY) { ret->state = MR_REGISTERED; spin_unlock(&info->mr_list_lock); atomic_dec(&info->mr_ready_count); atomic_inc(&info->mr_used_count); return ret; } } spin_unlock(&info->mr_list_lock); /* * It is possible that we could fail to get an MR because other processes may * try to acquire an MR at the same time. If this is the case, retry it. */ goto again; } /* * Transcribe the pages from an iterator into an MR scatterlist. */ static int smbd_iter_to_mr(struct smbd_connection *info, struct iov_iter *iter, struct sg_table *sgt, unsigned int max_sg) { int ret; memset(sgt->sgl, 0, max_sg * sizeof(struct scatterlist)); ret = extract_iter_to_sg(iter, iov_iter_count(iter), sgt, max_sg, 0); WARN_ON(ret < 0); if (sgt->nents > 0) sg_mark_end(&sgt->sgl[sgt->nents - 1]); return ret; } /* * Register memory for RDMA read/write * iter: the buffer to register memory with * writing: true if this is an RDMA write (SMB read), false for RDMA read * need_invalidate: true if this MR needs to be locally invalidated after I/O * return value: the registered MR, or NULL on failure.
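* A sketch of the expected pairing (an assumption drawn from the code in this
* file, not a statement of the upper-layer callers): smbd_register_mr() is
* matched by a later smbd_deregister_mr() once the remote peer has completed
* the RDMA transfer; the rkey handed to the peer comes from mr->mr->rkey.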
*/ struct smbd_mr *smbd_register_mr(struct smbd_connection *info, struct iov_iter *iter, bool writing, bool need_invalidate) { struct smbd_mr *smbdirect_mr; int rc, num_pages; enum dma_data_direction dir; struct ib_reg_wr *reg_wr; num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1); if (num_pages > info->max_frmr_depth) { log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n", num_pages, info->max_frmr_depth); WARN_ON_ONCE(1); return NULL; } smbdirect_mr = get_mr(info); if (!smbdirect_mr) { log_rdma_mr(ERR, "get_mr returning NULL\n"); return NULL; } dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; smbdirect_mr->dir = dir; smbdirect_mr->need_invalidate = need_invalidate; smbdirect_mr->sgt.nents = 0; smbdirect_mr->sgt.orig_nents = 0; log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n", num_pages, iov_iter_count(iter), info->max_frmr_depth); smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth); rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, dir); if (!rc) { log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n", num_pages, dir, rc); goto dma_map_error; } rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, NULL, PAGE_SIZE); if (rc != smbdirect_mr->sgt.nents) { log_rdma_mr(ERR, "ib_map_mr_sg failed rc = %d nents = %x\n", rc, smbdirect_mr->sgt.nents); goto map_mr_error; } ib_update_fast_reg_key(smbdirect_mr->mr, ib_inc_rkey(smbdirect_mr->mr->rkey)); reg_wr = &smbdirect_mr->wr; reg_wr->wr.opcode = IB_WR_REG_MR; smbdirect_mr->cqe.done = register_mr_done; reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; reg_wr->wr.num_sge = 0; reg_wr->wr.send_flags = IB_SEND_SIGNALED; reg_wr->mr = smbdirect_mr->mr; reg_wr->key = smbdirect_mr->mr->rkey; reg_wr->access = writing ? IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : IB_ACCESS_REMOTE_READ; /* * There is no need to wait for completion of ib_post_send * on IB_WR_REG_MR.
Hardware enforces a barrier and order of execution * on the next ib_post_send when we actually send I/O to the remote peer */ rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); if (!rc) return smbdirect_mr; log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n", rc, reg_wr->key); /* If all failed, attempt to recover this MR by setting it to MR_ERROR */ map_mr_error: ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, smbdirect_mr->dir); dma_map_error: smbdirect_mr->state = MR_ERROR; if (atomic_dec_and_test(&info->mr_used_count)) wake_up(&info->wait_for_mr_cleanup); smbd_disconnect_rdma_connection(info); return NULL; } static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) { struct smbd_mr *smbdirect_mr; struct ib_cqe *cqe; cqe = wc->wr_cqe; smbdirect_mr = container_of(cqe, struct smbd_mr, cqe); smbdirect_mr->state = MR_INVALIDATED; if (wc->status != IB_WC_SUCCESS) { log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); smbdirect_mr->state = MR_ERROR; } complete(&smbdirect_mr->invalidate_done); } /* * Deregister an MR after I/O is done * This function may wait if remote invalidation is not used * and we have to locally invalidate the buffer to prevent data from being * modified by the remote peer after the upper layer consumes it */ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) { struct ib_send_wr *wr; struct smbd_connection *info = smbdirect_mr->conn; int rc = 0; if (smbdirect_mr->need_invalidate) { /* Need to finish local invalidation before returning */ wr = &smbdirect_mr->inv_wr; wr->opcode = IB_WR_LOCAL_INV; smbdirect_mr->cqe.done = local_inv_done; wr->wr_cqe = &smbdirect_mr->cqe; wr->num_sge = 0; wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; wr->send_flags = IB_SEND_SIGNALED; init_completion(&smbdirect_mr->invalidate_done); rc = ib_post_send(info->id->qp, wr, NULL); if (rc) { log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc); smbd_disconnect_rdma_connection(info); goto done; } wait_for_completion(&smbdirect_mr->invalidate_done); smbdirect_mr->need_invalidate = false; } else /* * For remote invalidation, just set it to MR_INVALIDATED * and defer to mr_recovery_work to recover the MR for next use */ smbdirect_mr->state = MR_INVALIDATED; if (smbdirect_mr->state == MR_INVALIDATED) { ib_dma_unmap_sg( info->id->device, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, smbdirect_mr->dir); smbdirect_mr->state = MR_READY; if (atomic_inc_return(&info->mr_ready_count) == 1) wake_up_interruptible(&info->wait_mr); } else /* * Schedule the work to do MR recovery for future I/Os; MR * recovery is slow and we don't want it to block the current I/O */ queue_work(info->workqueue, &info->mr_recovery_work); done: if (atomic_dec_and_test(&info->mr_used_count)) wake_up(&info->wait_for_mr_cleanup); return rc; } static bool smb_set_sge(struct smb_extract_to_rdma *rdma, struct page *lowest_page, size_t off, size_t len) { struct ib_sge *sge = &rdma->sge[rdma->nr_sge]; u64 addr; addr = ib_dma_map_page(rdma->device, lowest_page, off, len, rdma->direction); if (ib_dma_mapping_error(rdma->device, addr)) return false; sge->addr = addr; sge->length = len; sge->lkey = rdma->local_dma_lkey; rdma->nr_sge++; return true; } /* * Extract page fragments from a BVEC-class iterator and add them to an RDMA * element list. The pages are not pinned.
*/ static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter, struct smb_extract_to_rdma *rdma, ssize_t maxsize) { const struct bio_vec *bv = iter->bvec; unsigned long start = iter->iov_offset; unsigned int i; ssize_t ret = 0; for (i = 0; i < iter->nr_segs; i++) { size_t off, len; len = bv[i].bv_len; if (start >= len) { start -= len; continue; } len = min_t(size_t, maxsize, len - start); off = bv[i].bv_offset + start; if (!smb_set_sge(rdma, bv[i].bv_page, off, len)) return -EIO; ret += len; maxsize -= len; if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) break; start = 0; } return ret; } /* * Extract fragments from a KVEC-class iterator and add them to an RDMA list. * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers. * The pages are not pinned. */ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter, struct smb_extract_to_rdma *rdma, ssize_t maxsize) { const struct kvec *kv = iter->kvec; unsigned long start = iter->iov_offset; unsigned int i; ssize_t ret = 0; for (i = 0; i < iter->nr_segs; i++) { struct page *page; unsigned long kaddr; size_t off, len, seg; len = kv[i].iov_len; if (start >= len) { start -= len; continue; } kaddr = (unsigned long)kv[i].iov_base + start; off = kaddr & ~PAGE_MASK; len = min_t(size_t, maxsize, len - start); kaddr &= PAGE_MASK; maxsize -= len; do { seg = min_t(size_t, len, PAGE_SIZE - off); if (is_vmalloc_or_module_addr((void *)kaddr)) page = vmalloc_to_page((void *)kaddr); else page = virt_to_page((void *)kaddr); if (!smb_set_sge(rdma, page, off, seg)) return -EIO; ret += seg; len -= seg; kaddr += PAGE_SIZE; off = 0; } while (len > 0 && rdma->nr_sge < rdma->max_sge); if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) break; start = 0; } return ret; } /* * Extract folio fragments from an XARRAY-class iterator and add them to an * RDMA list. The folios are not pinned. */ static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter, struct smb_extract_to_rdma *rdma, ssize_t maxsize) { struct xarray *xa = iter->xarray; struct folio *folio; loff_t start = iter->xarray_start + iter->iov_offset; pgoff_t index = start / PAGE_SIZE; ssize_t ret = 0; size_t off, len; XA_STATE(xas, xa, index); rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { if (xas_retry(&xas, folio)) continue; if (WARN_ON(xa_is_value(folio))) break; if (WARN_ON(folio_test_hugetlb(folio))) break; off = offset_in_folio(folio, start); len = min_t(size_t, maxsize, folio_size(folio) - off); if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) { rcu_read_unlock(); return -EIO; } maxsize -= len; ret += len; if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) break; } rcu_read_unlock(); return ret; } /* * Extract page fragments from up to the given amount of the source iterator * and build up an RDMA list that refers to all of those bits. The RDMA list * is appended to, up to the maximum number of elements set in the parameter * block. * * The extracted page fragments are not pinned or ref'd in any way; if an * IOVEC/UBUF-type iterator is to be used, it should be converted to a * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some * way. 
*/ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len, struct smb_extract_to_rdma *rdma) { ssize_t ret; int before = rdma->nr_sge; switch (iov_iter_type(iter)) { case ITER_BVEC: ret = smb_extract_bvec_to_rdma(iter, rdma, len); break; case ITER_KVEC: ret = smb_extract_kvec_to_rdma(iter, rdma, len); break; case ITER_XARRAY: ret = smb_extract_xarray_to_rdma(iter, rdma, len); break; default: WARN_ON_ONCE(1); return -EIO; } if (ret > 0) { iov_iter_advance(iter, ret); } else if (ret < 0) { while (rdma->nr_sge > before) { struct ib_sge *sge = &rdma->sge[rdma->nr_sge--]; ib_dma_unmap_single(rdma->device, sge->addr, sge->length, rdma->direction); sge->addr = 0; } } return ret; }
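/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * drive smb_extract_iter_to_rdma() over a KVEC iterator. The helper name, the
 * fixed sge capacity and the lack of unmapping are assumptions made for the
 * example; a real caller sizes the list from the negotiated max_sge and
 * unmaps the elements when the transfer completes.
 */
#if 0
static ssize_t example_extract_kvec(struct smbd_connection *info,
				    struct kvec *vec, size_t nvec, size_t len)
{
	struct ib_sge sges[16];		/* assumed capacity for the sketch */
	struct smb_extract_to_rdma rdma = {
		.sge		= sges,
		.nr_sge		= 0,
		.max_sge	= ARRAY_SIZE(sges),
		.device		= info->id->device,
		.local_dma_lkey	= info->pd->local_dma_lkey,
		.direction	= DMA_TO_DEVICE,
	};
	struct iov_iter iter;

	/* Wrap the kvec array in an iterator, then map it into SGEs. */
	iov_iter_kvec(&iter, ITER_SOURCE, vec, nvec, len);
	return smb_extract_iter_to_rdma(&iter, len, &rdma);
}
#endif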
linux-master
fs/smb/client/smbdirect.c
// SPDX-License-Identifier: GPL-2.0 /* * DFS referral cache routines * * Copyright (c) 2018-2019 Paulo Alcantara <[email protected]> */ #include <linux/jhash.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/nls.h> #include <linux/workqueue.h> #include <linux/uuid.h> #include "cifsglob.h" #include "smb2pdu.h" #include "smb2proto.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_unicode.h" #include "smb2glob.h" #include "dns_resolve.h" #include "dfs.h" #include "dfs_cache.h" #define CACHE_HTABLE_SIZE 32 #define CACHE_MAX_ENTRIES 64 #define CACHE_MIN_TTL 120 /* 2 minutes */ #define CACHE_DEFAULT_TTL 300 /* 5 minutes */ struct cache_dfs_tgt { char *name; int path_consumed; struct list_head list; }; struct cache_entry { struct hlist_node hlist; const char *path; int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */ int ttl; /* DFS_REREFERRAL_V3.TimeToLive */ int srvtype; /* DFS_REREFERRAL_V3.ServerType */ int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */ struct timespec64 etime; int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */ int numtgts; struct list_head tlist; struct cache_dfs_tgt *tgthint; }; static struct kmem_cache *cache_slab __read_mostly; struct workqueue_struct *dfscache_wq; atomic_t dfs_cache_ttl; static struct nls_table *cache_cp; /* * Number of entries in the cache */ static atomic_t cache_count; static struct hlist_head cache_htable[CACHE_HTABLE_SIZE]; static DECLARE_RWSEM(htable_rw_lock); /** * dfs_cache_canonical_path - get a canonical DFS path * * @path: DFS path * @cp: codepage * @remap: mapping type * * Return canonical path if success, otherwise error. */ char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap) { char *tmp; int plen = 0; char *npath; if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/')) return ERR_PTR(-EINVAL); if (unlikely(strcmp(cp->charset, cache_cp->charset))) { tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap); if (!tmp) { cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__); return ERR_PTR(-EINVAL); } npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp); kfree(tmp); if (!npath) { cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__); return ERR_PTR(-EINVAL); } } else { npath = kstrdup(path, GFP_KERNEL); if (!npath) return ERR_PTR(-ENOMEM); } convert_delimiter(npath, '\\'); return npath; } static inline bool cache_entry_expired(const struct cache_entry *ce) { struct timespec64 ts; ktime_get_coarse_real_ts64(&ts); return timespec64_compare(&ts, &ce->etime) >= 0; } static inline void free_tgts(struct cache_entry *ce) { struct cache_dfs_tgt *t, *n; list_for_each_entry_safe(t, n, &ce->tlist, list) { list_del(&t->list); kfree(t->name); kfree(t); } } static inline void flush_cache_ent(struct cache_entry *ce) { hlist_del_init(&ce->hlist); kfree(ce->path); free_tgts(ce); atomic_dec(&cache_count); kmem_cache_free(cache_slab, ce); } static void flush_cache_ents(void) { int i; for (i = 0; i < CACHE_HTABLE_SIZE; i++) { struct hlist_head *l = &cache_htable[i]; struct hlist_node *n; struct cache_entry *ce; hlist_for_each_entry_safe(ce, n, l, hlist) { if (!hlist_unhashed(&ce->hlist)) flush_cache_ent(ce); } } } /* * dfs cache /proc file */ static int dfscache_proc_show(struct seq_file *m, void *v) { int i; struct cache_entry *ce; struct cache_dfs_tgt *t; seq_puts(m, "DFS cache\n---------\n"); down_read(&htable_rw_lock); for (i = 0; i < CACHE_HTABLE_SIZE; i++) { struct hlist_head *l = 
&cache_htable[i]; hlist_for_each_entry(ce, l, hlist) { if (hlist_unhashed(&ce->hlist)) continue; seq_printf(m, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n", ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags, DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no", ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no"); list_for_each_entry(t, &ce->tlist, list) { seq_printf(m, " %s%s\n", t->name, READ_ONCE(ce->tgthint) == t ? " (target hint)" : ""); } } } up_read(&htable_rw_lock); return 0; } static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char c; int rc; rc = get_user(c, buffer); if (rc) return rc; if (c != '0') return -EINVAL; cifs_dbg(FYI, "clearing dfs cache\n"); down_write(&htable_rw_lock); flush_cache_ents(); up_write(&htable_rw_lock); return count; } static int dfscache_proc_open(struct inode *inode, struct file *file) { return single_open(file, dfscache_proc_show, NULL); } const struct proc_ops dfscache_proc_ops = { .proc_open = dfscache_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = dfscache_proc_write, }; #ifdef CONFIG_CIFS_DEBUG2 static inline void dump_tgts(const struct cache_entry *ce) { struct cache_dfs_tgt *t; cifs_dbg(FYI, "target list:\n"); list_for_each_entry(t, &ce->tlist, list) { cifs_dbg(FYI, " %s%s\n", t->name, READ_ONCE(ce->tgthint) == t ? " (target hint)" : ""); } } static inline void dump_ce(const struct cache_entry *ce) { cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n", ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags, DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no", ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no"); dump_tgts(ce); } static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs) { int i; cifs_dbg(FYI, "DFS referrals returned by the server:\n"); for (i = 0; i < numrefs; i++) { const struct dfs_info3_param *ref = &refs[i]; cifs_dbg(FYI, "\n" "flags: 0x%x\n" "path_consumed: %d\n" "server_type: 0x%x\n" "ref_flag: 0x%x\n" "path_name: %s\n" "node_name: %s\n" "ttl: %d (%dm)\n", ref->flags, ref->path_consumed, ref->server_type, ref->ref_flag, ref->path_name, ref->node_name, ref->ttl, ref->ttl / 60); } } #else #define dump_tgts(e) #define dump_ce(e) #define dump_refs(r, n) #endif /** * dfs_cache_init - Initialize DFS referral cache. * * Return zero if initialized successfully, otherwise non-zero. 
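* The cache codepage is utf8 when available, otherwise the default NLS table;
* every path added to the cache is hashed and compared in that charset.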
*/ int dfs_cache_init(void) { int rc; int i; dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!dfscache_wq) return -ENOMEM; cache_slab = kmem_cache_create("cifs_dfs_cache", sizeof(struct cache_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cache_slab) { rc = -ENOMEM; goto out_destroy_wq; } for (i = 0; i < CACHE_HTABLE_SIZE; i++) INIT_HLIST_HEAD(&cache_htable[i]); atomic_set(&cache_count, 0); atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL); cache_cp = load_nls("utf8"); if (!cache_cp) cache_cp = load_nls_default(); cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__); return 0; out_destroy_wq: destroy_workqueue(dfscache_wq); return rc; } static int cache_entry_hash(const void *data, int size, unsigned int *hash) { int i, clen; const unsigned char *s = data; wchar_t c; unsigned int h = 0; for (i = 0; i < size; i += clen) { clen = cache_cp->char2uni(&s[i], size - i, &c); if (unlikely(clen < 0)) { cifs_dbg(VFS, "%s: can't convert char\n", __func__); return clen; } c = cifs_toupper(c); h = jhash(&c, sizeof(c), h); } *hash = h % CACHE_HTABLE_SIZE; return 0; } /* Return target hint of a DFS cache entry */ static inline char *get_tgt_name(const struct cache_entry *ce) { struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint); return t ? t->name : ERR_PTR(-ENOENT); } /* Return expire time out of a new entry's TTL */ static inline struct timespec64 get_expire_time(int ttl) { struct timespec64 ts = { .tv_sec = ttl, .tv_nsec = 0, }; struct timespec64 now; ktime_get_coarse_real_ts64(&now); return timespec64_add(now, ts); } /* Allocate a new DFS target */ static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed) { struct cache_dfs_tgt *t; t = kmalloc(sizeof(*t), GFP_ATOMIC); if (!t) return ERR_PTR(-ENOMEM); t->name = kstrdup(name, GFP_ATOMIC); if (!t->name) { kfree(t); return ERR_PTR(-ENOMEM); } t->path_consumed = path_consumed; INIT_LIST_HEAD(&t->list); return t; } /* * Copy DFS referral information to a cache entry and conditionally update * target hint. 
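* When @tgthint names one of the new targets, that target is linked at the
* head of the target list; the first entry in the list then becomes the
* entry's target hint.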
*/ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs, struct cache_entry *ce, const char *tgthint) { struct cache_dfs_tgt *target; int i; ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL); ce->etime = get_expire_time(ce->ttl); ce->srvtype = refs[0].server_type; ce->hdr_flags = refs[0].flags; ce->ref_flags = refs[0].ref_flag; ce->path_consumed = refs[0].path_consumed; for (i = 0; i < numrefs; i++) { struct cache_dfs_tgt *t; t = alloc_target(refs[i].node_name, refs[i].path_consumed); if (IS_ERR(t)) { free_tgts(ce); return PTR_ERR(t); } if (tgthint && !strcasecmp(t->name, tgthint)) { list_add(&t->list, &ce->tlist); tgthint = NULL; } else { list_add_tail(&t->list, &ce->tlist); } ce->numtgts++; } target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt, list); WRITE_ONCE(ce->tgthint, target); return 0; } /* Allocate a new cache entry */ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs) { struct cache_entry *ce; int rc; ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL); if (!ce) return ERR_PTR(-ENOMEM); ce->path = refs[0].path_name; refs[0].path_name = NULL; INIT_HLIST_NODE(&ce->hlist); INIT_LIST_HEAD(&ce->tlist); rc = copy_ref_data(refs, numrefs, ce, NULL); if (rc) { kfree(ce->path); kmem_cache_free(cache_slab, ce); ce = ERR_PTR(rc); } return ce; } static void remove_oldest_entry_locked(void) { int i; struct cache_entry *ce; struct cache_entry *to_del = NULL; WARN_ON(!rwsem_is_locked(&htable_rw_lock)); for (i = 0; i < CACHE_HTABLE_SIZE; i++) { struct hlist_head *l = &cache_htable[i]; hlist_for_each_entry(ce, l, hlist) { if (hlist_unhashed(&ce->hlist)) continue; if (!to_del || timespec64_compare(&ce->etime, &to_del->etime) < 0) to_del = ce; } } if (!to_del) { cifs_dbg(FYI, "%s: no entry to remove\n", __func__); return; } cifs_dbg(FYI, "%s: removing entry\n", __func__); dump_ce(to_del); flush_cache_ent(to_del); } /* Add a new DFS cache entry */ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs) { int rc; struct cache_entry *ce; unsigned int hash; int ttl; WARN_ON(!rwsem_is_locked(&htable_rw_lock)); if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) { cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES); remove_oldest_entry_locked(); } rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash); if (rc) return ERR_PTR(rc); ce = alloc_cache_entry(refs, numrefs); if (IS_ERR(ce)) return ce; ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl); atomic_set(&dfs_cache_ttl, ttl); hlist_add_head(&ce->hlist, &cache_htable[hash]); dump_ce(ce); atomic_inc(&cache_count); return ce; } /* Check if two DFS paths are equal. 
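The comparison is case-insensitive, decoding one unicode character at a time.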
@s1 and @s2 are expected to be in @cache_cp's charset */ static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2) { int i, l1, l2; wchar_t c1, c2; if (len1 != len2) return false; for (i = 0; i < len1; i += l1) { l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1); l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2); if (unlikely(l1 < 0 && l2 < 0)) { if (s1[i] != s2[i]) return false; l1 = 1; continue; } if (l1 != l2) return false; if (cifs_toupper(c1) != cifs_toupper(c2)) return false; } return true; } static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len) { struct cache_entry *ce; hlist_for_each_entry(ce, &cache_htable[hash], hlist) { if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) { dump_ce(ce); return ce; } } return ERR_PTR(-ENOENT); } /* * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path. * * Use whole path components in the match. Must be called with htable_rw_lock held. * * Return cached entry if successful. * Return ERR_PTR(-ENOENT) if the entry is not found. * Return error ptr otherwise. */ static struct cache_entry *lookup_cache_entry(const char *path) { struct cache_entry *ce; int cnt = 0; const char *s = path, *e; char sep = *s; unsigned int hash; int rc; while ((s = strchr(s, sep)) && ++cnt < 3) s++; if (cnt < 3) { rc = cache_entry_hash(path, strlen(path), &hash); if (rc) return ERR_PTR(rc); return __lookup_cache_entry(path, hash, strlen(path)); } /* * Handle paths that have more than two path components and are a complete prefix of the DFS * referral request path (@path). * * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request". */ e = path + strlen(path) - 1; while (e > s) { int len; /* skip separators */ while (e > s && *e == sep) e--; if (e == s) break; len = e + 1 - path; rc = cache_entry_hash(path, len, &hash); if (rc) return ERR_PTR(rc); ce = __lookup_cache_entry(path, hash, len); if (!IS_ERR(ce)) return ce; /* backward until separator */ while (e > s && *e != sep) e--; } return ERR_PTR(-ENOENT); } /** * dfs_cache_destroy - destroy DFS referral cache */ void dfs_cache_destroy(void) { unload_nls(cache_cp); flush_cache_ents(); kmem_cache_destroy(cache_slab); destroy_workqueue(dfscache_wq); cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__); } /* Update a cache entry with the new referral in @refs */ static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs, int numrefs) { struct cache_dfs_tgt *target; char *th = NULL; int rc; WARN_ON(!rwsem_is_locked(&htable_rw_lock)); target = READ_ONCE(ce->tgthint); if (target) { th = kstrdup(target->name, GFP_ATOMIC); if (!th) return -ENOMEM; } free_tgts(ce); ce->numtgts = 0; rc = copy_ref_data(refs, numrefs, ce, th); kfree(th); return rc; } static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path, struct dfs_info3_param **refs, int *numrefs) { int rc; int i; *refs = NULL; *numrefs = 0; if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) return -EOPNOTSUPP; if (unlikely(!cache_cp)) return -EINVAL; cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path); rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp, NO_MAP_UNI_RSVD); if (!rc) { struct dfs_info3_param *ref = *refs; for (i = 0; i < *numrefs; i++) convert_delimiter(ref[i].path_name, '\\'); } return rc; } /* * Find, create or update a DFS cache entry. * * If the entry wasn't found, it will create a new one. 
Or if it was found but * expired, then it will update the entry accordingly. * * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to * handle them properly. * * On success, return entry with acquired lock for reading, otherwise error ptr. */ static struct cache_entry *cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path, bool force_refresh) { struct dfs_info3_param *refs = NULL; struct cache_entry *ce; int numrefs = 0; int rc; cifs_dbg(FYI, "%s: search path: %s\n", __func__, path); down_read(&htable_rw_lock); ce = lookup_cache_entry(path); if (!IS_ERR(ce)) { if (!force_refresh && !cache_entry_expired(ce)) return ce; } else if (PTR_ERR(ce) != -ENOENT) { up_read(&htable_rw_lock); return ce; } /* * Unlock shared access as we don't want to hold any locks while getting * a new referral. The @ses used for performing the I/O could be * reconnecting and it acquires @htable_rw_lock to look up the dfs cache * in order to failover -- if necessary. */ up_read(&htable_rw_lock); /* * Either the entry was not found, or it is expired, or it is a forced * refresh. * Request a new DFS referral in order to create or update a cache entry. */ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); if (rc) { ce = ERR_PTR(rc); goto out; } dump_refs(refs, numrefs); down_write(&htable_rw_lock); /* Re-check as another task might have it added or refreshed already */ ce = lookup_cache_entry(path); if (!IS_ERR(ce)) { if (force_refresh || cache_entry_expired(ce)) { rc = update_cache_entry_locked(ce, refs, numrefs); if (rc) ce = ERR_PTR(rc); } } else if (PTR_ERR(ce) == -ENOENT) { ce = add_cache_entry_locked(refs, numrefs); } if (IS_ERR(ce)) { up_write(&htable_rw_lock); goto out; } downgrade_write(&htable_rw_lock); out: free_dfs_info_array(refs, numrefs); return ce; } /* * Set up a DFS referral from a given cache entry. * * Must be called with htable_rw_lock held. */ static int setup_referral(const char *path, struct cache_entry *ce, struct dfs_info3_param *ref, const char *target) { int rc; cifs_dbg(FYI, "%s: set up new ref\n", __func__); memset(ref, 0, sizeof(*ref)); ref->path_name = kstrdup(path, GFP_ATOMIC); if (!ref->path_name) return -ENOMEM; ref->node_name = kstrdup(target, GFP_ATOMIC); if (!ref->node_name) { rc = -ENOMEM; goto err_free_path; } ref->path_consumed = ce->path_consumed; ref->ttl = ce->ttl; ref->server_type = ce->srvtype; ref->ref_flag = ce->ref_flags; ref->flags = ce->hdr_flags; return 0; err_free_path: kfree(ref->path_name); ref->path_name = NULL; return rc; } /* Return target list of a DFS cache entry */ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl) { int rc; struct list_head *head = &tl->tl_list; struct cache_dfs_tgt *t; struct dfs_cache_tgt_iterator *it, *nit; memset(tl, 0, sizeof(*tl)); INIT_LIST_HEAD(head); list_for_each_entry(t, &ce->tlist, list) { it = kzalloc(sizeof(*it), GFP_ATOMIC); if (!it) { rc = -ENOMEM; goto err_free_it; } it->it_name = kstrdup(t->name, GFP_ATOMIC); if (!it->it_name) { kfree(it); rc = -ENOMEM; goto err_free_it; } it->it_path_consumed = t->path_consumed; if (READ_ONCE(ce->tgthint) == t) list_add(&it->it_list, head); else list_add_tail(&it->it_list, head); } tl->tl_numtgts = ce->numtgts; return 0; err_free_it: list_for_each_entry_safe(it, nit, head, it_list) { list_del(&it->it_list); kfree(it->it_name); kfree(it); } return rc; } /** * dfs_cache_find - find a DFS cache entry * * If it doesn't find the cache entry, then it will get a DFS referral * for @path and create a new entry. 
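* (cache_refresh_path() hands the entry back with htable_rw_lock held for
* reading; dfs_cache_find() drops the lock once the referral and target list
* have been copied out.)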
* * In case the cache entry exists but expired, it will get a DFS referral * for @path and then update the respective cache entry. * * These parameters are passed down to the get_dfs_refer() call if it * needs to be issued: * @xid: syscall xid * @ses: smb session to issue the request on * @cp: codepage * @remap: path character remapping type * @path: path to lookup in DFS referral cache. * * @ref: when non-NULL, store single DFS referral result in it. * @tgt_list: when non-NULL, store complete DFS target list in it. * * Return zero if the target was found, otherwise non-zero. */ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp, int remap, const char *path, struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tgt_list) { int rc; const char *npath; struct cache_entry *ce; npath = dfs_cache_canonical_path(path, cp, remap); if (IS_ERR(npath)) return PTR_ERR(npath); ce = cache_refresh_path(xid, ses, npath, false); if (IS_ERR(ce)) { rc = PTR_ERR(ce); goto out_free_path; } if (ref) rc = setup_referral(path, ce, ref, get_tgt_name(ce)); else rc = 0; if (!rc && tgt_list) rc = get_targets(ce, tgt_list); up_read(&htable_rw_lock); out_free_path: kfree(npath); return rc; } /** * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to * the currently connected server. * * NOTE: This function will neither update a cache entry in case it was * expired, nor create a new cache entry if @path hasn't been found. It heavily * relies on an existing cache entry. * * @path: canonical DFS path to lookup in the DFS referral cache. * @ref: when non-NULL, store single DFS referral result in it. * @tgt_list: when non-NULL, store complete DFS target list in it. * * Return 0 if successful. * Return -ENOENT if the entry was not found. * Return non-zero for other errors. */ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tgt_list) { int rc; struct cache_entry *ce; cifs_dbg(FYI, "%s: path: %s\n", __func__, path); down_read(&htable_rw_lock); ce = lookup_cache_entry(path); if (IS_ERR(ce)) { rc = PTR_ERR(ce); goto out_unlock; } if (ref) rc = setup_referral(path, ce, ref, get_tgt_name(ce)); else rc = 0; if (!rc && tgt_list) rc = get_targets(ce, tgt_list); out_unlock: up_read(&htable_rw_lock); return rc; } /** * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry * without sending any requests to the currently connected server. * * NOTE: This function will neither update a cache entry in case it was * expired, nor create a new cache entry if @path hasn't been found. It heavily * relies on an existing cache entry. * * @path: canonical DFS path to lookup in DFS referral cache. * @it: target iterator which contains the target hint to update the cache * entry with. * * The update is skipped if @it already matches the current hint.
*/ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it) { struct cache_dfs_tgt *t; struct cache_entry *ce; if (!path || !it) return; cifs_dbg(FYI, "%s: path: %s\n", __func__, path); down_read(&htable_rw_lock); ce = lookup_cache_entry(path); if (IS_ERR(ce)) goto out_unlock; t = READ_ONCE(ce->tgthint); if (unlikely(!strcasecmp(it->it_name, t->name))) goto out_unlock; list_for_each_entry(t, &ce->tlist, list) { if (!strcasecmp(t->name, it->it_name)) { WRITE_ONCE(ce->tgthint, t); cifs_dbg(FYI, "%s: new target hint: %s\n", __func__, it->it_name); break; } } out_unlock: up_read(&htable_rw_lock); } /** * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given * target iterator (@it). * * @path: canonical DFS path to lookup in DFS referral cache. * @it: DFS target iterator. * @ref: DFS referral pointer to set up the gathered information. * * Return zero if the DFS referral was set up correctly, otherwise non-zero. */ int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it, struct dfs_info3_param *ref) { int rc; struct cache_entry *ce; if (!it || !ref) return -EINVAL; cifs_dbg(FYI, "%s: path: %s\n", __func__, path); down_read(&htable_rw_lock); ce = lookup_cache_entry(path); if (IS_ERR(ce)) { rc = PTR_ERR(ce); goto out_unlock; } cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name); rc = setup_referral(path, ce, ref, it->it_name); out_unlock: up_read(&htable_rw_lock); return rc; } /* Extract share from DFS target and return a pointer to prefix path or NULL */ static const char *parse_target_share(const char *target, char **share) { const char *s, *seps = "/\\"; size_t len; s = strpbrk(target + 1, seps); if (!s) return ERR_PTR(-EINVAL); len = strcspn(s + 1, seps); if (!len) return ERR_PTR(-EINVAL); s += len; len = s - target + 1; *share = kstrndup(target, len, GFP_KERNEL); if (!*share) return ERR_PTR(-ENOMEM); s = target + len; return s + strspn(s, seps); } /** * dfs_cache_get_tgt_share - parse a DFS target * * @path: DFS full path * @it: DFS target iterator. * @share: tree name. * @prefix: prefix path. * * Return zero if target was parsed correctly, otherwise non-zero. 
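* For instance (illustrative values only): a target of \\srv\share\a with a
* referral path whose unconsumed remainder is b yields @share=\\srv\share
* and @prefix=a\b.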
*/ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share, char **prefix) { char sep; char *target_share; char *ppath = NULL; const char *target_ppath, *dfsref_ppath; size_t target_pplen, dfsref_pplen; size_t len, c; if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed) return -EINVAL; sep = it->it_name[0]; if (sep != '\\' && sep != '/') return -EINVAL; target_ppath = parse_target_share(it->it_name, &target_share); if (IS_ERR(target_ppath)) return PTR_ERR(target_ppath); /* point to prefix in DFS referral path */ dfsref_ppath = path + it->it_path_consumed; dfsref_ppath += strspn(dfsref_ppath, "/\\"); target_pplen = strlen(target_ppath); dfsref_pplen = strlen(dfsref_ppath); /* merge prefix paths from DFS referral path and target node */ if (target_pplen || dfsref_pplen) { len = target_pplen + dfsref_pplen + 2; ppath = kzalloc(len, GFP_KERNEL); if (!ppath) { kfree(target_share); return -ENOMEM; } c = strscpy(ppath, target_ppath, len); if (c && dfsref_pplen) ppath[c] = sep; strlcat(ppath, dfsref_ppath, len); } *share = target_share; *prefix = ppath; return 0; } static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2) { char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0}; const char *host; size_t hostlen; struct sockaddr_storage ss; bool match; int rc; if (strcasecmp(s1, s2)) return false; /* * Resolve share's hostname and check if server address matches. Otherwise just ignore it * as we could not have upcall to resolve hostname or failed to convert ip address. */ extract_unc_hostname(s1, &host, &hostlen); scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host); rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL); if (rc < 0) { cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n", __func__, (int)hostlen, host); return true; } cifs_server_lock(server); match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss); cifs_server_unlock(server); return match; } /* * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new * target shares in @refs. */ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server, const char *path, struct dfs_cache_tgt_list *old_tl, struct dfs_cache_tgt_list *new_tl) { struct dfs_cache_tgt_iterator *oit, *nit; for (oit = dfs_cache_get_tgt_iterator(old_tl); oit; oit = dfs_cache_get_next_tgt(old_tl, oit)) { for (nit = dfs_cache_get_tgt_iterator(new_tl); nit; nit = dfs_cache_get_next_tgt(new_tl, nit)) { if (target_share_equal(server, dfs_cache_get_tgt_name(oit), dfs_cache_get_tgt_name(nit))) { dfs_cache_noreq_update_tgthint(path, nit); return; } } } cifs_dbg(FYI, "%s: no cached or matched targets. 
mark dfs share for reconnect.\n", __func__); cifs_signal_cifsd_for_reconnect(server, true); } static bool is_ses_good(struct cifs_ses *ses) { struct TCP_Server_Info *server = ses->server; struct cifs_tcon *tcon = ses->tcon_ipc; bool ret; spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); ret = !cifs_chan_needs_reconnect(ses, server) && ses->ses_status == SES_GOOD && !tcon->need_reconnect; spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); return ret; } /* Refresh dfs referral of tcon and mark it for reconnect if needed */ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh) { struct TCP_Server_Info *server = ses->server; DFS_CACHE_TGT_LIST(old_tl); DFS_CACHE_TGT_LIST(new_tl); bool needs_refresh = false; struct cache_entry *ce; unsigned int xid; int rc = 0; xid = get_xid(); down_read(&htable_rw_lock); ce = lookup_cache_entry(path); needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce); if (!IS_ERR(ce)) { rc = get_targets(ce, &old_tl); cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); } up_read(&htable_rw_lock); if (!needs_refresh) { rc = 0; goto out; } ses = CIFS_DFS_ROOT_SES(ses); if (!is_ses_good(ses)) { cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__); goto out; } ce = cache_refresh_path(xid, ses, path, true); if (!IS_ERR(ce)) { rc = get_targets(ce, &new_tl); up_read(&htable_rw_lock); cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl); } out: free_xid(xid); dfs_cache_free_tgts(&old_tl); dfs_cache_free_tgts(&new_tl); return rc; } static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh) { struct TCP_Server_Info *server = tcon->ses->server; struct cifs_ses *ses = tcon->ses; mutex_lock(&server->refpath_lock); if (server->leaf_fullpath) __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh); mutex_unlock(&server->refpath_lock); return 0; } /** * dfs_cache_remount_fs - remount a DFS share * * Reconfigure the dfs mount by forcing a new DFS referral and, if the currently cached targets do * not match any of the new targets, mark it for reconnect. * * @cifs_sb: cifs superblock. * * Return zero if remounted, otherwise non-zero. */ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb) { struct cifs_tcon *tcon; if (!cifs_sb || !cifs_sb->master_tlink) return -EINVAL; tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&tcon->tc_lock); if (!tcon->origin_fullpath) { spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "%s: not a dfs mount\n", __func__); return 0; } spin_unlock(&tcon->tc_lock); /* * After reconnecting to a different server, unique ids won't match anymore, so we disable * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE). */ cifs_autodisable_serverino(cifs_sb); /* * Force the use of prefix path to support failover on DFS paths that resolve to targets * that have different prefix paths.
*/ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; return refresh_tcon(tcon, true); } /* Refresh all DFS referrals related to DFS tcon */ void dfs_cache_refresh(struct work_struct *work) { struct TCP_Server_Info *server; struct dfs_root_ses *rses; struct cifs_tcon *tcon; struct cifs_ses *ses; tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work); ses = tcon->ses; server = ses->server; mutex_lock(&server->refpath_lock); if (server->leaf_fullpath) __refresh_tcon(server->leaf_fullpath + 1, ses, false); mutex_unlock(&server->refpath_lock); list_for_each_entry(rses, &tcon->dfs_ses_list, list) { ses = rses->ses; server = ses->server; mutex_lock(&server->refpath_lock); if (server->leaf_fullpath) __refresh_tcon(server->leaf_fullpath + 1, ses, false); mutex_unlock(&server->refpath_lock); } queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, atomic_read(&dfs_cache_ttl) * HZ); }
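/*
 * Illustrative sketch, not part of the original file: a minimal lookup
 * through the public API. The helper name and the debug action taken on each
 * target are assumptions made for the example; the target-list helpers are
 * used exactly as in __refresh_tcon() above.
 */
#if 0
static int example_lookup(const unsigned int xid, struct cifs_ses *ses,
			  const struct nls_table *cp, const char *path)
{
	DFS_CACHE_TGT_LIST(tl);
	struct dfs_cache_tgt_iterator *it;
	int rc;

	/* Find (or fetch and cache) the referral, filling the target list. */
	rc = dfs_cache_find(xid, ses, cp, NO_MAP_UNI_RSVD, path, NULL, &tl);
	if (rc)
		return rc;

	/* Walk the targets, hint first, then release the copied list. */
	for (it = dfs_cache_get_tgt_iterator(&tl); it;
	     it = dfs_cache_get_next_tgt(&tl, it))
		cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));

	dfs_cache_free_tgts(&tl);
	return 0;
}
#endif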
linux-master
fs/smb/client/dfs_cache.c
// SPDX-License-Identifier: LGPL-2.1 /* * * Functions which do error mapping of SMB2 status codes to POSIX errors * * Copyright (C) International Business Machines Corp., 2009 * Author(s): Steve French ([email protected]) * */ #include <linux/errno.h> #include "cifsglob.h" #include "cifs_debug.h" #include "smb2pdu.h" #include "smb2proto.h" #include "smb2status.h" #include "smb2glob.h" #include "trace.h" struct status_to_posix_error { __le32 smb2_status; int posix_error; char *status_string; }; static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_SUCCESS, 0, "STATUS_SUCCESS"}, {STATUS_WAIT_0, 0, "STATUS_WAIT_0"}, {STATUS_WAIT_1, -EIO, "STATUS_WAIT_1"}, {STATUS_WAIT_2, -EIO, "STATUS_WAIT_2"}, {STATUS_WAIT_3, -EIO, "STATUS_WAIT_3"}, {STATUS_WAIT_63, -EIO, "STATUS_WAIT_63"}, {STATUS_ABANDONED, -EIO, "STATUS_ABANDONED"}, {STATUS_ABANDONED_WAIT_0, -EIO, "STATUS_ABANDONED_WAIT_0"}, {STATUS_ABANDONED_WAIT_63, -EIO, "STATUS_ABANDONED_WAIT_63"}, {STATUS_USER_APC, -EIO, "STATUS_USER_APC"}, {STATUS_KERNEL_APC, -EIO, "STATUS_KERNEL_APC"}, {STATUS_ALERTED, -EIO, "STATUS_ALERTED"}, {STATUS_TIMEOUT, -ETIMEDOUT, "STATUS_TIMEOUT"}, {STATUS_PENDING, -EIO, "STATUS_PENDING"}, {STATUS_REPARSE, -EIO, "STATUS_REPARSE"}, {STATUS_MORE_ENTRIES, -EIO, "STATUS_MORE_ENTRIES"}, {STATUS_NOT_ALL_ASSIGNED, -EIO, "STATUS_NOT_ALL_ASSIGNED"}, {STATUS_SOME_NOT_MAPPED, -EIO, "STATUS_SOME_NOT_MAPPED"}, {STATUS_OPLOCK_BREAK_IN_PROGRESS, -EIO, "STATUS_OPLOCK_BREAK_IN_PROGRESS"}, {STATUS_VOLUME_MOUNTED, -EIO, "STATUS_VOLUME_MOUNTED"}, {STATUS_RXACT_COMMITTED, -EIO, "STATUS_RXACT_COMMITTED"}, {STATUS_NOTIFY_CLEANUP, -EIO, "STATUS_NOTIFY_CLEANUP"}, {STATUS_NOTIFY_ENUM_DIR, -EIO, "STATUS_NOTIFY_ENUM_DIR"}, {STATUS_NO_QUOTAS_FOR_ACCOUNT, -EIO, "STATUS_NO_QUOTAS_FOR_ACCOUNT"}, {STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED, -EIO, "STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED"}, {STATUS_PAGE_FAULT_TRANSITION, -EIO, "STATUS_PAGE_FAULT_TRANSITION"}, {STATUS_PAGE_FAULT_DEMAND_ZERO, -EIO, "STATUS_PAGE_FAULT_DEMAND_ZERO"}, {STATUS_PAGE_FAULT_COPY_ON_WRITE, -EIO, "STATUS_PAGE_FAULT_COPY_ON_WRITE"}, {STATUS_PAGE_FAULT_GUARD_PAGE, -EIO, "STATUS_PAGE_FAULT_GUARD_PAGE"}, {STATUS_PAGE_FAULT_PAGING_FILE, -EIO, "STATUS_PAGE_FAULT_PAGING_FILE"}, {STATUS_CACHE_PAGE_LOCKED, -EIO, "STATUS_CACHE_PAGE_LOCKED"}, {STATUS_CRASH_DUMP, -EIO, "STATUS_CRASH_DUMP"}, {STATUS_BUFFER_ALL_ZEROS, -EIO, "STATUS_BUFFER_ALL_ZEROS"}, {STATUS_REPARSE_OBJECT, -EIO, "STATUS_REPARSE_OBJECT"}, {STATUS_RESOURCE_REQUIREMENTS_CHANGED, -EIO, "STATUS_RESOURCE_REQUIREMENTS_CHANGED"}, {STATUS_TRANSLATION_COMPLETE, -EIO, "STATUS_TRANSLATION_COMPLETE"}, {STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY, -EIO, "STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY"}, {STATUS_NOTHING_TO_TERMINATE, -EIO, "STATUS_NOTHING_TO_TERMINATE"}, {STATUS_PROCESS_NOT_IN_JOB, -EIO, "STATUS_PROCESS_NOT_IN_JOB"}, {STATUS_PROCESS_IN_JOB, -EIO, "STATUS_PROCESS_IN_JOB"}, {STATUS_VOLSNAP_HIBERNATE_READY, -EIO, "STATUS_VOLSNAP_HIBERNATE_READY"}, {STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY, -EIO, "STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY"}, {STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED, -EIO, "STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED"}, {STATUS_INTERRUPT_STILL_CONNECTED, -EIO, "STATUS_INTERRUPT_STILL_CONNECTED"}, {STATUS_PROCESS_CLONED, -EIO, "STATUS_PROCESS_CLONED"}, {STATUS_FILE_LOCKED_WITH_ONLY_READERS, -EIO, "STATUS_FILE_LOCKED_WITH_ONLY_READERS"}, {STATUS_FILE_LOCKED_WITH_WRITERS, -EIO, "STATUS_FILE_LOCKED_WITH_WRITERS"}, {STATUS_RESOURCEMANAGER_READ_ONLY, -EROFS, "STATUS_RESOURCEMANAGER_READ_ONLY"}, 
{STATUS_WAIT_FOR_OPLOCK, -EIO, "STATUS_WAIT_FOR_OPLOCK"}, {DBG_EXCEPTION_HANDLED, -EIO, "DBG_EXCEPTION_HANDLED"}, {DBG_CONTINUE, -EIO, "DBG_CONTINUE"}, {STATUS_FLT_IO_COMPLETE, -EIO, "STATUS_FLT_IO_COMPLETE"}, {STATUS_OBJECT_NAME_EXISTS, -EIO, "STATUS_OBJECT_NAME_EXISTS"}, {STATUS_THREAD_WAS_SUSPENDED, -EIO, "STATUS_THREAD_WAS_SUSPENDED"}, {STATUS_WORKING_SET_LIMIT_RANGE, -EIO, "STATUS_WORKING_SET_LIMIT_RANGE"}, {STATUS_IMAGE_NOT_AT_BASE, -EIO, "STATUS_IMAGE_NOT_AT_BASE"}, {STATUS_RXACT_STATE_CREATED, -EIO, "STATUS_RXACT_STATE_CREATED"}, {STATUS_SEGMENT_NOTIFICATION, -EIO, "STATUS_SEGMENT_NOTIFICATION"}, {STATUS_LOCAL_USER_SESSION_KEY, -EIO, "STATUS_LOCAL_USER_SESSION_KEY"}, {STATUS_BAD_CURRENT_DIRECTORY, -EIO, "STATUS_BAD_CURRENT_DIRECTORY"}, {STATUS_SERIAL_MORE_WRITES, -EIO, "STATUS_SERIAL_MORE_WRITES"}, {STATUS_REGISTRY_RECOVERED, -EIO, "STATUS_REGISTRY_RECOVERED"}, {STATUS_FT_READ_RECOVERY_FROM_BACKUP, -EIO, "STATUS_FT_READ_RECOVERY_FROM_BACKUP"}, {STATUS_FT_WRITE_RECOVERY, -EIO, "STATUS_FT_WRITE_RECOVERY"}, {STATUS_SERIAL_COUNTER_TIMEOUT, -ETIMEDOUT, "STATUS_SERIAL_COUNTER_TIMEOUT"}, {STATUS_NULL_LM_PASSWORD, -EIO, "STATUS_NULL_LM_PASSWORD"}, {STATUS_IMAGE_MACHINE_TYPE_MISMATCH, -EIO, "STATUS_IMAGE_MACHINE_TYPE_MISMATCH"}, {STATUS_RECEIVE_PARTIAL, -EIO, "STATUS_RECEIVE_PARTIAL"}, {STATUS_RECEIVE_EXPEDITED, -EIO, "STATUS_RECEIVE_EXPEDITED"}, {STATUS_RECEIVE_PARTIAL_EXPEDITED, -EIO, "STATUS_RECEIVE_PARTIAL_EXPEDITED"}, {STATUS_EVENT_DONE, -EIO, "STATUS_EVENT_DONE"}, {STATUS_EVENT_PENDING, -EIO, "STATUS_EVENT_PENDING"}, {STATUS_CHECKING_FILE_SYSTEM, -EIO, "STATUS_CHECKING_FILE_SYSTEM"}, {STATUS_FATAL_APP_EXIT, -EIO, "STATUS_FATAL_APP_EXIT"}, {STATUS_PREDEFINED_HANDLE, -EIO, "STATUS_PREDEFINED_HANDLE"}, {STATUS_WAS_UNLOCKED, -EIO, "STATUS_WAS_UNLOCKED"}, {STATUS_SERVICE_NOTIFICATION, -EIO, "STATUS_SERVICE_NOTIFICATION"}, {STATUS_WAS_LOCKED, -EIO, "STATUS_WAS_LOCKED"}, {STATUS_LOG_HARD_ERROR, -EIO, "STATUS_LOG_HARD_ERROR"}, {STATUS_ALREADY_WIN32, -EIO, "STATUS_ALREADY_WIN32"}, {STATUS_WX86_UNSIMULATE, -EIO, "STATUS_WX86_UNSIMULATE"}, {STATUS_WX86_CONTINUE, -EIO, "STATUS_WX86_CONTINUE"}, {STATUS_WX86_SINGLE_STEP, -EIO, "STATUS_WX86_SINGLE_STEP"}, {STATUS_WX86_BREAKPOINT, -EIO, "STATUS_WX86_BREAKPOINT"}, {STATUS_WX86_EXCEPTION_CONTINUE, -EIO, "STATUS_WX86_EXCEPTION_CONTINUE"}, {STATUS_WX86_EXCEPTION_LASTCHANCE, -EIO, "STATUS_WX86_EXCEPTION_LASTCHANCE"}, {STATUS_WX86_EXCEPTION_CHAIN, -EIO, "STATUS_WX86_EXCEPTION_CHAIN"}, {STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE, -EIO, "STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE"}, {STATUS_NO_YIELD_PERFORMED, -EIO, "STATUS_NO_YIELD_PERFORMED"}, {STATUS_TIMER_RESUME_IGNORED, -EIO, "STATUS_TIMER_RESUME_IGNORED"}, {STATUS_ARBITRATION_UNHANDLED, -EIO, "STATUS_ARBITRATION_UNHANDLED"}, {STATUS_CARDBUS_NOT_SUPPORTED, -ENOSYS, "STATUS_CARDBUS_NOT_SUPPORTED"}, {STATUS_WX86_CREATEWX86TIB, -EIO, "STATUS_WX86_CREATEWX86TIB"}, {STATUS_MP_PROCESSOR_MISMATCH, -EIO, "STATUS_MP_PROCESSOR_MISMATCH"}, {STATUS_HIBERNATED, -EIO, "STATUS_HIBERNATED"}, {STATUS_RESUME_HIBERNATION, -EIO, "STATUS_RESUME_HIBERNATION"}, {STATUS_FIRMWARE_UPDATED, -EIO, "STATUS_FIRMWARE_UPDATED"}, {STATUS_DRIVERS_LEAKING_LOCKED_PAGES, -EIO, "STATUS_DRIVERS_LEAKING_LOCKED_PAGES"}, {STATUS_MESSAGE_RETRIEVED, -EIO, "STATUS_MESSAGE_RETRIEVED"}, {STATUS_SYSTEM_POWERSTATE_TRANSITION, -EIO, "STATUS_SYSTEM_POWERSTATE_TRANSITION"}, {STATUS_ALPC_CHECK_COMPLETION_LIST, -EIO, "STATUS_ALPC_CHECK_COMPLETION_LIST"}, {STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION, -EIO, "STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION"}, 
{STATUS_ACCESS_AUDIT_BY_POLICY, -EIO, "STATUS_ACCESS_AUDIT_BY_POLICY"}, {STATUS_ABANDON_HIBERFILE, -EIO, "STATUS_ABANDON_HIBERFILE"}, {STATUS_BIZRULES_NOT_ENABLED, -EIO, "STATUS_BIZRULES_NOT_ENABLED"}, {STATUS_WAKE_SYSTEM, -EIO, "STATUS_WAKE_SYSTEM"}, {STATUS_DS_SHUTTING_DOWN, -EIO, "STATUS_DS_SHUTTING_DOWN"}, {DBG_REPLY_LATER, -EIO, "DBG_REPLY_LATER"}, {DBG_UNABLE_TO_PROVIDE_HANDLE, -EIO, "DBG_UNABLE_TO_PROVIDE_HANDLE"}, {DBG_TERMINATE_THREAD, -EIO, "DBG_TERMINATE_THREAD"}, {DBG_TERMINATE_PROCESS, -EIO, "DBG_TERMINATE_PROCESS"}, {DBG_CONTROL_C, -EIO, "DBG_CONTROL_C"}, {DBG_PRINTEXCEPTION_C, -EIO, "DBG_PRINTEXCEPTION_C"}, {DBG_RIPEXCEPTION, -EIO, "DBG_RIPEXCEPTION"}, {DBG_CONTROL_BREAK, -EIO, "DBG_CONTROL_BREAK"}, {DBG_COMMAND_EXCEPTION, -EIO, "DBG_COMMAND_EXCEPTION"}, {RPC_NT_UUID_LOCAL_ONLY, -EIO, "RPC_NT_UUID_LOCAL_ONLY"}, {RPC_NT_SEND_INCOMPLETE, -EIO, "RPC_NT_SEND_INCOMPLETE"}, {STATUS_CTX_CDM_CONNECT, -EIO, "STATUS_CTX_CDM_CONNECT"}, {STATUS_CTX_CDM_DISCONNECT, -EIO, "STATUS_CTX_CDM_DISCONNECT"}, {STATUS_SXS_RELEASE_ACTIVATION_CONTEXT, -EIO, "STATUS_SXS_RELEASE_ACTIVATION_CONTEXT"}, {STATUS_RECOVERY_NOT_NEEDED, -EIO, "STATUS_RECOVERY_NOT_NEEDED"}, {STATUS_RM_ALREADY_STARTED, -EIO, "STATUS_RM_ALREADY_STARTED"}, {STATUS_LOG_NO_RESTART, -EIO, "STATUS_LOG_NO_RESTART"}, {STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST, -EIO, "STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST"}, {STATUS_GRAPHICS_PARTIAL_DATA_POPULATED, -EIO, "STATUS_GRAPHICS_PARTIAL_DATA_POPULATED"}, {STATUS_GRAPHICS_DRIVER_MISMATCH, -EIO, "STATUS_GRAPHICS_DRIVER_MISMATCH"}, {STATUS_GRAPHICS_MODE_NOT_PINNED, -EIO, "STATUS_GRAPHICS_MODE_NOT_PINNED"}, {STATUS_GRAPHICS_NO_PREFERRED_MODE, -EIO, "STATUS_GRAPHICS_NO_PREFERRED_MODE"}, {STATUS_GRAPHICS_DATASET_IS_EMPTY, -EIO, "STATUS_GRAPHICS_DATASET_IS_EMPTY"}, {STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET, -EIO, "STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET"}, {STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED, -EIO, "STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED"}, {STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS, -EIO, "STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS"}, {STATUS_GRAPHICS_LEADLINK_START_DEFERRED, -EIO, "STATUS_GRAPHICS_LEADLINK_START_DEFERRED"}, {STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY, -EIO, "STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY"}, {STATUS_GRAPHICS_START_DEFERRED, -EIO, "STATUS_GRAPHICS_START_DEFERRED"}, {STATUS_NDIS_INDICATION_REQUIRED, -EIO, "STATUS_NDIS_INDICATION_REQUIRED"}, {STATUS_GUARD_PAGE_VIOLATION, -EIO, "STATUS_GUARD_PAGE_VIOLATION"}, {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"}, {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"}, {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"}, {STATUS_GUID_SUBSTITUTION_MADE, -EIO, "STATUS_GUID_SUBSTITUTION_MADE"}, {STATUS_PARTIAL_COPY, -EIO, "STATUS_PARTIAL_COPY"}, {STATUS_DEVICE_PAPER_EMPTY, -EIO, "STATUS_DEVICE_PAPER_EMPTY"}, {STATUS_DEVICE_POWERED_OFF, -EIO, "STATUS_DEVICE_POWERED_OFF"}, {STATUS_DEVICE_OFF_LINE, -EIO, "STATUS_DEVICE_OFF_LINE"}, {STATUS_DEVICE_BUSY, -EBUSY, "STATUS_DEVICE_BUSY"}, {STATUS_NO_MORE_EAS, -EIO, "STATUS_NO_MORE_EAS"}, {STATUS_INVALID_EA_NAME, -EINVAL, "STATUS_INVALID_EA_NAME"}, {STATUS_EA_LIST_INCONSISTENT, -EIO, "STATUS_EA_LIST_INCONSISTENT"}, {STATUS_INVALID_EA_FLAG, -EINVAL, 
"STATUS_INVALID_EA_FLAG"}, {STATUS_VERIFY_REQUIRED, -EIO, "STATUS_VERIFY_REQUIRED"}, {STATUS_EXTRANEOUS_INFORMATION, -EIO, "STATUS_EXTRANEOUS_INFORMATION"}, {STATUS_RXACT_COMMIT_NECESSARY, -EIO, "STATUS_RXACT_COMMIT_NECESSARY"}, {STATUS_NO_MORE_ENTRIES, -EIO, "STATUS_NO_MORE_ENTRIES"}, {STATUS_FILEMARK_DETECTED, -EIO, "STATUS_FILEMARK_DETECTED"}, {STATUS_MEDIA_CHANGED, -EIO, "STATUS_MEDIA_CHANGED"}, {STATUS_BUS_RESET, -EIO, "STATUS_BUS_RESET"}, {STATUS_END_OF_MEDIA, -EIO, "STATUS_END_OF_MEDIA"}, {STATUS_BEGINNING_OF_MEDIA, -EIO, "STATUS_BEGINNING_OF_MEDIA"}, {STATUS_MEDIA_CHECK, -EIO, "STATUS_MEDIA_CHECK"}, {STATUS_SETMARK_DETECTED, -EIO, "STATUS_SETMARK_DETECTED"}, {STATUS_NO_DATA_DETECTED, -EIO, "STATUS_NO_DATA_DETECTED"}, {STATUS_REDIRECTOR_HAS_OPEN_HANDLES, -EIO, "STATUS_REDIRECTOR_HAS_OPEN_HANDLES"}, {STATUS_SERVER_HAS_OPEN_HANDLES, -EIO, "STATUS_SERVER_HAS_OPEN_HANDLES"}, {STATUS_ALREADY_DISCONNECTED, -EIO, "STATUS_ALREADY_DISCONNECTED"}, {STATUS_LONGJUMP, -EIO, "STATUS_LONGJUMP"}, {STATUS_CLEANER_CARTRIDGE_INSTALLED, -EIO, "STATUS_CLEANER_CARTRIDGE_INSTALLED"}, {STATUS_PLUGPLAY_QUERY_VETOED, -EIO, "STATUS_PLUGPLAY_QUERY_VETOED"}, {STATUS_UNWIND_CONSOLIDATE, -EIO, "STATUS_UNWIND_CONSOLIDATE"}, {STATUS_REGISTRY_HIVE_RECOVERED, -EIO, "STATUS_REGISTRY_HIVE_RECOVERED"}, {STATUS_DLL_MIGHT_BE_INSECURE, -EIO, "STATUS_DLL_MIGHT_BE_INSECURE"}, {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO, "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"}, {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"}, {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP, "STATUS_REPARSE_NOT_HANDLED"}, {STATUS_DEVICE_REQUIRES_CLEANING, -EIO, "STATUS_DEVICE_REQUIRES_CLEANING"}, {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"}, {STATUS_DATA_LOST_REPAIR, -EIO, "STATUS_DATA_LOST_REPAIR"}, {DBG_EXCEPTION_NOT_HANDLED, -EIO, "DBG_EXCEPTION_NOT_HANDLED"}, {STATUS_CLUSTER_NODE_ALREADY_UP, -EIO, "STATUS_CLUSTER_NODE_ALREADY_UP"}, {STATUS_CLUSTER_NODE_ALREADY_DOWN, -EIO, "STATUS_CLUSTER_NODE_ALREADY_DOWN"}, {STATUS_CLUSTER_NETWORK_ALREADY_ONLINE, -EIO, "STATUS_CLUSTER_NETWORK_ALREADY_ONLINE"}, {STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE, -EIO, "STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE"}, {STATUS_CLUSTER_NODE_ALREADY_MEMBER, -EIO, "STATUS_CLUSTER_NODE_ALREADY_MEMBER"}, {STATUS_COULD_NOT_RESIZE_LOG, -EIO, "STATUS_COULD_NOT_RESIZE_LOG"}, {STATUS_NO_TXF_METADATA, -EIO, "STATUS_NO_TXF_METADATA"}, {STATUS_CANT_RECOVER_WITH_HANDLE_OPEN, -EIO, "STATUS_CANT_RECOVER_WITH_HANDLE_OPEN"}, {STATUS_TXF_METADATA_ALREADY_PRESENT, -EIO, "STATUS_TXF_METADATA_ALREADY_PRESENT"}, {STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET, -EIO, "STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET"}, {STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED, -EIO, "STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED"}, {STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"}, {STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"}, {STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"}, {STATUS_NOT_IMPLEMENTED, -EOPNOTSUPP, "STATUS_NOT_IMPLEMENTED"}, {STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"}, {STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"}, {STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"}, {STATUS_IN_PAGE_ERROR, -EFAULT, "STATUS_IN_PAGE_ERROR"}, {STATUS_PAGEFILE_QUOTA, -EDQUOT, "STATUS_PAGEFILE_QUOTA"}, {STATUS_INVALID_HANDLE, -EBADF, "STATUS_INVALID_HANDLE"}, {STATUS_BAD_INITIAL_STACK, -EIO, "STATUS_BAD_INITIAL_STACK"}, {STATUS_BAD_INITIAL_PC, -EIO, "STATUS_BAD_INITIAL_PC"}, {STATUS_INVALID_CID, -EIO, 
"STATUS_INVALID_CID"}, {STATUS_TIMER_NOT_CANCELED, -EIO, "STATUS_TIMER_NOT_CANCELED"}, {STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"}, {STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"}, {STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"}, {STATUS_INVALID_DEVICE_REQUEST, -EOPNOTSUPP, "STATUS_INVALID_DEVICE_REQUEST"}, {STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"}, {STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"}, {STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"}, {STATUS_UNRECOGNIZED_MEDIA, -EIO, "STATUS_UNRECOGNIZED_MEDIA"}, {STATUS_NONEXISTENT_SECTOR, -EIO, "STATUS_NONEXISTENT_SECTOR"}, {STATUS_MORE_PROCESSING_REQUIRED, -EIO, "STATUS_MORE_PROCESSING_REQUIRED"}, {STATUS_NO_MEMORY, -EREMOTEIO, "STATUS_NO_MEMORY"}, {STATUS_CONFLICTING_ADDRESSES, -EADDRINUSE, "STATUS_CONFLICTING_ADDRESSES"}, {STATUS_NOT_MAPPED_VIEW, -EIO, "STATUS_NOT_MAPPED_VIEW"}, {STATUS_UNABLE_TO_FREE_VM, -EIO, "STATUS_UNABLE_TO_FREE_VM"}, {STATUS_UNABLE_TO_DELETE_SECTION, -EIO, "STATUS_UNABLE_TO_DELETE_SECTION"}, {STATUS_INVALID_SYSTEM_SERVICE, -EIO, "STATUS_INVALID_SYSTEM_SERVICE"}, {STATUS_ILLEGAL_INSTRUCTION, -EIO, "STATUS_ILLEGAL_INSTRUCTION"}, {STATUS_INVALID_LOCK_SEQUENCE, -EIO, "STATUS_INVALID_LOCK_SEQUENCE"}, {STATUS_INVALID_VIEW_SIZE, -EIO, "STATUS_INVALID_VIEW_SIZE"}, {STATUS_INVALID_FILE_FOR_SECTION, -EIO, "STATUS_INVALID_FILE_FOR_SECTION"}, {STATUS_ALREADY_COMMITTED, -EIO, "STATUS_ALREADY_COMMITTED"}, {STATUS_ACCESS_DENIED, -EACCES, "STATUS_ACCESS_DENIED"}, {STATUS_BUFFER_TOO_SMALL, -EIO, "STATUS_BUFFER_TOO_SMALL"}, {STATUS_OBJECT_TYPE_MISMATCH, -EIO, "STATUS_OBJECT_TYPE_MISMATCH"}, {STATUS_NONCONTINUABLE_EXCEPTION, -EIO, "STATUS_NONCONTINUABLE_EXCEPTION"}, {STATUS_INVALID_DISPOSITION, -EIO, "STATUS_INVALID_DISPOSITION"}, {STATUS_UNWIND, -EIO, "STATUS_UNWIND"}, {STATUS_BAD_STACK, -EIO, "STATUS_BAD_STACK"}, {STATUS_INVALID_UNWIND_TARGET, -EIO, "STATUS_INVALID_UNWIND_TARGET"}, {STATUS_NOT_LOCKED, -EIO, "STATUS_NOT_LOCKED"}, {STATUS_PARITY_ERROR, -EIO, "STATUS_PARITY_ERROR"}, {STATUS_UNABLE_TO_DECOMMIT_VM, -EIO, "STATUS_UNABLE_TO_DECOMMIT_VM"}, {STATUS_NOT_COMMITTED, -EIO, "STATUS_NOT_COMMITTED"}, {STATUS_INVALID_PORT_ATTRIBUTES, -EIO, "STATUS_INVALID_PORT_ATTRIBUTES"}, {STATUS_PORT_MESSAGE_TOO_LONG, -EIO, "STATUS_PORT_MESSAGE_TOO_LONG"}, {STATUS_INVALID_PARAMETER_MIX, -EINVAL, "STATUS_INVALID_PARAMETER_MIX"}, {STATUS_INVALID_QUOTA_LOWER, -EIO, "STATUS_INVALID_QUOTA_LOWER"}, {STATUS_DISK_CORRUPT_ERROR, -EIO, "STATUS_DISK_CORRUPT_ERROR"}, {STATUS_OBJECT_NAME_INVALID, -ENOENT, "STATUS_OBJECT_NAME_INVALID"}, {STATUS_OBJECT_NAME_NOT_FOUND, -ENOENT, "STATUS_OBJECT_NAME_NOT_FOUND"}, {STATUS_OBJECT_NAME_COLLISION, -EEXIST, "STATUS_OBJECT_NAME_COLLISION"}, {STATUS_PORT_DISCONNECTED, -EIO, "STATUS_PORT_DISCONNECTED"}, {STATUS_DEVICE_ALREADY_ATTACHED, -EIO, "STATUS_DEVICE_ALREADY_ATTACHED"}, {STATUS_OBJECT_PATH_INVALID, -ENOTDIR, "STATUS_OBJECT_PATH_INVALID"}, {STATUS_OBJECT_PATH_NOT_FOUND, -ENOENT, "STATUS_OBJECT_PATH_NOT_FOUND"}, {STATUS_OBJECT_PATH_SYNTAX_BAD, -EIO, "STATUS_OBJECT_PATH_SYNTAX_BAD"}, {STATUS_DATA_OVERRUN, -EIO, "STATUS_DATA_OVERRUN"}, {STATUS_DATA_LATE_ERROR, -EIO, "STATUS_DATA_LATE_ERROR"}, {STATUS_DATA_ERROR, -EIO, "STATUS_DATA_ERROR"}, {STATUS_CRC_ERROR, -EIO, "STATUS_CRC_ERROR"}, {STATUS_SECTION_TOO_BIG, -EIO, "STATUS_SECTION_TOO_BIG"}, {STATUS_PORT_CONNECTION_REFUSED, -ECONNREFUSED, "STATUS_PORT_CONNECTION_REFUSED"}, {STATUS_INVALID_PORT_HANDLE, -EIO, "STATUS_INVALID_PORT_HANDLE"}, {STATUS_SHARING_VIOLATION, -EBUSY, "STATUS_SHARING_VIOLATION"}, 
{STATUS_QUOTA_EXCEEDED, -EDQUOT, "STATUS_QUOTA_EXCEEDED"}, {STATUS_INVALID_PAGE_PROTECTION, -EIO, "STATUS_INVALID_PAGE_PROTECTION"}, {STATUS_MUTANT_NOT_OWNED, -EIO, "STATUS_MUTANT_NOT_OWNED"}, {STATUS_SEMAPHORE_LIMIT_EXCEEDED, -EIO, "STATUS_SEMAPHORE_LIMIT_EXCEEDED"}, {STATUS_PORT_ALREADY_SET, -EIO, "STATUS_PORT_ALREADY_SET"}, {STATUS_SECTION_NOT_IMAGE, -EIO, "STATUS_SECTION_NOT_IMAGE"}, {STATUS_SUSPEND_COUNT_EXCEEDED, -EIO, "STATUS_SUSPEND_COUNT_EXCEEDED"}, {STATUS_THREAD_IS_TERMINATING, -EIO, "STATUS_THREAD_IS_TERMINATING"}, {STATUS_BAD_WORKING_SET_LIMIT, -EIO, "STATUS_BAD_WORKING_SET_LIMIT"}, {STATUS_INCOMPATIBLE_FILE_MAP, -EIO, "STATUS_INCOMPATIBLE_FILE_MAP"}, {STATUS_SECTION_PROTECTION, -EIO, "STATUS_SECTION_PROTECTION"}, {STATUS_EAS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_EAS_NOT_SUPPORTED"}, {STATUS_EA_TOO_LARGE, -EIO, "STATUS_EA_TOO_LARGE"}, {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"}, {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"}, {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"}, {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"}, {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"}, {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"}, {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS, "STATUS_CTL_FILE_NOT_SUPPORTED"}, {STATUS_UNKNOWN_REVISION, -EIO, "STATUS_UNKNOWN_REVISION"}, {STATUS_REVISION_MISMATCH, -EIO, "STATUS_REVISION_MISMATCH"}, {STATUS_INVALID_OWNER, -EIO, "STATUS_INVALID_OWNER"}, {STATUS_INVALID_PRIMARY_GROUP, -EIO, "STATUS_INVALID_PRIMARY_GROUP"}, {STATUS_NO_IMPERSONATION_TOKEN, -EIO, "STATUS_NO_IMPERSONATION_TOKEN"}, {STATUS_CANT_DISABLE_MANDATORY, -EIO, "STATUS_CANT_DISABLE_MANDATORY"}, {STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"}, {STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"}, {STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"}, {STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"}, {STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"}, {STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"}, {STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"}, {STATUS_GROUP_EXISTS, -EIO, "STATUS_GROUP_EXISTS"}, {STATUS_NO_SUCH_GROUP, -EIO, "STATUS_NO_SUCH_GROUP"}, {STATUS_MEMBER_IN_GROUP, -EIO, "STATUS_MEMBER_IN_GROUP"}, {STATUS_MEMBER_NOT_IN_GROUP, -EIO, "STATUS_MEMBER_NOT_IN_GROUP"}, {STATUS_LAST_ADMIN, -EIO, "STATUS_LAST_ADMIN"}, {STATUS_WRONG_PASSWORD, -EACCES, "STATUS_WRONG_PASSWORD"}, {STATUS_ILL_FORMED_PASSWORD, -EINVAL, "STATUS_ILL_FORMED_PASSWORD"}, {STATUS_PASSWORD_RESTRICTION, -EACCES, "STATUS_PASSWORD_RESTRICTION"}, {STATUS_LOGON_FAILURE, -EACCES, "STATUS_LOGON_FAILURE"}, {STATUS_ACCOUNT_RESTRICTION, -EACCES, "STATUS_ACCOUNT_RESTRICTION"}, {STATUS_INVALID_LOGON_HOURS, -EACCES, "STATUS_INVALID_LOGON_HOURS"}, {STATUS_INVALID_WORKSTATION, -EACCES, "STATUS_INVALID_WORKSTATION"}, {STATUS_PASSWORD_EXPIRED, -EKEYEXPIRED, "STATUS_PASSWORD_EXPIRED"}, {STATUS_ACCOUNT_DISABLED, -EKEYREVOKED, "STATUS_ACCOUNT_DISABLED"}, {STATUS_NONE_MAPPED, -EIO, "STATUS_NONE_MAPPED"}, {STATUS_TOO_MANY_LUIDS_REQUESTED, -EIO, "STATUS_TOO_MANY_LUIDS_REQUESTED"}, {STATUS_LUIDS_EXHAUSTED, -EIO, "STATUS_LUIDS_EXHAUSTED"}, {STATUS_INVALID_SUB_AUTHORITY, -EIO, "STATUS_INVALID_SUB_AUTHORITY"}, {STATUS_INVALID_ACL, -EIO, "STATUS_INVALID_ACL"}, {STATUS_INVALID_SID, -EIO, "STATUS_INVALID_SID"}, {STATUS_INVALID_SECURITY_DESCR, -EIO, "STATUS_INVALID_SECURITY_DESCR"}, {STATUS_PROCEDURE_NOT_FOUND, -EIO, "STATUS_PROCEDURE_NOT_FOUND"}, {STATUS_INVALID_IMAGE_FORMAT, -EIO, 
"STATUS_INVALID_IMAGE_FORMAT"}, {STATUS_NO_TOKEN, -EIO, "STATUS_NO_TOKEN"}, {STATUS_BAD_INHERITANCE_ACL, -EIO, "STATUS_BAD_INHERITANCE_ACL"}, {STATUS_RANGE_NOT_LOCKED, -EIO, "STATUS_RANGE_NOT_LOCKED"}, {STATUS_DISK_FULL, -ENOSPC, "STATUS_DISK_FULL"}, {STATUS_SERVER_DISABLED, -EIO, "STATUS_SERVER_DISABLED"}, {STATUS_SERVER_NOT_DISABLED, -EIO, "STATUS_SERVER_NOT_DISABLED"}, {STATUS_TOO_MANY_GUIDS_REQUESTED, -EIO, "STATUS_TOO_MANY_GUIDS_REQUESTED"}, {STATUS_GUIDS_EXHAUSTED, -EIO, "STATUS_GUIDS_EXHAUSTED"}, {STATUS_INVALID_ID_AUTHORITY, -EIO, "STATUS_INVALID_ID_AUTHORITY"}, {STATUS_AGENTS_EXHAUSTED, -EIO, "STATUS_AGENTS_EXHAUSTED"}, {STATUS_INVALID_VOLUME_LABEL, -EIO, "STATUS_INVALID_VOLUME_LABEL"}, {STATUS_SECTION_NOT_EXTENDED, -EIO, "STATUS_SECTION_NOT_EXTENDED"}, {STATUS_NOT_MAPPED_DATA, -EIO, "STATUS_NOT_MAPPED_DATA"}, {STATUS_RESOURCE_DATA_NOT_FOUND, -EIO, "STATUS_RESOURCE_DATA_NOT_FOUND"}, {STATUS_RESOURCE_TYPE_NOT_FOUND, -EIO, "STATUS_RESOURCE_TYPE_NOT_FOUND"}, {STATUS_RESOURCE_NAME_NOT_FOUND, -EIO, "STATUS_RESOURCE_NAME_NOT_FOUND"}, {STATUS_ARRAY_BOUNDS_EXCEEDED, -EIO, "STATUS_ARRAY_BOUNDS_EXCEEDED"}, {STATUS_FLOAT_DENORMAL_OPERAND, -EIO, "STATUS_FLOAT_DENORMAL_OPERAND"}, {STATUS_FLOAT_DIVIDE_BY_ZERO, -EIO, "STATUS_FLOAT_DIVIDE_BY_ZERO"}, {STATUS_FLOAT_INEXACT_RESULT, -EIO, "STATUS_FLOAT_INEXACT_RESULT"}, {STATUS_FLOAT_INVALID_OPERATION, -EIO, "STATUS_FLOAT_INVALID_OPERATION"}, {STATUS_FLOAT_OVERFLOW, -EIO, "STATUS_FLOAT_OVERFLOW"}, {STATUS_FLOAT_STACK_CHECK, -EIO, "STATUS_FLOAT_STACK_CHECK"}, {STATUS_FLOAT_UNDERFLOW, -EIO, "STATUS_FLOAT_UNDERFLOW"}, {STATUS_INTEGER_DIVIDE_BY_ZERO, -EIO, "STATUS_INTEGER_DIVIDE_BY_ZERO"}, {STATUS_INTEGER_OVERFLOW, -EIO, "STATUS_INTEGER_OVERFLOW"}, {STATUS_PRIVILEGED_INSTRUCTION, -EIO, "STATUS_PRIVILEGED_INSTRUCTION"}, {STATUS_TOO_MANY_PAGING_FILES, -EIO, "STATUS_TOO_MANY_PAGING_FILES"}, {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"}, {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO, "STATUS_ALLOTTED_SPACE_EXCEEDED"}, {STATUS_INSUFFICIENT_RESOURCES, -EAGAIN, "STATUS_INSUFFICIENT_RESOURCES"}, {STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"}, {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"}, {STATUS_DEVICE_NOT_CONNECTED, -EIO, "STATUS_DEVICE_NOT_CONNECTED"}, {STATUS_DEVICE_POWER_FAILURE, -EIO, "STATUS_DEVICE_POWER_FAILURE"}, {STATUS_FREE_VM_NOT_AT_BASE, -EIO, "STATUS_FREE_VM_NOT_AT_BASE"}, {STATUS_MEMORY_NOT_ALLOCATED, -EFAULT, "STATUS_MEMORY_NOT_ALLOCATED"}, {STATUS_WORKING_SET_QUOTA, -EIO, "STATUS_WORKING_SET_QUOTA"}, {STATUS_MEDIA_WRITE_PROTECTED, -EROFS, "STATUS_MEDIA_WRITE_PROTECTED"}, {STATUS_DEVICE_NOT_READY, -EIO, "STATUS_DEVICE_NOT_READY"}, {STATUS_INVALID_GROUP_ATTRIBUTES, -EIO, "STATUS_INVALID_GROUP_ATTRIBUTES"}, {STATUS_BAD_IMPERSONATION_LEVEL, -EIO, "STATUS_BAD_IMPERSONATION_LEVEL"}, {STATUS_CANT_OPEN_ANONYMOUS, -EIO, "STATUS_CANT_OPEN_ANONYMOUS"}, {STATUS_BAD_VALIDATION_CLASS, -EIO, "STATUS_BAD_VALIDATION_CLASS"}, {STATUS_BAD_TOKEN_TYPE, -EIO, "STATUS_BAD_TOKEN_TYPE"}, {STATUS_BAD_MASTER_BOOT_RECORD, -EIO, "STATUS_BAD_MASTER_BOOT_RECORD"}, {STATUS_INSTRUCTION_MISALIGNMENT, -EIO, "STATUS_INSTRUCTION_MISALIGNMENT"}, {STATUS_INSTANCE_NOT_AVAILABLE, -EIO, "STATUS_INSTANCE_NOT_AVAILABLE"}, {STATUS_PIPE_NOT_AVAILABLE, -EIO, "STATUS_PIPE_NOT_AVAILABLE"}, {STATUS_INVALID_PIPE_STATE, -EIO, "STATUS_INVALID_PIPE_STATE"}, {STATUS_PIPE_BUSY, -EBUSY, "STATUS_PIPE_BUSY"}, {STATUS_ILLEGAL_FUNCTION, -EIO, "STATUS_ILLEGAL_FUNCTION"}, {STATUS_PIPE_DISCONNECTED, -EPIPE, "STATUS_PIPE_DISCONNECTED"}, {STATUS_PIPE_CLOSING, -EIO, 
"STATUS_PIPE_CLOSING"}, {STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"}, {STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"}, {STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"}, {STATUS_IO_TIMEOUT, -EAGAIN, "STATUS_IO_TIMEOUT"}, {STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"}, {STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"}, {STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"}, {STATUS_COULD_NOT_INTERPRET, -EIO, "STATUS_COULD_NOT_INTERPRET"}, {STATUS_FILE_IS_A_DIRECTORY, -EISDIR, "STATUS_FILE_IS_A_DIRECTORY"}, {STATUS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_NOT_SUPPORTED"}, {STATUS_REMOTE_NOT_LISTENING, -EHOSTDOWN, "STATUS_REMOTE_NOT_LISTENING"}, {STATUS_DUPLICATE_NAME, -ENOTUNIQ, "STATUS_DUPLICATE_NAME"}, {STATUS_BAD_NETWORK_PATH, -EINVAL, "STATUS_BAD_NETWORK_PATH"}, {STATUS_NETWORK_BUSY, -EBUSY, "STATUS_NETWORK_BUSY"}, {STATUS_DEVICE_DOES_NOT_EXIST, -ENODEV, "STATUS_DEVICE_DOES_NOT_EXIST"}, {STATUS_TOO_MANY_COMMANDS, -EIO, "STATUS_TOO_MANY_COMMANDS"}, {STATUS_ADAPTER_HARDWARE_ERROR, -EIO, "STATUS_ADAPTER_HARDWARE_ERROR"}, {STATUS_INVALID_NETWORK_RESPONSE, -EIO, "STATUS_INVALID_NETWORK_RESPONSE"}, {STATUS_UNEXPECTED_NETWORK_ERROR, -EIO, "STATUS_UNEXPECTED_NETWORK_ERROR"}, {STATUS_BAD_REMOTE_ADAPTER, -EIO, "STATUS_BAD_REMOTE_ADAPTER"}, {STATUS_PRINT_QUEUE_FULL, -EIO, "STATUS_PRINT_QUEUE_FULL"}, {STATUS_NO_SPOOL_SPACE, -EIO, "STATUS_NO_SPOOL_SPACE"}, {STATUS_PRINT_CANCELLED, -EIO, "STATUS_PRINT_CANCELLED"}, {STATUS_NETWORK_NAME_DELETED, -EREMCHG, "STATUS_NETWORK_NAME_DELETED"}, {STATUS_NETWORK_ACCESS_DENIED, -EACCES, "STATUS_NETWORK_ACCESS_DENIED"}, {STATUS_BAD_DEVICE_TYPE, -EIO, "STATUS_BAD_DEVICE_TYPE"}, {STATUS_BAD_NETWORK_NAME, -ENOENT, "STATUS_BAD_NETWORK_NAME"}, {STATUS_TOO_MANY_NAMES, -EIO, "STATUS_TOO_MANY_NAMES"}, {STATUS_TOO_MANY_SESSIONS, -EIO, "STATUS_TOO_MANY_SESSIONS"}, {STATUS_SHARING_PAUSED, -EIO, "STATUS_SHARING_PAUSED"}, {STATUS_REQUEST_NOT_ACCEPTED, -EIO, "STATUS_REQUEST_NOT_ACCEPTED"}, {STATUS_REDIRECTOR_PAUSED, -EIO, "STATUS_REDIRECTOR_PAUSED"}, {STATUS_NET_WRITE_FAULT, -EIO, "STATUS_NET_WRITE_FAULT"}, {STATUS_PROFILING_AT_LIMIT, -EIO, "STATUS_PROFILING_AT_LIMIT"}, {STATUS_NOT_SAME_DEVICE, -EXDEV, "STATUS_NOT_SAME_DEVICE"}, {STATUS_FILE_RENAMED, -EIO, "STATUS_FILE_RENAMED"}, {STATUS_VIRTUAL_CIRCUIT_CLOSED, -EIO, "STATUS_VIRTUAL_CIRCUIT_CLOSED"}, {STATUS_NO_SECURITY_ON_OBJECT, -EIO, "STATUS_NO_SECURITY_ON_OBJECT"}, {STATUS_CANT_WAIT, -EIO, "STATUS_CANT_WAIT"}, {STATUS_PIPE_EMPTY, -EIO, "STATUS_PIPE_EMPTY"}, {STATUS_CANT_ACCESS_DOMAIN_INFO, -EIO, "STATUS_CANT_ACCESS_DOMAIN_INFO"}, {STATUS_CANT_TERMINATE_SELF, -EIO, "STATUS_CANT_TERMINATE_SELF"}, {STATUS_INVALID_SERVER_STATE, -EIO, "STATUS_INVALID_SERVER_STATE"}, {STATUS_INVALID_DOMAIN_STATE, -EIO, "STATUS_INVALID_DOMAIN_STATE"}, {STATUS_INVALID_DOMAIN_ROLE, -EIO, "STATUS_INVALID_DOMAIN_ROLE"}, {STATUS_NO_SUCH_DOMAIN, -EIO, "STATUS_NO_SUCH_DOMAIN"}, {STATUS_DOMAIN_EXISTS, -EIO, "STATUS_DOMAIN_EXISTS"}, {STATUS_DOMAIN_LIMIT_EXCEEDED, -EIO, "STATUS_DOMAIN_LIMIT_EXCEEDED"}, {STATUS_OPLOCK_NOT_GRANTED, -EIO, "STATUS_OPLOCK_NOT_GRANTED"}, {STATUS_INVALID_OPLOCK_PROTOCOL, -EIO, "STATUS_INVALID_OPLOCK_PROTOCOL"}, {STATUS_INTERNAL_DB_CORRUPTION, -EIO, "STATUS_INTERNAL_DB_CORRUPTION"}, {STATUS_INTERNAL_ERROR, -EIO, "STATUS_INTERNAL_ERROR"}, {STATUS_GENERIC_NOT_MAPPED, -EIO, "STATUS_GENERIC_NOT_MAPPED"}, {STATUS_BAD_DESCRIPTOR_FORMAT, -EIO, "STATUS_BAD_DESCRIPTOR_FORMAT"}, {STATUS_INVALID_USER_BUFFER, -EIO, "STATUS_INVALID_USER_BUFFER"}, {STATUS_UNEXPECTED_IO_ERROR, 
-EIO, "STATUS_UNEXPECTED_IO_ERROR"}, {STATUS_UNEXPECTED_MM_CREATE_ERR, -EIO, "STATUS_UNEXPECTED_MM_CREATE_ERR"}, {STATUS_UNEXPECTED_MM_MAP_ERROR, -EIO, "STATUS_UNEXPECTED_MM_MAP_ERROR"}, {STATUS_UNEXPECTED_MM_EXTEND_ERR, -EIO, "STATUS_UNEXPECTED_MM_EXTEND_ERR"}, {STATUS_NOT_LOGON_PROCESS, -EIO, "STATUS_NOT_LOGON_PROCESS"}, {STATUS_LOGON_SESSION_EXISTS, -EIO, "STATUS_LOGON_SESSION_EXISTS"}, {STATUS_INVALID_PARAMETER_1, -EINVAL, "STATUS_INVALID_PARAMETER_1"}, {STATUS_INVALID_PARAMETER_2, -EINVAL, "STATUS_INVALID_PARAMETER_2"}, {STATUS_INVALID_PARAMETER_3, -EINVAL, "STATUS_INVALID_PARAMETER_3"}, {STATUS_INVALID_PARAMETER_4, -EINVAL, "STATUS_INVALID_PARAMETER_4"}, {STATUS_INVALID_PARAMETER_5, -EINVAL, "STATUS_INVALID_PARAMETER_5"}, {STATUS_INVALID_PARAMETER_6, -EINVAL, "STATUS_INVALID_PARAMETER_6"}, {STATUS_INVALID_PARAMETER_7, -EINVAL, "STATUS_INVALID_PARAMETER_7"}, {STATUS_INVALID_PARAMETER_8, -EINVAL, "STATUS_INVALID_PARAMETER_8"}, {STATUS_INVALID_PARAMETER_9, -EINVAL, "STATUS_INVALID_PARAMETER_9"}, {STATUS_INVALID_PARAMETER_10, -EINVAL, "STATUS_INVALID_PARAMETER_10"}, {STATUS_INVALID_PARAMETER_11, -EINVAL, "STATUS_INVALID_PARAMETER_11"}, {STATUS_INVALID_PARAMETER_12, -EINVAL, "STATUS_INVALID_PARAMETER_12"}, {STATUS_REDIRECTOR_NOT_STARTED, -EIO, "STATUS_REDIRECTOR_NOT_STARTED"}, {STATUS_REDIRECTOR_STARTED, -EIO, "STATUS_REDIRECTOR_STARTED"}, {STATUS_STACK_OVERFLOW, -EIO, "STATUS_STACK_OVERFLOW"}, {STATUS_NO_SUCH_PACKAGE, -EIO, "STATUS_NO_SUCH_PACKAGE"}, {STATUS_BAD_FUNCTION_TABLE, -EIO, "STATUS_BAD_FUNCTION_TABLE"}, {STATUS_VARIABLE_NOT_FOUND, -EIO, "STATUS_VARIABLE_NOT_FOUND"}, {STATUS_DIRECTORY_NOT_EMPTY, -ENOTEMPTY, "STATUS_DIRECTORY_NOT_EMPTY"}, {STATUS_FILE_CORRUPT_ERROR, -EIO, "STATUS_FILE_CORRUPT_ERROR"}, {STATUS_NOT_A_DIRECTORY, -ENOTDIR, "STATUS_NOT_A_DIRECTORY"}, {STATUS_BAD_LOGON_SESSION_STATE, -EIO, "STATUS_BAD_LOGON_SESSION_STATE"}, {STATUS_LOGON_SESSION_COLLISION, -EIO, "STATUS_LOGON_SESSION_COLLISION"}, {STATUS_NAME_TOO_LONG, -ENAMETOOLONG, "STATUS_NAME_TOO_LONG"}, {STATUS_FILES_OPEN, -EIO, "STATUS_FILES_OPEN"}, {STATUS_CONNECTION_IN_USE, -EIO, "STATUS_CONNECTION_IN_USE"}, {STATUS_MESSAGE_NOT_FOUND, -EIO, "STATUS_MESSAGE_NOT_FOUND"}, {STATUS_PROCESS_IS_TERMINATING, -EIO, "STATUS_PROCESS_IS_TERMINATING"}, {STATUS_INVALID_LOGON_TYPE, -EIO, "STATUS_INVALID_LOGON_TYPE"}, {STATUS_NO_GUID_TRANSLATION, -EIO, "STATUS_NO_GUID_TRANSLATION"}, {STATUS_CANNOT_IMPERSONATE, -EIO, "STATUS_CANNOT_IMPERSONATE"}, {STATUS_IMAGE_ALREADY_LOADED, -EIO, "STATUS_IMAGE_ALREADY_LOADED"}, {STATUS_ABIOS_NOT_PRESENT, -EIO, "STATUS_ABIOS_NOT_PRESENT"}, {STATUS_ABIOS_LID_NOT_EXIST, -EIO, "STATUS_ABIOS_LID_NOT_EXIST"}, {STATUS_ABIOS_LID_ALREADY_OWNED, -EIO, "STATUS_ABIOS_LID_ALREADY_OWNED"}, {STATUS_ABIOS_NOT_LID_OWNER, -EIO, "STATUS_ABIOS_NOT_LID_OWNER"}, {STATUS_ABIOS_INVALID_COMMAND, -EIO, "STATUS_ABIOS_INVALID_COMMAND"}, {STATUS_ABIOS_INVALID_LID, -EIO, "STATUS_ABIOS_INVALID_LID"}, {STATUS_ABIOS_SELECTOR_NOT_AVAILABLE, -EIO, "STATUS_ABIOS_SELECTOR_NOT_AVAILABLE"}, {STATUS_ABIOS_INVALID_SELECTOR, -EIO, "STATUS_ABIOS_INVALID_SELECTOR"}, {STATUS_NO_LDT, -EIO, "STATUS_NO_LDT"}, {STATUS_INVALID_LDT_SIZE, -EIO, "STATUS_INVALID_LDT_SIZE"}, {STATUS_INVALID_LDT_OFFSET, -EIO, "STATUS_INVALID_LDT_OFFSET"}, {STATUS_INVALID_LDT_DESCRIPTOR, -EIO, "STATUS_INVALID_LDT_DESCRIPTOR"}, {STATUS_INVALID_IMAGE_NE_FORMAT, -EIO, "STATUS_INVALID_IMAGE_NE_FORMAT"}, {STATUS_RXACT_INVALID_STATE, -EIO, "STATUS_RXACT_INVALID_STATE"}, {STATUS_RXACT_COMMIT_FAILURE, -EIO, "STATUS_RXACT_COMMIT_FAILURE"}, {STATUS_MAPPED_FILE_SIZE_ZERO, 
-EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"}, {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"}, {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"}, {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"}, {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"}, {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"}, {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"}, {STATUS_SPECIAL_GROUP, -EIO, "STATUS_SPECIAL_GROUP"}, {STATUS_SPECIAL_USER, -EIO, "STATUS_SPECIAL_USER"}, {STATUS_MEMBERS_PRIMARY_GROUP, -EIO, "STATUS_MEMBERS_PRIMARY_GROUP"}, {STATUS_FILE_CLOSED, -EBADF, "STATUS_FILE_CLOSED"}, {STATUS_TOO_MANY_THREADS, -EIO, "STATUS_TOO_MANY_THREADS"}, {STATUS_THREAD_NOT_IN_PROCESS, -EIO, "STATUS_THREAD_NOT_IN_PROCESS"}, {STATUS_TOKEN_ALREADY_IN_USE, -EIO, "STATUS_TOKEN_ALREADY_IN_USE"}, {STATUS_PAGEFILE_QUOTA_EXCEEDED, -EDQUOT, "STATUS_PAGEFILE_QUOTA_EXCEEDED"}, {STATUS_COMMITMENT_LIMIT, -EIO, "STATUS_COMMITMENT_LIMIT"}, {STATUS_INVALID_IMAGE_LE_FORMAT, -EIO, "STATUS_INVALID_IMAGE_LE_FORMAT"}, {STATUS_INVALID_IMAGE_NOT_MZ, -EIO, "STATUS_INVALID_IMAGE_NOT_MZ"}, {STATUS_INVALID_IMAGE_PROTECT, -EIO, "STATUS_INVALID_IMAGE_PROTECT"}, {STATUS_INVALID_IMAGE_WIN_16, -EIO, "STATUS_INVALID_IMAGE_WIN_16"}, {STATUS_LOGON_SERVER_CONFLICT, -EIO, "STATUS_LOGON_SERVER_CONFLICT"}, {STATUS_TIME_DIFFERENCE_AT_DC, -EIO, "STATUS_TIME_DIFFERENCE_AT_DC"}, {STATUS_SYNCHRONIZATION_REQUIRED, -EIO, "STATUS_SYNCHRONIZATION_REQUIRED"}, {STATUS_DLL_NOT_FOUND, -ENOENT, "STATUS_DLL_NOT_FOUND"}, {STATUS_OPEN_FAILED, -EIO, "STATUS_OPEN_FAILED"}, {STATUS_IO_PRIVILEGE_FAILED, -EIO, "STATUS_IO_PRIVILEGE_FAILED"}, {STATUS_ORDINAL_NOT_FOUND, -EIO, "STATUS_ORDINAL_NOT_FOUND"}, {STATUS_ENTRYPOINT_NOT_FOUND, -EIO, "STATUS_ENTRYPOINT_NOT_FOUND"}, {STATUS_CONTROL_C_EXIT, -EIO, "STATUS_CONTROL_C_EXIT"}, {STATUS_LOCAL_DISCONNECT, -EIO, "STATUS_LOCAL_DISCONNECT"}, {STATUS_REMOTE_DISCONNECT, -ESHUTDOWN, "STATUS_REMOTE_DISCONNECT"}, {STATUS_REMOTE_RESOURCES, -EIO, "STATUS_REMOTE_RESOURCES"}, {STATUS_LINK_FAILED, -EXDEV, "STATUS_LINK_FAILED"}, {STATUS_LINK_TIMEOUT, -ETIMEDOUT, "STATUS_LINK_TIMEOUT"}, {STATUS_INVALID_CONNECTION, -EIO, "STATUS_INVALID_CONNECTION"}, {STATUS_INVALID_ADDRESS, -EIO, "STATUS_INVALID_ADDRESS"}, {STATUS_DLL_INIT_FAILED, -EIO, "STATUS_DLL_INIT_FAILED"}, {STATUS_MISSING_SYSTEMFILE, -EIO, "STATUS_MISSING_SYSTEMFILE"}, {STATUS_UNHANDLED_EXCEPTION, -EIO, "STATUS_UNHANDLED_EXCEPTION"}, {STATUS_APP_INIT_FAILURE, -EIO, "STATUS_APP_INIT_FAILURE"}, {STATUS_PAGEFILE_CREATE_FAILED, -EIO, "STATUS_PAGEFILE_CREATE_FAILED"}, {STATUS_NO_PAGEFILE, -EIO, "STATUS_NO_PAGEFILE"}, {STATUS_INVALID_LEVEL, -EIO, "STATUS_INVALID_LEVEL"}, {STATUS_WRONG_PASSWORD_CORE, -EIO, "STATUS_WRONG_PASSWORD_CORE"}, {STATUS_ILLEGAL_FLOAT_CONTEXT, -EIO, "STATUS_ILLEGAL_FLOAT_CONTEXT"}, {STATUS_PIPE_BROKEN, -EPIPE, "STATUS_PIPE_BROKEN"}, {STATUS_REGISTRY_CORRUPT, -EIO, "STATUS_REGISTRY_CORRUPT"}, {STATUS_REGISTRY_IO_FAILED, -EIO, "STATUS_REGISTRY_IO_FAILED"}, {STATUS_NO_EVENT_PAIR, -EIO, "STATUS_NO_EVENT_PAIR"}, {STATUS_UNRECOGNIZED_VOLUME, -EIO, "STATUS_UNRECOGNIZED_VOLUME"}, {STATUS_SERIAL_NO_DEVICE_INITED, -EIO, "STATUS_SERIAL_NO_DEVICE_INITED"}, {STATUS_NO_SUCH_ALIAS, -EIO, "STATUS_NO_SUCH_ALIAS"}, {STATUS_MEMBER_NOT_IN_ALIAS, -EIO, "STATUS_MEMBER_NOT_IN_ALIAS"}, {STATUS_MEMBER_IN_ALIAS, -EIO, "STATUS_MEMBER_IN_ALIAS"}, {STATUS_ALIAS_EXISTS, -EIO, "STATUS_ALIAS_EXISTS"}, {STATUS_LOGON_NOT_GRANTED, -EIO, "STATUS_LOGON_NOT_GRANTED"}, {STATUS_TOO_MANY_SECRETS, -EIO, "STATUS_TOO_MANY_SECRETS"}, {STATUS_SECRET_TOO_LONG, -EIO, 
"STATUS_SECRET_TOO_LONG"}, {STATUS_INTERNAL_DB_ERROR, -EIO, "STATUS_INTERNAL_DB_ERROR"}, {STATUS_FULLSCREEN_MODE, -EIO, "STATUS_FULLSCREEN_MODE"}, {STATUS_TOO_MANY_CONTEXT_IDS, -EIO, "STATUS_TOO_MANY_CONTEXT_IDS"}, {STATUS_LOGON_TYPE_NOT_GRANTED, -EIO, "STATUS_LOGON_TYPE_NOT_GRANTED"}, {STATUS_NOT_REGISTRY_FILE, -EIO, "STATUS_NOT_REGISTRY_FILE"}, {STATUS_NT_CROSS_ENCRYPTION_REQUIRED, -EIO, "STATUS_NT_CROSS_ENCRYPTION_REQUIRED"}, {STATUS_DOMAIN_CTRLR_CONFIG_ERROR, -EIO, "STATUS_DOMAIN_CTRLR_CONFIG_ERROR"}, {STATUS_FT_MISSING_MEMBER, -EIO, "STATUS_FT_MISSING_MEMBER"}, {STATUS_ILL_FORMED_SERVICE_ENTRY, -EIO, "STATUS_ILL_FORMED_SERVICE_ENTRY"}, {STATUS_ILLEGAL_CHARACTER, -EIO, "STATUS_ILLEGAL_CHARACTER"}, {STATUS_UNMAPPABLE_CHARACTER, -EIO, "STATUS_UNMAPPABLE_CHARACTER"}, {STATUS_UNDEFINED_CHARACTER, -EIO, "STATUS_UNDEFINED_CHARACTER"}, {STATUS_FLOPPY_VOLUME, -EIO, "STATUS_FLOPPY_VOLUME"}, {STATUS_FLOPPY_ID_MARK_NOT_FOUND, -EIO, "STATUS_FLOPPY_ID_MARK_NOT_FOUND"}, {STATUS_FLOPPY_WRONG_CYLINDER, -EIO, "STATUS_FLOPPY_WRONG_CYLINDER"}, {STATUS_FLOPPY_UNKNOWN_ERROR, -EIO, "STATUS_FLOPPY_UNKNOWN_ERROR"}, {STATUS_FLOPPY_BAD_REGISTERS, -EIO, "STATUS_FLOPPY_BAD_REGISTERS"}, {STATUS_DISK_RECALIBRATE_FAILED, -EIO, "STATUS_DISK_RECALIBRATE_FAILED"}, {STATUS_DISK_OPERATION_FAILED, -EIO, "STATUS_DISK_OPERATION_FAILED"}, {STATUS_DISK_RESET_FAILED, -EIO, "STATUS_DISK_RESET_FAILED"}, {STATUS_SHARED_IRQ_BUSY, -EBUSY, "STATUS_SHARED_IRQ_BUSY"}, {STATUS_FT_ORPHANING, -EIO, "STATUS_FT_ORPHANING"}, {STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT, -EIO, "STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT"}, {STATUS_PARTITION_FAILURE, -EIO, "STATUS_PARTITION_FAILURE"}, {STATUS_INVALID_BLOCK_LENGTH, -EIO, "STATUS_INVALID_BLOCK_LENGTH"}, {STATUS_DEVICE_NOT_PARTITIONED, -EIO, "STATUS_DEVICE_NOT_PARTITIONED"}, {STATUS_UNABLE_TO_LOCK_MEDIA, -EIO, "STATUS_UNABLE_TO_LOCK_MEDIA"}, {STATUS_UNABLE_TO_UNLOAD_MEDIA, -EIO, "STATUS_UNABLE_TO_UNLOAD_MEDIA"}, {STATUS_EOM_OVERFLOW, -EIO, "STATUS_EOM_OVERFLOW"}, {STATUS_NO_MEDIA, -EIO, "STATUS_NO_MEDIA"}, {STATUS_NO_SUCH_MEMBER, -EIO, "STATUS_NO_SUCH_MEMBER"}, {STATUS_INVALID_MEMBER, -EIO, "STATUS_INVALID_MEMBER"}, {STATUS_KEY_DELETED, -EIO, "STATUS_KEY_DELETED"}, {STATUS_NO_LOG_SPACE, -EIO, "STATUS_NO_LOG_SPACE"}, {STATUS_TOO_MANY_SIDS, -EIO, "STATUS_TOO_MANY_SIDS"}, {STATUS_LM_CROSS_ENCRYPTION_REQUIRED, -EIO, "STATUS_LM_CROSS_ENCRYPTION_REQUIRED"}, {STATUS_KEY_HAS_CHILDREN, -EIO, "STATUS_KEY_HAS_CHILDREN"}, {STATUS_CHILD_MUST_BE_VOLATILE, -EIO, "STATUS_CHILD_MUST_BE_VOLATILE"}, {STATUS_DEVICE_CONFIGURATION_ERROR, -EIO, "STATUS_DEVICE_CONFIGURATION_ERROR"}, {STATUS_DRIVER_INTERNAL_ERROR, -EIO, "STATUS_DRIVER_INTERNAL_ERROR"}, {STATUS_INVALID_DEVICE_STATE, -EIO, "STATUS_INVALID_DEVICE_STATE"}, {STATUS_IO_DEVICE_ERROR, -EIO, "STATUS_IO_DEVICE_ERROR"}, {STATUS_DEVICE_PROTOCOL_ERROR, -EIO, "STATUS_DEVICE_PROTOCOL_ERROR"}, {STATUS_BACKUP_CONTROLLER, -EIO, "STATUS_BACKUP_CONTROLLER"}, {STATUS_LOG_FILE_FULL, -EIO, "STATUS_LOG_FILE_FULL"}, {STATUS_TOO_LATE, -EIO, "STATUS_TOO_LATE"}, {STATUS_NO_TRUST_LSA_SECRET, -EIO, "STATUS_NO_TRUST_LSA_SECRET"}, {STATUS_NO_TRUST_SAM_ACCOUNT, -EIO, "STATUS_NO_TRUST_SAM_ACCOUNT"}, {STATUS_TRUSTED_DOMAIN_FAILURE, -EIO, "STATUS_TRUSTED_DOMAIN_FAILURE"}, {STATUS_TRUSTED_RELATIONSHIP_FAILURE, -EIO, "STATUS_TRUSTED_RELATIONSHIP_FAILURE"}, {STATUS_EVENTLOG_FILE_CORRUPT, -EIO, "STATUS_EVENTLOG_FILE_CORRUPT"}, {STATUS_EVENTLOG_CANT_START, -EIO, "STATUS_EVENTLOG_CANT_START"}, {STATUS_TRUST_FAILURE, -EIO, "STATUS_TRUST_FAILURE"}, {STATUS_MUTANT_LIMIT_EXCEEDED, -EIO, 
"STATUS_MUTANT_LIMIT_EXCEEDED"}, {STATUS_NETLOGON_NOT_STARTED, -EIO, "STATUS_NETLOGON_NOT_STARTED"}, {STATUS_ACCOUNT_EXPIRED, -EKEYEXPIRED, "STATUS_ACCOUNT_EXPIRED"}, {STATUS_POSSIBLE_DEADLOCK, -EIO, "STATUS_POSSIBLE_DEADLOCK"}, {STATUS_NETWORK_CREDENTIAL_CONFLICT, -EIO, "STATUS_NETWORK_CREDENTIAL_CONFLICT"}, {STATUS_REMOTE_SESSION_LIMIT, -EIO, "STATUS_REMOTE_SESSION_LIMIT"}, {STATUS_EVENTLOG_FILE_CHANGED, -EIO, "STATUS_EVENTLOG_FILE_CHANGED"}, {STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT, -EIO, "STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT"}, {STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT, -EIO, "STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT"}, {STATUS_NOLOGON_SERVER_TRUST_ACCOUNT, -EIO, "STATUS_NOLOGON_SERVER_TRUST_ACCOUNT"}, {STATUS_DOMAIN_TRUST_INCONSISTENT, -EIO, "STATUS_DOMAIN_TRUST_INCONSISTENT"}, {STATUS_FS_DRIVER_REQUIRED, -EOPNOTSUPP, "STATUS_FS_DRIVER_REQUIRED"}, {STATUS_IMAGE_ALREADY_LOADED_AS_DLL, -EIO, "STATUS_IMAGE_ALREADY_LOADED_AS_DLL"}, {STATUS_NETWORK_OPEN_RESTRICTION, -EIO, "STATUS_NETWORK_OPEN_RESTRICTION"}, {STATUS_NO_USER_SESSION_KEY, -EIO, "STATUS_NO_USER_SESSION_KEY"}, {STATUS_USER_SESSION_DELETED, -EIO, "STATUS_USER_SESSION_DELETED"}, {STATUS_RESOURCE_LANG_NOT_FOUND, -EIO, "STATUS_RESOURCE_LANG_NOT_FOUND"}, {STATUS_INSUFF_SERVER_RESOURCES, -EIO, "STATUS_INSUFF_SERVER_RESOURCES"}, {STATUS_INVALID_BUFFER_SIZE, -EIO, "STATUS_INVALID_BUFFER_SIZE"}, {STATUS_INVALID_ADDRESS_COMPONENT, -EIO, "STATUS_INVALID_ADDRESS_COMPONENT"}, {STATUS_INVALID_ADDRESS_WILDCARD, -EIO, "STATUS_INVALID_ADDRESS_WILDCARD"}, {STATUS_TOO_MANY_ADDRESSES, -EIO, "STATUS_TOO_MANY_ADDRESSES"}, {STATUS_ADDRESS_ALREADY_EXISTS, -EADDRINUSE, "STATUS_ADDRESS_ALREADY_EXISTS"}, {STATUS_ADDRESS_CLOSED, -EIO, "STATUS_ADDRESS_CLOSED"}, {STATUS_CONNECTION_DISCONNECTED, -ECONNABORTED, "STATUS_CONNECTION_DISCONNECTED"}, {STATUS_CONNECTION_RESET, -ENETRESET, "STATUS_CONNECTION_RESET"}, {STATUS_TOO_MANY_NODES, -EIO, "STATUS_TOO_MANY_NODES"}, {STATUS_TRANSACTION_ABORTED, -EIO, "STATUS_TRANSACTION_ABORTED"}, {STATUS_TRANSACTION_TIMED_OUT, -EIO, "STATUS_TRANSACTION_TIMED_OUT"}, {STATUS_TRANSACTION_NO_RELEASE, -EIO, "STATUS_TRANSACTION_NO_RELEASE"}, {STATUS_TRANSACTION_NO_MATCH, -EIO, "STATUS_TRANSACTION_NO_MATCH"}, {STATUS_TRANSACTION_RESPONDED, -EIO, "STATUS_TRANSACTION_RESPONDED"}, {STATUS_TRANSACTION_INVALID_ID, -EIO, "STATUS_TRANSACTION_INVALID_ID"}, {STATUS_TRANSACTION_INVALID_TYPE, -EIO, "STATUS_TRANSACTION_INVALID_TYPE"}, {STATUS_NOT_SERVER_SESSION, -EIO, "STATUS_NOT_SERVER_SESSION"}, {STATUS_NOT_CLIENT_SESSION, -EIO, "STATUS_NOT_CLIENT_SESSION"}, {STATUS_CANNOT_LOAD_REGISTRY_FILE, -EIO, "STATUS_CANNOT_LOAD_REGISTRY_FILE"}, {STATUS_DEBUG_ATTACH_FAILED, -EIO, "STATUS_DEBUG_ATTACH_FAILED"}, {STATUS_SYSTEM_PROCESS_TERMINATED, -EIO, "STATUS_SYSTEM_PROCESS_TERMINATED"}, {STATUS_DATA_NOT_ACCEPTED, -EIO, "STATUS_DATA_NOT_ACCEPTED"}, {STATUS_NO_BROWSER_SERVERS_FOUND, -EIO, "STATUS_NO_BROWSER_SERVERS_FOUND"}, {STATUS_VDM_HARD_ERROR, -EIO, "STATUS_VDM_HARD_ERROR"}, {STATUS_DRIVER_CANCEL_TIMEOUT, -EIO, "STATUS_DRIVER_CANCEL_TIMEOUT"}, {STATUS_REPLY_MESSAGE_MISMATCH, -EIO, "STATUS_REPLY_MESSAGE_MISMATCH"}, {STATUS_MAPPED_ALIGNMENT, -EIO, "STATUS_MAPPED_ALIGNMENT"}, {STATUS_IMAGE_CHECKSUM_MISMATCH, -EIO, "STATUS_IMAGE_CHECKSUM_MISMATCH"}, {STATUS_LOST_WRITEBEHIND_DATA, -EIO, "STATUS_LOST_WRITEBEHIND_DATA"}, {STATUS_CLIENT_SERVER_PARAMETERS_INVALID, -EIO, "STATUS_CLIENT_SERVER_PARAMETERS_INVALID"}, {STATUS_PASSWORD_MUST_CHANGE, -EIO, "STATUS_PASSWORD_MUST_CHANGE"}, {STATUS_NOT_FOUND, -ENOENT, "STATUS_NOT_FOUND"}, {STATUS_NOT_TINY_STREAM, 
-EIO, "STATUS_NOT_TINY_STREAM"}, {STATUS_RECOVERY_FAILURE, -EIO, "STATUS_RECOVERY_FAILURE"}, {STATUS_STACK_OVERFLOW_READ, -EIO, "STATUS_STACK_OVERFLOW_READ"}, {STATUS_FAIL_CHECK, -EIO, "STATUS_FAIL_CHECK"}, {STATUS_DUPLICATE_OBJECTID, -EIO, "STATUS_DUPLICATE_OBJECTID"}, {STATUS_OBJECTID_EXISTS, -EIO, "STATUS_OBJECTID_EXISTS"}, {STATUS_CONVERT_TO_LARGE, -EIO, "STATUS_CONVERT_TO_LARGE"}, {STATUS_RETRY, -EAGAIN, "STATUS_RETRY"}, {STATUS_FOUND_OUT_OF_SCOPE, -EIO, "STATUS_FOUND_OUT_OF_SCOPE"}, {STATUS_ALLOCATE_BUCKET, -EIO, "STATUS_ALLOCATE_BUCKET"}, {STATUS_PROPSET_NOT_FOUND, -EIO, "STATUS_PROPSET_NOT_FOUND"}, {STATUS_MARSHALL_OVERFLOW, -EIO, "STATUS_MARSHALL_OVERFLOW"}, {STATUS_INVALID_VARIANT, -EIO, "STATUS_INVALID_VARIANT"}, {STATUS_DOMAIN_CONTROLLER_NOT_FOUND, -EIO, "STATUS_DOMAIN_CONTROLLER_NOT_FOUND"}, {STATUS_ACCOUNT_LOCKED_OUT, -EACCES, "STATUS_ACCOUNT_LOCKED_OUT"}, {STATUS_HANDLE_NOT_CLOSABLE, -EIO, "STATUS_HANDLE_NOT_CLOSABLE"}, {STATUS_CONNECTION_REFUSED, -EIO, "STATUS_CONNECTION_REFUSED"}, {STATUS_GRACEFUL_DISCONNECT, -EIO, "STATUS_GRACEFUL_DISCONNECT"}, {STATUS_ADDRESS_ALREADY_ASSOCIATED, -EIO, "STATUS_ADDRESS_ALREADY_ASSOCIATED"}, {STATUS_ADDRESS_NOT_ASSOCIATED, -EIO, "STATUS_ADDRESS_NOT_ASSOCIATED"}, {STATUS_CONNECTION_INVALID, -EIO, "STATUS_CONNECTION_INVALID"}, {STATUS_CONNECTION_ACTIVE, -EIO, "STATUS_CONNECTION_ACTIVE"}, {STATUS_NETWORK_UNREACHABLE, -ENETUNREACH, "STATUS_NETWORK_UNREACHABLE"}, {STATUS_HOST_UNREACHABLE, -EHOSTDOWN, "STATUS_HOST_UNREACHABLE"}, {STATUS_PROTOCOL_UNREACHABLE, -ENETUNREACH, "STATUS_PROTOCOL_UNREACHABLE"}, {STATUS_PORT_UNREACHABLE, -ENETUNREACH, "STATUS_PORT_UNREACHABLE"}, {STATUS_REQUEST_ABORTED, -EIO, "STATUS_REQUEST_ABORTED"}, {STATUS_CONNECTION_ABORTED, -ECONNABORTED, "STATUS_CONNECTION_ABORTED"}, {STATUS_BAD_COMPRESSION_BUFFER, -EIO, "STATUS_BAD_COMPRESSION_BUFFER"}, {STATUS_USER_MAPPED_FILE, -EIO, "STATUS_USER_MAPPED_FILE"}, {STATUS_AUDIT_FAILED, -EIO, "STATUS_AUDIT_FAILED"}, {STATUS_TIMER_RESOLUTION_NOT_SET, -EIO, "STATUS_TIMER_RESOLUTION_NOT_SET"}, {STATUS_CONNECTION_COUNT_LIMIT, -EIO, "STATUS_CONNECTION_COUNT_LIMIT"}, {STATUS_LOGIN_TIME_RESTRICTION, -EACCES, "STATUS_LOGIN_TIME_RESTRICTION"}, {STATUS_LOGIN_WKSTA_RESTRICTION, -EACCES, "STATUS_LOGIN_WKSTA_RESTRICTION"}, {STATUS_IMAGE_MP_UP_MISMATCH, -EIO, "STATUS_IMAGE_MP_UP_MISMATCH"}, {STATUS_INSUFFICIENT_LOGON_INFO, -EIO, "STATUS_INSUFFICIENT_LOGON_INFO"}, {STATUS_BAD_DLL_ENTRYPOINT, -EIO, "STATUS_BAD_DLL_ENTRYPOINT"}, {STATUS_BAD_SERVICE_ENTRYPOINT, -EIO, "STATUS_BAD_SERVICE_ENTRYPOINT"}, {STATUS_LPC_REPLY_LOST, -EIO, "STATUS_LPC_REPLY_LOST"}, {STATUS_IP_ADDRESS_CONFLICT1, -EIO, "STATUS_IP_ADDRESS_CONFLICT1"}, {STATUS_IP_ADDRESS_CONFLICT2, -EIO, "STATUS_IP_ADDRESS_CONFLICT2"}, {STATUS_REGISTRY_QUOTA_LIMIT, -EDQUOT, "STATUS_REGISTRY_QUOTA_LIMIT"}, {STATUS_PATH_NOT_COVERED, -EREMOTE, "STATUS_PATH_NOT_COVERED"}, {STATUS_NO_CALLBACK_ACTIVE, -EIO, "STATUS_NO_CALLBACK_ACTIVE"}, {STATUS_LICENSE_QUOTA_EXCEEDED, -EACCES, "STATUS_LICENSE_QUOTA_EXCEEDED"}, {STATUS_PWD_TOO_SHORT, -EIO, "STATUS_PWD_TOO_SHORT"}, {STATUS_PWD_TOO_RECENT, -EIO, "STATUS_PWD_TOO_RECENT"}, {STATUS_PWD_HISTORY_CONFLICT, -EIO, "STATUS_PWD_HISTORY_CONFLICT"}, {STATUS_PLUGPLAY_NO_DEVICE, -EIO, "STATUS_PLUGPLAY_NO_DEVICE"}, {STATUS_UNSUPPORTED_COMPRESSION, -EIO, "STATUS_UNSUPPORTED_COMPRESSION"}, {STATUS_INVALID_HW_PROFILE, -EIO, "STATUS_INVALID_HW_PROFILE"}, {STATUS_INVALID_PLUGPLAY_DEVICE_PATH, -EIO, "STATUS_INVALID_PLUGPLAY_DEVICE_PATH"}, {STATUS_DRIVER_ORDINAL_NOT_FOUND, -EIO, "STATUS_DRIVER_ORDINAL_NOT_FOUND"}, 
{STATUS_DRIVER_ENTRYPOINT_NOT_FOUND, -EIO, "STATUS_DRIVER_ENTRYPOINT_NOT_FOUND"}, {STATUS_RESOURCE_NOT_OWNED, -EIO, "STATUS_RESOURCE_NOT_OWNED"}, {STATUS_TOO_MANY_LINKS, -EMLINK, "STATUS_TOO_MANY_LINKS"}, {STATUS_QUOTA_LIST_INCONSISTENT, -EIO, "STATUS_QUOTA_LIST_INCONSISTENT"}, {STATUS_FILE_IS_OFFLINE, -EIO, "STATUS_FILE_IS_OFFLINE"}, {STATUS_EVALUATION_EXPIRATION, -EIO, "STATUS_EVALUATION_EXPIRATION"}, {STATUS_ILLEGAL_DLL_RELOCATION, -EIO, "STATUS_ILLEGAL_DLL_RELOCATION"}, {STATUS_LICENSE_VIOLATION, -EIO, "STATUS_LICENSE_VIOLATION"}, {STATUS_DLL_INIT_FAILED_LOGOFF, -EIO, "STATUS_DLL_INIT_FAILED_LOGOFF"}, {STATUS_DRIVER_UNABLE_TO_LOAD, -EIO, "STATUS_DRIVER_UNABLE_TO_LOAD"}, {STATUS_DFS_UNAVAILABLE, -EIO, "STATUS_DFS_UNAVAILABLE"}, {STATUS_VOLUME_DISMOUNTED, -EIO, "STATUS_VOLUME_DISMOUNTED"}, {STATUS_WX86_INTERNAL_ERROR, -EIO, "STATUS_WX86_INTERNAL_ERROR"}, {STATUS_WX86_FLOAT_STACK_CHECK, -EIO, "STATUS_WX86_FLOAT_STACK_CHECK"}, {STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"}, {STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"}, {STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"}, {STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"}, {STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"}, {STATUS_IO_REPARSE_TAG_MISMATCH, -EIO, "STATUS_IO_REPARSE_TAG_MISMATCH"}, {STATUS_IO_REPARSE_DATA_INVALID, -EIO, "STATUS_IO_REPARSE_DATA_INVALID"}, {STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO, "STATUS_REPARSE_POINT_NOT_RESOLVED"}, {STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO, "STATUS_DIRECTORY_IS_A_REPARSE_POINT"}, {STATUS_RANGE_LIST_CONFLICT, -EIO, "STATUS_RANGE_LIST_CONFLICT"}, {STATUS_SOURCE_ELEMENT_EMPTY, -EIO, "STATUS_SOURCE_ELEMENT_EMPTY"}, {STATUS_DESTINATION_ELEMENT_FULL, -EIO, "STATUS_DESTINATION_ELEMENT_FULL"}, {STATUS_ILLEGAL_ELEMENT_ADDRESS, -EIO, "STATUS_ILLEGAL_ELEMENT_ADDRESS"}, {STATUS_MAGAZINE_NOT_PRESENT, -EIO, "STATUS_MAGAZINE_NOT_PRESENT"}, {STATUS_REINITIALIZATION_NEEDED, -EIO, "STATUS_REINITIALIZATION_NEEDED"}, {STATUS_ENCRYPTION_FAILED, -EIO, "STATUS_ENCRYPTION_FAILED"}, {STATUS_DECRYPTION_FAILED, -EIO, "STATUS_DECRYPTION_FAILED"}, {STATUS_RANGE_NOT_FOUND, -EIO, "STATUS_RANGE_NOT_FOUND"}, {STATUS_NO_RECOVERY_POLICY, -EIO, "STATUS_NO_RECOVERY_POLICY"}, {STATUS_NO_EFS, -EIO, "STATUS_NO_EFS"}, {STATUS_WRONG_EFS, -EIO, "STATUS_WRONG_EFS"}, {STATUS_NO_USER_KEYS, -EIO, "STATUS_NO_USER_KEYS"}, {STATUS_FILE_NOT_ENCRYPTED, -EIO, "STATUS_FILE_NOT_ENCRYPTED"}, {STATUS_NOT_EXPORT_FORMAT, -EIO, "STATUS_NOT_EXPORT_FORMAT"}, {STATUS_FILE_ENCRYPTED, -EIO, "STATUS_FILE_ENCRYPTED"}, {STATUS_WMI_GUID_NOT_FOUND, -EIO, "STATUS_WMI_GUID_NOT_FOUND"}, {STATUS_WMI_INSTANCE_NOT_FOUND, -EIO, "STATUS_WMI_INSTANCE_NOT_FOUND"}, {STATUS_WMI_ITEMID_NOT_FOUND, -EIO, "STATUS_WMI_ITEMID_NOT_FOUND"}, {STATUS_WMI_TRY_AGAIN, -EIO, "STATUS_WMI_TRY_AGAIN"}, {STATUS_SHARED_POLICY, -EIO, "STATUS_SHARED_POLICY"}, {STATUS_POLICY_OBJECT_NOT_FOUND, -EIO, "STATUS_POLICY_OBJECT_NOT_FOUND"}, {STATUS_POLICY_ONLY_IN_DS, -EIO, "STATUS_POLICY_ONLY_IN_DS"}, {STATUS_VOLUME_NOT_UPGRADED, -EIO, "STATUS_VOLUME_NOT_UPGRADED"}, {STATUS_REMOTE_STORAGE_NOT_ACTIVE, -EIO, "STATUS_REMOTE_STORAGE_NOT_ACTIVE"}, {STATUS_REMOTE_STORAGE_MEDIA_ERROR, -EIO, "STATUS_REMOTE_STORAGE_MEDIA_ERROR"}, {STATUS_NO_TRACKING_SERVICE, -EIO, "STATUS_NO_TRACKING_SERVICE"}, {STATUS_SERVER_SID_MISMATCH, -EIO, "STATUS_SERVER_SID_MISMATCH"}, {STATUS_DS_NO_ATTRIBUTE_OR_VALUE, -EIO, "STATUS_DS_NO_ATTRIBUTE_OR_VALUE"}, {STATUS_DS_INVALID_ATTRIBUTE_SYNTAX, -EIO, "STATUS_DS_INVALID_ATTRIBUTE_SYNTAX"}, {STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED, -EIO, 
"STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED"}, {STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS, -EIO, "STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS"}, {STATUS_DS_BUSY, -EBUSY, "STATUS_DS_BUSY"}, {STATUS_DS_UNAVAILABLE, -EIO, "STATUS_DS_UNAVAILABLE"}, {STATUS_DS_NO_RIDS_ALLOCATED, -EIO, "STATUS_DS_NO_RIDS_ALLOCATED"}, {STATUS_DS_NO_MORE_RIDS, -EIO, "STATUS_DS_NO_MORE_RIDS"}, {STATUS_DS_INCORRECT_ROLE_OWNER, -EIO, "STATUS_DS_INCORRECT_ROLE_OWNER"}, {STATUS_DS_RIDMGR_INIT_ERROR, -EIO, "STATUS_DS_RIDMGR_INIT_ERROR"}, {STATUS_DS_OBJ_CLASS_VIOLATION, -EIO, "STATUS_DS_OBJ_CLASS_VIOLATION"}, {STATUS_DS_CANT_ON_NON_LEAF, -EIO, "STATUS_DS_CANT_ON_NON_LEAF"}, {STATUS_DS_CANT_ON_RDN, -EIO, "STATUS_DS_CANT_ON_RDN"}, {STATUS_DS_CANT_MOD_OBJ_CLASS, -EIO, "STATUS_DS_CANT_MOD_OBJ_CLASS"}, {STATUS_DS_CROSS_DOM_MOVE_FAILED, -EIO, "STATUS_DS_CROSS_DOM_MOVE_FAILED"}, {STATUS_DS_GC_NOT_AVAILABLE, -EIO, "STATUS_DS_GC_NOT_AVAILABLE"}, {STATUS_DIRECTORY_SERVICE_REQUIRED, -EIO, "STATUS_DIRECTORY_SERVICE_REQUIRED"}, {STATUS_REPARSE_ATTRIBUTE_CONFLICT, -EIO, "STATUS_REPARSE_ATTRIBUTE_CONFLICT"}, {STATUS_CANT_ENABLE_DENY_ONLY, -EIO, "STATUS_CANT_ENABLE_DENY_ONLY"}, {STATUS_FLOAT_MULTIPLE_FAULTS, -EIO, "STATUS_FLOAT_MULTIPLE_FAULTS"}, {STATUS_FLOAT_MULTIPLE_TRAPS, -EIO, "STATUS_FLOAT_MULTIPLE_TRAPS"}, {STATUS_DEVICE_REMOVED, -EIO, "STATUS_DEVICE_REMOVED"}, {STATUS_JOURNAL_DELETE_IN_PROGRESS, -EIO, "STATUS_JOURNAL_DELETE_IN_PROGRESS"}, {STATUS_JOURNAL_NOT_ACTIVE, -EIO, "STATUS_JOURNAL_NOT_ACTIVE"}, {STATUS_NOINTERFACE, -EIO, "STATUS_NOINTERFACE"}, {STATUS_DS_ADMIN_LIMIT_EXCEEDED, -EIO, "STATUS_DS_ADMIN_LIMIT_EXCEEDED"}, {STATUS_DRIVER_FAILED_SLEEP, -EIO, "STATUS_DRIVER_FAILED_SLEEP"}, {STATUS_MUTUAL_AUTHENTICATION_FAILED, -EIO, "STATUS_MUTUAL_AUTHENTICATION_FAILED"}, {STATUS_CORRUPT_SYSTEM_FILE, -EIO, "STATUS_CORRUPT_SYSTEM_FILE"}, {STATUS_DATATYPE_MISALIGNMENT_ERROR, -EIO, "STATUS_DATATYPE_MISALIGNMENT_ERROR"}, {STATUS_WMI_READ_ONLY, -EROFS, "STATUS_WMI_READ_ONLY"}, {STATUS_WMI_SET_FAILURE, -EIO, "STATUS_WMI_SET_FAILURE"}, {STATUS_COMMITMENT_MINIMUM, -EIO, "STATUS_COMMITMENT_MINIMUM"}, {STATUS_REG_NAT_CONSUMPTION, -EIO, "STATUS_REG_NAT_CONSUMPTION"}, {STATUS_TRANSPORT_FULL, -EIO, "STATUS_TRANSPORT_FULL"}, {STATUS_DS_SAM_INIT_FAILURE, -EIO, "STATUS_DS_SAM_INIT_FAILURE"}, {STATUS_ONLY_IF_CONNECTED, -EIO, "STATUS_ONLY_IF_CONNECTED"}, {STATUS_DS_SENSITIVE_GROUP_VIOLATION, -EIO, "STATUS_DS_SENSITIVE_GROUP_VIOLATION"}, {STATUS_PNP_RESTART_ENUMERATION, -EIO, "STATUS_PNP_RESTART_ENUMERATION"}, {STATUS_JOURNAL_ENTRY_DELETED, -EIO, "STATUS_JOURNAL_ENTRY_DELETED"}, {STATUS_DS_CANT_MOD_PRIMARYGROUPID, -EIO, "STATUS_DS_CANT_MOD_PRIMARYGROUPID"}, {STATUS_SYSTEM_IMAGE_BAD_SIGNATURE, -EIO, "STATUS_SYSTEM_IMAGE_BAD_SIGNATURE"}, {STATUS_PNP_REBOOT_REQUIRED, -EIO, "STATUS_PNP_REBOOT_REQUIRED"}, {STATUS_POWER_STATE_INVALID, -EIO, "STATUS_POWER_STATE_INVALID"}, {STATUS_DS_INVALID_GROUP_TYPE, -EIO, "STATUS_DS_INVALID_GROUP_TYPE"}, {STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN, -EIO, "STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN"}, {STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN, -EIO, "STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN"}, {STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER, -EIO, "STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER"}, {STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER, -EIO, "STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER"}, {STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER, -EIO, "STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER"}, {STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER, -EIO, "STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER"}, {STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER, 
-EIO, "STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER"}, {STATUS_DS_HAVE_PRIMARY_MEMBERS, -EIO, "STATUS_DS_HAVE_PRIMARY_MEMBERS"}, {STATUS_WMI_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_WMI_NOT_SUPPORTED"}, {STATUS_INSUFFICIENT_POWER, -EIO, "STATUS_INSUFFICIENT_POWER"}, {STATUS_SAM_NEED_BOOTKEY_PASSWORD, -EIO, "STATUS_SAM_NEED_BOOTKEY_PASSWORD"}, {STATUS_SAM_NEED_BOOTKEY_FLOPPY, -EIO, "STATUS_SAM_NEED_BOOTKEY_FLOPPY"}, {STATUS_DS_CANT_START, -EIO, "STATUS_DS_CANT_START"}, {STATUS_DS_INIT_FAILURE, -EIO, "STATUS_DS_INIT_FAILURE"}, {STATUS_SAM_INIT_FAILURE, -EIO, "STATUS_SAM_INIT_FAILURE"}, {STATUS_DS_GC_REQUIRED, -EIO, "STATUS_DS_GC_REQUIRED"}, {STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY, -EIO, "STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY"}, {STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS, -EIO, "STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS"}, {STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED, -EDQUOT, "STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED"}, {STATUS_MULTIPLE_FAULT_VIOLATION, -EIO, "STATUS_MULTIPLE_FAULT_VIOLATION"}, {STATUS_CURRENT_DOMAIN_NOT_ALLOWED, -EIO, "STATUS_CURRENT_DOMAIN_NOT_ALLOWED"}, {STATUS_CANNOT_MAKE, -EIO, "STATUS_CANNOT_MAKE"}, {STATUS_SYSTEM_SHUTDOWN, -EIO, "STATUS_SYSTEM_SHUTDOWN"}, {STATUS_DS_INIT_FAILURE_CONSOLE, -EIO, "STATUS_DS_INIT_FAILURE_CONSOLE"}, {STATUS_DS_SAM_INIT_FAILURE_CONSOLE, -EIO, "STATUS_DS_SAM_INIT_FAILURE_CONSOLE"}, {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO, "STATUS_UNFINISHED_CONTEXT_DELETED"}, {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"}, /* Note that ENOATTTR and ENODATA are the same errno */ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"}, {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"}, {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO, "STATUS_WRONG_CREDENTIAL_HANDLE"}, {STATUS_CRYPTO_SYSTEM_INVALID, -EIO, "STATUS_CRYPTO_SYSTEM_INVALID"}, {STATUS_MAX_REFERRALS_EXCEEDED, -EIO, "STATUS_MAX_REFERRALS_EXCEEDED"}, {STATUS_MUST_BE_KDC, -EIO, "STATUS_MUST_BE_KDC"}, {STATUS_STRONG_CRYPTO_NOT_SUPPORTED, -EIO, "STATUS_STRONG_CRYPTO_NOT_SUPPORTED"}, {STATUS_TOO_MANY_PRINCIPALS, -EIO, "STATUS_TOO_MANY_PRINCIPALS"}, {STATUS_NO_PA_DATA, -EIO, "STATUS_NO_PA_DATA"}, {STATUS_PKINIT_NAME_MISMATCH, -EIO, "STATUS_PKINIT_NAME_MISMATCH"}, {STATUS_SMARTCARD_LOGON_REQUIRED, -EIO, "STATUS_SMARTCARD_LOGON_REQUIRED"}, {STATUS_KDC_INVALID_REQUEST, -EIO, "STATUS_KDC_INVALID_REQUEST"}, {STATUS_KDC_UNABLE_TO_REFER, -EIO, "STATUS_KDC_UNABLE_TO_REFER"}, {STATUS_KDC_UNKNOWN_ETYPE, -EIO, "STATUS_KDC_UNKNOWN_ETYPE"}, {STATUS_SHUTDOWN_IN_PROGRESS, -EIO, "STATUS_SHUTDOWN_IN_PROGRESS"}, {STATUS_SERVER_SHUTDOWN_IN_PROGRESS, -EIO, "STATUS_SERVER_SHUTDOWN_IN_PROGRESS"}, {STATUS_NOT_SUPPORTED_ON_SBS, -EOPNOTSUPP, "STATUS_NOT_SUPPORTED_ON_SBS"}, {STATUS_WMI_GUID_DISCONNECTED, -EIO, "STATUS_WMI_GUID_DISCONNECTED"}, {STATUS_WMI_ALREADY_DISABLED, -EIO, "STATUS_WMI_ALREADY_DISABLED"}, {STATUS_WMI_ALREADY_ENABLED, -EIO, "STATUS_WMI_ALREADY_ENABLED"}, {STATUS_MFT_TOO_FRAGMENTED, -EIO, "STATUS_MFT_TOO_FRAGMENTED"}, {STATUS_COPY_PROTECTION_FAILURE, -EIO, "STATUS_COPY_PROTECTION_FAILURE"}, {STATUS_CSS_AUTHENTICATION_FAILURE, -EIO, "STATUS_CSS_AUTHENTICATION_FAILURE"}, {STATUS_CSS_KEY_NOT_PRESENT, -EIO, "STATUS_CSS_KEY_NOT_PRESENT"}, {STATUS_CSS_KEY_NOT_ESTABLISHED, -EIO, "STATUS_CSS_KEY_NOT_ESTABLISHED"}, {STATUS_CSS_SCRAMBLED_SECTOR, -EIO, "STATUS_CSS_SCRAMBLED_SECTOR"}, {STATUS_CSS_REGION_MISMATCH, -EIO, "STATUS_CSS_REGION_MISMATCH"}, {STATUS_CSS_RESETS_EXHAUSTED, -EIO, "STATUS_CSS_RESETS_EXHAUSTED"}, {STATUS_PKINIT_FAILURE, -EIO, "STATUS_PKINIT_FAILURE"}, {STATUS_SMARTCARD_SUBSYSTEM_FAILURE, -EIO, 
"STATUS_SMARTCARD_SUBSYSTEM_FAILURE"}, {STATUS_NO_KERB_KEY, -EIO, "STATUS_NO_KERB_KEY"}, {STATUS_HOST_DOWN, -EIO, "STATUS_HOST_DOWN"}, {STATUS_UNSUPPORTED_PREAUTH, -EIO, "STATUS_UNSUPPORTED_PREAUTH"}, {STATUS_EFS_ALG_BLOB_TOO_BIG, -EIO, "STATUS_EFS_ALG_BLOB_TOO_BIG"}, {STATUS_PORT_NOT_SET, -EIO, "STATUS_PORT_NOT_SET"}, {STATUS_DEBUGGER_INACTIVE, -EIO, "STATUS_DEBUGGER_INACTIVE"}, {STATUS_DS_VERSION_CHECK_FAILURE, -EIO, "STATUS_DS_VERSION_CHECK_FAILURE"}, {STATUS_AUDITING_DISABLED, -EIO, "STATUS_AUDITING_DISABLED"}, {STATUS_PRENT4_MACHINE_ACCOUNT, -EIO, "STATUS_PRENT4_MACHINE_ACCOUNT"}, {STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER, -EIO, "STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER"}, {STATUS_INVALID_IMAGE_WIN_32, -EIO, "STATUS_INVALID_IMAGE_WIN_32"}, {STATUS_INVALID_IMAGE_WIN_64, -EIO, "STATUS_INVALID_IMAGE_WIN_64"}, {STATUS_BAD_BINDINGS, -EIO, "STATUS_BAD_BINDINGS"}, {STATUS_NETWORK_SESSION_EXPIRED, -EIO, "STATUS_NETWORK_SESSION_EXPIRED"}, {STATUS_APPHELP_BLOCK, -EIO, "STATUS_APPHELP_BLOCK"}, {STATUS_ALL_SIDS_FILTERED, -EIO, "STATUS_ALL_SIDS_FILTERED"}, {STATUS_NOT_SAFE_MODE_DRIVER, -EIO, "STATUS_NOT_SAFE_MODE_DRIVER"}, {STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT, -EACCES, "STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT"}, {STATUS_ACCESS_DISABLED_BY_POLICY_PATH, -EACCES, "STATUS_ACCESS_DISABLED_BY_POLICY_PATH"}, {STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER, -EACCES, "STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER"}, {STATUS_ACCESS_DISABLED_BY_POLICY_OTHER, -EACCES, "STATUS_ACCESS_DISABLED_BY_POLICY_OTHER"}, {STATUS_FAILED_DRIVER_ENTRY, -EIO, "STATUS_FAILED_DRIVER_ENTRY"}, {STATUS_DEVICE_ENUMERATION_ERROR, -EIO, "STATUS_DEVICE_ENUMERATION_ERROR"}, {STATUS_MOUNT_POINT_NOT_RESOLVED, -EIO, "STATUS_MOUNT_POINT_NOT_RESOLVED"}, {STATUS_INVALID_DEVICE_OBJECT_PARAMETER, -EIO, "STATUS_INVALID_DEVICE_OBJECT_PARAMETER"}, {STATUS_MCA_OCCURED, -EIO, "STATUS_MCA_OCCURED"}, {STATUS_DRIVER_BLOCKED_CRITICAL, -EIO, "STATUS_DRIVER_BLOCKED_CRITICAL"}, {STATUS_DRIVER_BLOCKED, -EIO, "STATUS_DRIVER_BLOCKED"}, {STATUS_DRIVER_DATABASE_ERROR, -EIO, "STATUS_DRIVER_DATABASE_ERROR"}, {STATUS_SYSTEM_HIVE_TOO_LARGE, -EIO, "STATUS_SYSTEM_HIVE_TOO_LARGE"}, {STATUS_INVALID_IMPORT_OF_NON_DLL, -EIO, "STATUS_INVALID_IMPORT_OF_NON_DLL"}, {STATUS_NO_SECRETS, -EIO, "STATUS_NO_SECRETS"}, {STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY, -EACCES, "STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY"}, {STATUS_FAILED_STACK_SWITCH, -EIO, "STATUS_FAILED_STACK_SWITCH"}, {STATUS_HEAP_CORRUPTION, -EIO, "STATUS_HEAP_CORRUPTION"}, {STATUS_SMARTCARD_WRONG_PIN, -EIO, "STATUS_SMARTCARD_WRONG_PIN"}, {STATUS_SMARTCARD_CARD_BLOCKED, -EIO, "STATUS_SMARTCARD_CARD_BLOCKED"}, {STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED, -EIO, "STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED"}, {STATUS_SMARTCARD_NO_CARD, -EIO, "STATUS_SMARTCARD_NO_CARD"}, {STATUS_SMARTCARD_NO_KEY_CONTAINER, -EIO, "STATUS_SMARTCARD_NO_KEY_CONTAINER"}, {STATUS_SMARTCARD_NO_CERTIFICATE, -EIO, "STATUS_SMARTCARD_NO_CERTIFICATE"}, {STATUS_SMARTCARD_NO_KEYSET, -EIO, "STATUS_SMARTCARD_NO_KEYSET"}, {STATUS_SMARTCARD_IO_ERROR, -EIO, "STATUS_SMARTCARD_IO_ERROR"}, {STATUS_DOWNGRADE_DETECTED, -EIO, "STATUS_DOWNGRADE_DETECTED"}, {STATUS_SMARTCARD_CERT_REVOKED, -EIO, "STATUS_SMARTCARD_CERT_REVOKED"}, {STATUS_ISSUING_CA_UNTRUSTED, -EIO, "STATUS_ISSUING_CA_UNTRUSTED"}, {STATUS_REVOCATION_OFFLINE_C, -EIO, "STATUS_REVOCATION_OFFLINE_C"}, {STATUS_PKINIT_CLIENT_FAILURE, -EIO, "STATUS_PKINIT_CLIENT_FAILURE"}, {STATUS_SMARTCARD_CERT_EXPIRED, -EIO, "STATUS_SMARTCARD_CERT_EXPIRED"}, {STATUS_DRIVER_FAILED_PRIOR_UNLOAD, -EIO, 
"STATUS_DRIVER_FAILED_PRIOR_UNLOAD"}, {STATUS_SMARTCARD_SILENT_CONTEXT, -EIO, "STATUS_SMARTCARD_SILENT_CONTEXT"}, {STATUS_PER_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT, "STATUS_PER_USER_TRUST_QUOTA_EXCEEDED"}, {STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT, "STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED"}, {STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED, -EDQUOT, "STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED"}, {STATUS_DS_NAME_NOT_UNIQUE, -EIO, "STATUS_DS_NAME_NOT_UNIQUE"}, {STATUS_DS_DUPLICATE_ID_FOUND, -EIO, "STATUS_DS_DUPLICATE_ID_FOUND"}, {STATUS_DS_GROUP_CONVERSION_ERROR, -EIO, "STATUS_DS_GROUP_CONVERSION_ERROR"}, {STATUS_VOLSNAP_PREPARE_HIBERNATE, -EIO, "STATUS_VOLSNAP_PREPARE_HIBERNATE"}, {STATUS_USER2USER_REQUIRED, -EIO, "STATUS_USER2USER_REQUIRED"}, {STATUS_STACK_BUFFER_OVERRUN, -EIO, "STATUS_STACK_BUFFER_OVERRUN"}, {STATUS_NO_S4U_PROT_SUPPORT, -EIO, "STATUS_NO_S4U_PROT_SUPPORT"}, {STATUS_CROSSREALM_DELEGATION_FAILURE, -EIO, "STATUS_CROSSREALM_DELEGATION_FAILURE"}, {STATUS_REVOCATION_OFFLINE_KDC, -EIO, "STATUS_REVOCATION_OFFLINE_KDC"}, {STATUS_ISSUING_CA_UNTRUSTED_KDC, -EIO, "STATUS_ISSUING_CA_UNTRUSTED_KDC"}, {STATUS_KDC_CERT_EXPIRED, -EIO, "STATUS_KDC_CERT_EXPIRED"}, {STATUS_KDC_CERT_REVOKED, -EIO, "STATUS_KDC_CERT_REVOKED"}, {STATUS_PARAMETER_QUOTA_EXCEEDED, -EDQUOT, "STATUS_PARAMETER_QUOTA_EXCEEDED"}, {STATUS_HIBERNATION_FAILURE, -EIO, "STATUS_HIBERNATION_FAILURE"}, {STATUS_DELAY_LOAD_FAILED, -EIO, "STATUS_DELAY_LOAD_FAILED"}, {STATUS_AUTHENTICATION_FIREWALL_FAILED, -EIO, "STATUS_AUTHENTICATION_FIREWALL_FAILED"}, {STATUS_VDM_DISALLOWED, -EIO, "STATUS_VDM_DISALLOWED"}, {STATUS_HUNG_DISPLAY_DRIVER_THREAD, -EIO, "STATUS_HUNG_DISPLAY_DRIVER_THREAD"}, {STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE, -EIO, "STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE"}, {STATUS_INVALID_CRUNTIME_PARAMETER, -EIO, "STATUS_INVALID_CRUNTIME_PARAMETER"}, {STATUS_NTLM_BLOCKED, -EIO, "STATUS_NTLM_BLOCKED"}, {STATUS_ASSERTION_FAILURE, -EIO, "STATUS_ASSERTION_FAILURE"}, {STATUS_VERIFIER_STOP, -EIO, "STATUS_VERIFIER_STOP"}, {STATUS_CALLBACK_POP_STACK, -EIO, "STATUS_CALLBACK_POP_STACK"}, {STATUS_INCOMPATIBLE_DRIVER_BLOCKED, -EIO, "STATUS_INCOMPATIBLE_DRIVER_BLOCKED"}, {STATUS_HIVE_UNLOADED, -EIO, "STATUS_HIVE_UNLOADED"}, {STATUS_COMPRESSION_DISABLED, -EIO, "STATUS_COMPRESSION_DISABLED"}, {STATUS_FILE_SYSTEM_LIMITATION, -EIO, "STATUS_FILE_SYSTEM_LIMITATION"}, {STATUS_INVALID_IMAGE_HASH, -EIO, "STATUS_INVALID_IMAGE_HASH"}, {STATUS_NOT_CAPABLE, -EIO, "STATUS_NOT_CAPABLE"}, {STATUS_REQUEST_OUT_OF_SEQUENCE, -EIO, "STATUS_REQUEST_OUT_OF_SEQUENCE"}, {STATUS_IMPLEMENTATION_LIMIT, -EIO, "STATUS_IMPLEMENTATION_LIMIT"}, {STATUS_ELEVATION_REQUIRED, -EIO, "STATUS_ELEVATION_REQUIRED"}, {STATUS_BEYOND_VDL, -EIO, "STATUS_BEYOND_VDL"}, {STATUS_ENCOUNTERED_WRITE_IN_PROGRESS, -EIO, "STATUS_ENCOUNTERED_WRITE_IN_PROGRESS"}, {STATUS_PTE_CHANGED, -EIO, "STATUS_PTE_CHANGED"}, {STATUS_PURGE_FAILED, -EIO, "STATUS_PURGE_FAILED"}, {STATUS_CRED_REQUIRES_CONFIRMATION, -EIO, "STATUS_CRED_REQUIRES_CONFIRMATION"}, {STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE, -EIO, "STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE"}, {STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER, -EIO, "STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER"}, {STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE, -EIO, "STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE"}, {STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE, -EIO, "STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE"}, {STATUS_CS_ENCRYPTION_FILE_NOT_CSE, -EIO, "STATUS_CS_ENCRYPTION_FILE_NOT_CSE"}, {STATUS_INVALID_LABEL, -EIO, "STATUS_INVALID_LABEL"}, 
{STATUS_DRIVER_PROCESS_TERMINATED, -EIO, "STATUS_DRIVER_PROCESS_TERMINATED"}, {STATUS_AMBIGUOUS_SYSTEM_DEVICE, -EIO, "STATUS_AMBIGUOUS_SYSTEM_DEVICE"}, {STATUS_SYSTEM_DEVICE_NOT_FOUND, -EIO, "STATUS_SYSTEM_DEVICE_NOT_FOUND"}, {STATUS_RESTART_BOOT_APPLICATION, -EIO, "STATUS_RESTART_BOOT_APPLICATION"}, {STATUS_INVALID_TASK_NAME, -EIO, "STATUS_INVALID_TASK_NAME"}, {STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"}, {STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"}, {STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"}, {STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"}, {STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"}, {STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"}, {STATUS_REQUEST_CANCELED, -EIO, "STATUS_REQUEST_CANCELED"}, {STATUS_RECURSIVE_DISPATCH, -EIO, "STATUS_RECURSIVE_DISPATCH"}, {STATUS_LPC_RECEIVE_BUFFER_EXPECTED, -EIO, "STATUS_LPC_RECEIVE_BUFFER_EXPECTED"}, {STATUS_LPC_INVALID_CONNECTION_USAGE, -EIO, "STATUS_LPC_INVALID_CONNECTION_USAGE"}, {STATUS_LPC_REQUESTS_NOT_ALLOWED, -EIO, "STATUS_LPC_REQUESTS_NOT_ALLOWED"}, {STATUS_RESOURCE_IN_USE, -EIO, "STATUS_RESOURCE_IN_USE"}, {STATUS_HARDWARE_MEMORY_ERROR, -EIO, "STATUS_HARDWARE_MEMORY_ERROR"}, {STATUS_THREADPOOL_HANDLE_EXCEPTION, -EIO, "STATUS_THREADPOOL_HANDLE_EXCEPTION"}, {STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED, -EIO, "STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED"}, {STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED, -EIO, "STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED"}, {STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED, -EIO, "STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED"}, {STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED, -EIO, "STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED"}, {STATUS_THREADPOOL_RELEASED_DURING_OPERATION, -EIO, "STATUS_THREADPOOL_RELEASED_DURING_OPERATION"}, {STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING, -EIO, "STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING"}, {STATUS_APC_RETURNED_WHILE_IMPERSONATING, -EIO, "STATUS_APC_RETURNED_WHILE_IMPERSONATING"}, {STATUS_PROCESS_IS_PROTECTED, -EIO, "STATUS_PROCESS_IS_PROTECTED"}, {STATUS_MCA_EXCEPTION, -EIO, "STATUS_MCA_EXCEPTION"}, {STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE, -EIO, "STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE"}, {STATUS_SYMLINK_CLASS_DISABLED, -EIO, "STATUS_SYMLINK_CLASS_DISABLED"}, {STATUS_INVALID_IDN_NORMALIZATION, -EIO, "STATUS_INVALID_IDN_NORMALIZATION"}, {STATUS_NO_UNICODE_TRANSLATION, -EIO, "STATUS_NO_UNICODE_TRANSLATION"}, {STATUS_ALREADY_REGISTERED, -EIO, "STATUS_ALREADY_REGISTERED"}, {STATUS_CONTEXT_MISMATCH, -EIO, "STATUS_CONTEXT_MISMATCH"}, {STATUS_PORT_ALREADY_HAS_COMPLETION_LIST, -EIO, "STATUS_PORT_ALREADY_HAS_COMPLETION_LIST"}, {STATUS_CALLBACK_RETURNED_THREAD_PRIORITY, -EIO, "STATUS_CALLBACK_RETURNED_THREAD_PRIORITY"}, {STATUS_INVALID_THREAD, -EIO, "STATUS_INVALID_THREAD"}, {STATUS_CALLBACK_RETURNED_TRANSACTION, -EIO, "STATUS_CALLBACK_RETURNED_TRANSACTION"}, {STATUS_CALLBACK_RETURNED_LDR_LOCK, -EIO, "STATUS_CALLBACK_RETURNED_LDR_LOCK"}, {STATUS_CALLBACK_RETURNED_LANG, -EIO, "STATUS_CALLBACK_RETURNED_LANG"}, {STATUS_CALLBACK_RETURNED_PRI_BACK, -EIO, "STATUS_CALLBACK_RETURNED_PRI_BACK"}, {STATUS_CALLBACK_RETURNED_THREAD_AFFINITY, -EIO, "STATUS_CALLBACK_RETURNED_THREAD_AFFINITY"}, {STATUS_DISK_REPAIR_DISABLED, -EIO, "STATUS_DISK_REPAIR_DISABLED"}, {STATUS_DS_DOMAIN_RENAME_IN_PROGRESS, -EIO, "STATUS_DS_DOMAIN_RENAME_IN_PROGRESS"}, {STATUS_DISK_QUOTA_EXCEEDED, -EDQUOT, "STATUS_DISK_QUOTA_EXCEEDED"}, {STATUS_CONTENT_BLOCKED, -EIO, "STATUS_CONTENT_BLOCKED"}, 
{STATUS_BAD_CLUSTERS, -EIO, "STATUS_BAD_CLUSTERS"}, {STATUS_VOLUME_DIRTY, -EIO, "STATUS_VOLUME_DIRTY"}, {STATUS_FILE_CHECKED_OUT, -EIO, "STATUS_FILE_CHECKED_OUT"}, {STATUS_CHECKOUT_REQUIRED, -EIO, "STATUS_CHECKOUT_REQUIRED"}, {STATUS_BAD_FILE_TYPE, -EIO, "STATUS_BAD_FILE_TYPE"}, {STATUS_FILE_TOO_LARGE, -EIO, "STATUS_FILE_TOO_LARGE"}, {STATUS_FORMS_AUTH_REQUIRED, -EIO, "STATUS_FORMS_AUTH_REQUIRED"}, {STATUS_VIRUS_INFECTED, -EIO, "STATUS_VIRUS_INFECTED"}, {STATUS_VIRUS_DELETED, -EIO, "STATUS_VIRUS_DELETED"}, {STATUS_BAD_MCFG_TABLE, -EIO, "STATUS_BAD_MCFG_TABLE"}, {STATUS_WOW_ASSERTION, -EIO, "STATUS_WOW_ASSERTION"}, {STATUS_INVALID_SIGNATURE, -EIO, "STATUS_INVALID_SIGNATURE"}, {STATUS_HMAC_NOT_SUPPORTED, -EIO, "STATUS_HMAC_NOT_SUPPORTED"}, {STATUS_IPSEC_QUEUE_OVERFLOW, -EIO, "STATUS_IPSEC_QUEUE_OVERFLOW"}, {STATUS_ND_QUEUE_OVERFLOW, -EIO, "STATUS_ND_QUEUE_OVERFLOW"}, {STATUS_HOPLIMIT_EXCEEDED, -EIO, "STATUS_HOPLIMIT_EXCEEDED"}, {STATUS_PROTOCOL_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_PROTOCOL_NOT_SUPPORTED"}, {STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED, -EIO, "STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED"}, {STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR, -EIO, "STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR"}, {STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR, -EIO, "STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR"}, {STATUS_XML_PARSE_ERROR, -EIO, "STATUS_XML_PARSE_ERROR"}, {STATUS_XMLDSIG_ERROR, -EIO, "STATUS_XMLDSIG_ERROR"}, {STATUS_WRONG_COMPARTMENT, -EIO, "STATUS_WRONG_COMPARTMENT"}, {STATUS_AUTHIP_FAILURE, -EIO, "STATUS_AUTHIP_FAILURE"}, {DBG_NO_STATE_CHANGE, -EIO, "DBG_NO_STATE_CHANGE"}, {DBG_APP_NOT_IDLE, -EIO, "DBG_APP_NOT_IDLE"}, {RPC_NT_INVALID_STRING_BINDING, -EIO, "RPC_NT_INVALID_STRING_BINDING"}, {RPC_NT_WRONG_KIND_OF_BINDING, -EIO, "RPC_NT_WRONG_KIND_OF_BINDING"}, {RPC_NT_INVALID_BINDING, -EIO, "RPC_NT_INVALID_BINDING"}, {RPC_NT_PROTSEQ_NOT_SUPPORTED, -EOPNOTSUPP, "RPC_NT_PROTSEQ_NOT_SUPPORTED"}, {RPC_NT_INVALID_RPC_PROTSEQ, -EIO, "RPC_NT_INVALID_RPC_PROTSEQ"}, {RPC_NT_INVALID_STRING_UUID, -EIO, "RPC_NT_INVALID_STRING_UUID"}, {RPC_NT_INVALID_ENDPOINT_FORMAT, -EIO, "RPC_NT_INVALID_ENDPOINT_FORMAT"}, {RPC_NT_INVALID_NET_ADDR, -EIO, "RPC_NT_INVALID_NET_ADDR"}, {RPC_NT_NO_ENDPOINT_FOUND, -EIO, "RPC_NT_NO_ENDPOINT_FOUND"}, {RPC_NT_INVALID_TIMEOUT, -EINVAL, "RPC_NT_INVALID_TIMEOUT"}, {RPC_NT_OBJECT_NOT_FOUND, -ENOENT, "RPC_NT_OBJECT_NOT_FOUND"}, {RPC_NT_ALREADY_REGISTERED, -EIO, "RPC_NT_ALREADY_REGISTERED"}, {RPC_NT_TYPE_ALREADY_REGISTERED, -EIO, "RPC_NT_TYPE_ALREADY_REGISTERED"}, {RPC_NT_ALREADY_LISTENING, -EIO, "RPC_NT_ALREADY_LISTENING"}, {RPC_NT_NO_PROTSEQS_REGISTERED, -EIO, "RPC_NT_NO_PROTSEQS_REGISTERED"}, {RPC_NT_NOT_LISTENING, -EIO, "RPC_NT_NOT_LISTENING"}, {RPC_NT_UNKNOWN_MGR_TYPE, -EIO, "RPC_NT_UNKNOWN_MGR_TYPE"}, {RPC_NT_UNKNOWN_IF, -EIO, "RPC_NT_UNKNOWN_IF"}, {RPC_NT_NO_BINDINGS, -EIO, "RPC_NT_NO_BINDINGS"}, {RPC_NT_NO_PROTSEQS, -EIO, "RPC_NT_NO_PROTSEQS"}, {RPC_NT_CANT_CREATE_ENDPOINT, -EIO, "RPC_NT_CANT_CREATE_ENDPOINT"}, {RPC_NT_OUT_OF_RESOURCES, -EIO, "RPC_NT_OUT_OF_RESOURCES"}, {RPC_NT_SERVER_UNAVAILABLE, -EIO, "RPC_NT_SERVER_UNAVAILABLE"}, {RPC_NT_SERVER_TOO_BUSY, -EBUSY, "RPC_NT_SERVER_TOO_BUSY"}, {RPC_NT_INVALID_NETWORK_OPTIONS, -EIO, "RPC_NT_INVALID_NETWORK_OPTIONS"}, {RPC_NT_NO_CALL_ACTIVE, -EIO, "RPC_NT_NO_CALL_ACTIVE"}, {RPC_NT_CALL_FAILED, -EIO, "RPC_NT_CALL_FAILED"}, {RPC_NT_CALL_FAILED_DNE, -EIO, "RPC_NT_CALL_FAILED_DNE"}, {RPC_NT_PROTOCOL_ERROR, -EIO, "RPC_NT_PROTOCOL_ERROR"}, {RPC_NT_UNSUPPORTED_TRANS_SYN, -EIO, "RPC_NT_UNSUPPORTED_TRANS_SYN"}, 
{RPC_NT_UNSUPPORTED_TYPE, -EIO, "RPC_NT_UNSUPPORTED_TYPE"}, {RPC_NT_INVALID_TAG, -EIO, "RPC_NT_INVALID_TAG"}, {RPC_NT_INVALID_BOUND, -EIO, "RPC_NT_INVALID_BOUND"}, {RPC_NT_NO_ENTRY_NAME, -EIO, "RPC_NT_NO_ENTRY_NAME"}, {RPC_NT_INVALID_NAME_SYNTAX, -EIO, "RPC_NT_INVALID_NAME_SYNTAX"}, {RPC_NT_UNSUPPORTED_NAME_SYNTAX, -EIO, "RPC_NT_UNSUPPORTED_NAME_SYNTAX"}, {RPC_NT_UUID_NO_ADDRESS, -EIO, "RPC_NT_UUID_NO_ADDRESS"}, {RPC_NT_DUPLICATE_ENDPOINT, -ENOTUNIQ, "RPC_NT_DUPLICATE_ENDPOINT"}, {RPC_NT_UNKNOWN_AUTHN_TYPE, -EIO, "RPC_NT_UNKNOWN_AUTHN_TYPE"}, {RPC_NT_MAX_CALLS_TOO_SMALL, -EIO, "RPC_NT_MAX_CALLS_TOO_SMALL"}, {RPC_NT_STRING_TOO_LONG, -EIO, "RPC_NT_STRING_TOO_LONG"}, {RPC_NT_PROTSEQ_NOT_FOUND, -EIO, "RPC_NT_PROTSEQ_NOT_FOUND"}, {RPC_NT_PROCNUM_OUT_OF_RANGE, -EIO, "RPC_NT_PROCNUM_OUT_OF_RANGE"}, {RPC_NT_BINDING_HAS_NO_AUTH, -EIO, "RPC_NT_BINDING_HAS_NO_AUTH"}, {RPC_NT_UNKNOWN_AUTHN_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHN_SERVICE"}, {RPC_NT_UNKNOWN_AUTHN_LEVEL, -EIO, "RPC_NT_UNKNOWN_AUTHN_LEVEL"}, {RPC_NT_INVALID_AUTH_IDENTITY, -EIO, "RPC_NT_INVALID_AUTH_IDENTITY"}, {RPC_NT_UNKNOWN_AUTHZ_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHZ_SERVICE"}, {EPT_NT_INVALID_ENTRY, -EIO, "EPT_NT_INVALID_ENTRY"}, {EPT_NT_CANT_PERFORM_OP, -EIO, "EPT_NT_CANT_PERFORM_OP"}, {EPT_NT_NOT_REGISTERED, -EIO, "EPT_NT_NOT_REGISTERED"}, {RPC_NT_NOTHING_TO_EXPORT, -EIO, "RPC_NT_NOTHING_TO_EXPORT"}, {RPC_NT_INCOMPLETE_NAME, -EIO, "RPC_NT_INCOMPLETE_NAME"}, {RPC_NT_INVALID_VERS_OPTION, -EIO, "RPC_NT_INVALID_VERS_OPTION"}, {RPC_NT_NO_MORE_MEMBERS, -EIO, "RPC_NT_NO_MORE_MEMBERS"}, {RPC_NT_NOT_ALL_OBJS_UNEXPORTED, -EIO, "RPC_NT_NOT_ALL_OBJS_UNEXPORTED"}, {RPC_NT_INTERFACE_NOT_FOUND, -EIO, "RPC_NT_INTERFACE_NOT_FOUND"}, {RPC_NT_ENTRY_ALREADY_EXISTS, -EIO, "RPC_NT_ENTRY_ALREADY_EXISTS"}, {RPC_NT_ENTRY_NOT_FOUND, -EIO, "RPC_NT_ENTRY_NOT_FOUND"}, {RPC_NT_NAME_SERVICE_UNAVAILABLE, -EIO, "RPC_NT_NAME_SERVICE_UNAVAILABLE"}, {RPC_NT_INVALID_NAF_ID, -EIO, "RPC_NT_INVALID_NAF_ID"}, {RPC_NT_CANNOT_SUPPORT, -EOPNOTSUPP, "RPC_NT_CANNOT_SUPPORT"}, {RPC_NT_NO_CONTEXT_AVAILABLE, -EIO, "RPC_NT_NO_CONTEXT_AVAILABLE"}, {RPC_NT_INTERNAL_ERROR, -EIO, "RPC_NT_INTERNAL_ERROR"}, {RPC_NT_ZERO_DIVIDE, -EIO, "RPC_NT_ZERO_DIVIDE"}, {RPC_NT_ADDRESS_ERROR, -EIO, "RPC_NT_ADDRESS_ERROR"}, {RPC_NT_FP_DIV_ZERO, -EIO, "RPC_NT_FP_DIV_ZERO"}, {RPC_NT_FP_UNDERFLOW, -EIO, "RPC_NT_FP_UNDERFLOW"}, {RPC_NT_FP_OVERFLOW, -EIO, "RPC_NT_FP_OVERFLOW"}, {RPC_NT_CALL_IN_PROGRESS, -EIO, "RPC_NT_CALL_IN_PROGRESS"}, {RPC_NT_NO_MORE_BINDINGS, -EIO, "RPC_NT_NO_MORE_BINDINGS"}, {RPC_NT_GROUP_MEMBER_NOT_FOUND, -EIO, "RPC_NT_GROUP_MEMBER_NOT_FOUND"}, {EPT_NT_CANT_CREATE, -EIO, "EPT_NT_CANT_CREATE"}, {RPC_NT_INVALID_OBJECT, -EIO, "RPC_NT_INVALID_OBJECT"}, {RPC_NT_NO_INTERFACES, -EIO, "RPC_NT_NO_INTERFACES"}, {RPC_NT_CALL_CANCELLED, -EIO, "RPC_NT_CALL_CANCELLED"}, {RPC_NT_BINDING_INCOMPLETE, -EIO, "RPC_NT_BINDING_INCOMPLETE"}, {RPC_NT_COMM_FAILURE, -EIO, "RPC_NT_COMM_FAILURE"}, {RPC_NT_UNSUPPORTED_AUTHN_LEVEL, -EIO, "RPC_NT_UNSUPPORTED_AUTHN_LEVEL"}, {RPC_NT_NO_PRINC_NAME, -EIO, "RPC_NT_NO_PRINC_NAME"}, {RPC_NT_NOT_RPC_ERROR, -EIO, "RPC_NT_NOT_RPC_ERROR"}, {RPC_NT_SEC_PKG_ERROR, -EIO, "RPC_NT_SEC_PKG_ERROR"}, {RPC_NT_NOT_CANCELLED, -EIO, "RPC_NT_NOT_CANCELLED"}, {RPC_NT_INVALID_ASYNC_HANDLE, -EIO, "RPC_NT_INVALID_ASYNC_HANDLE"}, {RPC_NT_INVALID_ASYNC_CALL, -EIO, "RPC_NT_INVALID_ASYNC_CALL"}, {RPC_NT_PROXY_ACCESS_DENIED, -EACCES, "RPC_NT_PROXY_ACCESS_DENIED"}, {RPC_NT_NO_MORE_ENTRIES, -EIO, "RPC_NT_NO_MORE_ENTRIES"}, {RPC_NT_SS_CHAR_TRANS_OPEN_FAIL, -EIO, "RPC_NT_SS_CHAR_TRANS_OPEN_FAIL"}, 
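/*
 * The RPC_NT_* and EPT_NT_* entries cover DCE/RPC binding, transport and
 * marshalling failures. POSIX has no finer-grained equivalents for these,
 * so nearly all of them collapse to -EIO; the name string is what
 * preserves the original status for logging.
 */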
{RPC_NT_SS_CHAR_TRANS_SHORT_FILE, -EIO, "RPC_NT_SS_CHAR_TRANS_SHORT_FILE"}, {RPC_NT_SS_IN_NULL_CONTEXT, -EIO, "RPC_NT_SS_IN_NULL_CONTEXT"}, {RPC_NT_SS_CONTEXT_MISMATCH, -EIO, "RPC_NT_SS_CONTEXT_MISMATCH"}, {RPC_NT_SS_CONTEXT_DAMAGED, -EIO, "RPC_NT_SS_CONTEXT_DAMAGED"}, {RPC_NT_SS_HANDLES_MISMATCH, -EIO, "RPC_NT_SS_HANDLES_MISMATCH"}, {RPC_NT_SS_CANNOT_GET_CALL_HANDLE, -EIO, "RPC_NT_SS_CANNOT_GET_CALL_HANDLE"}, {RPC_NT_NULL_REF_POINTER, -EIO, "RPC_NT_NULL_REF_POINTER"}, {RPC_NT_ENUM_VALUE_OUT_OF_RANGE, -EIO, "RPC_NT_ENUM_VALUE_OUT_OF_RANGE"}, {RPC_NT_BYTE_COUNT_TOO_SMALL, -EIO, "RPC_NT_BYTE_COUNT_TOO_SMALL"}, {RPC_NT_BAD_STUB_DATA, -EIO, "RPC_NT_BAD_STUB_DATA"}, {RPC_NT_INVALID_ES_ACTION, -EIO, "RPC_NT_INVALID_ES_ACTION"}, {RPC_NT_WRONG_ES_VERSION, -EIO, "RPC_NT_WRONG_ES_VERSION"}, {RPC_NT_WRONG_STUB_VERSION, -EIO, "RPC_NT_WRONG_STUB_VERSION"}, {RPC_NT_INVALID_PIPE_OBJECT, -EIO, "RPC_NT_INVALID_PIPE_OBJECT"}, {RPC_NT_INVALID_PIPE_OPERATION, -EIO, "RPC_NT_INVALID_PIPE_OPERATION"}, {RPC_NT_WRONG_PIPE_VERSION, -EIO, "RPC_NT_WRONG_PIPE_VERSION"}, {RPC_NT_PIPE_CLOSED, -EIO, "RPC_NT_PIPE_CLOSED"}, {RPC_NT_PIPE_DISCIPLINE_ERROR, -EIO, "RPC_NT_PIPE_DISCIPLINE_ERROR"}, {RPC_NT_PIPE_EMPTY, -EIO, "RPC_NT_PIPE_EMPTY"}, {STATUS_PNP_BAD_MPS_TABLE, -EIO, "STATUS_PNP_BAD_MPS_TABLE"}, {STATUS_PNP_TRANSLATION_FAILED, -EIO, "STATUS_PNP_TRANSLATION_FAILED"}, {STATUS_PNP_IRQ_TRANSLATION_FAILED, -EIO, "STATUS_PNP_IRQ_TRANSLATION_FAILED"}, {STATUS_PNP_INVALID_ID, -EIO, "STATUS_PNP_INVALID_ID"}, {STATUS_IO_REISSUE_AS_CACHED, -EIO, "STATUS_IO_REISSUE_AS_CACHED"}, {STATUS_CTX_WINSTATION_NAME_INVALID, -EIO, "STATUS_CTX_WINSTATION_NAME_INVALID"}, {STATUS_CTX_INVALID_PD, -EIO, "STATUS_CTX_INVALID_PD"}, {STATUS_CTX_PD_NOT_FOUND, -EIO, "STATUS_CTX_PD_NOT_FOUND"}, {STATUS_CTX_CLOSE_PENDING, -EIO, "STATUS_CTX_CLOSE_PENDING"}, {STATUS_CTX_NO_OUTBUF, -EIO, "STATUS_CTX_NO_OUTBUF"}, {STATUS_CTX_MODEM_INF_NOT_FOUND, -EIO, "STATUS_CTX_MODEM_INF_NOT_FOUND"}, {STATUS_CTX_INVALID_MODEMNAME, -EIO, "STATUS_CTX_INVALID_MODEMNAME"}, {STATUS_CTX_RESPONSE_ERROR, -EIO, "STATUS_CTX_RESPONSE_ERROR"}, {STATUS_CTX_MODEM_RESPONSE_TIMEOUT, -ETIMEDOUT, "STATUS_CTX_MODEM_RESPONSE_TIMEOUT"}, {STATUS_CTX_MODEM_RESPONSE_NO_CARRIER, -EIO, "STATUS_CTX_MODEM_RESPONSE_NO_CARRIER"}, {STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE, -EIO, "STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE"}, {STATUS_CTX_MODEM_RESPONSE_BUSY, -EBUSY, "STATUS_CTX_MODEM_RESPONSE_BUSY"}, {STATUS_CTX_MODEM_RESPONSE_VOICE, -EIO, "STATUS_CTX_MODEM_RESPONSE_VOICE"}, {STATUS_CTX_TD_ERROR, -EIO, "STATUS_CTX_TD_ERROR"}, {STATUS_CTX_LICENSE_CLIENT_INVALID, -EIO, "STATUS_CTX_LICENSE_CLIENT_INVALID"}, {STATUS_CTX_LICENSE_NOT_AVAILABLE, -EIO, "STATUS_CTX_LICENSE_NOT_AVAILABLE"}, {STATUS_CTX_LICENSE_EXPIRED, -EIO, "STATUS_CTX_LICENSE_EXPIRED"}, {STATUS_CTX_WINSTATION_NOT_FOUND, -EIO, "STATUS_CTX_WINSTATION_NOT_FOUND"}, {STATUS_CTX_WINSTATION_NAME_COLLISION, -EIO, "STATUS_CTX_WINSTATION_NAME_COLLISION"}, {STATUS_CTX_WINSTATION_BUSY, -EBUSY, "STATUS_CTX_WINSTATION_BUSY"}, {STATUS_CTX_BAD_VIDEO_MODE, -EIO, "STATUS_CTX_BAD_VIDEO_MODE"}, {STATUS_CTX_GRAPHICS_INVALID, -EIO, "STATUS_CTX_GRAPHICS_INVALID"}, {STATUS_CTX_NOT_CONSOLE, -EIO, "STATUS_CTX_NOT_CONSOLE"}, {STATUS_CTX_CLIENT_QUERY_TIMEOUT, -EIO, "STATUS_CTX_CLIENT_QUERY_TIMEOUT"}, {STATUS_CTX_CONSOLE_DISCONNECT, -EIO, "STATUS_CTX_CONSOLE_DISCONNECT"}, {STATUS_CTX_CONSOLE_CONNECT, -EIO, "STATUS_CTX_CONSOLE_CONNECT"}, {STATUS_CTX_SHADOW_DENIED, -EIO, "STATUS_CTX_SHADOW_DENIED"}, {STATUS_CTX_WINSTATION_ACCESS_DENIED, -EACCES, 
"STATUS_CTX_WINSTATION_ACCESS_DENIED"}, {STATUS_CTX_INVALID_WD, -EIO, "STATUS_CTX_INVALID_WD"}, {STATUS_CTX_WD_NOT_FOUND, -EIO, "STATUS_CTX_WD_NOT_FOUND"}, {STATUS_CTX_SHADOW_INVALID, -EIO, "STATUS_CTX_SHADOW_INVALID"}, {STATUS_CTX_SHADOW_DISABLED, -EIO, "STATUS_CTX_SHADOW_DISABLED"}, {STATUS_RDP_PROTOCOL_ERROR, -EIO, "STATUS_RDP_PROTOCOL_ERROR"}, {STATUS_CTX_CLIENT_LICENSE_NOT_SET, -EIO, "STATUS_CTX_CLIENT_LICENSE_NOT_SET"}, {STATUS_CTX_CLIENT_LICENSE_IN_USE, -EIO, "STATUS_CTX_CLIENT_LICENSE_IN_USE"}, {STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE, -EIO, "STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE"}, {STATUS_CTX_SHADOW_NOT_RUNNING, -EIO, "STATUS_CTX_SHADOW_NOT_RUNNING"}, {STATUS_CTX_LOGON_DISABLED, -EIO, "STATUS_CTX_LOGON_DISABLED"}, {STATUS_CTX_SECURITY_LAYER_ERROR, -EIO, "STATUS_CTX_SECURITY_LAYER_ERROR"}, {STATUS_TS_INCOMPATIBLE_SESSIONS, -EIO, "STATUS_TS_INCOMPATIBLE_SESSIONS"}, {STATUS_MUI_FILE_NOT_FOUND, -EIO, "STATUS_MUI_FILE_NOT_FOUND"}, {STATUS_MUI_INVALID_FILE, -EIO, "STATUS_MUI_INVALID_FILE"}, {STATUS_MUI_INVALID_RC_CONFIG, -EIO, "STATUS_MUI_INVALID_RC_CONFIG"}, {STATUS_MUI_INVALID_LOCALE_NAME, -EIO, "STATUS_MUI_INVALID_LOCALE_NAME"}, {STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME, -EIO, "STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME"}, {STATUS_MUI_FILE_NOT_LOADED, -EIO, "STATUS_MUI_FILE_NOT_LOADED"}, {STATUS_RESOURCE_ENUM_USER_STOP, -EIO, "STATUS_RESOURCE_ENUM_USER_STOP"}, {STATUS_CLUSTER_INVALID_NODE, -EIO, "STATUS_CLUSTER_INVALID_NODE"}, {STATUS_CLUSTER_NODE_EXISTS, -EIO, "STATUS_CLUSTER_NODE_EXISTS"}, {STATUS_CLUSTER_JOIN_IN_PROGRESS, -EIO, "STATUS_CLUSTER_JOIN_IN_PROGRESS"}, {STATUS_CLUSTER_NODE_NOT_FOUND, -EIO, "STATUS_CLUSTER_NODE_NOT_FOUND"}, {STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND, -EIO, "STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND"}, {STATUS_CLUSTER_NETWORK_EXISTS, -EIO, "STATUS_CLUSTER_NETWORK_EXISTS"}, {STATUS_CLUSTER_NETWORK_NOT_FOUND, -EIO, "STATUS_CLUSTER_NETWORK_NOT_FOUND"}, {STATUS_CLUSTER_NETINTERFACE_EXISTS, -EIO, "STATUS_CLUSTER_NETINTERFACE_EXISTS"}, {STATUS_CLUSTER_NETINTERFACE_NOT_FOUND, -EIO, "STATUS_CLUSTER_NETINTERFACE_NOT_FOUND"}, {STATUS_CLUSTER_INVALID_REQUEST, -EIO, "STATUS_CLUSTER_INVALID_REQUEST"}, {STATUS_CLUSTER_INVALID_NETWORK_PROVIDER, -EIO, "STATUS_CLUSTER_INVALID_NETWORK_PROVIDER"}, {STATUS_CLUSTER_NODE_DOWN, -EIO, "STATUS_CLUSTER_NODE_DOWN"}, {STATUS_CLUSTER_NODE_UNREACHABLE, -EIO, "STATUS_CLUSTER_NODE_UNREACHABLE"}, {STATUS_CLUSTER_NODE_NOT_MEMBER, -EIO, "STATUS_CLUSTER_NODE_NOT_MEMBER"}, {STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS, -EIO, "STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS"}, {STATUS_CLUSTER_INVALID_NETWORK, -EIO, "STATUS_CLUSTER_INVALID_NETWORK"}, {STATUS_CLUSTER_NO_NET_ADAPTERS, -EIO, "STATUS_CLUSTER_NO_NET_ADAPTERS"}, {STATUS_CLUSTER_NODE_UP, -EIO, "STATUS_CLUSTER_NODE_UP"}, {STATUS_CLUSTER_NODE_PAUSED, -EIO, "STATUS_CLUSTER_NODE_PAUSED"}, {STATUS_CLUSTER_NODE_NOT_PAUSED, -EIO, "STATUS_CLUSTER_NODE_NOT_PAUSED"}, {STATUS_CLUSTER_NO_SECURITY_CONTEXT, -EIO, "STATUS_CLUSTER_NO_SECURITY_CONTEXT"}, {STATUS_CLUSTER_NETWORK_NOT_INTERNAL, -EIO, "STATUS_CLUSTER_NETWORK_NOT_INTERNAL"}, {STATUS_CLUSTER_POISONED, -EIO, "STATUS_CLUSTER_POISONED"}, {STATUS_ACPI_INVALID_OPCODE, -EIO, "STATUS_ACPI_INVALID_OPCODE"}, {STATUS_ACPI_STACK_OVERFLOW, -EIO, "STATUS_ACPI_STACK_OVERFLOW"}, {STATUS_ACPI_ASSERT_FAILED, -EIO, "STATUS_ACPI_ASSERT_FAILED"}, {STATUS_ACPI_INVALID_INDEX, -EIO, "STATUS_ACPI_INVALID_INDEX"}, {STATUS_ACPI_INVALID_ARGUMENT, -EIO, "STATUS_ACPI_INVALID_ARGUMENT"}, {STATUS_ACPI_FATAL, -EIO, "STATUS_ACPI_FATAL"}, {STATUS_ACPI_INVALID_SUPERNAME, -EIO, 
"STATUS_ACPI_INVALID_SUPERNAME"}, {STATUS_ACPI_INVALID_ARGTYPE, -EIO, "STATUS_ACPI_INVALID_ARGTYPE"}, {STATUS_ACPI_INVALID_OBJTYPE, -EIO, "STATUS_ACPI_INVALID_OBJTYPE"}, {STATUS_ACPI_INVALID_TARGETTYPE, -EIO, "STATUS_ACPI_INVALID_TARGETTYPE"}, {STATUS_ACPI_INCORRECT_ARGUMENT_COUNT, -EIO, "STATUS_ACPI_INCORRECT_ARGUMENT_COUNT"}, {STATUS_ACPI_ADDRESS_NOT_MAPPED, -EIO, "STATUS_ACPI_ADDRESS_NOT_MAPPED"}, {STATUS_ACPI_INVALID_EVENTTYPE, -EIO, "STATUS_ACPI_INVALID_EVENTTYPE"}, {STATUS_ACPI_HANDLER_COLLISION, -EIO, "STATUS_ACPI_HANDLER_COLLISION"}, {STATUS_ACPI_INVALID_DATA, -EIO, "STATUS_ACPI_INVALID_DATA"}, {STATUS_ACPI_INVALID_REGION, -EIO, "STATUS_ACPI_INVALID_REGION"}, {STATUS_ACPI_INVALID_ACCESS_SIZE, -EIO, "STATUS_ACPI_INVALID_ACCESS_SIZE"}, {STATUS_ACPI_ACQUIRE_GLOBAL_LOCK, -EIO, "STATUS_ACPI_ACQUIRE_GLOBAL_LOCK"}, {STATUS_ACPI_ALREADY_INITIALIZED, -EIO, "STATUS_ACPI_ALREADY_INITIALIZED"}, {STATUS_ACPI_NOT_INITIALIZED, -EIO, "STATUS_ACPI_NOT_INITIALIZED"}, {STATUS_ACPI_INVALID_MUTEX_LEVEL, -EIO, "STATUS_ACPI_INVALID_MUTEX_LEVEL"}, {STATUS_ACPI_MUTEX_NOT_OWNED, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNED"}, {STATUS_ACPI_MUTEX_NOT_OWNER, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNER"}, {STATUS_ACPI_RS_ACCESS, -EIO, "STATUS_ACPI_RS_ACCESS"}, {STATUS_ACPI_INVALID_TABLE, -EIO, "STATUS_ACPI_INVALID_TABLE"}, {STATUS_ACPI_REG_HANDLER_FAILED, -EIO, "STATUS_ACPI_REG_HANDLER_FAILED"}, {STATUS_ACPI_POWER_REQUEST_FAILED, -EIO, "STATUS_ACPI_POWER_REQUEST_FAILED"}, {STATUS_SXS_SECTION_NOT_FOUND, -EIO, "STATUS_SXS_SECTION_NOT_FOUND"}, {STATUS_SXS_CANT_GEN_ACTCTX, -EIO, "STATUS_SXS_CANT_GEN_ACTCTX"}, {STATUS_SXS_INVALID_ACTCTXDATA_FORMAT, -EIO, "STATUS_SXS_INVALID_ACTCTXDATA_FORMAT"}, {STATUS_SXS_ASSEMBLY_NOT_FOUND, -EIO, "STATUS_SXS_ASSEMBLY_NOT_FOUND"}, {STATUS_SXS_MANIFEST_FORMAT_ERROR, -EIO, "STATUS_SXS_MANIFEST_FORMAT_ERROR"}, {STATUS_SXS_MANIFEST_PARSE_ERROR, -EIO, "STATUS_SXS_MANIFEST_PARSE_ERROR"}, {STATUS_SXS_ACTIVATION_CONTEXT_DISABLED, -EIO, "STATUS_SXS_ACTIVATION_CONTEXT_DISABLED"}, {STATUS_SXS_KEY_NOT_FOUND, -EIO, "STATUS_SXS_KEY_NOT_FOUND"}, {STATUS_SXS_VERSION_CONFLICT, -EIO, "STATUS_SXS_VERSION_CONFLICT"}, {STATUS_SXS_WRONG_SECTION_TYPE, -EIO, "STATUS_SXS_WRONG_SECTION_TYPE"}, {STATUS_SXS_THREAD_QUERIES_DISABLED, -EIO, "STATUS_SXS_THREAD_QUERIES_DISABLED"}, {STATUS_SXS_ASSEMBLY_MISSING, -EIO, "STATUS_SXS_ASSEMBLY_MISSING"}, {STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET, -EIO, "STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET"}, {STATUS_SXS_EARLY_DEACTIVATION, -EIO, "STATUS_SXS_EARLY_DEACTIVATION"}, {STATUS_SXS_INVALID_DEACTIVATION, -EIO, "STATUS_SXS_INVALID_DEACTIVATION"}, {STATUS_SXS_MULTIPLE_DEACTIVATION, -EIO, "STATUS_SXS_MULTIPLE_DEACTIVATION"}, {STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY, -EIO, "STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY"}, {STATUS_SXS_PROCESS_TERMINATION_REQUESTED, -EIO, "STATUS_SXS_PROCESS_TERMINATION_REQUESTED"}, {STATUS_SXS_CORRUPT_ACTIVATION_STACK, -EIO, "STATUS_SXS_CORRUPT_ACTIVATION_STACK"}, {STATUS_SXS_CORRUPTION, -EIO, "STATUS_SXS_CORRUPTION"}, {STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE, -EIO, "STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE"}, {STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME, -EIO, "STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME"}, {STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE, -EIO, "STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE"}, {STATUS_SXS_IDENTITY_PARSE_ERROR, -EIO, "STATUS_SXS_IDENTITY_PARSE_ERROR"}, {STATUS_SXS_COMPONENT_STORE_CORRUPT, -EIO, "STATUS_SXS_COMPONENT_STORE_CORRUPT"}, {STATUS_SXS_FILE_HASH_MISMATCH, -EIO, "STATUS_SXS_FILE_HASH_MISMATCH"}, 
{STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT, -EIO, "STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT"}, {STATUS_SXS_IDENTITIES_DIFFERENT, -EIO, "STATUS_SXS_IDENTITIES_DIFFERENT"}, {STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT, -EIO, "STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT"}, {STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY, -EIO, "STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY"}, {STATUS_ADVANCED_INSTALLER_FAILED, -EIO, "STATUS_ADVANCED_INSTALLER_FAILED"}, {STATUS_XML_ENCODING_MISMATCH, -EIO, "STATUS_XML_ENCODING_MISMATCH"}, {STATUS_SXS_MANIFEST_TOO_BIG, -EIO, "STATUS_SXS_MANIFEST_TOO_BIG"}, {STATUS_SXS_SETTING_NOT_REGISTERED, -EIO, "STATUS_SXS_SETTING_NOT_REGISTERED"}, {STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE, -EIO, "STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE"}, {STATUS_SMI_PRIMITIVE_INSTALLER_FAILED, -EIO, "STATUS_SMI_PRIMITIVE_INSTALLER_FAILED"}, {STATUS_GENERIC_COMMAND_FAILED, -EIO, "STATUS_GENERIC_COMMAND_FAILED"}, {STATUS_SXS_FILE_HASH_MISSING, -EIO, "STATUS_SXS_FILE_HASH_MISSING"}, {STATUS_TRANSACTIONAL_CONFLICT, -EIO, "STATUS_TRANSACTIONAL_CONFLICT"}, {STATUS_INVALID_TRANSACTION, -EIO, "STATUS_INVALID_TRANSACTION"}, {STATUS_TRANSACTION_NOT_ACTIVE, -EIO, "STATUS_TRANSACTION_NOT_ACTIVE"}, {STATUS_TM_INITIALIZATION_FAILED, -EIO, "STATUS_TM_INITIALIZATION_FAILED"}, {STATUS_RM_NOT_ACTIVE, -EIO, "STATUS_RM_NOT_ACTIVE"}, {STATUS_RM_METADATA_CORRUPT, -EIO, "STATUS_RM_METADATA_CORRUPT"}, {STATUS_TRANSACTION_NOT_JOINED, -EIO, "STATUS_TRANSACTION_NOT_JOINED"}, {STATUS_DIRECTORY_NOT_RM, -EIO, "STATUS_DIRECTORY_NOT_RM"}, {STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE, -EIO, "STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE"}, {STATUS_LOG_RESIZE_INVALID_SIZE, -EIO, "STATUS_LOG_RESIZE_INVALID_SIZE"}, {STATUS_REMOTE_FILE_VERSION_MISMATCH, -EIO, "STATUS_REMOTE_FILE_VERSION_MISMATCH"}, {STATUS_CRM_PROTOCOL_ALREADY_EXISTS, -EIO, "STATUS_CRM_PROTOCOL_ALREADY_EXISTS"}, {STATUS_TRANSACTION_PROPAGATION_FAILED, -EIO, "STATUS_TRANSACTION_PROPAGATION_FAILED"}, {STATUS_CRM_PROTOCOL_NOT_FOUND, -EIO, "STATUS_CRM_PROTOCOL_NOT_FOUND"}, {STATUS_TRANSACTION_SUPERIOR_EXISTS, -EIO, "STATUS_TRANSACTION_SUPERIOR_EXISTS"}, {STATUS_TRANSACTION_REQUEST_NOT_VALID, -EIO, "STATUS_TRANSACTION_REQUEST_NOT_VALID"}, {STATUS_TRANSACTION_NOT_REQUESTED, -EIO, "STATUS_TRANSACTION_NOT_REQUESTED"}, {STATUS_TRANSACTION_ALREADY_ABORTED, -EIO, "STATUS_TRANSACTION_ALREADY_ABORTED"}, {STATUS_TRANSACTION_ALREADY_COMMITTED, -EIO, "STATUS_TRANSACTION_ALREADY_COMMITTED"}, {STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER, -EIO, "STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER"}, {STATUS_CURRENT_TRANSACTION_NOT_VALID, -EIO, "STATUS_CURRENT_TRANSACTION_NOT_VALID"}, {STATUS_LOG_GROWTH_FAILED, -EIO, "STATUS_LOG_GROWTH_FAILED"}, {STATUS_OBJECT_NO_LONGER_EXISTS, -EIO, "STATUS_OBJECT_NO_LONGER_EXISTS"}, {STATUS_STREAM_MINIVERSION_NOT_FOUND, -EIO, "STATUS_STREAM_MINIVERSION_NOT_FOUND"}, {STATUS_STREAM_MINIVERSION_NOT_VALID, -EIO, "STATUS_STREAM_MINIVERSION_NOT_VALID"}, {STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION, -EIO, "STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION"}, {STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT, -EIO, "STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT"}, {STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS, -EIO, "STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS"}, {STATUS_HANDLE_NO_LONGER_VALID, -EIO, "STATUS_HANDLE_NO_LONGER_VALID"}, {STATUS_LOG_CORRUPTION_DETECTED, -EIO, "STATUS_LOG_CORRUPTION_DETECTED"}, {STATUS_RM_DISCONNECTED, -EIO, "STATUS_RM_DISCONNECTED"}, {STATUS_ENLISTMENT_NOT_SUPERIOR, -EIO, "STATUS_ENLISTMENT_NOT_SUPERIOR"}, 
{STATUS_FILE_IDENTITY_NOT_PERSISTENT, -EIO, "STATUS_FILE_IDENTITY_NOT_PERSISTENT"}, {STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY, -EIO, "STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY"}, {STATUS_CANT_CROSS_RM_BOUNDARY, -EIO, "STATUS_CANT_CROSS_RM_BOUNDARY"}, {STATUS_TXF_DIR_NOT_EMPTY, -EIO, "STATUS_TXF_DIR_NOT_EMPTY"}, {STATUS_INDOUBT_TRANSACTIONS_EXIST, -EIO, "STATUS_INDOUBT_TRANSACTIONS_EXIST"}, {STATUS_TM_VOLATILE, -EIO, "STATUS_TM_VOLATILE"}, {STATUS_ROLLBACK_TIMER_EXPIRED, -EIO, "STATUS_ROLLBACK_TIMER_EXPIRED"}, {STATUS_TXF_ATTRIBUTE_CORRUPT, -EIO, "STATUS_TXF_ATTRIBUTE_CORRUPT"}, {STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION, -EIO, "STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION"}, {STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED, -EIO, "STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED"}, {STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE, -EIO, "STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE"}, {STATUS_TRANSACTION_REQUIRED_PROMOTION, -EIO, "STATUS_TRANSACTION_REQUIRED_PROMOTION"}, {STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION, -EIO, "STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION"}, {STATUS_TRANSACTIONS_NOT_FROZEN, -EIO, "STATUS_TRANSACTIONS_NOT_FROZEN"}, {STATUS_TRANSACTION_FREEZE_IN_PROGRESS, -EIO, "STATUS_TRANSACTION_FREEZE_IN_PROGRESS"}, {STATUS_NOT_SNAPSHOT_VOLUME, -EIO, "STATUS_NOT_SNAPSHOT_VOLUME"}, {STATUS_NO_SAVEPOINT_WITH_OPEN_FILES, -EIO, "STATUS_NO_SAVEPOINT_WITH_OPEN_FILES"}, {STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION, -EIO, "STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION"}, {STATUS_TM_IDENTITY_MISMATCH, -EIO, "STATUS_TM_IDENTITY_MISMATCH"}, {STATUS_FLOATED_SECTION, -EIO, "STATUS_FLOATED_SECTION"}, {STATUS_CANNOT_ACCEPT_TRANSACTED_WORK, -EIO, "STATUS_CANNOT_ACCEPT_TRANSACTED_WORK"}, {STATUS_CANNOT_ABORT_TRANSACTIONS, -EIO, "STATUS_CANNOT_ABORT_TRANSACTIONS"}, {STATUS_TRANSACTION_NOT_FOUND, -EIO, "STATUS_TRANSACTION_NOT_FOUND"}, {STATUS_RESOURCEMANAGER_NOT_FOUND, -EIO, "STATUS_RESOURCEMANAGER_NOT_FOUND"}, {STATUS_ENLISTMENT_NOT_FOUND, -EIO, "STATUS_ENLISTMENT_NOT_FOUND"}, {STATUS_TRANSACTIONMANAGER_NOT_FOUND, -EIO, "STATUS_TRANSACTIONMANAGER_NOT_FOUND"}, {STATUS_TRANSACTIONMANAGER_NOT_ONLINE, -EIO, "STATUS_TRANSACTIONMANAGER_NOT_ONLINE"}, {STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION, -EIO, "STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION"}, {STATUS_TRANSACTION_NOT_ROOT, -EIO, "STATUS_TRANSACTION_NOT_ROOT"}, {STATUS_TRANSACTION_OBJECT_EXPIRED, -EIO, "STATUS_TRANSACTION_OBJECT_EXPIRED"}, {STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION, -EIO, "STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION"}, {STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED, -EIO, "STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED"}, {STATUS_TRANSACTION_RECORD_TOO_LONG, -EIO, "STATUS_TRANSACTION_RECORD_TOO_LONG"}, {STATUS_NO_LINK_TRACKING_IN_TRANSACTION, -EIO, "STATUS_NO_LINK_TRACKING_IN_TRANSACTION"}, {STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION, -EOPNOTSUPP, "STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION"}, {STATUS_TRANSACTION_INTEGRITY_VIOLATED, -EIO, "STATUS_TRANSACTION_INTEGRITY_VIOLATED"}, {STATUS_LOG_SECTOR_INVALID, -EIO, "STATUS_LOG_SECTOR_INVALID"}, {STATUS_LOG_SECTOR_PARITY_INVALID, -EIO, "STATUS_LOG_SECTOR_PARITY_INVALID"}, {STATUS_LOG_SECTOR_REMAPPED, -EIO, "STATUS_LOG_SECTOR_REMAPPED"}, {STATUS_LOG_BLOCK_INCOMPLETE, -EIO, "STATUS_LOG_BLOCK_INCOMPLETE"}, {STATUS_LOG_INVALID_RANGE, -EIO, "STATUS_LOG_INVALID_RANGE"}, {STATUS_LOG_BLOCKS_EXHAUSTED, -EIO, "STATUS_LOG_BLOCKS_EXHAUSTED"}, {STATUS_LOG_READ_CONTEXT_INVALID, -EIO, "STATUS_LOG_READ_CONTEXT_INVALID"}, {STATUS_LOG_RESTART_INVALID, -EIO, "STATUS_LOG_RESTART_INVALID"}, {STATUS_LOG_BLOCK_VERSION, -EIO, 
"STATUS_LOG_BLOCK_VERSION"}, {STATUS_LOG_BLOCK_INVALID, -EIO, "STATUS_LOG_BLOCK_INVALID"}, {STATUS_LOG_READ_MODE_INVALID, -EIO, "STATUS_LOG_READ_MODE_INVALID"}, {STATUS_LOG_METADATA_CORRUPT, -EIO, "STATUS_LOG_METADATA_CORRUPT"}, {STATUS_LOG_METADATA_INVALID, -EIO, "STATUS_LOG_METADATA_INVALID"}, {STATUS_LOG_METADATA_INCONSISTENT, -EIO, "STATUS_LOG_METADATA_INCONSISTENT"}, {STATUS_LOG_RESERVATION_INVALID, -EIO, "STATUS_LOG_RESERVATION_INVALID"}, {STATUS_LOG_CANT_DELETE, -EIO, "STATUS_LOG_CANT_DELETE"}, {STATUS_LOG_CONTAINER_LIMIT_EXCEEDED, -EIO, "STATUS_LOG_CONTAINER_LIMIT_EXCEEDED"}, {STATUS_LOG_START_OF_LOG, -EIO, "STATUS_LOG_START_OF_LOG"}, {STATUS_LOG_POLICY_ALREADY_INSTALLED, -EIO, "STATUS_LOG_POLICY_ALREADY_INSTALLED"}, {STATUS_LOG_POLICY_NOT_INSTALLED, -EIO, "STATUS_LOG_POLICY_NOT_INSTALLED"}, {STATUS_LOG_POLICY_INVALID, -EIO, "STATUS_LOG_POLICY_INVALID"}, {STATUS_LOG_POLICY_CONFLICT, -EIO, "STATUS_LOG_POLICY_CONFLICT"}, {STATUS_LOG_PINNED_ARCHIVE_TAIL, -EIO, "STATUS_LOG_PINNED_ARCHIVE_TAIL"}, {STATUS_LOG_RECORD_NONEXISTENT, -EIO, "STATUS_LOG_RECORD_NONEXISTENT"}, {STATUS_LOG_RECORDS_RESERVED_INVALID, -EIO, "STATUS_LOG_RECORDS_RESERVED_INVALID"}, {STATUS_LOG_SPACE_RESERVED_INVALID, -EIO, "STATUS_LOG_SPACE_RESERVED_INVALID"}, {STATUS_LOG_TAIL_INVALID, -EIO, "STATUS_LOG_TAIL_INVALID"}, {STATUS_LOG_FULL, -EIO, "STATUS_LOG_FULL"}, {STATUS_LOG_MULTIPLEXED, -EIO, "STATUS_LOG_MULTIPLEXED"}, {STATUS_LOG_DEDICATED, -EIO, "STATUS_LOG_DEDICATED"}, {STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS, -EIO, "STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS"}, {STATUS_LOG_ARCHIVE_IN_PROGRESS, -EIO, "STATUS_LOG_ARCHIVE_IN_PROGRESS"}, {STATUS_LOG_EPHEMERAL, -EIO, "STATUS_LOG_EPHEMERAL"}, {STATUS_LOG_NOT_ENOUGH_CONTAINERS, -EIO, "STATUS_LOG_NOT_ENOUGH_CONTAINERS"}, {STATUS_LOG_CLIENT_ALREADY_REGISTERED, -EIO, "STATUS_LOG_CLIENT_ALREADY_REGISTERED"}, {STATUS_LOG_CLIENT_NOT_REGISTERED, -EIO, "STATUS_LOG_CLIENT_NOT_REGISTERED"}, {STATUS_LOG_FULL_HANDLER_IN_PROGRESS, -EIO, "STATUS_LOG_FULL_HANDLER_IN_PROGRESS"}, {STATUS_LOG_CONTAINER_READ_FAILED, -EIO, "STATUS_LOG_CONTAINER_READ_FAILED"}, {STATUS_LOG_CONTAINER_WRITE_FAILED, -EIO, "STATUS_LOG_CONTAINER_WRITE_FAILED"}, {STATUS_LOG_CONTAINER_OPEN_FAILED, -EIO, "STATUS_LOG_CONTAINER_OPEN_FAILED"}, {STATUS_LOG_CONTAINER_STATE_INVALID, -EIO, "STATUS_LOG_CONTAINER_STATE_INVALID"}, {STATUS_LOG_STATE_INVALID, -EIO, "STATUS_LOG_STATE_INVALID"}, {STATUS_LOG_PINNED, -EIO, "STATUS_LOG_PINNED"}, {STATUS_LOG_METADATA_FLUSH_FAILED, -EIO, "STATUS_LOG_METADATA_FLUSH_FAILED"}, {STATUS_LOG_INCONSISTENT_SECURITY, -EIO, "STATUS_LOG_INCONSISTENT_SECURITY"}, {STATUS_LOG_APPENDED_FLUSH_FAILED, -EIO, "STATUS_LOG_APPENDED_FLUSH_FAILED"}, {STATUS_LOG_PINNED_RESERVATION, -EIO, "STATUS_LOG_PINNED_RESERVATION"}, {STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD, -EIO, "STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD"}, {STATUS_FLT_NO_HANDLER_DEFINED, -EIO, "STATUS_FLT_NO_HANDLER_DEFINED"}, {STATUS_FLT_CONTEXT_ALREADY_DEFINED, -EIO, "STATUS_FLT_CONTEXT_ALREADY_DEFINED"}, {STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST, -EIO, "STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST"}, {STATUS_FLT_DISALLOW_FAST_IO, -EIO, "STATUS_FLT_DISALLOW_FAST_IO"}, {STATUS_FLT_INVALID_NAME_REQUEST, -EIO, "STATUS_FLT_INVALID_NAME_REQUEST"}, {STATUS_FLT_NOT_SAFE_TO_POST_OPERATION, -EIO, "STATUS_FLT_NOT_SAFE_TO_POST_OPERATION"}, {STATUS_FLT_NOT_INITIALIZED, -EIO, "STATUS_FLT_NOT_INITIALIZED"}, {STATUS_FLT_FILTER_NOT_READY, -EIO, "STATUS_FLT_FILTER_NOT_READY"}, {STATUS_FLT_POST_OPERATION_CLEANUP, -EIO, "STATUS_FLT_POST_OPERATION_CLEANUP"}, {STATUS_FLT_INTERNAL_ERROR, -EIO, 
"STATUS_FLT_INTERNAL_ERROR"}, {STATUS_FLT_DELETING_OBJECT, -EIO, "STATUS_FLT_DELETING_OBJECT"}, {STATUS_FLT_MUST_BE_NONPAGED_POOL, -EIO, "STATUS_FLT_MUST_BE_NONPAGED_POOL"}, {STATUS_FLT_DUPLICATE_ENTRY, -EIO, "STATUS_FLT_DUPLICATE_ENTRY"}, {STATUS_FLT_CBDQ_DISABLED, -EIO, "STATUS_FLT_CBDQ_DISABLED"}, {STATUS_FLT_DO_NOT_ATTACH, -EIO, "STATUS_FLT_DO_NOT_ATTACH"}, {STATUS_FLT_DO_NOT_DETACH, -EIO, "STATUS_FLT_DO_NOT_DETACH"}, {STATUS_FLT_INSTANCE_ALTITUDE_COLLISION, -EIO, "STATUS_FLT_INSTANCE_ALTITUDE_COLLISION"}, {STATUS_FLT_INSTANCE_NAME_COLLISION, -EIO, "STATUS_FLT_INSTANCE_NAME_COLLISION"}, {STATUS_FLT_FILTER_NOT_FOUND, -EIO, "STATUS_FLT_FILTER_NOT_FOUND"}, {STATUS_FLT_VOLUME_NOT_FOUND, -EIO, "STATUS_FLT_VOLUME_NOT_FOUND"}, {STATUS_FLT_INSTANCE_NOT_FOUND, -EIO, "STATUS_FLT_INSTANCE_NOT_FOUND"}, {STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND, -EIO, "STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND"}, {STATUS_FLT_INVALID_CONTEXT_REGISTRATION, -EIO, "STATUS_FLT_INVALID_CONTEXT_REGISTRATION"}, {STATUS_FLT_NAME_CACHE_MISS, -EIO, "STATUS_FLT_NAME_CACHE_MISS"}, {STATUS_FLT_NO_DEVICE_OBJECT, -EIO, "STATUS_FLT_NO_DEVICE_OBJECT"}, {STATUS_FLT_VOLUME_ALREADY_MOUNTED, -EIO, "STATUS_FLT_VOLUME_ALREADY_MOUNTED"}, {STATUS_FLT_ALREADY_ENLISTED, -EIO, "STATUS_FLT_ALREADY_ENLISTED"}, {STATUS_FLT_CONTEXT_ALREADY_LINKED, -EIO, "STATUS_FLT_CONTEXT_ALREADY_LINKED"}, {STATUS_FLT_NO_WAITER_FOR_REPLY, -EIO, "STATUS_FLT_NO_WAITER_FOR_REPLY"}, {STATUS_MONITOR_NO_DESCRIPTOR, -EIO, "STATUS_MONITOR_NO_DESCRIPTOR"}, {STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT, -EIO, "STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT"}, {STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM, -EIO, "STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM"}, {STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK, -EIO, "STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK"}, {STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED, -EIO, "STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED"}, {STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK, -EIO, "STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK"}, {STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK, -EIO, "STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK"}, {STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA, -EIO, "STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA"}, {STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK, -EIO, "STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK"}, {STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER, -EIO, "STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER"}, {STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER, -EIO, "STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER"}, {STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER, -EIO, "STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER"}, {STATUS_GRAPHICS_ADAPTER_WAS_RESET, -EIO, "STATUS_GRAPHICS_ADAPTER_WAS_RESET"}, {STATUS_GRAPHICS_INVALID_DRIVER_MODEL, -EIO, "STATUS_GRAPHICS_INVALID_DRIVER_MODEL"}, {STATUS_GRAPHICS_PRESENT_MODE_CHANGED, -EIO, "STATUS_GRAPHICS_PRESENT_MODE_CHANGED"}, {STATUS_GRAPHICS_PRESENT_OCCLUDED, -EIO, "STATUS_GRAPHICS_PRESENT_OCCLUDED"}, {STATUS_GRAPHICS_PRESENT_DENIED, -EIO, "STATUS_GRAPHICS_PRESENT_DENIED"}, {STATUS_GRAPHICS_CANNOTCOLORCONVERT, -EIO, "STATUS_GRAPHICS_CANNOTCOLORCONVERT"}, {STATUS_GRAPHICS_NO_VIDEO_MEMORY, -EIO, "STATUS_GRAPHICS_NO_VIDEO_MEMORY"}, {STATUS_GRAPHICS_CANT_LOCK_MEMORY, -EIO, "STATUS_GRAPHICS_CANT_LOCK_MEMORY"}, {STATUS_GRAPHICS_ALLOCATION_BUSY, -EBUSY, "STATUS_GRAPHICS_ALLOCATION_BUSY"}, {STATUS_GRAPHICS_TOO_MANY_REFERENCES, -EIO, "STATUS_GRAPHICS_TOO_MANY_REFERENCES"}, {STATUS_GRAPHICS_TRY_AGAIN_LATER, -EIO, "STATUS_GRAPHICS_TRY_AGAIN_LATER"}, {STATUS_GRAPHICS_TRY_AGAIN_NOW, -EIO, "STATUS_GRAPHICS_TRY_AGAIN_NOW"}, 
{STATUS_GRAPHICS_ALLOCATION_INVALID, -EIO, "STATUS_GRAPHICS_ALLOCATION_INVALID"}, {STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE, -EIO, "STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE"}, {STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED, -EIO, "STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED"}, {STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION, -EIO, "STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION"}, {STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE, -EIO, "STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE"}, {STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION, -EIO, "STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION"}, {STATUS_GRAPHICS_ALLOCATION_CLOSED, -EIO, "STATUS_GRAPHICS_ALLOCATION_CLOSED"}, {STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE, -EIO, "STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE"}, {STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE, -EIO, "STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE"}, {STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE, -EIO, "STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE"}, {STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST, -EIO, "STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST"}, {STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE, -EIO, "STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE"}, {STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY"}, {STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED"}, {STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED"}, {STATUS_GRAPHICS_INVALID_VIDPN, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN"}, {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE, -EIO, "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE"}, {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET, -EIO, "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET"}, {STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED"}, {STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET"}, {STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET"}, {STATUS_GRAPHICS_INVALID_FREQUENCY, -EIO, "STATUS_GRAPHICS_INVALID_FREQUENCY"}, {STATUS_GRAPHICS_INVALID_ACTIVE_REGION, -EIO, "STATUS_GRAPHICS_INVALID_ACTIVE_REGION"}, {STATUS_GRAPHICS_INVALID_TOTAL_REGION, -EIO, "STATUS_GRAPHICS_INVALID_TOTAL_REGION"}, {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE, -EIO, "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE"}, {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE, -EIO, "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE"}, {STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET, -EIO, "STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET"}, {STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY"}, {STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET, -EIO, "STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET"}, {STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET, -EIO, "STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET"}, {STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET, -EIO, "STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET"}, {STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET, -EIO, "STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET"}, {STATUS_GRAPHICS_TARGET_ALREADY_IN_SET, -EIO, "STATUS_GRAPHICS_TARGET_ALREADY_IN_SET"}, {STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH"}, {STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY"}, {STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET"}, 
{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE"}, {STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET, -EIO, "STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET"}, {STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET, -EIO, "STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET"}, {STATUS_GRAPHICS_STALE_MODESET, -EIO, "STATUS_GRAPHICS_STALE_MODESET"}, {STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET"}, {STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE"}, {STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN, -EIO, "STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN"}, {STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE, -EIO, "STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE"}, {STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION, -EIO, "STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION"}, {STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES, -EIO, "STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES"}, {STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY"}, {STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE, -EIO, "STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE"}, {STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET, -EIO, "STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET"}, {STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET, -EIO, "STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET"}, {STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR, -EIO, "STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR"}, {STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET, -EIO, "STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET"}, {STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET, -EIO, "STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET"}, {STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE, -EIO, "STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE"}, {STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE"}, {STATUS_GRAPHICS_RESOURCES_NOT_RELATED, -EIO, "STATUS_GRAPHICS_RESOURCES_NOT_RELATED"}, {STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE, -EIO, "STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE"}, {STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE, -EIO, "STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE"}, {STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET, -EIO, "STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET"}, {STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER, -EIO, "STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER"}, {STATUS_GRAPHICS_NO_VIDPNMGR, -EIO, "STATUS_GRAPHICS_NO_VIDPNMGR"}, {STATUS_GRAPHICS_NO_ACTIVE_VIDPN, -EIO, "STATUS_GRAPHICS_NO_ACTIVE_VIDPN"}, {STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY"}, {STATUS_GRAPHICS_MONITOR_NOT_CONNECTED, -EIO, "STATUS_GRAPHICS_MONITOR_NOT_CONNECTED"}, {STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY"}, {STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE, -EIO, "STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE"}, {STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE, -EIO, "STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE"}, {STATUS_GRAPHICS_INVALID_STRIDE, -EIO, "STATUS_GRAPHICS_INVALID_STRIDE"}, {STATUS_GRAPHICS_INVALID_PIXELFORMAT, -EIO, "STATUS_GRAPHICS_INVALID_PIXELFORMAT"}, {STATUS_GRAPHICS_INVALID_COLORBASIS, -EIO, "STATUS_GRAPHICS_INVALID_COLORBASIS"}, {STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE, -EIO, "STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE"}, {STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY, -EIO, "STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY"}, 
{STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT, -EIO, "STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT"}, {STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE, -EIO, "STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE"}, {STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN, -EIO, "STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN"}, {STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL, -EIO, "STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL"}, {STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION, -EIO, "STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION"}, {STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED"}, {STATUS_GRAPHICS_INVALID_GAMMA_RAMP, -EIO, "STATUS_GRAPHICS_INVALID_GAMMA_RAMP"}, {STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED"}, {STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED"}, {STATUS_GRAPHICS_MODE_NOT_IN_MODESET, -EIO, "STATUS_GRAPHICS_MODE_NOT_IN_MODESET"}, {STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON"}, {STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE, -EIO, "STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE"}, {STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE, -EIO, "STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE"}, {STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS, -EIO, "STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS"}, {STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING, -EIO, "STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING"}, {STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED, -EIO, "STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED"}, {STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS, -EIO, "STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS"}, {STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT, -EIO, "STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT"}, {STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM, -EIO, "STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM"}, {STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN"}, {STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT, -EIO, "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT"}, {STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED, -EIO, "STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED"}, {STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION, -EIO, "STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION"}, {STATUS_GRAPHICS_INVALID_CLIENT_TYPE, -EIO, "STATUS_GRAPHICS_INVALID_CLIENT_TYPE"}, {STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET, -EIO, "STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET"}, {STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED, -EIO, "STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED"}, {STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED"}, {STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER, -EIO, "STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER"}, {STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED, -EIO, "STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED"}, {STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED, -EIO, "STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED"}, {STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY, -EIO, "STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY"}, {STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED, -EIO, "STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED"}, {STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON, -EIO, "STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON"}, {STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE, -EIO, "STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE"}, {STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER, -EIO, 
"STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER"}, {STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED, -EIO, "STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED"}, {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS, -EIO, "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS"}, {STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST, -EIO, "STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST"}, {STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR, -EIO, "STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR"}, {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS, -EIO, "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS"}, {STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED"}, {STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST, -EIO, "STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST"}, {STATUS_GRAPHICS_OPM_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_OPM_NOT_SUPPORTED"}, {STATUS_GRAPHICS_COPP_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_COPP_NOT_SUPPORTED"}, {STATUS_GRAPHICS_UAB_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_UAB_NOT_SUPPORTED"}, {STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS, -EIO, "STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS"}, {STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL, -EIO, "STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL"}, {STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST, -EIO, "STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST"}, {STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO, "STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"}, {STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO, "STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"}, {STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED"}, {STATUS_GRAPHICS_OPM_INVALID_POINTER, -EIO, "STATUS_GRAPHICS_OPM_INVALID_POINTER"}, {STATUS_GRAPHICS_OPM_INTERNAL_ERROR, -EIO, "STATUS_GRAPHICS_OPM_INTERNAL_ERROR"}, {STATUS_GRAPHICS_OPM_INVALID_HANDLE, -EIO, "STATUS_GRAPHICS_OPM_INVALID_HANDLE"}, {STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO, "STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"}, {STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH, -EIO, "STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH"}, {STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED, -EIO, "STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED"}, {STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED, -EIO, "STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED"}, {STATUS_GRAPHICS_PVP_HFS_FAILED, -EIO, "STATUS_GRAPHICS_PVP_HFS_FAILED"}, {STATUS_GRAPHICS_OPM_INVALID_SRM, -EIO, "STATUS_GRAPHICS_OPM_INVALID_SRM"}, {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP, -EIO, "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP"}, {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP, -EIO, "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP"}, {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA, -EIO, "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA"}, {STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET, -EIO, "STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET"}, {STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH, -EIO, "STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH"}, {STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE, -EIO, "STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE"}, {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS, -EIO, "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS"}, {STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO, "STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS"}, {STATUS_GRAPHICS_I2C_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_I2C_NOT_SUPPORTED"}, 
{STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST, -EIO, "STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST"}, {STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA, -EIO, "STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA"}, {STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA, -EIO, "STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA"}, {STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED"}, {STATUS_GRAPHICS_DDCCI_INVALID_DATA, -EIO, "STATUS_GRAPHICS_DDCCI_INVALID_DATA"}, {STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE, -EIO, "STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE"}, {STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING, -EIO, "STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING"}, {STATUS_GRAPHICS_MCA_INTERNAL_ERROR, -EIO, "STATUS_GRAPHICS_MCA_INTERNAL_ERROR"}, {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND, -EIO, "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND"}, {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH, -EIO, "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH"}, {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM, -EIO, "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM"}, {STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE, -EIO, "STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE"}, {STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS, -EIO, "STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS"}, {STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED, -EIO, "STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED"}, {STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO, "STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"}, {STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO, "STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"}, {STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO, "STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED"}, {STATUS_GRAPHICS_INVALID_POINTER, -EIO, "STATUS_GRAPHICS_INVALID_POINTER"}, {STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO, "STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"}, {STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL, -EIO, "STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL"}, {STATUS_GRAPHICS_INTERNAL_ERROR, -EIO, "STATUS_GRAPHICS_INTERNAL_ERROR"}, {STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO, "STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS"}, {STATUS_FVE_LOCKED_VOLUME, -EIO, "STATUS_FVE_LOCKED_VOLUME"}, {STATUS_FVE_NOT_ENCRYPTED, -EIO, "STATUS_FVE_NOT_ENCRYPTED"}, {STATUS_FVE_BAD_INFORMATION, -EIO, "STATUS_FVE_BAD_INFORMATION"}, {STATUS_FVE_TOO_SMALL, -EIO, "STATUS_FVE_TOO_SMALL"}, {STATUS_FVE_FAILED_WRONG_FS, -EIO, "STATUS_FVE_FAILED_WRONG_FS"}, {STATUS_FVE_FAILED_BAD_FS, -EIO, "STATUS_FVE_FAILED_BAD_FS"}, {STATUS_FVE_FS_NOT_EXTENDED, -EIO, "STATUS_FVE_FS_NOT_EXTENDED"}, {STATUS_FVE_FS_MOUNTED, -EIO, "STATUS_FVE_FS_MOUNTED"}, {STATUS_FVE_NO_LICENSE, -EIO, "STATUS_FVE_NO_LICENSE"}, {STATUS_FVE_ACTION_NOT_ALLOWED, -EIO, "STATUS_FVE_ACTION_NOT_ALLOWED"}, {STATUS_FVE_BAD_DATA, -EIO, "STATUS_FVE_BAD_DATA"}, {STATUS_FVE_VOLUME_NOT_BOUND, -EIO, "STATUS_FVE_VOLUME_NOT_BOUND"}, {STATUS_FVE_NOT_DATA_VOLUME, -EIO, "STATUS_FVE_NOT_DATA_VOLUME"}, {STATUS_FVE_CONV_READ_ERROR, -EIO, "STATUS_FVE_CONV_READ_ERROR"}, {STATUS_FVE_CONV_WRITE_ERROR, -EIO, "STATUS_FVE_CONV_WRITE_ERROR"}, {STATUS_FVE_OVERLAPPED_UPDATE, -EIO, "STATUS_FVE_OVERLAPPED_UPDATE"}, {STATUS_FVE_FAILED_SECTOR_SIZE, -EIO, "STATUS_FVE_FAILED_SECTOR_SIZE"}, {STATUS_FVE_FAILED_AUTHENTICATION, -EIO, "STATUS_FVE_FAILED_AUTHENTICATION"}, {STATUS_FVE_NOT_OS_VOLUME, -EIO, "STATUS_FVE_NOT_OS_VOLUME"}, {STATUS_FVE_KEYFILE_NOT_FOUND, -EIO, "STATUS_FVE_KEYFILE_NOT_FOUND"}, 
{STATUS_FVE_KEYFILE_INVALID, -EIO, "STATUS_FVE_KEYFILE_INVALID"}, {STATUS_FVE_KEYFILE_NO_VMK, -EIO, "STATUS_FVE_KEYFILE_NO_VMK"}, {STATUS_FVE_TPM_DISABLED, -EIO, "STATUS_FVE_TPM_DISABLED"}, {STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO, -EIO, "STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO"}, {STATUS_FVE_TPM_INVALID_PCR, -EIO, "STATUS_FVE_TPM_INVALID_PCR"}, {STATUS_FVE_TPM_NO_VMK, -EIO, "STATUS_FVE_TPM_NO_VMK"}, {STATUS_FVE_PIN_INVALID, -EIO, "STATUS_FVE_PIN_INVALID"}, {STATUS_FVE_AUTH_INVALID_APPLICATION, -EIO, "STATUS_FVE_AUTH_INVALID_APPLICATION"}, {STATUS_FVE_AUTH_INVALID_CONFIG, -EIO, "STATUS_FVE_AUTH_INVALID_CONFIG"}, {STATUS_FVE_DEBUGGER_ENABLED, -EIO, "STATUS_FVE_DEBUGGER_ENABLED"}, {STATUS_FVE_DRY_RUN_FAILED, -EIO, "STATUS_FVE_DRY_RUN_FAILED"}, {STATUS_FVE_BAD_METADATA_POINTER, -EIO, "STATUS_FVE_BAD_METADATA_POINTER"}, {STATUS_FVE_OLD_METADATA_COPY, -EIO, "STATUS_FVE_OLD_METADATA_COPY"}, {STATUS_FVE_REBOOT_REQUIRED, -EIO, "STATUS_FVE_REBOOT_REQUIRED"}, {STATUS_FVE_RAW_ACCESS, -EIO, "STATUS_FVE_RAW_ACCESS"}, {STATUS_FVE_RAW_BLOCKED, -EIO, "STATUS_FVE_RAW_BLOCKED"}, {STATUS_FWP_CALLOUT_NOT_FOUND, -EIO, "STATUS_FWP_CALLOUT_NOT_FOUND"}, {STATUS_FWP_CONDITION_NOT_FOUND, -EIO, "STATUS_FWP_CONDITION_NOT_FOUND"}, {STATUS_FWP_FILTER_NOT_FOUND, -EIO, "STATUS_FWP_FILTER_NOT_FOUND"}, {STATUS_FWP_LAYER_NOT_FOUND, -EIO, "STATUS_FWP_LAYER_NOT_FOUND"}, {STATUS_FWP_PROVIDER_NOT_FOUND, -EIO, "STATUS_FWP_PROVIDER_NOT_FOUND"}, {STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND, -EIO, "STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND"}, {STATUS_FWP_SUBLAYER_NOT_FOUND, -EIO, "STATUS_FWP_SUBLAYER_NOT_FOUND"}, {STATUS_FWP_NOT_FOUND, -EIO, "STATUS_FWP_NOT_FOUND"}, {STATUS_FWP_ALREADY_EXISTS, -EIO, "STATUS_FWP_ALREADY_EXISTS"}, {STATUS_FWP_IN_USE, -EIO, "STATUS_FWP_IN_USE"}, {STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS, -EIO, "STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS"}, {STATUS_FWP_WRONG_SESSION, -EIO, "STATUS_FWP_WRONG_SESSION"}, {STATUS_FWP_NO_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_NO_TXN_IN_PROGRESS"}, {STATUS_FWP_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_TXN_IN_PROGRESS"}, {STATUS_FWP_TXN_ABORTED, -EIO, "STATUS_FWP_TXN_ABORTED"}, {STATUS_FWP_SESSION_ABORTED, -EIO, "STATUS_FWP_SESSION_ABORTED"}, {STATUS_FWP_INCOMPATIBLE_TXN, -EIO, "STATUS_FWP_INCOMPATIBLE_TXN"}, {STATUS_FWP_TIMEOUT, -ETIMEDOUT, "STATUS_FWP_TIMEOUT"}, {STATUS_FWP_NET_EVENTS_DISABLED, -EIO, "STATUS_FWP_NET_EVENTS_DISABLED"}, {STATUS_FWP_INCOMPATIBLE_LAYER, -EIO, "STATUS_FWP_INCOMPATIBLE_LAYER"}, {STATUS_FWP_KM_CLIENTS_ONLY, -EIO, "STATUS_FWP_KM_CLIENTS_ONLY"}, {STATUS_FWP_LIFETIME_MISMATCH, -EIO, "STATUS_FWP_LIFETIME_MISMATCH"}, {STATUS_FWP_BUILTIN_OBJECT, -EIO, "STATUS_FWP_BUILTIN_OBJECT"}, {STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS, -EIO, "STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS"}, {STATUS_FWP_TOO_MANY_CALLOUTS, -EIO, "STATUS_FWP_TOO_MANY_CALLOUTS"}, {STATUS_FWP_NOTIFICATION_DROPPED, -EIO, "STATUS_FWP_NOTIFICATION_DROPPED"}, {STATUS_FWP_TRAFFIC_MISMATCH, -EIO, "STATUS_FWP_TRAFFIC_MISMATCH"}, {STATUS_FWP_INCOMPATIBLE_SA_STATE, -EIO, "STATUS_FWP_INCOMPATIBLE_SA_STATE"}, {STATUS_FWP_NULL_POINTER, -EIO, "STATUS_FWP_NULL_POINTER"}, {STATUS_FWP_INVALID_ENUMERATOR, -EIO, "STATUS_FWP_INVALID_ENUMERATOR"}, {STATUS_FWP_INVALID_FLAGS, -EIO, "STATUS_FWP_INVALID_FLAGS"}, {STATUS_FWP_INVALID_NET_MASK, -EIO, "STATUS_FWP_INVALID_NET_MASK"}, {STATUS_FWP_INVALID_RANGE, -EIO, "STATUS_FWP_INVALID_RANGE"}, {STATUS_FWP_INVALID_INTERVAL, -EIO, "STATUS_FWP_INVALID_INTERVAL"}, {STATUS_FWP_ZERO_LENGTH_ARRAY, -EIO, "STATUS_FWP_ZERO_LENGTH_ARRAY"}, {STATUS_FWP_NULL_DISPLAY_NAME, -EIO, "STATUS_FWP_NULL_DISPLAY_NAME"}, 
{STATUS_FWP_INVALID_ACTION_TYPE, -EIO, "STATUS_FWP_INVALID_ACTION_TYPE"}, {STATUS_FWP_INVALID_WEIGHT, -EIO, "STATUS_FWP_INVALID_WEIGHT"}, {STATUS_FWP_MATCH_TYPE_MISMATCH, -EIO, "STATUS_FWP_MATCH_TYPE_MISMATCH"}, {STATUS_FWP_TYPE_MISMATCH, -EIO, "STATUS_FWP_TYPE_MISMATCH"}, {STATUS_FWP_OUT_OF_BOUNDS, -EIO, "STATUS_FWP_OUT_OF_BOUNDS"}, {STATUS_FWP_RESERVED, -EIO, "STATUS_FWP_RESERVED"}, {STATUS_FWP_DUPLICATE_CONDITION, -EIO, "STATUS_FWP_DUPLICATE_CONDITION"}, {STATUS_FWP_DUPLICATE_KEYMOD, -EIO, "STATUS_FWP_DUPLICATE_KEYMOD"}, {STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER, -EIO, "STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER"}, {STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER, -EIO, "STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER"}, {STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER, -EIO, "STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER"}, {STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT, -EIO, "STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT"}, {STATUS_FWP_INCOMPATIBLE_AUTH_METHOD, -EIO, "STATUS_FWP_INCOMPATIBLE_AUTH_METHOD"}, {STATUS_FWP_INCOMPATIBLE_DH_GROUP, -EIO, "STATUS_FWP_INCOMPATIBLE_DH_GROUP"}, {STATUS_FWP_EM_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_FWP_EM_NOT_SUPPORTED"}, {STATUS_FWP_NEVER_MATCH, -EIO, "STATUS_FWP_NEVER_MATCH"}, {STATUS_FWP_PROVIDER_CONTEXT_MISMATCH, -EIO, "STATUS_FWP_PROVIDER_CONTEXT_MISMATCH"}, {STATUS_FWP_INVALID_PARAMETER, -EIO, "STATUS_FWP_INVALID_PARAMETER"}, {STATUS_FWP_TOO_MANY_SUBLAYERS, -EIO, "STATUS_FWP_TOO_MANY_SUBLAYERS"}, {STATUS_FWP_CALLOUT_NOTIFICATION_FAILED, -EIO, "STATUS_FWP_CALLOUT_NOTIFICATION_FAILED"}, {STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG, -EIO, "STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG"}, {STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG, -EIO, "STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG"}, {STATUS_FWP_TCPIP_NOT_READY, -EIO, "STATUS_FWP_TCPIP_NOT_READY"}, {STATUS_FWP_INJECT_HANDLE_CLOSING, -EIO, "STATUS_FWP_INJECT_HANDLE_CLOSING"}, {STATUS_FWP_INJECT_HANDLE_STALE, -EIO, "STATUS_FWP_INJECT_HANDLE_STALE"}, {STATUS_FWP_CANNOT_PEND, -EIO, "STATUS_FWP_CANNOT_PEND"}, {STATUS_NDIS_CLOSING, -EIO, "STATUS_NDIS_CLOSING"}, {STATUS_NDIS_BAD_VERSION, -EIO, "STATUS_NDIS_BAD_VERSION"}, {STATUS_NDIS_BAD_CHARACTERISTICS, -EIO, "STATUS_NDIS_BAD_CHARACTERISTICS"}, {STATUS_NDIS_ADAPTER_NOT_FOUND, -EIO, "STATUS_NDIS_ADAPTER_NOT_FOUND"}, {STATUS_NDIS_OPEN_FAILED, -EIO, "STATUS_NDIS_OPEN_FAILED"}, {STATUS_NDIS_DEVICE_FAILED, -EIO, "STATUS_NDIS_DEVICE_FAILED"}, {STATUS_NDIS_MULTICAST_FULL, -EIO, "STATUS_NDIS_MULTICAST_FULL"}, {STATUS_NDIS_MULTICAST_EXISTS, -EIO, "STATUS_NDIS_MULTICAST_EXISTS"}, {STATUS_NDIS_MULTICAST_NOT_FOUND, -EIO, "STATUS_NDIS_MULTICAST_NOT_FOUND"}, {STATUS_NDIS_REQUEST_ABORTED, -EIO, "STATUS_NDIS_REQUEST_ABORTED"}, {STATUS_NDIS_RESET_IN_PROGRESS, -EIO, "STATUS_NDIS_RESET_IN_PROGRESS"}, {STATUS_NDIS_INVALID_PACKET, -EIO, "STATUS_NDIS_INVALID_PACKET"}, {STATUS_NDIS_INVALID_DEVICE_REQUEST, -EIO, "STATUS_NDIS_INVALID_DEVICE_REQUEST"}, {STATUS_NDIS_ADAPTER_NOT_READY, -EIO, "STATUS_NDIS_ADAPTER_NOT_READY"}, {STATUS_NDIS_INVALID_LENGTH, -EIO, "STATUS_NDIS_INVALID_LENGTH"}, {STATUS_NDIS_INVALID_DATA, -EIO, "STATUS_NDIS_INVALID_DATA"}, {STATUS_NDIS_BUFFER_TOO_SHORT, -ENOBUFS, "STATUS_NDIS_BUFFER_TOO_SHORT"}, {STATUS_NDIS_INVALID_OID, -EIO, "STATUS_NDIS_INVALID_OID"}, {STATUS_NDIS_ADAPTER_REMOVED, -EIO, "STATUS_NDIS_ADAPTER_REMOVED"}, {STATUS_NDIS_UNSUPPORTED_MEDIA, -EIO, "STATUS_NDIS_UNSUPPORTED_MEDIA"}, {STATUS_NDIS_GROUP_ADDRESS_IN_USE, -EIO, "STATUS_NDIS_GROUP_ADDRESS_IN_USE"}, {STATUS_NDIS_FILE_NOT_FOUND, -EIO, "STATUS_NDIS_FILE_NOT_FOUND"}, {STATUS_NDIS_ERROR_READING_FILE, -EIO,
"STATUS_NDIS_ERROR_READING_FILE"}, {STATUS_NDIS_ALREADY_MAPPED, -EIO, "STATUS_NDIS_ALREADY_MAPPED"}, {STATUS_NDIS_RESOURCE_CONFLICT, -EIO, "STATUS_NDIS_RESOURCE_CONFLICT"}, {STATUS_NDIS_MEDIA_DISCONNECTED, -EIO, "STATUS_NDIS_MEDIA_DISCONNECTED"}, {STATUS_NDIS_INVALID_ADDRESS, -EIO, "STATUS_NDIS_INVALID_ADDRESS"}, {STATUS_NDIS_PAUSED, -EIO, "STATUS_NDIS_PAUSED"}, {STATUS_NDIS_INTERFACE_NOT_FOUND, -EIO, "STATUS_NDIS_INTERFACE_NOT_FOUND"}, {STATUS_NDIS_UNSUPPORTED_REVISION, -EIO, "STATUS_NDIS_UNSUPPORTED_REVISION"}, {STATUS_NDIS_INVALID_PORT, -EIO, "STATUS_NDIS_INVALID_PORT"}, {STATUS_NDIS_INVALID_PORT_STATE, -EIO, "STATUS_NDIS_INVALID_PORT_STATE"}, {STATUS_NDIS_LOW_POWER_STATE, -EIO, "STATUS_NDIS_LOW_POWER_STATE"}, {STATUS_NDIS_NOT_SUPPORTED, -ENOSYS, "STATUS_NDIS_NOT_SUPPORTED"}, {STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED, -EIO, "STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED"}, {STATUS_NDIS_DOT11_MEDIA_IN_USE, -EIO, "STATUS_NDIS_DOT11_MEDIA_IN_USE"}, {STATUS_NDIS_DOT11_POWER_STATE_INVALID, -EIO, "STATUS_NDIS_DOT11_POWER_STATE_INVALID"}, {STATUS_IPSEC_BAD_SPI, -EIO, "STATUS_IPSEC_BAD_SPI"}, {STATUS_IPSEC_SA_LIFETIME_EXPIRED, -EIO, "STATUS_IPSEC_SA_LIFETIME_EXPIRED"}, {STATUS_IPSEC_WRONG_SA, -EIO, "STATUS_IPSEC_WRONG_SA"}, {STATUS_IPSEC_REPLAY_CHECK_FAILED, -EIO, "STATUS_IPSEC_REPLAY_CHECK_FAILED"}, {STATUS_IPSEC_INVALID_PACKET, -EIO, "STATUS_IPSEC_INVALID_PACKET"}, {STATUS_IPSEC_INTEGRITY_CHECK_FAILED, -EIO, "STATUS_IPSEC_INTEGRITY_CHECK_FAILED"}, {STATUS_IPSEC_CLEAR_TEXT_DROP, -EIO, "STATUS_IPSEC_CLEAR_TEXT_DROP"},
	{0, 0, NULL}
};

/*****************************************************************************
 Print an error message from the status code
 *****************************************************************************/
static void
smb2_print_status(__le32 status)
{
	int idx = 0;

	/*
	 * Walk the table to its NULL sentinel; status codes are unique,
	 * so at most one entry is printed.
	 */
	while (smb2_error_map_table[idx].status_string != NULL) {
		if ((smb2_error_map_table[idx].smb2_status) == status) {
			pr_notice("Status code returned 0x%08x %s\n", status,
				  smb2_error_map_table[idx].status_string);
		}
		idx++;
	}
	return;
}

int
map_smb2_to_linux_error(char *buf, bool log_err)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	unsigned int i;
	int rc = -EIO;
	__le32 smb2err = shdr->Status;

	if (smb2err == 0) {
		trace_smb3_cmd_done(le32_to_cpu(shdr->Id.SyncId.TreeId),
				    le64_to_cpu(shdr->SessionId),
				    le16_to_cpu(shdr->Command),
				    le64_to_cpu(shdr->MessageId));
		return 0;
	}

	/* mask facility */
	if (log_err && (smb2err != STATUS_MORE_PROCESSING_REQUIRED) &&
	    (smb2err != STATUS_END_OF_FILE))
		smb2_print_status(smb2err);
	else if (cifsFYI & CIFS_RC)
		smb2_print_status(smb2err);

	/* Linear scan of the whole mapping table; first match wins. */
	for (i = 0; i < sizeof(smb2_error_map_table) /
		    sizeof(struct status_to_posix_error); i++) {
		if (smb2_error_map_table[i].smb2_status == smb2err) {
			rc = smb2_error_map_table[i].posix_error;
			break;
		}
	}

	/* on error mapping not found - return EIO */
	cifs_dbg(FYI, "Mapping SMB2 status code 0x%08x to POSIX err %d\n",
		 __le32_to_cpu(smb2err), rc);

	trace_smb3_cmd_err(le32_to_cpu(shdr->Id.SyncId.TreeId),
			   le64_to_cpu(shdr->SessionId),
			   le16_to_cpu(shdr->Command),
			   le64_to_cpu(shdr->MessageId),
			   le32_to_cpu(smb2err), rc);
	return rc;
}
linux-master
fs/smb/client/smb2maperror.c
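A note on the two routines above: map_smb2_to_linux_error() resolves a wire status with a linear scan of smb2_error_map_table and falls back to -EIO when nothing matches. The standalone sketch below illustrates the same table-driven mapping pattern outside the kernel; demo_map, demo_status_to_errno(), the DEMO_STATUS_* values, and the three-entry table are hypothetical stand-ins for illustration, not part of the CIFS client.

#include <stdio.h>
#include <errno.h>

/* Hypothetical miniature of struct status_to_posix_error. */
struct demo_map {
	unsigned int status;	/* wire status code */
	int posix_error;	/* negative errno to hand back */
	const char *name;	/* symbolic name, for logging */
};

/* Hypothetical stand-ins for a few NT status codes. */
#define DEMO_STATUS_OK           0x00000000u
#define DEMO_STATUS_NO_SUCH_FILE 0xC000000Fu
#define DEMO_STATUS_TIMEOUT      0xC00000B5u

static const struct demo_map demo_table[] = {
	{DEMO_STATUS_NO_SUCH_FILE, -ENOENT, "NO_SUCH_FILE"},
	{DEMO_STATUS_TIMEOUT, -ETIMEDOUT, "TIMEOUT"},
	{0, 0, NULL}	/* sentinel, like the {0, 0, NULL} entry above */
};

/* Same shape as the kernel loop: scan to the sentinel, default to -EIO. */
static int demo_status_to_errno(unsigned int status)
{
	int i;

	if (status == DEMO_STATUS_OK)
		return 0;
	for (i = 0; demo_table[i].name != NULL; i++)
		if (demo_table[i].status == status)
			return demo_table[i].posix_error;
	return -EIO;	/* unmapped codes collapse to -EIO */
}

int main(void)
{
	printf("0x%08x -> %d\n", DEMO_STATUS_TIMEOUT,
	       demo_status_to_errno(DEMO_STATUS_TIMEOUT));
	return 0;
}

The kernel version differs mainly in scale (hundreds of entries) and in the logging and tracepoints wrapped around the lookup; the lookup itself is the same linear scan with an -EIO default.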
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/oid_registry.h>
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cifsproto.h"
#include "cifs_spnego_negtokeninit.asn1.h"

/*
 * Decode a SPNEGO negTokenInit security blob with the ASN.1 decoder
 * generated from cifs_spnego_negtokeninit.asn1.  Returns 1 on success,
 * 0 on failure; the interesting work happens in the callbacks below,
 * which the decoder invokes as it walks the token.
 */
int
decode_negTokenInit(unsigned char *security_blob, int length,
		    struct TCP_Server_Info *server)
{
	if (asn1_ber_decoder(&cifs_spnego_negtokeninit_decoder, server,
			     security_blob, length) == 0)
		return 1;
	else
		return 0;
}

/* Callback: verify the outer GSS-API header carries the SPNEGO OID. */
int cifs_gssapi_this_mech(void *context, size_t hdrlen,
			  unsigned char tag, const void *value, size_t vlen)
{
	enum OID oid;

	oid = look_up_OID(value, vlen);
	if (oid != OID_spnego) {
		char buf[50];

		sprint_oid(value, vlen, buf, sizeof(buf));
		cifs_dbg(FYI, "Error decoding negTokenInit header: unexpected OID %s\n",
			 buf);
		return -EBADMSG;
	}
	return 0;
}

/*
 * Callback: invoked once per mechType OID in the token; record which
 * security mechanisms the server offers.
 */
int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
				  unsigned char tag,
				  const void *value, size_t vlen)
{
	struct TCP_Server_Info *server = context;
	enum OID oid;

	oid = look_up_OID(value, vlen);
	if (oid == OID_mskrb5)
		server->sec_mskerberos = true;
	else if (oid == OID_krb5u2u)
		server->sec_kerberosu2u = true;
	else if (oid == OID_krb5)
		server->sec_kerberos = true;
	else if (oid == OID_ntlmssp)
		server->sec_ntlmssp = true;
	else {
		char buf[50];

		sprint_oid(value, vlen, buf, sizeof(buf));
		cifs_dbg(FYI, "Decoding negTokenInit: unsupported OID %s\n",
			 buf);
	}
	return 0;
}
linux-master
fs/smb/client/asn1.c
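The glue code above is callback-driven: asn1_ber_decoder(), fed a decoder generated at build time from cifs_spnego_negtokeninit.asn1, walks the BER-encoded token and calls cifs_gssapi_this_mech() and cifs_neg_token_init_mech_type() for the elements they are bound to. The sketch below is a deliberately simplified, self-contained imitation of that pattern, a flat TLV walker that hands each value to a callback. It is not the kernel API: walk_tlv(), count_cb(), and the short-form-length-only parsing are hypothetical simplifications (real BER permits multi-byte tags and lengths, and nested constructed types).

#include <stdio.h>
#include <stddef.h>

/* Callback signature, loosely modeled on the decoder actions above. */
typedef int (*tlv_cb)(void *context, unsigned char tag,
		      const unsigned char *value, size_t vlen);

/*
 * Walk a flat run of tag/length/value triples and invoke the callback
 * once per element, the way the generated decoder hands each mechType
 * OID to cifs_neg_token_init_mech_type().  Short-form lengths only.
 */
static int walk_tlv(const unsigned char *buf, size_t len,
		    tlv_cb cb, void *context)
{
	size_t off = 0;

	while (off + 2 <= len) {
		unsigned char tag = buf[off];
		size_t vlen = buf[off + 1];

		if (off + 2 + vlen > len)
			return -1;	/* truncated element */
		if (cb(context, tag, buf + off + 2, vlen))
			return -1;	/* callback rejected the element */
		off += 2 + vlen;
	}
	return 0;
}

/* Trivial callback: count elements, standing in for OID dispatch. */
static int count_cb(void *context, unsigned char tag,
		    const unsigned char *value, size_t vlen)
{
	(void)tag;
	(void)value;
	(void)vlen;
	(*(int *)context)++;
	return 0;
}

int main(void)
{
	/* Two fake elements with tag 0x06 (OID) and 2-byte values. */
	const unsigned char blob[] = {0x06, 0x02, 0x2a, 0x03,
				      0x06, 0x02, 0x2b, 0x04};
	int n = 0;

	if (walk_tlv(blob, sizeof(blob), count_cb, &n) == 0)
		printf("decoded %d elements\n", n);
	return 0;
}

The design point the kernel code shares with this toy: the walker owns all framing and bounds checks, while policy (which OIDs to accept, which flags to set on the server) lives entirely in small callbacks that receive an opaque context pointer.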
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2020, Microsoft Corporation. * * Author(s): Steve French <[email protected]> * David Howells <[email protected]> */ /* #include <linux/module.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/magic.h> #include <linux/security.h> #include <net/net_namespace.h> #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif */ #include <linux/ctype.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/parser.h> #include <linux/utsname.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" #include "fs_context.h" static DEFINE_MUTEX(cifs_mount_mutex); static const match_table_t cifs_smb_version_tokens = { { Smb_1, SMB1_VERSION_STRING }, { Smb_20, SMB20_VERSION_STRING}, { Smb_21, SMB21_VERSION_STRING }, { Smb_30, SMB30_VERSION_STRING }, { Smb_302, SMB302_VERSION_STRING }, { Smb_302, ALT_SMB302_VERSION_STRING }, { Smb_311, SMB311_VERSION_STRING }, { Smb_311, ALT_SMB311_VERSION_STRING }, { Smb_3any, SMB3ANY_VERSION_STRING }, { Smb_default, SMBDEFAULT_VERSION_STRING }, { Smb_version_err, NULL } }; static const match_table_t cifs_secflavor_tokens = { { Opt_sec_krb5, "krb5" }, { Opt_sec_krb5i, "krb5i" }, { Opt_sec_krb5p, "krb5p" }, { Opt_sec_ntlmsspi, "ntlmsspi" }, { Opt_sec_ntlmssp, "ntlmssp" }, { Opt_sec_ntlmv2, "nontlm" }, { Opt_sec_ntlmv2, "ntlmv2" }, { Opt_sec_ntlmv2i, "ntlmv2i" }, { Opt_sec_none, "none" }, { Opt_sec_err, NULL } }; const struct fs_parameter_spec smb3_fs_parameters[] = { /* Mount options that take no arguments */ fsparam_flag_no("user_xattr", Opt_user_xattr), fsparam_flag_no("forceuid", Opt_forceuid), fsparam_flag_no("multichannel", Opt_multichannel), fsparam_flag_no("forcegid", Opt_forcegid), fsparam_flag("noblocksend", Opt_noblocksend), fsparam_flag("noautotune", Opt_noautotune), fsparam_flag("nolease", Opt_nolease), fsparam_flag_no("hard", Opt_hard), fsparam_flag_no("soft", Opt_soft), fsparam_flag_no("perm", Opt_perm), fsparam_flag("nodelete", Opt_nodelete), fsparam_flag_no("mapposix", Opt_mapposix), fsparam_flag("mapchars", Opt_mapchars), fsparam_flag("nomapchars", Opt_nomapchars), fsparam_flag_no("sfu", Opt_sfu), fsparam_flag("nodfs", Opt_nodfs), fsparam_flag_no("posixpaths", Opt_posixpaths), fsparam_flag_no("unix", Opt_unix), fsparam_flag_no("linux", Opt_unix), fsparam_flag_no("posix", Opt_unix), fsparam_flag("nocase", Opt_nocase), fsparam_flag("ignorecase", Opt_nocase), fsparam_flag_no("brl", Opt_brl), fsparam_flag_no("handlecache", Opt_handlecache), fsparam_flag("forcemandatorylock", Opt_forcemandatorylock), fsparam_flag("forcemand", Opt_forcemandatorylock), fsparam_flag("setuidfromacl", Opt_setuidfromacl), fsparam_flag("idsfromsid", Opt_setuidfromacl), fsparam_flag_no("setuids", Opt_setuids), fsparam_flag_no("dynperm", Opt_dynperm), fsparam_flag_no("intr", Opt_intr), fsparam_flag_no("strictsync", Opt_strictsync), fsparam_flag_no("serverino", Opt_serverino), fsparam_flag("rwpidforward", Opt_rwpidforward), fsparam_flag("cifsacl", Opt_cifsacl), fsparam_flag_no("acl", Opt_acl), fsparam_flag("locallease", Opt_locallease), fsparam_flag("sign", Opt_sign), fsparam_flag("ignore_signature", Opt_ignore_signature), fsparam_flag("signloosely", Opt_ignore_signature), fsparam_flag("seal", Opt_seal), fsparam_flag("noac", Opt_noac), fsparam_flag("fsc", Opt_fsc), 
fsparam_flag("mfsymlinks", Opt_mfsymlinks), fsparam_flag("multiuser", Opt_multiuser), fsparam_flag("sloppy", Opt_sloppy), fsparam_flag("nosharesock", Opt_nosharesock), fsparam_flag_no("persistenthandles", Opt_persistent), fsparam_flag_no("resilienthandles", Opt_resilient), fsparam_flag_no("tcpnodelay", Opt_tcp_nodelay), fsparam_flag("nosparse", Opt_nosparse), fsparam_flag("domainauto", Opt_domainauto), fsparam_flag("rdma", Opt_rdma), fsparam_flag("modesid", Opt_modesid), fsparam_flag("modefromsid", Opt_modesid), fsparam_flag("rootfs", Opt_rootfs), fsparam_flag("compress", Opt_compress), fsparam_flag("witness", Opt_witness), /* Mount options which take numeric value */ fsparam_u32("backupuid", Opt_backupuid), fsparam_u32("backupgid", Opt_backupgid), fsparam_u32("uid", Opt_uid), fsparam_u32("cruid", Opt_cruid), fsparam_u32("gid", Opt_gid), fsparam_u32("file_mode", Opt_file_mode), fsparam_u32("dirmode", Opt_dirmode), fsparam_u32("dir_mode", Opt_dirmode), fsparam_u32("port", Opt_port), fsparam_u32("min_enc_offload", Opt_min_enc_offload), fsparam_u32("esize", Opt_min_enc_offload), fsparam_u32("bsize", Opt_blocksize), fsparam_u32("rasize", Opt_rasize), fsparam_u32("rsize", Opt_rsize), fsparam_u32("wsize", Opt_wsize), fsparam_u32("actimeo", Opt_actimeo), fsparam_u32("acdirmax", Opt_acdirmax), fsparam_u32("acregmax", Opt_acregmax), fsparam_u32("closetimeo", Opt_closetimeo), fsparam_u32("echo_interval", Opt_echo_interval), fsparam_u32("max_credits", Opt_max_credits), fsparam_u32("max_cached_dirs", Opt_max_cached_dirs), fsparam_u32("handletimeout", Opt_handletimeout), fsparam_u64("snapshot", Opt_snapshot), fsparam_u32("max_channels", Opt_max_channels), /* Mount options which take string value */ fsparam_string("source", Opt_source), fsparam_string("user", Opt_user), fsparam_string("username", Opt_user), fsparam_string("pass", Opt_pass), fsparam_string("password", Opt_pass), fsparam_string("ip", Opt_ip), fsparam_string("addr", Opt_ip), fsparam_string("domain", Opt_domain), fsparam_string("dom", Opt_domain), fsparam_string("srcaddr", Opt_srcaddr), fsparam_string("iocharset", Opt_iocharset), fsparam_string("netbiosname", Opt_netbiosname), fsparam_string("servern", Opt_servern), fsparam_string("ver", Opt_ver), fsparam_string("vers", Opt_vers), fsparam_string("sec", Opt_sec), fsparam_string("cache", Opt_cache), /* Arguments that should be ignored */ fsparam_flag("guest", Opt_ignore), fsparam_flag("noatime", Opt_ignore), fsparam_flag("relatime", Opt_ignore), fsparam_flag("_netdev", Opt_ignore), fsparam_flag_no("suid", Opt_ignore), fsparam_flag_no("exec", Opt_ignore), fsparam_flag_no("dev", Opt_ignore), fsparam_flag_no("mand", Opt_ignore), fsparam_flag_no("auto", Opt_ignore), fsparam_string("cred", Opt_ignore), fsparam_string("credentials", Opt_ignore), /* * UNC and prefixpath is now extracted from Opt_source * in the new mount API so we can just ignore them going forward. */ fsparam_string("unc", Opt_ignore), fsparam_string("prefixpath", Opt_ignore), {} }; static int cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_context *ctx) { substring_t args[MAX_OPT_ARGS]; /* * With mount options, the last one should win. Reset any existing * settings back to default. 
*/ ctx->sectype = Unspecified; ctx->sign = false; switch (match_token(value, cifs_secflavor_tokens, args)) { case Opt_sec_krb5p: cifs_errorf(fc, "sec=krb5p is not supported!\n"); return 1; case Opt_sec_krb5i: ctx->sign = true; fallthrough; case Opt_sec_krb5: ctx->sectype = Kerberos; break; case Opt_sec_ntlmsspi: ctx->sign = true; fallthrough; case Opt_sec_ntlmssp: ctx->sectype = RawNTLMSSP; break; case Opt_sec_ntlmv2i: ctx->sign = true; fallthrough; case Opt_sec_ntlmv2: ctx->sectype = NTLMv2; break; case Opt_sec_none: ctx->nullauth = 1; kfree(ctx->username); ctx->username = NULL; break; default: cifs_errorf(fc, "bad security option: %s\n", value); return 1; } return 0; } static const match_table_t cifs_cacheflavor_tokens = { { Opt_cache_loose, "loose" }, { Opt_cache_strict, "strict" }, { Opt_cache_none, "none" }, { Opt_cache_ro, "ro" }, { Opt_cache_rw, "singleclient" }, { Opt_cache_err, NULL } }; static int cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_context *ctx) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_cacheflavor_tokens, args)) { case Opt_cache_loose: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_strict: ctx->direct_io = false; ctx->strict_io = true; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_none: ctx->direct_io = true; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_ro: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = true; ctx->cache_rw = false; break; case Opt_cache_rw: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = true; break; default: cifs_errorf(fc, "bad cache= option: %s\n", value); return 1; } return 0; } #define DUP_CTX_STR(field) \ do { \ if (ctx->field) { \ new_ctx->field = kstrdup(ctx->field, GFP_ATOMIC); \ if (new_ctx->field == NULL) { \ smb3_cleanup_fs_context_contents(new_ctx); \ return -ENOMEM; \ } \ } \ } while (0) int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx) { memcpy(new_ctx, ctx, sizeof(*ctx)); new_ctx->prepath = NULL; new_ctx->nodename = NULL; new_ctx->username = NULL; new_ctx->password = NULL; new_ctx->server_hostname = NULL; new_ctx->domainname = NULL; new_ctx->UNC = NULL; new_ctx->source = NULL; new_ctx->iocharset = NULL; new_ctx->leaf_fullpath = NULL; /* * Make sure to stay in sync with smb3_cleanup_fs_context_contents() */ DUP_CTX_STR(prepath); DUP_CTX_STR(username); DUP_CTX_STR(password); DUP_CTX_STR(server_hostname); DUP_CTX_STR(UNC); DUP_CTX_STR(source); DUP_CTX_STR(domainname); DUP_CTX_STR(nodename); DUP_CTX_STR(iocharset); DUP_CTX_STR(leaf_fullpath); return 0; } static int cifs_parse_smb_version(struct fs_context *fc, char *value, struct smb3_fs_context *ctx, bool is_smb3) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_smb_version_tokens, args)) { #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case Smb_1: if (disable_legacy_dialects) { cifs_errorf(fc, "mount with legacy dialect disabled\n"); return 1; } if (is_smb3) { cifs_errorf(fc, "vers=1.0 (cifs) not permitted when mounting with smb3\n"); return 1; } cifs_errorf(fc, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n"); ctx->ops = &smb1_operations; ctx->vals = &smb1_values; break; case Smb_20: if (disable_legacy_dialects) { cifs_errorf(fc, "mount with legacy dialect disabled\n"); return 1; } if (is_smb3) { cifs_errorf(fc, "vers=2.0 not permitted 
when mounting with smb3\n"); return 1; } ctx->ops = &smb20_operations; ctx->vals = &smb20_values; break; #else case Smb_1: cifs_errorf(fc, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n"); return 1; case Smb_20: cifs_errorf(fc, "vers=2.0 mount not permitted when legacy dialects disabled\n"); return 1; #endif /* CIFS_ALLOW_INSECURE_LEGACY */ case Smb_21: ctx->ops = &smb21_operations; ctx->vals = &smb21_values; break; case Smb_30: ctx->ops = &smb30_operations; ctx->vals = &smb30_values; break; case Smb_302: ctx->ops = &smb30_operations; /* currently identical with 3.0 */ ctx->vals = &smb302_values; break; case Smb_311: ctx->ops = &smb311_operations; ctx->vals = &smb311_values; break; case Smb_3any: ctx->ops = &smb30_operations; /* currently identical with 3.0 */ ctx->vals = &smb3any_values; break; case Smb_default: ctx->ops = &smb30_operations; ctx->vals = &smbdefault_values; break; default: cifs_errorf(fc, "Unknown vers= option specified: %s\n", value); return 1; } return 0; } int smb3_parse_opt(const char *options, const char *key, char **val) { int rc = -ENOENT; char *opts, *orig, *p; orig = opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; while ((p = strsep(&opts, ","))) { char *nval; if (!*p) continue; if (strncasecmp(p, key, strlen(key))) continue; nval = strchr(p, '='); if (nval) { if (nval == p) continue; *nval++ = 0; *val = kstrdup(nval, GFP_KERNEL); rc = !*val ? -ENOMEM : 0; goto out; } } out: kfree(orig); return rc; } /* * Remove duplicate path delimiters. Windows is supposed to do that * but there are some bugs that prevent rename from working if there are * multiple delimiters. * * Return a sanitized duplicate of @path or NULL for empty prefix paths. * Otherwise, return ERR_PTR. * * @gfp indicates the GFP_* flags for kstrdup. * The caller is responsible for freeing the original. */ #define IS_DELIM(c) ((c) == '/' || (c) == '\\') char *cifs_sanitize_prepath(char *prepath, gfp_t gfp) { char *cursor1 = prepath, *cursor2 = prepath; char *s; /* skip all prepended delimiters */ while (IS_DELIM(*cursor1)) cursor1++; /* copy the first letter */ *cursor2 = *cursor1; /* copy the remainder... */ while (*(cursor1++)) { /* ... skipping all duplicated delimiters */ if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2)) continue; *(++cursor2) = *cursor1; } /* if the last character is a delimiter, skip it */ if (IS_DELIM(*(cursor2 - 1))) cursor2--; *cursor2 = '\0'; if (!*prepath) return NULL; s = kstrdup(prepath, gfp); if (!s) return ERR_PTR(-ENOMEM); return s; } /* * Return full path based on the values of @ctx->{UNC,prepath}. * * It is assumed that both values were already parsed by smb3_parse_devname(). */ char *smb3_fs_context_fullpath(const struct smb3_fs_context *ctx, char dirsep) { size_t ulen, plen; char *s; ulen = strlen(ctx->UNC); plen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0; s = kmalloc(ulen + plen + 1, GFP_KERNEL); if (!s) return ERR_PTR(-ENOMEM); memcpy(s, ctx->UNC, ulen); if (plen) { s[ulen] = dirsep; memcpy(s + ulen + 1, ctx->prepath, plen); } s[ulen + plen] = '\0'; convert_delimiter(s, dirsep); return s; } /* * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath * fields with the result. Returns 0 on success and an error otherwise * (e.g. 
ENOMEM or EINVAL) */ int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx) { char *pos; const char *delims = "/\\"; size_t len; int rc; if (unlikely(!devname || !*devname)) { cifs_dbg(VFS, "Device name not specified\n"); return -EINVAL; } /* make sure we have a valid UNC double delimiter prefix */ len = strspn(devname, delims); if (len != 2) return -EINVAL; /* find delimiter between host and sharename */ pos = strpbrk(devname + 2, delims); if (!pos) return -EINVAL; /* record the server hostname */ kfree(ctx->server_hostname); ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL); if (!ctx->server_hostname) return -ENOMEM; /* skip past delimiter */ ++pos; /* now go until next delimiter or end of string */ len = strcspn(pos, delims); if (!len) return -EINVAL; /* move "pos" up to delimiter or NULL */ pos += len; kfree(ctx->UNC); ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL); if (!ctx->UNC) return -ENOMEM; convert_delimiter(ctx->UNC, '\\'); /* skip any delimiter */ if (*pos == '/' || *pos == '\\') pos++; kfree(ctx->prepath); ctx->prepath = NULL; /* If pos is NULL then no prepath */ if (!*pos) return 0; ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL); if (IS_ERR(ctx->prepath)) { rc = PTR_ERR(ctx->prepath); ctx->prepath = NULL; return rc; } return 0; } static void smb3_fs_context_free(struct fs_context *fc); static int smb3_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); static int smb3_fs_context_parse_monolithic(struct fs_context *fc, void *data); static int smb3_get_tree(struct fs_context *fc); static int smb3_reconfigure(struct fs_context *fc); static const struct fs_context_operations smb3_fs_context_ops = { .free = smb3_fs_context_free, .parse_param = smb3_fs_context_parse_param, .parse_monolithic = smb3_fs_context_parse_monolithic, .get_tree = smb3_get_tree, .reconfigure = smb3_reconfigure, }; /* * Parse a monolithic block of data from sys_mount(). * smb3_fs_context_parse_monolithic - Parse key[=val][,key[=val]]* mount data * @ctx: The superblock configuration to fill in. * @data: The data to parse * * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be * called from the ->monolithic_mount_data() fs_context operation. * * Returns 0 on success or the error returned by the ->parse_option() fs_context * operation on failure. */ static int smb3_fs_context_parse_monolithic(struct fs_context *fc, void *data) { char *options = data, *key; int ret = 0; if (!options) return 0; ret = security_sb_eat_lsm_opts(options, &fc->security); if (ret) return ret; /* BB Need to add support for sep= here TBD */ while ((key = strsep(&options, ",")) != NULL) { size_t len; char *value; if (*key == 0) break; /* Check if following character is the deliminator If yes, * we have encountered a double deliminator reset the NULL * character to the deliminator */ while (options && options[0] == ',') { len = strlen(key); strcpy(key + len, options); options = strchr(options, ','); if (options) *options++ = 0; } len = 0; value = strchr(key, '='); if (value) { if (value == key) continue; *value++ = 0; len = strlen(value); } ret = vfs_parse_fs_string(fc, key, value, len); if (ret < 0) break; } return ret; } /* * Validate the preparsed information in the config. 
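 */

/*
 * Editor's sketch (not part of the original file, kept under #if 0 so it
 * cannot affect the build): a self-contained userspace model of how
 * smb3_parse_devname() above splits "//server/share/a//b/" into a
 * hostname, a UNC and a prepath, with cifs_sanitize_prepath()'s
 * duplicate/trailing-delimiter stripping mirrored in demo_sanitize().
 * Error handling is reduced to bare failure returns, and the prepath is
 * sanitized in place instead of being duplicated.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define DEMO_IS_DELIM(c) ((c) == '/' || (c) == '\\')

/* collapse runs of delimiters and drop a trailing one; NULL if empty */
static char *demo_sanitize(char *prepath)
{
	char *c1 = prepath, *c2 = prepath;

	while (DEMO_IS_DELIM(*c1))		/* skip leading delimiters */
		c1++;
	*c2 = *c1;				/* copy first kept character */
	while (*(c1++)) {
		if (DEMO_IS_DELIM(*c1) && DEMO_IS_DELIM(*c2))
			continue;		/* skip the duplicate */
		*(++c2) = *c1;
	}
	if (c2 > prepath && DEMO_IS_DELIM(*(c2 - 1)))
		c2--;				/* drop trailing delimiter */
	*c2 = '\0';
	return *prepath ? prepath : NULL;
}

int main(void)
{
	char devname[] = "//server/share/a//b/";
	const char *delims = "/\\";
	char *pos, *prepath;

	if (strspn(devname, delims) != 2)	/* require the UNC "//" */
		return 1;
	pos = strpbrk(devname + 2, delims);	/* host / share boundary */
	if (!pos)
		return 1;
	printf("host   : %.*s\n", (int)(pos - devname - 2), devname + 2);
	pos += 1 + strcspn(pos + 1, delims);	/* skip the share name */
	/* the kernel then convert_delimiter()s the UNC to backslashes */
	printf("UNC    : %.*s\n", (int)(pos - devname), devname);
	while (DEMO_IS_DELIM(*pos))
		pos++;				/* delimiter before prepath */
	prepath = demo_sanitize(pos);
	printf("prepath: %s\n", prepath ? prepath : "(none)");
	return 0;
}
#endif

/*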
*/ static int smb3_fs_context_validate(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) { cifs_errorf(fc, "SMB Direct requires Version >=3.0\n"); return -EOPNOTSUPP; } #ifndef CONFIG_KEYS /* Muliuser mounts require CONFIG_KEYS support */ if (ctx->multiuser) { cifs_errorf(fc, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n"); return -1; } #endif if (ctx->got_version == false) pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n"); if (!ctx->UNC) { cifs_errorf(fc, "CIFS mount error: No usable UNC path provided in device string!\n"); return -1; } /* make sure UNC has a share name */ if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) { cifs_errorf(fc, "Malformed UNC. Unable to find share name.\n"); return -ENOENT; } if (!ctx->got_ip) { int len; const char *slash; /* No ip= option specified? Try to get it from UNC */ /* Use the address part of the UNC. */ slash = strchr(&ctx->UNC[2], '\\'); len = slash - &ctx->UNC[2]; if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr, &ctx->UNC[2], len)) { pr_err("Unable to determine destination address\n"); return -EHOSTUNREACH; } } /* set the port that we got earlier */ cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port); if (ctx->override_uid && !ctx->uid_specified) { ctx->override_uid = 0; pr_notice("ignoring forceuid mount option specified with no uid= option\n"); } if (ctx->override_gid && !ctx->gid_specified) { ctx->override_gid = 0; pr_notice("ignoring forcegid mount option specified with no gid= option\n"); } return 0; } static int smb3_get_tree_common(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); struct dentry *root; int rc = 0; root = cifs_smb3_do_mount(fc->fs_type, 0, ctx); if (IS_ERR(root)) return PTR_ERR(root); fc->root = root; return rc; } /* * Create an SMB3 superblock from the parameters passed. */ static int smb3_get_tree(struct fs_context *fc) { int err = smb3_fs_context_validate(fc); int ret; if (err) return err; mutex_lock(&cifs_mount_mutex); ret = smb3_get_tree_common(fc); mutex_unlock(&cifs_mount_mutex); return ret; } static void smb3_fs_context_free(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); smb3_cleanup_fs_context(ctx); } /* * Compare the old and new proposed context during reconfigure * and check if the changes are compatible. 
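 */

/*
 * Editor's sketch (hypothetical helper, not in the kernel source): every
 * string comparison in smb3_verify_reconfigure_ctx() below is an
 * instance of one pattern -- "the option was supplied on remount and it
 * differs from what the superblock was mounted with" -- which a helper
 * like this would express; the kernel keeps the explicit form.
 */
#if 0
#include <string.h>

static int str_changed(const char *new_val, const char *old_val)
{
	return new_val && (!old_val || strcmp(new_val, old_val) != 0);
}
#endif

/*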
*/ static int smb3_verify_reconfigure_ctx(struct fs_context *fc, struct smb3_fs_context *new_ctx, struct smb3_fs_context *old_ctx) { if (new_ctx->posix_paths != old_ctx->posix_paths) { cifs_errorf(fc, "can not change posixpaths during remount\n"); return -EINVAL; } if (new_ctx->sectype != old_ctx->sectype) { cifs_errorf(fc, "can not change sec during remount\n"); return -EINVAL; } if (new_ctx->multiuser != old_ctx->multiuser) { cifs_errorf(fc, "can not change multiuser during remount\n"); return -EINVAL; } if (new_ctx->UNC && (!old_ctx->UNC || strcmp(new_ctx->UNC, old_ctx->UNC))) { cifs_errorf(fc, "can not change UNC during remount\n"); return -EINVAL; } if (new_ctx->username && (!old_ctx->username || strcmp(new_ctx->username, old_ctx->username))) { cifs_errorf(fc, "can not change username during remount\n"); return -EINVAL; } if (new_ctx->password && (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) { cifs_errorf(fc, "can not change password during remount\n"); return -EINVAL; } if (new_ctx->domainname && (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) { cifs_errorf(fc, "can not change domainname during remount\n"); return -EINVAL; } if (strcmp(new_ctx->workstation_name, old_ctx->workstation_name)) { cifs_errorf(fc, "can not change workstation_name during remount\n"); return -EINVAL; } if (new_ctx->nodename && (!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) { cifs_errorf(fc, "can not change nodename during remount\n"); return -EINVAL; } if (new_ctx->iocharset && (!old_ctx->iocharset || strcmp(new_ctx->iocharset, old_ctx->iocharset))) { cifs_errorf(fc, "can not change iocharset during remount\n"); return -EINVAL; } return 0; } #define STEAL_STRING(cifs_sb, ctx, field) \ do { \ kfree(ctx->field); \ ctx->field = cifs_sb->ctx->field; \ cifs_sb->ctx->field = NULL; \ } while (0) #define STEAL_STRING_SENSITIVE(cifs_sb, ctx, field) \ do { \ kfree_sensitive(ctx->field); \ ctx->field = cifs_sb->ctx->field; \ cifs_sb->ctx->field = NULL; \ } while (0) static int smb3_reconfigure(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); struct dentry *root = fc->root; struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); int rc; rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx); if (rc) return rc; /* * We can not change UNC/username/password/domainname/ * workstation_name/nodename/iocharset * during reconnect so ignore what we have in the new context and * just use what we already have in cifs_sb->ctx. 
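 */

	/*
	 * Editor's note: with the macro defined above,
	 * STEAL_STRING(cifs_sb, ctx, UNC) expands to
	 *
	 *	kfree(ctx->UNC);
	 *	ctx->UNC = cifs_sb->ctx->UNC;
	 *	cifs_sb->ctx->UNC = NULL;
	 *
	 * i.e. the new context drops its own copy and takes ownership of
	 * the string already held by the superblock's context.
	 */

	/*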
*/ STEAL_STRING(cifs_sb, ctx, UNC); STEAL_STRING(cifs_sb, ctx, source); STEAL_STRING(cifs_sb, ctx, username); STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); STEAL_STRING(cifs_sb, ctx, domainname); STEAL_STRING(cifs_sb, ctx, nodename); STEAL_STRING(cifs_sb, ctx, iocharset); /* if rsize or wsize not passed in on remount, use previous values */ if (ctx->rsize == 0) ctx->rsize = cifs_sb->ctx->rsize; if (ctx->wsize == 0) ctx->wsize = cifs_sb->ctx->wsize; smb3_cleanup_fs_context_contents(cifs_sb->ctx); rc = smb3_fs_context_dup(cifs_sb->ctx, ctx); smb3_update_mnt_flags(cifs_sb); #ifdef CONFIG_CIFS_DFS_UPCALL if (!rc) rc = dfs_cache_remount_fs(cifs_sb); #endif return rc; } static int smb3_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; struct smb3_fs_context *ctx = smb3_fc2context(fc); int i, opt; bool is_smb3 = !strcmp(fc->fs_type->name, "smb3"); bool skip_parsing = false; kuid_t uid; kgid_t gid; cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key); /* * fs_parse can not handle string options with an empty value so * we will need special handling of them. */ if (param->type == fs_value_is_string && param->string[0] == 0) { if (!strcmp("pass", param->key) || !strcmp("password", param->key)) { skip_parsing = true; opt = Opt_pass; } else if (!strcmp("user", param->key) || !strcmp("username", param->key)) { skip_parsing = true; opt = Opt_user; } } if (!skip_parsing) { opt = fs_parse(fc, smb3_fs_parameters, param, &result); if (opt < 0) return ctx->sloppy ? 1 : opt; } switch (opt) { case Opt_compress: ctx->compression = UNKNOWN_TYPE; cifs_dbg(VFS, "SMB3 compression support is experimental\n"); break; case Opt_nodfs: ctx->nodfs = 1; break; case Opt_hard: if (result.negated) { if (ctx->retry == 1) cifs_dbg(VFS, "conflicting hard vs. 
soft mount options\n"); ctx->retry = 0; } else ctx->retry = 1; break; case Opt_soft: if (result.negated) ctx->retry = 1; else { if (ctx->retry == 1) cifs_dbg(VFS, "conflicting hard vs soft mount options\n"); ctx->retry = 0; } break; case Opt_mapposix: if (result.negated) ctx->remap = false; else { ctx->remap = true; ctx->sfu_remap = false; /* disable SFU mapping */ } break; case Opt_mapchars: if (result.negated) ctx->sfu_remap = false; else { ctx->sfu_remap = true; ctx->remap = false; /* disable SFM (mapposix) mapping */ } break; case Opt_user_xattr: if (result.negated) ctx->no_xattr = 1; else ctx->no_xattr = 0; break; case Opt_forceuid: if (result.negated) ctx->override_uid = 0; else ctx->override_uid = 1; break; case Opt_forcegid: if (result.negated) ctx->override_gid = 0; else ctx->override_gid = 1; break; case Opt_perm: if (result.negated) ctx->noperm = 1; else ctx->noperm = 0; break; case Opt_dynperm: if (result.negated) ctx->dynperm = 0; else ctx->dynperm = 1; break; case Opt_sfu: if (result.negated) ctx->sfu_emul = 0; else ctx->sfu_emul = 1; break; case Opt_noblocksend: ctx->noblocksnd = 1; break; case Opt_noautotune: ctx->noautotune = 1; break; case Opt_nolease: ctx->no_lease = 1; break; case Opt_nosparse: ctx->no_sparse = 1; break; case Opt_nodelete: ctx->nodelete = 1; break; case Opt_multichannel: if (result.negated) { ctx->multichannel = false; ctx->max_channels = 1; } else { ctx->multichannel = true; /* if number of channels not specified, default to 2 */ if (ctx->max_channels < 2) ctx->max_channels = 2; } break; case Opt_uid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->linux_uid = uid; ctx->uid_specified = true; break; case Opt_cruid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->cred_uid = uid; ctx->cruid_specified = true; break; case Opt_backupuid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->backupuid = uid; ctx->backupuid_specified = true; break; case Opt_backupgid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) goto cifs_parse_mount_err; ctx->backupgid = gid; ctx->backupgid_specified = true; break; case Opt_gid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) goto cifs_parse_mount_err; ctx->linux_gid = gid; ctx->gid_specified = true; break; case Opt_port: ctx->port = result.uint_32; break; case Opt_file_mode: ctx->file_mode = result.uint_32; break; case Opt_dirmode: ctx->dir_mode = result.uint_32; break; case Opt_min_enc_offload: ctx->min_offload = result.uint_32; break; case Opt_blocksize: /* * inode blocksize realistically should never need to be * less than 16K or greater than 16M and default is 1MB. * Note that small inode block sizes (e.g. 64K) can lead * to very poor performance of common tools like cp and scp */ if ((result.uint_32 < CIFS_MAX_MSGSIZE) || (result.uint_32 > (4 * SMB3_DEFAULT_IOSIZE))) { cifs_errorf(fc, "%s: Invalid blocksize\n", __func__); goto cifs_parse_mount_err; } ctx->bsize = result.uint_32; ctx->got_bsize = true; break; case Opt_rasize: /* * readahead size realistically should never need to be * less than 1M (CIFS_DEFAULT_IOSIZE) or greater than 32M * (perhaps an exception should be considered in the * for the case of a large number of channels * when multichannel is negotiated) since that would lead * to plenty of parallel I/O in flight to the server. 
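 */

	/*
	 * Editor's note: with CIFS_DEFAULT_IOSIZE of 1MB and
	 * SMB3_DEFAULT_IOSIZE of 4MB (the values used in current
	 * kernels), the check below accepts 1MB <= rasize <= 32MB;
	 * rasize itself defaults to 0, meaning "derive read ahead from
	 * the negotiated rsize" (see smb3_init_fs_context() below).
	 */

	/*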
* Note that smaller read ahead sizes would * hurt performance of common tools like cp and scp * which often trigger sequential i/o with read ahead */ if ((result.uint_32 > (8 * SMB3_DEFAULT_IOSIZE)) || (result.uint_32 < CIFS_DEFAULT_IOSIZE)) { cifs_errorf(fc, "%s: Invalid rasize %d vs. %d\n", __func__, result.uint_32, SMB3_DEFAULT_IOSIZE); goto cifs_parse_mount_err; } ctx->rasize = result.uint_32; break; case Opt_rsize: ctx->rsize = result.uint_32; ctx->got_rsize = true; break; case Opt_wsize: ctx->wsize = result.uint_32; ctx->got_wsize = true; break; case Opt_acregmax: ctx->acregmax = HZ * result.uint_32; if (ctx->acregmax > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "acregmax too large\n"); goto cifs_parse_mount_err; } break; case Opt_acdirmax: ctx->acdirmax = HZ * result.uint_32; if (ctx->acdirmax > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "acdirmax too large\n"); goto cifs_parse_mount_err; } break; case Opt_actimeo: if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "timeout too large\n"); goto cifs_parse_mount_err; } if ((ctx->acdirmax != CIFS_DEF_ACTIMEO) || (ctx->acregmax != CIFS_DEF_ACTIMEO)) { cifs_errorf(fc, "actimeo ignored since acregmax or acdirmax specified\n"); break; } ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; case Opt_closetimeo: ctx->closetimeo = HZ * result.uint_32; if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { cifs_errorf(fc, "closetimeo too large\n"); goto cifs_parse_mount_err; } break; case Opt_echo_interval: ctx->echo_interval = result.uint_32; break; case Opt_snapshot: ctx->snapshot_time = result.uint_64; break; case Opt_max_credits: if (result.uint_32 < 20 || result.uint_32 > 60000) { cifs_errorf(fc, "%s: Invalid max_credits value\n", __func__); goto cifs_parse_mount_err; } ctx->max_credits = result.uint_32; break; case Opt_max_channels: if (result.uint_32 < 1 || result.uint_32 > CIFS_MAX_CHANNELS) { cifs_errorf(fc, "%s: Invalid max_channels value, needs to be 1-%d\n", __func__, CIFS_MAX_CHANNELS); goto cifs_parse_mount_err; } ctx->max_channels = result.uint_32; /* If more than one channel requested ... they want multichan */ if (result.uint_32 > 1) ctx->multichannel = true; break; case Opt_max_cached_dirs: if (result.uint_32 < 1) { cifs_errorf(fc, "%s: Invalid max_cached_dirs, needs to be 1 or more\n", __func__); goto cifs_parse_mount_err; } ctx->max_cached_dirs = result.uint_32; break; case Opt_handletimeout: ctx->handle_timeout = result.uint_32; if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) { cifs_errorf(fc, "Invalid handle cache timeout, longer than 16 minutes\n"); goto cifs_parse_mount_err; } break; case Opt_source: kfree(ctx->UNC); ctx->UNC = NULL; switch (smb3_parse_devname(param->string, ctx)) { case 0: break; case -ENOMEM: cifs_errorf(fc, "Unable to allocate memory for devname\n"); goto cifs_parse_mount_err; case -EINVAL: cifs_errorf(fc, "Malformed UNC in devname\n"); goto cifs_parse_mount_err; default: cifs_errorf(fc, "Unknown error parsing devname\n"); goto cifs_parse_mount_err; } ctx->source = smb3_fs_context_fullpath(ctx, '/'); if (IS_ERR(ctx->source)) { ctx->source = NULL; cifs_errorf(fc, "OOM when copying UNC string\n"); goto cifs_parse_mount_err; } fc->source = kstrdup(ctx->source, GFP_KERNEL); if (fc->source == NULL) { cifs_errorf(fc, "OOM when copying UNC string\n"); goto cifs_parse_mount_err; } break; case Opt_user: kfree(ctx->username); ctx->username = NULL; if (ctx->nullauth) break; if (strlen(param->string) == 0) { /* null user, ie. 
anonymous authentication */ ctx->nullauth = 1; break; } if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) > CIFS_MAX_USERNAME_LEN) { pr_warn("username too long\n"); goto cifs_parse_mount_err; } ctx->username = kstrdup(param->string, GFP_KERNEL); if (ctx->username == NULL) { cifs_errorf(fc, "OOM when copying username string\n"); goto cifs_parse_mount_err; } break; case Opt_pass: kfree_sensitive(ctx->password); ctx->password = NULL; if (strlen(param->string) == 0) break; ctx->password = kstrdup(param->string, GFP_KERNEL); if (ctx->password == NULL) { cifs_errorf(fc, "OOM when copying password string\n"); goto cifs_parse_mount_err; } break; case Opt_ip: if (strlen(param->string) == 0) { ctx->got_ip = false; break; } if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr, param->string, strlen(param->string))) { pr_err("bad ip= option (%s)\n", param->string); goto cifs_parse_mount_err; } ctx->got_ip = true; break; case Opt_domain: if (strnlen(param->string, CIFS_MAX_DOMAINNAME_LEN) == CIFS_MAX_DOMAINNAME_LEN) { pr_warn("domain name too long\n"); goto cifs_parse_mount_err; } kfree(ctx->domainname); ctx->domainname = kstrdup(param->string, GFP_KERNEL); if (ctx->domainname == NULL) { cifs_errorf(fc, "OOM when copying domainname string\n"); goto cifs_parse_mount_err; } cifs_dbg(FYI, "Domain name set\n"); break; case Opt_srcaddr: if (!cifs_convert_address( (struct sockaddr *)&ctx->srcaddr, param->string, strlen(param->string))) { pr_warn("Could not parse srcaddr: %s\n", param->string); goto cifs_parse_mount_err; } break; case Opt_iocharset: if (strnlen(param->string, 1024) >= 65) { pr_warn("iocharset name too long\n"); goto cifs_parse_mount_err; } if (strncasecmp(param->string, "default", 7) != 0) { kfree(ctx->iocharset); ctx->iocharset = kstrdup(param->string, GFP_KERNEL); if (ctx->iocharset == NULL) { cifs_errorf(fc, "OOM when copying iocharset string\n"); goto cifs_parse_mount_err; } } /* if iocharset not set then load_nls_default * is used by caller */ cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset); break; case Opt_netbiosname: memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); /* * FIXME: are there cases in which a comma can * be valid in workstation netbios name (and * need special handling)? */ for (i = 0; i < RFC1001_NAME_LEN; i++) { /* don't ucase netbiosname for user */ if (param->string[i] == 0) break; ctx->source_rfc1001_name[i] = param->string[i]; } /* The string has 16th byte zero still from * set at top of the function */ if (i == RFC1001_NAME_LEN && param->string[i] != 0) pr_warn("netbiosname longer than 15 truncated\n"); break; case Opt_servern: /* last byte, type, is 0x20 for servr type */ memset(ctx->target_rfc1001_name, 0x20, RFC1001_NAME_LEN_WITH_NULL); /* * BB are there cases in which a comma can be valid in this * workstation netbios name (and need special handling)? */ /* user or mount helper must uppercase the netbios name */ for (i = 0; i < 15; i++) { if (param->string[i] == 0) break; ctx->target_rfc1001_name[i] = param->string[i]; } /* The string has 16th byte zero still from set at top of function */ if (i == RFC1001_NAME_LEN && param->string[i] != 0) pr_warn("server netbiosname longer than 15 truncated\n"); break; case Opt_ver: /* version of mount userspace tools, not dialect */ /* If interface changes in mount.cifs bump to new ver */ if (strncasecmp(param->string, "1", 1) == 0) { if (strlen(param->string) > 1) { pr_warn("Bad mount helper ver=%s. 
Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n", param->string); goto cifs_parse_mount_err; } /* This is the default */ break; } /* For all other value, error */ pr_warn("Invalid mount helper version specified\n"); goto cifs_parse_mount_err; case Opt_vers: /* protocol version (dialect) */ if (cifs_parse_smb_version(fc, param->string, ctx, is_smb3) != 0) goto cifs_parse_mount_err; ctx->got_version = true; break; case Opt_sec: if (cifs_parse_security_flavors(fc, param->string, ctx) != 0) goto cifs_parse_mount_err; break; case Opt_cache: if (cifs_parse_cache_flavor(fc, param->string, ctx) != 0) goto cifs_parse_mount_err; break; case Opt_witness: #ifndef CONFIG_CIFS_SWN_UPCALL cifs_errorf(fc, "Witness support needs CONFIG_CIFS_SWN_UPCALL config option\n"); goto cifs_parse_mount_err; #endif ctx->witness = true; pr_warn_once("Witness protocol support is experimental\n"); break; case Opt_rootfs: #ifndef CONFIG_CIFS_ROOT cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n"); goto cifs_parse_mount_err; #endif ctx->rootfs = true; break; case Opt_posixpaths: if (result.negated) ctx->posix_paths = 0; else ctx->posix_paths = 1; break; case Opt_unix: if (result.negated) { if (ctx->linux_ext == 1) pr_warn_once("conflicting posix mount options specified\n"); ctx->linux_ext = 0; ctx->no_linux_ext = 1; } else { if (ctx->no_linux_ext == 1) pr_warn_once("conflicting posix mount options specified\n"); ctx->linux_ext = 1; ctx->no_linux_ext = 0; } break; case Opt_nocase: ctx->nocase = 1; break; case Opt_brl: if (result.negated) { /* * turn off mandatory locking in mode * if remote locking is turned off since the * local vfs will do advisory */ if (ctx->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP))) ctx->file_mode = S_IALLUGO; ctx->nobrl = 1; } else ctx->nobrl = 0; break; case Opt_handlecache: if (result.negated) ctx->nohandlecache = 1; else ctx->nohandlecache = 0; break; case Opt_forcemandatorylock: ctx->mand_lock = 1; break; case Opt_setuids: ctx->setuids = result.negated; break; case Opt_intr: ctx->intr = !result.negated; break; case Opt_setuidfromacl: ctx->setuidfromacl = 1; break; case Opt_strictsync: ctx->nostrictsync = result.negated; break; case Opt_serverino: ctx->server_ino = !result.negated; break; case Opt_rwpidforward: ctx->rwpidforward = 1; break; case Opt_modesid: ctx->mode_ace = 1; break; case Opt_cifsacl: ctx->cifs_acl = !result.negated; break; case Opt_acl: ctx->no_psx_acl = result.negated; break; case Opt_locallease: ctx->local_lease = 1; break; case Opt_sign: ctx->sign = true; break; case Opt_ignore_signature: ctx->sign = true; ctx->ignore_signature = true; break; case Opt_seal: /* we do not do the following in secFlags because seal * is a per tree connection (mount) not a per socket * or per-smb connection option in the protocol * vol->secFlg |= CIFSSEC_MUST_SEAL; */ ctx->seal = 1; break; case Opt_noac: pr_warn("Mount option noac not supported. 
Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n"); break; case Opt_fsc: #ifndef CONFIG_CIFS_FSCACHE cifs_errorf(fc, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n"); goto cifs_parse_mount_err; #endif ctx->fsc = true; break; case Opt_mfsymlinks: ctx->mfsymlinks = true; break; case Opt_multiuser: ctx->multiuser = true; break; case Opt_sloppy: ctx->sloppy = true; break; case Opt_nosharesock: ctx->nosharesock = true; break; case Opt_persistent: if (result.negated) { ctx->nopersistent = true; if (ctx->persistent) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } else { ctx->persistent = true; if ((ctx->nopersistent) || (ctx->resilient)) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } break; case Opt_resilient: if (result.negated) { ctx->resilient = false; /* already the default */ } else { ctx->resilient = true; if (ctx->persistent) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } break; case Opt_tcp_nodelay: /* tcp nodelay should not usually be needed since we CORK/UNCORK the socket */ if (result.negated) ctx->sockopt_tcp_nodelay = false; else ctx->sockopt_tcp_nodelay = true; break; case Opt_domainauto: ctx->domainauto = true; break; case Opt_rdma: ctx->rdma = true; break; } /* case Opt_ignore: - is ignored as expected ... */ return 0; cifs_parse_mount_err: kfree_sensitive(ctx->password); return -EINVAL; } int smb3_init_fs_context(struct fs_context *fc) { struct smb3_fs_context *ctx; char *nodename = utsname()->nodename; int i; ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (unlikely(!ctx)) return -ENOMEM; strscpy(ctx->workstation_name, nodename, sizeof(ctx->workstation_name)); /* * does not have to be perfect mapping since field is * informational, only used for servers that do not support * port 445 and it can be overridden at mount time */ memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++) ctx->source_rfc1001_name[i] = toupper(nodename[i]); ctx->source_rfc1001_name[RFC1001_NAME_LEN] = 0; /* * null target name indicates to use *SMBSERVR default called name * if we end up sending RFC1001 session initialize */ ctx->target_rfc1001_name[0] = 0; ctx->cred_uid = current_uid(); ctx->linux_uid = current_uid(); ctx->linux_gid = current_gid(); /* By default 4MB read ahead size, 1MB block size */ ctx->bsize = CIFS_DEFAULT_IOSIZE; /* can improve cp performance significantly */ ctx->rasize = 0; /* 0 = use default (ie negotiated rsize) for read ahead pages */ /* * default to SFM style remapping of seven reserved characters * unless user overrides it or we negotiate CIFS POSIX where * it is unnecessary. Can not simultaneously use more than one mapping * since then readdir could list files that open could not open */ ctx->remap = true; /* default to only allowing write access to owner of the mount */ ctx->dir_mode = ctx->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; /* ctx->retry default is 0 (i.e. "soft" limited retry not hard retry) */ /* default is always to request posix paths. 
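 */

	/*
	 * Editor's note: the S_IRUGO | S_IXUGO | S_IWUSR default just
	 * above is octal 0755 -- everyone may read and traverse, only
	 * the mount owner may write -- unless file_mode=/dir_mode=
	 * override it at mount time.
	 */

	/*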
*/ ctx->posix_paths = 1; /* default to using server inode numbers where available */ ctx->server_ino = 1; /* default is to use strict cifs caching semantics */ ctx->strict_io = true; ctx->acregmax = CIFS_DEF_ACTIMEO; ctx->acdirmax = CIFS_DEF_ACTIMEO; ctx->closetimeo = SMB3_DEF_DCLOSETIMEO; ctx->max_cached_dirs = MAX_CACHED_FIDS; /* Most clients set timeout to 0, allows server to use its default */ ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ ctx->ops = &smb30_operations; ctx->vals = &smbdefault_values; ctx->echo_interval = SMB_ECHO_INTERVAL_DEFAULT; /* default to no multichannel (single server connection) */ ctx->multichannel = false; ctx->max_channels = 1; ctx->backupuid_specified = false; /* no backup intent for a user */ ctx->backupgid_specified = false; /* no backup intent for a group */ /* * short int override_uid = -1; * short int override_gid = -1; * char *nodename = strdup(utsname()->nodename); * struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr; */ fc->fs_private = ctx; fc->ops = &smb3_fs_context_ops; return 0; } void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx) { if (ctx == NULL) return; /* * Make sure this stays in sync with smb3_fs_context_dup() */ kfree(ctx->username); ctx->username = NULL; kfree_sensitive(ctx->password); ctx->password = NULL; kfree(ctx->server_hostname); ctx->server_hostname = NULL; kfree(ctx->UNC); ctx->UNC = NULL; kfree(ctx->source); ctx->source = NULL; kfree(ctx->domainname); ctx->domainname = NULL; kfree(ctx->nodename); ctx->nodename = NULL; kfree(ctx->iocharset); ctx->iocharset = NULL; kfree(ctx->prepath); ctx->prepath = NULL; kfree(ctx->leaf_fullpath); ctx->leaf_fullpath = NULL; } void smb3_cleanup_fs_context(struct smb3_fs_context *ctx) { if (!ctx) return; smb3_cleanup_fs_context_contents(ctx); kfree(ctx); } void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb) { struct smb3_fs_context *ctx = cifs_sb->ctx; if (ctx->nodfs) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS; if (ctx->noperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM; if (ctx->setuids) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID; if (ctx->setuidfromacl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL; if (ctx->server_ino) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; if (ctx->remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR; if (ctx->sfu_remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR; if (ctx->no_xattr) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR; if (ctx->sfu_emul) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL; if (ctx->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL; if (ctx->nohandlecache) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE; if (ctx->nostrictsync) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC; if (ctx->mand_lock) cifs_sb->mnt_cifs_flags |= 
CIFS_MOUNT_NOPOSIXBRL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL; if (ctx->rwpidforward) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD; if (ctx->mode_ace) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID; if (ctx->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL; if (ctx->backupuid_specified) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID; if (ctx->backupgid_specified) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID; if (ctx->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID; if (ctx->override_gid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID; if (ctx->dynperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM; if (ctx->fsc) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE; if (ctx->multiuser) cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM); else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER; if (ctx->strict_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO; if (ctx->direct_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO; if (ctx->mfsymlinks) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS; if (ctx->mfsymlinks) { if (ctx->sfu_emul) { /* * Our SFU ("Services for Unix" emulation does not allow * creating symlinks but does allow reading existing SFU * symlinks (it does allow both creating and reading SFU * style mknod and FIFOs though). When "mfsymlinks" and * "sfu" are both enabled at the same time, it allows * reading both types of symlinks, but will only create * them with mfsymlinks format. This allows better * Apple compatibility (probably better for Samba too) * while still recognizing old Windows style symlinks. */ cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n"); } } cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN; return; }
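
/*
 * Editor's sketch (hypothetical, not in the kernel source): each of the
 * dozens of if/else pairs in smb3_update_mnt_flags() above sets or
 * clears one bit from a boolean, i.e. this helper applied once per
 * option. The kernel keeps the explicit if/else form, which greps
 * better when chasing a single mount flag.
 */
#if 0
static void set_mnt_flag(unsigned int *flags, unsigned int bit, int on)
{
	if (on)
		*flags |= bit;
	else
		*flags &= ~bit;
}
/* e.g.: set_mnt_flag(&cifs_sb->mnt_cifs_flags, CIFS_MOUNT_NO_DFS, ctx->nodfs); */
#endif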
linux-master
fs/smb/client/fs_context.c
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French ([email protected]) * * Contains the routines for constructing the SMB PDUs themselves * */ /* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */ /* These are mostly routines that operate on a pathname, or on a tree id */ /* (mounted volume), but there are eight handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/posix_acl_xattr.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/task_io_accounting_ops.h> #include <linux/uaccess.h> #include "cifspdu.h" #include "cifsfs.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "fscache.h" #include "smbdirect.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif #ifdef CONFIG_CIFS_POSIX static struct { int index; char *name; } protocols[] = { {CIFS_PROT, "\2NT LM 0.12"}, {POSIX_PROT, "\2POSIX 2"}, {BAD_PROT, "\2"} }; #else static struct { int index; char *name; } protocols[] = { {CIFS_PROT, "\2NT LM 0.12"}, {BAD_PROT, "\2"} }; #endif /* define the number of elements in the cifs dialect array */ #ifdef CONFIG_CIFS_POSIX #define CIFS_NUM_PROT 2 #else /* not posix */ #define CIFS_NUM_PROT 1 #endif /* CIFS_POSIX */ /* reconnect the socket, tcon, and smb session if needed */ static int cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) { int rc; struct cifs_ses *ses; struct TCP_Server_Info *server; struct nls_table *nls_codepage = NULL; /* * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for * tcp and smb session status done differently for those three - in the * calling routine */ if (!tcon) return 0; ses = tcon->ses; server = ses->server; /* * only tree disconnect, open, and write, (and ulogoff which does not * have tcon) are allowed as we start umount */ spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { if (smb_command != SMB_COM_TREE_DISCONNECT) { spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb_command); return -ENODEV; } } spin_unlock(&tcon->tc_lock); again: rc = cifs_wait_for_server_reconnect(server, tcon->retry); if (rc) return rc; spin_lock(&ses->chan_lock); if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) { spin_unlock(&ses->chan_lock); return 0; } spin_unlock(&ses->chan_lock); mutex_lock(&ses->session_mutex); /* * Recheck after acquire mutex. If another thread is negotiating * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. 
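 */

	/*
	 * Editor's note: if the server dropped again while we slept on
	 * session_mutex, the code below unlocks and, when tcon->retry is
	 * set (a "hard" mount), jumps back to the "again" label to wait
	 * for the next reconnect; a "soft" mount fails with -EHOSTDOWN
	 * instead.
	 */

	/*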
*/ spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { spin_unlock(&server->srv_lock); mutex_unlock(&ses->session_mutex); if (tcon->retry) goto again; rc = -EHOSTDOWN; goto out; } spin_unlock(&server->srv_lock); nls_codepage = ses->local_nls; /* * need to prevent multiple threads trying to simultaneously * reconnect the same SMB session */ spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); if (!cifs_chan_needs_reconnect(ses, server) && ses->ses_status == SES_GOOD) { spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); /* this means that we only need to tree connect */ if (tcon->need_reconnect) goto skip_sess_setup; mutex_unlock(&ses->session_mutex); goto out; } spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); rc = cifs_negotiate_protocol(0, ses, server); if (!rc) rc = cifs_setup_session(0, ses, server, nls_codepage); /* do we need to reconnect tcon? */ if (rc || !tcon->need_reconnect) { mutex_unlock(&ses->session_mutex); goto out; } skip_sess_setup: cifs_mark_open_files_invalid(tcon); rc = cifs_tree_connect(0, tcon, nls_codepage); mutex_unlock(&ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); if (rc) { pr_warn_once("reconnect tcon failed rc = %d\n", rc); goto out; } atomic_inc(&tconInfoReconnectCount); /* tell server Unix caps we support */ if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, NULL); /* * Removed call to reopen open files here. It is safer (and faster) to * reopen files one at a time as needed in read and write. * * FIXME: what about file locks? don't we need to reclaim them ASAP? */ out: /* * Check if handle based operation so we know whether we can continue * or not without returning to caller to reset file handle */ switch (smb_command) { case SMB_COM_READ_ANDX: case SMB_COM_WRITE_ANDX: case SMB_COM_CLOSE: case SMB_COM_FIND_CLOSE2: case SMB_COM_LOCKING_ANDX: rc = -EAGAIN; } return rc; } /* Allocate and return pointer to an SMB request buffer, and set basic SMB information in the SMB header. If the return code is zero, this function must have filled in request_buf pointer */ static int small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf) { int rc; rc = cifs_reconnect_tcon(tcon, smb_command); if (rc) return rc; *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } int small_smb_init_no_tc(const int smb_command, const int wct, struct cifs_ses *ses, void **request_buf) { int rc; struct smb_hdr *buffer; rc = small_smb_init(smb_command, wct, NULL, request_buf); if (rc) return rc; buffer = (struct smb_hdr *)*request_buf; buffer->Mid = get_next_mid(ses->server); if (ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (ses->capabilities & CAP_STATUS32) buffer->Flags2 |= SMBFLG2_ERR_STATUS; /* uid, tid can stay at zero as set in header assemble */ /* BB add support for turning on the signing when this function is used after 1st of session setup requests */ return rc; } /* If the return code is zero, this function must fill in request_buf pointer */ static int __smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { *request_buf = cifs_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? 
*/ return -ENOMEM; } /* Although the original thought was we needed the response buf for */ /* potential retries of smb operations it turns out we can determine */ /* from the mid flags when the request buffer can be resent without */ /* having to use a second distinct buffer for the response */ if (response_buf) *response_buf = *request_buf; header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } /* If the return code is zero, this function must fill in request_buf pointer */ static int smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { int rc; rc = cifs_reconnect_tcon(tcon, smb_command); if (rc) return rc; return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { spin_lock(&tcon->ses->chan_lock); if (cifs_chan_needs_reconnect(tcon->ses, tcon->ses->server) || tcon->need_reconnect) { spin_unlock(&tcon->ses->chan_lock); return -EHOSTDOWN; } spin_unlock(&tcon->ses->chan_lock); return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int validate_t2(struct smb_t2_rsp *pSMB) { unsigned int total_size; /* check for plausible wct */ if (pSMB->hdr.WordCount < 10) goto vt2_err; /* check for parm and data offset going beyond end of smb */ if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 || get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024) goto vt2_err; total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount); if (total_size >= 512) goto vt2_err; /* check that bcc is at least as big as parms + data, and that it is * less than negotiated smb buffer */ total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount); if (total_size > get_bcc(&pSMB->hdr) || total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) goto vt2_err; return 0; vt2_err: cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB, sizeof(struct smb_t2_rsp) + 16); return -EINVAL; } static int decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr) { int rc = 0; u16 count; char *guid = pSMBr->u.extended_response.GUID; struct TCP_Server_Info *server = ses->server; count = get_bcc(&pSMBr->hdr); if (count < SMB1_CLIENT_GUID_SIZE) return -EIO; spin_lock(&cifs_tcp_ses_lock); if (server->srv_count > 1) { spin_unlock(&cifs_tcp_ses_lock); if (memcmp(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE) != 0) { cifs_dbg(FYI, "server UID changed\n"); memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE); } } else { spin_unlock(&cifs_tcp_ses_lock); memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE); } if (count == SMB1_CLIENT_GUID_SIZE) { server->sec_ntlmssp = true; } else { count -= SMB1_CLIENT_GUID_SIZE; rc = decode_negTokenInit( pSMBr->u.extended_response.SecurityBlob, count, server); if (rc != 1) return -EINVAL; } return 0; } static bool should_set_ext_sec_flag(enum securityEnum sectype) { switch (sectype) { case RawNTLMSSP: case Kerberos: return true; case Unspecified: if (global_secflags & (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)) return true; fallthrough; default: return false; } } int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { NEGOTIATE_REQ *pSMB; NEGOTIATE_RSP *pSMBr; int rc = 0; int bytes_returned; int i; u16 count; if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ , (void **) &pSMB, (void 
**) &pSMBr); if (rc) return rc; pSMB->hdr.Mid = get_next_mid(server); pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); if (should_set_ext_sec_flag(ses->sectype)) { cifs_dbg(FYI, "Requesting extended security\n"); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; } count = 0; /* * We know that all the name entries in the protocols array * are short (< 16 bytes anyway) and are NUL terminated. */ for (i = 0; i < CIFS_NUM_PROT; i++) { size_t len = strlen(protocols[i].name) + 1; memcpy(&pSMB->DialectsArray[count], protocols[i].name, len); count += len; } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc != 0) goto neg_err_exit; server->dialect = le16_to_cpu(pSMBr->DialectIndex); cifs_dbg(FYI, "Dialect: %d\n", server->dialect); /* Check wct = 1 error case */ if ((pSMBr->hdr.WordCount <= 13) || (server->dialect == BAD_PROT)) { /* core returns wct = 1, but we do not ask for core - otherwise small wct just comes when dialect index is -1 indicating we could not negotiate a common dialect */ rc = -EOPNOTSUPP; goto neg_err_exit; } else if (pSMBr->hdr.WordCount != 17) { /* unknown wct */ rc = -EOPNOTSUPP; goto neg_err_exit; } /* else wct == 17, NTLM or better */ server->sec_mode = pSMBr->SecurityMode; if ((server->sec_mode & SECMODE_USER) == 0) cifs_dbg(FYI, "share mode security\n"); /* one byte, so no need to convert this or EncryptionKeyLen from little endian */ server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), cifs_max_pending); set_credits(server, server->maxReq); /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); /* set up max_read for readahead check */ server->max_read = server->maxBuf; server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf); server->capabilities = le32_to_cpu(pSMBr->Capabilities); server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); server->timeAdj *= 60; if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { server->negflavor = CIFS_NEGFLAVOR_UNENCAP; memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, CIFS_CRYPTO_KEY_SIZE); } else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC || server->capabilities & CAP_EXTENDED_SECURITY) { server->negflavor = CIFS_NEGFLAVOR_EXTENDED; rc = decode_ext_sec_blob(ses, pSMBr); } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { rc = -EIO; /* no crypt key only if plain text pwd */ } else { server->negflavor = CIFS_NEGFLAVOR_UNENCAP; server->capabilities &= ~CAP_EXTENDED_SECURITY; } if (!rc) rc = cifs_enable_signing(server, ses->sign); neg_err_exit: cifs_buf_release(pSMB); cifs_dbg(FYI, "negprot rc %d\n", rc); return rc; } int CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon) { struct smb_hdr *smb_buffer; int rc = 0; cifs_dbg(FYI, "In tree disconnect\n"); /* BB: do we need to check this? These should never be NULL. */ if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) return -EIO; /* * No need to return error on this operation if tid invalidated and * closed on server already e.g. due to tcp session crashing. Also, * the tcon is no longer on the list, so no need to take lock before * checking this. 
*/ spin_lock(&tcon->ses->chan_lock); if ((tcon->need_reconnect) || CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses)) { spin_unlock(&tcon->ses->chan_lock); return -EIO; } spin_unlock(&tcon->ses->chan_lock); rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, (void **)&smb_buffer); if (rc) return rc; rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0); cifs_small_buf_release(smb_buffer); if (rc) cifs_dbg(FYI, "Tree disconnect failed %d\n", rc); /* No need to return error on this operation if tid invalidated and closed on server already e.g. due to tcp session crashing */ if (rc == -EAGAIN) rc = 0; return rc; } /* * This is a no-op for now. We're not really interested in the reply, but * rather in the fact that the server sent one and that server->lstrp * gets updated. * * FIXME: maybe we should consider checking that the reply matches request? */ static void cifs_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct cifs_credits credits = { .value = 1, .instance = 0 }; release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } int CIFSSMBEcho(struct TCP_Server_Info *server) { ECHO_REQ *smb; int rc = 0; struct kvec iov[2]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 }; cifs_dbg(FYI, "In echo request\n"); rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb); if (rc) return rc; if (server->capabilities & CAP_UNICODE) smb->hdr.Flags2 |= SMBFLG2_UNICODE; /* set up echo request */ smb->hdr.Tid = 0xffff; smb->hdr.WordCount = 1; put_unaligned_le16(1, &smb->EchoCount); put_bcc(1, &smb->hdr); smb->Data[0] = 'a'; inc_rfc1001_len(smb, 3); iov[0].iov_len = 4; iov[0].iov_base = smb; iov[1].iov_len = get_rfc1002_length(smb); iov[1].iov_base = (char *)smb + 4; rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL, server, CIFS_NON_BLOCKING | CIFS_ECHO_OP, NULL); if (rc) cifs_dbg(FYI, "Echo request failed: %d\n", rc); cifs_small_buf_release(smb); return rc; } int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses) { LOGOFF_ANDX_REQ *pSMB; int rc = 0; cifs_dbg(FYI, "In SMBLogoff for session disconnect\n"); /* * BB: do we need to check validity of ses and server? They should * always be valid since we have an active reference. 
If not, that * should probably be a BUG() */ if (!ses || !ses->server) return -EIO; mutex_lock(&ses->session_mutex); spin_lock(&ses->chan_lock); if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { spin_unlock(&ses->chan_lock); goto session_already_dead; /* no need to send SMBlogoff if uid already closed due to reconnect */ } spin_unlock(&ses->chan_lock); rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); if (rc) { mutex_unlock(&ses->session_mutex); return rc; } pSMB->hdr.Mid = get_next_mid(ses->server); if (ses->server->sign) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; pSMB->AndXCommand = 0xFF; rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); session_already_dead: mutex_unlock(&ses->session_mutex); /* if session dead then we do not need to do ulogoff, since server closed smb session, no sense reporting error */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, __u16 type, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; struct unlink_psx_rq *pRqD; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cifs_dbg(FYI, "In POSIX delete\n"); PsxDelete: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = 0; /* BB double check this with jra */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* Setup pointer to Request Data (inode type). 
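* The data area here is a single struct unlink_psx_rq carrying the
* __le16 unlink type passed in by the caller.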
* Note that SMB offsets are from the beginning of SMB which is 4 bytes * in, after RFC1001 field */ pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4); pRqD->type = cpu_to_le16(type); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + sizeof(struct unlink_psx_rq); pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq)); pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq)); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "Posix delete returned %d\n", rc); cifs_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes); if (rc == -EAGAIN) goto PsxDelete; return rc; } int CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { DELETE_FILE_REQ *pSMB = NULL; DELETE_FILE_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; int remap = cifs_remap(cifs_sb); DelFileRetry: rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->fileName, name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->fileName, name); } pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes); if (rc) cifs_dbg(FYI, "Error in RMFile = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto DelFileRetry; return rc; } int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { DELETE_DIRECTORY_REQ *pSMB = NULL; DELETE_DIRECTORY_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, "In CIFSSMBRmDir\n"); RmDirRetry: rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->DirName, name); } pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs); if (rc) cifs_dbg(FYI, "Error in RMDir = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto RmDirRetry; return rc; } int CIFSSMBMkDir(const unsigned int xid, struct inode *inode, umode_t mode, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { int rc = 0; CREATE_DIRECTORY_REQ *pSMB = NULL; CREATE_DIRECTORY_RSP *pSMBr = NULL; int bytes_returned; int name_len; int remap = 
cifs_remap(cifs_sb); cifs_dbg(FYI, "In CIFSSMBMkDir\n"); MkDirRetry: rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->DirName, name); } pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs); if (rc) cifs_dbg(FYI, "Error in Mkdir = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto MkDirRetry; return rc; } int CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon, __u32 posix_flags, __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock, const char *name, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count, count; OPEN_PSX_REQ *pdata; OPEN_PSX_RSP *psx_rsp; cifs_dbg(FYI, "In POSIX Create\n"); PsxCreat: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, name); } params = 6 + name_len; count = sizeof(OPEN_PSX_REQ); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* large enough */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4); pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); pdata->Permissions = cpu_to_le64(mode); pdata->PosixOpenFlags = cpu_to_le32(posix_flags); pdata->OpenFlags = cpu_to_le32(*pOplock); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Posix create returned %d\n", rc); goto psx_create_err; } cifs_dbg(FYI, "copying inode info\n"); rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) { rc = -EIO; /* bad smb */ goto psx_create_err; } /* copy return information to pRetData */ psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset)); *pOplock = le16_to_cpu(psx_rsp->OplockFlags); if (netfid) *netfid = psx_rsp->Fid; /* cifs fid stays in le */ /* Let 
caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction) *pOplock |= CIFS_CREATE_ACTION; /* check to make sure response data is there */ if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { pRetData->Type = cpu_to_le32(-1); /* unknown */ cifs_dbg(NOISY, "unknown type\n"); } else { if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP) + sizeof(FILE_UNIX_BASIC_INFO)) { cifs_dbg(VFS, "Open response data too small\n"); pRetData->Type = cpu_to_le32(-1); goto psx_create_err; } memcpy((char *) pRetData, (char *)psx_rsp + sizeof(OPEN_PSX_RSP), sizeof(FILE_UNIX_BASIC_INFO)); } psx_create_err: cifs_buf_release(pSMB); if (posix_flags & SMB_O_DIRECTORY) cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs); else cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens); if (rc == -EAGAIN) goto PsxCreat; return rc; } static __u16 convert_disposition(int disposition) { __u16 ofun = 0; switch (disposition) { case FILE_SUPERSEDE: ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; break; case FILE_OPEN: ofun = SMBOPEN_OAPPEND; break; case FILE_CREATE: ofun = SMBOPEN_OCREATE; break; case FILE_OPEN_IF: ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND; break; case FILE_OVERWRITE: ofun = SMBOPEN_OTRUNC; break; case FILE_OVERWRITE_IF: ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; break; default: cifs_dbg(FYI, "unknown disposition %d\n", disposition); ofun = SMBOPEN_OAPPEND; /* regular open */ } return ofun; } static int access_flags_to_smbopen_mode(const int access_flags) { int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE); if (masked_flags == GENERIC_READ) return SMBOPEN_READ; else if (masked_flags == GENERIC_WRITE) return SMBOPEN_WRITE; /* just go for read/write */ return SMBOPEN_READWRITE; } int SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const int openDisposition, const int access_flags, const int create_options, __u16 *netfid, int *pOplock, FILE_ALL_INFO *pfile_info, const struct nls_table *nls_codepage, int remap) { int rc; OPENX_REQ *pSMB = NULL; OPENX_RSP *pSMBr = NULL; int bytes_returned; int name_len; __u16 count; OldOpenRetry: rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->AndXCommand = 0xFF; /* none */ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { count = 1; /* account for one byte pad to word boundary */ name_len = cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1), fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { count = 0; /* no pad */ name_len = copy_path_name(pSMB->fileName, fileName); } if (*pOplock & REQ_OPLOCK) pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); else if (*pOplock & REQ_BATCHOPLOCK) pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK); pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO); pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags)); pSMB->Mode |= cpu_to_le16(0x40); /* deny none */ /* set file as system file if special file such as fifo and server expecting SFU style and no Unix extensions */ if (create_options & CREATE_OPTION_SPECIAL) pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM); else /* BB FIXME BB */ pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); if (create_options & CREATE_OPTION_READONLY) pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY); /* BB FIXME BB */ /* pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); */ /* BB FIXME END BB */ pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | 
ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition)); count += name_len; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_opens); if (rc) { cifs_dbg(FYI, "Error in Open = %d\n", rc); } else { /* BB verify if wct == 15 */ /* *pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/ *netfid = pSMBr->Fid; /* cifs fid stays in le */ /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ /* BB FIXME BB */ /* if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction) *pOplock |= CIFS_CREATE_ACTION; */ /* BB FIXME END */ if (pfile_info) { pfile_info->CreationTime = 0; /* BB convert CreateTime*/ pfile_info->LastAccessTime = 0; /* BB fixme */ pfile_info->LastWriteTime = 0; /* BB fixme */ pfile_info->ChangeTime = 0; /* BB fixme */ pfile_info->Attributes = cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes)); /* the file_info buf is endian converted by caller */ pfile_info->AllocationSize = cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile)); pfile_info->EndOfFile = pfile_info->AllocationSize; pfile_info->NumberOfLinks = cpu_to_le32(1); pfile_info->DeletePending = 0; } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto OldOpenRetry; return rc; } int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock, FILE_ALL_INFO *buf) { int rc; OPEN_REQ *req = NULL; OPEN_RSP *rsp = NULL; int bytes_returned; int name_len; __u16 count; struct cifs_sb_info *cifs_sb = oparms->cifs_sb; struct cifs_tcon *tcon = oparms->tcon; int remap = cifs_remap(cifs_sb); const struct nls_table *nls = cifs_sb->local_nls; int create_options = oparms->create_options; int desired_access = oparms->desired_access; int disposition = oparms->disposition; const char *path = oparms->path; openRetry: rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req, (void **)&rsp); if (rc) return rc; /* no commands go after this */ req->AndXCommand = 0xFF; if (req->hdr.Flags2 & SMBFLG2_UNICODE) { /* account for one byte pad to word boundary */ count = 1; name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1), path, PATH_MAX, nls, remap); /* trailing null */ name_len++; name_len *= 2; req->NameLength = cpu_to_le16(name_len); } else { /* BB improve check for buffer overruns BB */ /* no pad */ count = 0; name_len = copy_path_name(req->fileName, path); req->NameLength = cpu_to_le16(name_len); } if (*oplock & REQ_OPLOCK) req->OpenFlags = cpu_to_le32(REQ_OPLOCK); else if (*oplock & REQ_BATCHOPLOCK) req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK); req->DesiredAccess = cpu_to_le32(desired_access); req->AllocationSize = 0; /* * Set file as system file if special file such as fifo and server * expecting SFU style and no Unix extensions. */ if (create_options & CREATE_OPTION_SPECIAL) req->FileAttributes = cpu_to_le32(ATTR_SYSTEM); else req->FileAttributes = cpu_to_le32(ATTR_NORMAL); /* * XP does not handle ATTR_POSIX_SEMANTICS but it helps speed up case * sensitive checks for other servers such as Samba. 
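* (Hence the flag is only added below when the session negotiated
* CAP_UNIX, where the server can be expected to accept it.)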
*/ if (tcon->ses->capabilities & CAP_UNIX) req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS); if (create_options & CREATE_OPTION_READONLY) req->FileAttributes |= cpu_to_le32(ATTR_READONLY); req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL); req->CreateDisposition = cpu_to_le32(disposition); req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); /* BB Experiment with various impersonation levels and verify */ req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION); req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY; count += name_len; inc_rfc1001_len(req, count); req->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req, (struct smb_hdr *)rsp, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_opens); if (rc) { cifs_dbg(FYI, "Error in Open = %d\n", rc); cifs_buf_release(req); if (rc == -EAGAIN) goto openRetry; return rc; } /* 1 byte no need to le_to_cpu */ *oplock = rsp->OplockLevel; /* cifs fid stays in le */ oparms->fid->netfid = rsp->Fid; oparms->fid->access = desired_access; /* Let caller know file was created so we can set the mode. */ /* Do we care about the CreateAction in any other cases? */ if (cpu_to_le32(FILE_CREATE) == rsp->CreateAction) *oplock |= CIFS_CREATE_ACTION; if (buf) { /* copy from CreationTime to Attributes */ memcpy((char *)buf, (char *)&rsp->CreationTime, 36); /* the file_info buf is endian converted by caller */ buf->AllocationSize = rsp->AllocationSize; buf->EndOfFile = rsp->EndOfFile; buf->NumberOfLinks = cpu_to_le32(1); buf->DeletePending = 0; } cifs_buf_release(req); return rc; } static void cifs_readv_callback(struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2, .rq_iter_size = iov_iter_count(&rdata->iter), .rq_iter = rdata->iter }; struct cifs_credits credits = { .value = 1, .instance = 0 }; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", __func__, mid->mid, mid->mid_state, rdata->result, rdata->bytes); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: /* result already set, check signature */ if (server->sign) { int rc = 0; rc = cifs_verify_signature(&rqst, server, mid->sequence_number); if (rc) cifs_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: rdata->result = -EAGAIN; if (server->sign && rdata->got_bytes) /* reset the byte count since we cannot verify a signature */ rdata->got_bytes = 0; /* FIXME: should this be counted toward the initiating task?
*/ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; default: rdata->result = -EIO; } queue_work(cifsiod_wq, &rdata->work); release_mid(mid); add_credits(server, &credits, 0); } /* cifs_async_readv - send an async read, and set up mid to handle result */ int cifs_async_readv(struct cifs_readdata *rdata) { int rc; READ_REQ *smb = NULL; int wct; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2 }; cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", __func__, rdata->offset, rdata->bytes); if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 12; else { wct = 10; /* old style read */ if ((rdata->offset >> 32) > 0) { /* can not handle this big offset for old */ return -EIO; } } rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb); if (rc) return rc; smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid); smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16)); smb->AndXCommand = 0xFF; /* none */ smb->Fid = rdata->cfile->fid.netfid; smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF); if (wct == 12) smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32); smb->Remaining = 0; smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF); smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16); if (wct == 12) smb->ByteCount = 0; else { /* old style read */ struct smb_com_readx_req *smbr = (struct smb_com_readx_req *)smb; smbr->ByteCount = 0; } /* 4 for RFC1001 length + 1 for BCC */ rdata->iov[0].iov_base = smb; rdata->iov[0].iov_len = 4; rdata->iov[1].iov_base = (char *)smb + 4; rdata->iov[1].iov_len = get_rfc1002_length(smb); kref_get(&rdata->refcount); rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive, cifs_readv_callback, NULL, rdata, 0, NULL); if (rc == 0) cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); else kref_put(&rdata->refcount, cifs_readdata_release); cifs_small_buf_release(smb); return rc; } int CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *pbuf_type) { int rc = -EACCES; READ_REQ *pSMB = NULL; READ_RSP *pSMBr = NULL; char *pReadData = NULL; int wct; int resp_buf_type = 0; struct kvec iov[1]; struct kvec rsp_iov; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; cifs_dbg(FYI, "Reading %d bytes on fid %d\n", count, netfid); if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 12; else { wct = 10; /* old style read */ if ((offset >> 32) > 0) { /* can not handle this big offset for old */ return -EIO; } } *nbytes = 0; rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 12) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Remaining = 0; pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); pSMB->MaxCountHigh = cpu_to_le32(count >> 16); if (wct == 12) pSMB->ByteCount = 0; /* no need to do le conversion since 0 */ else { /* old style read */ struct smb_com_readx_req *pSMBW = (struct smb_com_readx_req *)pSMB; pSMBW->ByteCount = 0; } iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov,
1, &resp_buf_type, CIFS_LOG_ERROR, &rsp_iov); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); pSMBr = (READ_RSP *)rsp_iov.iov_base; if (rc) { cifs_dbg(VFS, "Send error in read = %d\n", rc); } else { int data_length = le16_to_cpu(pSMBr->DataLengthHigh); data_length = data_length << 16; data_length += le16_to_cpu(pSMBr->DataLength); *nbytes = data_length; /*check that DataLength would not go beyond end of SMB */ if ((data_length > CIFSMaxBufSize) || (data_length > count)) { cifs_dbg(FYI, "bad length %d for count %d\n", data_length, count); rc = -EIO; *nbytes = 0; } else { pReadData = (char *) (&pSMBr->hdr.Protocol) + le16_to_cpu(pSMBr->DataOffset); /* if (rc = copy_to_user(buf, pReadData, data_length)) { cifs_dbg(VFS, "Faulting on read rc = %d\n",rc); rc = -EFAULT; }*/ /* can not use copy_to_user when using page cache*/ if (*buf) memcpy(*buf, pReadData, data_length); } } if (*buf) { free_rsp_buf(resp_buf_type, rsp_iov.iov_base); } else if (resp_buf_type != CIFS_NO_BUFFER) { /* return buffer to caller to free */ *buf = rsp_iov.iov_base; if (resp_buf_type == CIFS_SMALL_BUFFER) *pbuf_type = CIFS_SMALL_BUFFER; else if (resp_buf_type == CIFS_LARGE_BUFFER) *pbuf_type = CIFS_LARGE_BUFFER; } /* else no valid buffer on return - leave as null */ /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, const char *buf) { int rc = -EACCES; WRITE_REQ *pSMB = NULL; WRITE_RSP *pSMBr = NULL; int bytes_returned, wct; __u32 bytes_sent; __u16 byte_count; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; *nbytes = 0; /* cifs_dbg(FYI, "write at %lld %d bytes\n", offset, count);*/ if (tcon->ses == NULL) return -ECONNABORTED; if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 14; else { wct = 12; if ((offset >> 32) > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; /* Can increase buffer size if buffer is big enough in some cases ie we can send more if LARGE_WRITE_X capability returned by the server and if our buffer is big enough or if we convert to iovecs on socket writes and eliminate the copy to the CIFS buffer */ if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) { bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count); } else { bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & ~0xFF; } if (bytes_sent > count) bytes_sent = count; pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); if (buf) memcpy(pSMB->Data, buf, bytes_sent); else if (count != 0) { /* No buffer */ cifs_buf_release(pSMB); return -EINVAL; } /* else setting file size with write of zero bytes */ if (wct == 14) byte_count = bytes_sent + 1; /* pad */ else /* wct == 12 */ byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */ 
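/*
 * Illustrative sketch, not part of the original function: the pad math for
 * the two WRITE_ANDX layouts. The legacy (wct == 12) ByteCount field sits
 * 4 bytes earlier in the header, so 4 extra pad bytes keep the data at the
 * same offset.
 */
#if 0	/* example only, never compiled */
	__u16 bc_large = bytes_sent + 1;	/* wct == 14: single pad byte */
	__u16 bc_legacy = bytes_sent + 5;	/* wct == 12: 5 bytes of pad */
#endif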
pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF); pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16); inc_rfc1001_len(pSMB, byte_count); if (wct == 14) pSMB->ByteCount = cpu_to_le16(byte_count); else { /* old style write has byte count 4 bytes earlier so 4 bytes pad */ struct smb_com_writex_req *pSMBW = (struct smb_com_writex_req *)pSMB; pSMBW->ByteCount = cpu_to_le16(byte_count); } rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); if (rc) { cifs_dbg(FYI, "Send error in write = %d\n", rc); } else { *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); /* * Mask off high 16 bits when bytes written as returned by the * server is greater than bytes requested by the client. Some * OS/2 servers are known to set incorrect CountHigh values. */ if (*nbytes > count) *nbytes &= 0xFFFF; } cifs_buf_release(pSMB); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. */ static void cifs_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); unsigned int written; WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; struct cifs_credits credits = { .value = 1, .instance = 0 }; switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: wdata->result = cifs_check_receive(mid, tcon->ses->server, 0); if (wdata->result != 0) break; written = le16_to_cpu(smb->CountHigh); written <<= 16; written += le16_to_cpu(smb->Count); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. 
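* For example, a 0x1000-byte write answered with Count = 0x1000 but a
* stale CountHigh = 1 would otherwise read back as 0x11000 bytes; the
* masking below truncates that to the low 16 bits, i.e. 0x1000.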
*/ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; default: wdata->result = -EIO; break; } queue_work(cifsiod_wq, &wdata->work); release_mid(mid); add_credits(tcon->ses->server, &credits, 0); } /* cifs_async_writev - send an async write, and set up mid to handle result */ int cifs_async_writev(struct cifs_writedata *wdata, void (*release)(struct kref *kref)) { int rc = -EACCES; WRITE_REQ *smb = NULL; int wct; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct kvec iov[2]; struct smb_rqst rqst = { }; if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; } else { wct = 12; if (wdata->offset >> 32 > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb); if (rc) goto async_writev_out; smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid); smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16)); smb->AndXCommand = 0xFF; /* none */ smb->Fid = wdata->cfile->fid.netfid; smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF); if (wct == 14) smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32); smb->Reserved = 0xFFFFFFFF; smb->WriteMode = 0; smb->Remaining = 0; smb->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); /* 4 for RFC1001 length + 1 for BCC */ iov[0].iov_len = 4; iov[0].iov_base = smb; iov[1].iov_len = get_rfc1002_length(smb) + 1; iov[1].iov_base = (char *)smb + 4; rqst.rq_iov = iov; rqst.rq_nvec = 2; rqst.rq_iter = wdata->iter; rqst.rq_iter_size = iov_iter_count(&wdata->iter); cifs_dbg(FYI, "async write at %llu %u bytes\n", wdata->offset, wdata->bytes); smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF); smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16); if (wct == 14) { inc_rfc1001_len(&smb->hdr, wdata->bytes + 1); put_bcc(wdata->bytes + 1, &smb->hdr); } else { /* wct == 12 */ struct smb_com_writex_req *smbw = (struct smb_com_writex_req *)smb; inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); put_bcc(wdata->bytes + 5, &smbw->hdr); iov[1].iov_len += 4; /* pad bigger by four bytes */ } kref_get(&wdata->refcount); rc = cifs_call_async(tcon->ses->server, &rqst, NULL, cifs_writev_callback, NULL, wdata, 0, NULL); if (rc == 0) cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); else kref_put(&wdata->refcount, release); async_writev_out: cifs_small_buf_release(smb); return rc; } int CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { int rc; WRITE_REQ *pSMB = NULL; int wct; int smb_hdr_len; int resp_buf_type = 0; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = io_parms->length; struct kvec rsp_iov; *nbytes = 0; cifs_dbg(FYI, "write2 at %lld %d bytes\n", (long long)offset, count); if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; } else { wct = 12; if ((offset >> 32) > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct 
== 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF); pSMB->DataLengthHigh = cpu_to_le16(count >> 16); /* header + 1 byte pad */ smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1; if (wct == 14) inc_rfc1001_len(pSMB, count + 1); else /* wct == 12 */ inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */ if (wct == 14) pSMB->ByteCount = cpu_to_le16(count + 1); else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ { struct smb_com_writex_req *pSMBW = (struct smb_com_writex_req *)pSMB; pSMBW->ByteCount = cpu_to_le16(count + 5); } iov[0].iov_base = pSMB; if (wct == 14) iov[0].iov_len = smb_hdr_len + 4; else /* wct == 12 pad bigger by four bytes */ iov[0].iov_len = smb_hdr_len + 8; rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0, &rsp_iov); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); if (rc) { cifs_dbg(FYI, "Send error Write2 = %d\n", rc); } else if (resp_buf_type == 0) { /* presumably this can not happen, but best to be safe */ rc = -EIO; } else { WRITE_RSP *pSMBr = (WRITE_RSP *)rsp_iov.iov_base; *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); /* * Mask off high 16 bits when bytes written as returned by the * server is greater than bytes requested by the client. OS/2 * servers are known to set incorrect CountHigh values. */ if (*nbytes > count) *nbytes &= 0xFFFF; } free_rsp_buf(resp_buf_type, rsp_iov.iov_base); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u16 netfid, const __u8 lock_type, const __u32 num_unlock, const __u32 num_lock, LOCKING_ANDX_RANGE *buf) { int rc = 0; LOCK_REQ *pSMB = NULL; struct kvec iov[2]; struct kvec rsp_iov; int resp_buf_type; __u16 count; cifs_dbg(FYI, "cifs_lockv num lock %d num unlock %d\n", num_lock, num_unlock); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) return rc; pSMB->Timeout = 0; pSMB->NumberOfLocks = cpu_to_le16(num_lock); pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock); pSMB->LockType = lock_type; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; /* netfid stays le */ count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 - (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); iov[1].iov_base = (char *)buf; iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RSP_BUF, &rsp_iov); cifs_small_buf_release(pSMB); if (rc) cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc); return rc; } int CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const __u32 netpid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; /* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */ int bytes_returned; int flags = 0; __u16 count; cifs_dbg(FYI, "CIFSSMBLock timeout %d 
numLock %d\n", (int)waitFlag, numLock); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) return rc; if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { /* no response expected */ flags = CIFS_NO_SRV_RSP | CIFS_NON_BLOCKING | CIFS_OBREAK_OP; pSMB->Timeout = 0; } else if (waitFlag) { flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */ } else { pSMB->Timeout = 0; } pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ if ((numLock != 0) || (numUnlock != 0)) { pSMB->Locks[0].Pid = cpu_to_le16(netpid); /* BB where to store pid high? */ pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len); pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32)); pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset); pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32)); count = sizeof(LOCKING_ANDX_RANGE); } else { /* oplock break */ count = 0; } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); if (waitFlag) rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMB, &bytes_returned); else rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); if (rc) cifs_dbg(FYI, "Send error in Lock = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const __u32 netpid, const loff_t start_offset, const __u64 len, struct file_lock *pLockData, const __u16 lock_type, const bool waitFlag) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; struct cifs_posix_lock *parm_data; int rc = 0; int timeout = 0; int bytes_returned = 0; int resp_buf_type = 0; __u16 params, param_offset, offset, byte_count, count; struct kvec iov[1]; struct kvec rsp_iov; cifs_dbg(FYI, "Posix Lock\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB; params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct cifs_posix_lock); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */ pSMB->SetupCount = 1; pSMB->Reserved3 = 0; if (pLockData) pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); else pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ parm_data = (struct cifs_posix_lock *) (((char *)pSMB) + offset + 4); parm_data->lock_type = cpu_to_le16(lock_type); if (waitFlag) { timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ parm_data->lock_flags = cpu_to_le16(1); pSMB->Timeout = cpu_to_le32(-1); } else pSMB->Timeout = 0; 
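/* Marshal the cifs_posix_lock payload that follows the parameter block; all fields are little-endian on the wire. For the blocking case the Timeout above was already set to -1, i.e. wait indefinitely. */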
parm_data->pid = cpu_to_le32(netpid); parm_data->start = cpu_to_le64(start_offset); parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = smb_file_id; pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); if (waitFlag) { rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned); } else { iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, &resp_buf_type, timeout, &rsp_iov); pSMBr = (struct smb_com_transaction2_sfi_rsp *)rsp_iov.iov_base; } cifs_small_buf_release(pSMB); if (rc) { cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc); } else if (pLockData) { /* lock structure can be returned on get */ __u16 data_offset; __u16 data_count; rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) { rc = -EIO; /* bad smb */ goto plk_err_exit; } data_offset = le16_to_cpu(pSMBr->t2.DataOffset); data_count = le16_to_cpu(pSMBr->t2.DataCount); if (data_count < sizeof(struct cifs_posix_lock)) { rc = -EIO; goto plk_err_exit; } parm_data = (struct cifs_posix_lock *) ((char *)&pSMBr->hdr.Protocol + data_offset); if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) pLockData->fl_type = F_UNLCK; else { if (parm_data->lock_type == cpu_to_le16(CIFS_RDLCK)) pLockData->fl_type = F_RDLCK; else if (parm_data->lock_type == cpu_to_le16(CIFS_WRLCK)) pLockData->fl_type = F_WRLCK; pLockData->fl_start = le64_to_cpu(parm_data->start); pLockData->fl_end = pLockData->fl_start + (le64_to_cpu(parm_data->length) ? le64_to_cpu(parm_data->length) - 1 : 0); pLockData->fl_pid = -le32_to_cpu(parm_data->pid); } } plk_err_exit: free_rsp_buf(resp_buf_type, rsp_iov.iov_base); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id) { int rc = 0; CLOSE_REQ *pSMB = NULL; cifs_dbg(FYI, "In CIFSSMBClose\n"); /* do not retry on dead session on close */ rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB); if (rc == -EAGAIN) return 0; if (rc) return rc; pSMB->FileID = (__u16) smb_file_id; pSMB->LastWriteTime = 0xFFFFFFFF; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_closes); if (rc) { if (rc != -EINTR) { /* EINTR is expected when user ctl-c to kill app */ cifs_dbg(VFS, "Send error in Close = %d\n", rc); } } /* Since session is dead, file will be closed on server already */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id) { int rc = 0; FLUSH_REQ *pSMB = NULL; cifs_dbg(FYI, "In CIFSSMBFlush\n"); rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB); if (rc) return rc; pSMB->FileID = (__u16) smb_file_id; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes); if (rc) cifs_dbg(VFS, "Send error in Flush = %d\n", rc); return rc; } int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb) { int rc = 0; RENAME_REQ *pSMB = NULL; RENAME_RSP *pSMBr = NULL; int 
bytes_returned; int name_len, name_len2; __u16 count; int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, "In CIFSSMBRename\n"); renameRetry: rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->BufferFormat = 0x04; pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName, from_name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->OldFileName[name_len] = 0x04; /* pad */ /* protocol requires ASCII signature byte on Unicode string */ pSMB->OldFileName[name_len + 1] = 0x00; name_len2 = cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], to_name, PATH_MAX, cifs_sb->local_nls, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { name_len = copy_path_name(pSMB->OldFileName, from_name); name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ name_len2++; /* signature byte */ } count = 1 /* 1st signature byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_renames); if (rc) cifs_dbg(FYI, "Send error in rename = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto renameRetry; return rc; } int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon, int netfid, const char *target_name, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; struct set_file_rename *rename_info; char *data_offset; char dummy_string[30]; int rc = 0; int bytes_returned = 0; int len_of_str; __u16 params, param_offset, offset, count, byte_count; cifs_dbg(FYI, "Rename to File by handle\n"); rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ data_offset = (char *)(pSMB) + offset + 4; rename_info = (struct set_file_rename *) data_offset; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */ pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); /* construct temporary name "cifs<mid>" if caller passed no target name */ rename_info->overwrite = cpu_to_le32(1); rename_info->root_fid = 0; /* unicode only call */ if (target_name == NULL) { sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid); len_of_str = cifsConvertToUTF16((__le16 *)rename_info->target_name, dummy_string, 24, nls_codepage, remap); } else { len_of_str = cifsConvertToUTF16((__le16 *)rename_info->target_name, target_name, PATH_MAX, nls_codepage, remap); } rename_info->target_name_len = cpu_to_le32(2 * len_of_str); count = sizeof(struct
set_file_rename) + (2 * len_of_str); byte_count += count; pSMB->DataCount = cpu_to_le16(count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->Fid = netfid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames); if (rc) cifs_dbg(FYI, "Send error in Rename (by file handle) = %d\n", rc); cifs_buf_release(pSMB); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const __u16 target_tid, const char *toName, const int flags, const struct nls_table *nls_codepage, int remap) { int rc = 0; COPY_REQ *pSMB = NULL; COPY_RSP *pSMBr = NULL; int bytes_returned; int name_len, name_len2; __u16 count; cifs_dbg(FYI, "In CIFSSMBCopy\n"); copyRetry: rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->BufferFormat = 0x04; pSMB->Tid2 = target_tid; pSMB->Flags = cpu_to_le16(flags & COPY_TREE); if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->OldFileName[name_len] = 0x04; /* pad */ /* protocol requires ASCII signature byte on Unicode string */ pSMB->OldFileName[name_len + 1] = 0x00; name_len2 = cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], toName, PATH_MAX, nls_codepage, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { name_len = copy_path_name(pSMB->OldFileName, fromName); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName); name_len2++; /* signature byte */ } count = 1 /* 1st signature byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in copy = %d with %d files copied\n", rc, le16_to_cpu(pSMBr->CopyCount)); } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto copyRetry; return rc; } int CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; char *data_offset; int name_len; int name_len_target; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cifs_dbg(FYI, "In Symlink Unix style\n"); createSymLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName, /* find define for this maxpathcomponent */ PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, fromName); } params = 6 + name_len; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* SMB 
offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ data_offset = (char *)pSMB + offset + 4; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = cifsConvertToUTF16((__le16 *) data_offset, toName, /* find define for this maxpathcomponent */ PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; } else { name_len_target = copy_path_name(data_offset, toName); } pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max on data count below from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + name_len_target; pSMB->DataCount = cpu_to_le16(name_len_target); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks); if (rc) cifs_dbg(FYI, "Send error in SetPathInfo create symlink = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto createSymLinkRetry; return rc; } int CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; char *data_offset; int name_len; int name_len_target; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cifs_dbg(FYI, "In Create Hard link Unix style\n"); createHardLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, toName); } params = 6 + name_len; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ data_offset = (char *)pSMB + offset + 4; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = cifsConvertToUTF16((__le16 *) data_offset, fromName, PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; } else { name_len_target = copy_path_name(data_offset, fromName); } pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max on data count below from sess*/ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + name_len_target; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->DataCount = cpu_to_le16(name_len_target); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK); 
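/* Note the direction relative to the symlink call above: FileName carries the new link name (toName) while the data area carries the existing file it points at (fromName). */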
pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks); if (rc) cifs_dbg(FYI, "Send error in SetPathInfo (hard link) = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto createHardLinkRetry; return rc; } int CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb) { int rc = 0; NT_RENAME_REQ *pSMB = NULL; RENAME_RSP *pSMBr = NULL; int bytes_returned; int name_len, name_len2; __u16 count; int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, "In CIFSCreateHardLink\n"); winCreateHardLinkRetry: rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK); pSMB->ClusterCount = 0; pSMB->BufferFormat = 0x04; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName, from_name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; /* protocol specifies ASCII buffer format (0x04) for unicode */ pSMB->OldFileName[name_len] = 0x04; pSMB->OldFileName[name_len + 1] = 0x00; /* pad */ name_len2 = cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], to_name, PATH_MAX, cifs_sb->local_nls, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ } else { name_len = copy_path_name(pSMB->OldFileName, from_name); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name); name_len2++; /* signature byte */ } count = 1 /* string type byte */ + name_len + name_len2; inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks); if (rc) cifs_dbg(FYI, "Send error in hard link (NT rename) = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto winCreateHardLinkRetry; return rc; } int CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **symlinkinfo, const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_FILE_UNIX_LINK */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; char *data_start; cifs_dbg(FYI, "In QPathSymLinkInfo (Unix) for path %s\n", searchName); querySymLinkRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; 
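/* This is a pure query: only the parameter block (level, reserved bytes, and the path) is sent, so the data count and offset stay zero. */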
pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QuerySymLinkInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) rc = -EIO; else { bool is_unicode; u16 count = le16_to_cpu(pSMBr->t2.DataCount); data_start = ((char *) &pSMBr->hdr.Protocol) + le16_to_cpu(pSMBr->t2.DataOffset); if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* BB FIXME investigate remapping reserved chars here */ *symlinkinfo = cifs_strndup_from_utf16(data_start, count, is_unicode, nls_codepage); if (!*symlinkinfo) rc = -ENOMEM; } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto querySymLinkRetry; return rc; } /* * Recent Windows versions now create symlinks more frequently * and they use the "reparse point" mechanism below. We can of course * do symlinks nicely to Samba and other servers which support the * CIFS Unix Extensions and we can also do SFU symlinks and "client only" * "MF" symlinks optionally, but for recent Windows we really need to * reenable the code below and fix the cifs_symlink callers to handle this. * In the interim this code has been moved to its own config option so * it is not compiled in by default until callers fixed up and more tested. 
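* Roughly: open the file, issue FSCTL_GET_REPARSE_POINT through an
* NT_TRANSACT_IOCTL (as CIFSSMBQuerySymLink below does), then decode the
* reparse_symlink_data or reparse_posix_data buffer that comes back.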
*/ int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, char **symlinkinfo, const struct nls_table *nls_codepage) { int rc = 0; int bytes_returned; struct smb_com_transaction_ioctl_req *pSMB; struct smb_com_transaction_ioctl_rsp *pSMBr; bool is_unicode; unsigned int sub_len; char *sub_start; struct reparse_symlink_data *reparse_buf; struct reparse_posix_data *posix_buf; __u32 data_offset, data_count; char *end_of_smb; cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->TotalParameterCount = 0 ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le32(2); /* BB find exact data count max from sess structure BB */ pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00); pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 4; pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT); pSMB->IsFsctl = 1; /* FSCTL */ pSMB->IsRootFlag = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->ByteCount = 0; rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc); goto qreparse_out; } data_offset = le32_to_cpu(pSMBr->DataOffset); data_count = le32_to_cpu(pSMBr->DataCount); if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) { /* BB also check enough total bytes returned */ rc = -EIO; /* bad smb */ goto qreparse_out; } if (!data_count || (data_count > 2048)) { rc = -EIO; cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n"); goto qreparse_out; } end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount; reparse_buf = (struct reparse_symlink_data *) ((char *)&pSMBr->hdr.Protocol + data_offset); if ((char *)reparse_buf >= end_of_smb) { rc = -EIO; goto qreparse_out; } if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) { cifs_dbg(FYI, "NFS style reparse tag\n"); posix_buf = (struct reparse_posix_data *)reparse_buf; if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) { cifs_dbg(FYI, "unsupported file type 0x%llx\n", le64_to_cpu(posix_buf->InodeType)); rc = -EOPNOTSUPP; goto qreparse_out; } is_unicode = true; sub_len = le16_to_cpu(reparse_buf->ReparseDataLength); if (posix_buf->PathBuffer + sub_len > end_of_smb) { cifs_dbg(FYI, "reparse buf beyond SMB\n"); rc = -EIO; goto qreparse_out; } *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer, sub_len, is_unicode, nls_codepage); goto qreparse_out; } else if (reparse_buf->ReparseTag != cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) { rc = -EOPNOTSUPP; goto qreparse_out; } /* Reparse tag is NTFS symlink */ sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) + reparse_buf->PathBuffer; sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength); if (sub_start + sub_len > end_of_smb) { cifs_dbg(FYI, "reparse buf beyond SMB\n"); rc = -EIO; goto qreparse_out; } if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* BB FIXME investigate remapping reserved chars here */ *symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode, nls_codepage); if (!*symlinkinfo) rc = -ENOMEM; qreparse_out: cifs_buf_release(pSMB); /* * Note: On -EAGAIN error only caller can retry on handle based calls * since file handle passed in no 
longer valid. */ return rc; } int CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid) { int rc = 0; int bytes_returned; struct smb_com_transaction_compr_ioctl_req *pSMB; struct smb_com_transaction_ioctl_rsp *pSMBr; cifs_dbg(FYI, "Set compression for %u\n", fid); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); pSMB->TotalParameterCount = 0; pSMB->TotalDataCount = cpu_to_le32(2); pSMB->MaxParameterCount = 0; pSMB->MaxDataCount = 0; pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = cpu_to_le32(2); pSMB->DataOffset = cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req, compression_state) - 4); /* 84 */ pSMB->SetupCount = 4; pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL); pSMB->ParameterCount = 0; pSMB->FunctionCode = cpu_to_le32(FSCTL_SET_COMPRESSION); pSMB->IsFsctl = 1; /* FSCTL */ pSMB->IsRootFlag = 0; pSMB->Fid = fid; /* file handle always le */ /* 3 byte pad, followed by 2 byte compress state */ pSMB->ByteCount = cpu_to_le16(5); inc_rfc1001_len(pSMB, 5); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc); cifs_buf_release(pSMB); /* * Note: On -EAGAIN error only caller can retry on handle based calls * since file handle passed in no longer valid. */ return rc; } #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_FS_POSIX_ACL /** * cifs_init_posix_acl - convert ACL from cifs to POSIX ACL format * @ace: POSIX ACL entry to store converted ACL into * @cifs_ace: ACL in cifs format * * Convert an Access Control Entry from wire format to local POSIX xattr * format. * * Note that the @cifs_uid member is used to store both {g,u}id_t. */ static void cifs_init_posix_acl(struct posix_acl_entry *ace, struct cifs_posix_ace *cifs_ace) { /* u8 cifs fields do not need le conversion */ ace->e_perm = cifs_ace->cifs_e_perm; ace->e_tag = cifs_ace->cifs_e_tag; switch (ace->e_tag) { case ACL_USER: ace->e_uid = make_kuid(&init_user_ns, le64_to_cpu(cifs_ace->cifs_uid)); break; case ACL_GROUP: ace->e_gid = make_kgid(&init_user_ns, le64_to_cpu(cifs_ace->cifs_uid)); break; } return; } /** * cifs_to_posix_acl - copy cifs ACL format to POSIX ACL format * @acl: ACLs returned in POSIX ACL format * @src: ACLs in cifs format * @acl_type: type of POSIX ACL requested * @size_of_data_area: size of SMB we got * * This function converts ACLs from cifs format to POSIX ACL format. * If @acl is NULL then the size of the buffer required to store POSIX ACLs in * their uapi format is returned. */ static int cifs_to_posix_acl(struct posix_acl **acl, char *src, const int acl_type, const int size_of_data_area) { int size = 0; __u16 count; struct cifs_posix_ace *pACE; struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src; struct posix_acl *kacl = NULL; struct posix_acl_entry *pa, *pe; if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION) return -EOPNOTSUPP; if (acl_type == ACL_TYPE_ACCESS) { count = le16_to_cpu(cifs_acl->access_entry_count); pACE = &cifs_acl->ace_array[0]; size = sizeof(struct cifs_posix_acl); size += sizeof(struct cifs_posix_ace) * count; /* check if we would go beyond end of SMB */ if (size_of_data_area < size) { cifs_dbg(FYI, "bad CIFS POSIX ACL size %d vs. 
%d\n", size_of_data_area, size); return -EINVAL; } } else if (acl_type == ACL_TYPE_DEFAULT) { count = le16_to_cpu(cifs_acl->access_entry_count); size = sizeof(struct cifs_posix_acl); size += sizeof(struct cifs_posix_ace) * count; /* skip past access ACEs to get to default ACEs */ pACE = &cifs_acl->ace_array[count]; count = le16_to_cpu(cifs_acl->default_entry_count); size += sizeof(struct cifs_posix_ace) * count; /* check if we would go beyond end of SMB */ if (size_of_data_area < size) return -EINVAL; } else { /* illegal type */ return -EINVAL; } /* Allocate number of POSIX ACLs to store in VFS format. */ kacl = posix_acl_alloc(count, GFP_NOFS); if (!kacl) return -ENOMEM; FOREACH_ACL_ENTRY(pa, kacl, pe) { cifs_init_posix_acl(pa, pACE); pACE++; } *acl = kacl; return 0; } /** * cifs_init_ace - convert ACL entry from POSIX ACL to cifs format * @cifs_ace: the cifs ACL entry to store into * @local_ace: the POSIX ACL entry to convert */ static void cifs_init_ace(struct cifs_posix_ace *cifs_ace, const struct posix_acl_entry *local_ace) { cifs_ace->cifs_e_perm = local_ace->e_perm; cifs_ace->cifs_e_tag = local_ace->e_tag; switch (local_ace->e_tag) { case ACL_USER: cifs_ace->cifs_uid = cpu_to_le64(from_kuid(&init_user_ns, local_ace->e_uid)); break; case ACL_GROUP: cifs_ace->cifs_uid = cpu_to_le64(from_kgid(&init_user_ns, local_ace->e_gid)); break; default: cifs_ace->cifs_uid = cpu_to_le64(-1); } } /** * posix_acl_to_cifs - convert ACLs from POSIX ACL to cifs format * @parm_data: ACLs in cifs format to conver to * @acl: ACLs in POSIX ACL format to convert from * @acl_type: the type of POSIX ACLs stored in @acl * * Return: the number cifs ACL entries after conversion */ static __u16 posix_acl_to_cifs(char *parm_data, const struct posix_acl *acl, const int acl_type) { __u16 rc = 0; struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data; const struct posix_acl_entry *pa, *pe; int count; int i = 0; if ((acl == NULL) || (cifs_acl == NULL)) return 0; count = acl->a_count; cifs_dbg(FYI, "setting acl with %d entries\n", count); /* * Note that the uapi POSIX ACL version is verified by the VFS and is * independent of the cifs ACL version. Changing the POSIX ACL version * is a uapi change and if it's changed we will pass down the POSIX ACL * version in struct posix_acl from the VFS. For now there's really * only one that all filesystems know how to deal with. 
*/ cifs_acl->version = cpu_to_le16(1); if (acl_type == ACL_TYPE_ACCESS) { cifs_acl->access_entry_count = cpu_to_le16(count); cifs_acl->default_entry_count = cpu_to_le16(0xFFFF); } else if (acl_type == ACL_TYPE_DEFAULT) { cifs_acl->default_entry_count = cpu_to_le16(count); cifs_acl->access_entry_count = cpu_to_le16(0xFFFF); } else { cifs_dbg(FYI, "unknown ACL type %d\n", acl_type); return 0; } FOREACH_ACL_ENTRY(pa, acl, pe) { cifs_init_ace(&cifs_acl->ace_array[i++], pa); } if (rc == 0) { rc = (__u16)(count * sizeof(struct cifs_posix_ace)); rc += sizeof(struct cifs_posix_acl); /* BB add check to make sure ACL does not overflow SMB */ } return rc; } int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, struct posix_acl **acl, const int acl_type, const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_POSIX_ACL */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; cifs_dbg(FYI, "In GetPosixACL (Unix) for path %s\n", searchName); queryAclRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; pSMB->FileName[name_len] = 0; pSMB->FileName[name_len+1] = 0; } else { name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max data count below from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get); if (rc) { cifs_dbg(FYI, "Send error in Query POSIX ACL = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); rc = cifs_to_posix_acl(acl, (char *)&pSMBr->hdr.Protocol+data_offset, acl_type, count); } } cifs_buf_release(pSMB); /* * The else branch after SendReceive() doesn't return EAGAIN so if we * allocated @acl in cifs_to_posix_acl() we are guaranteed to return * here and don't leak POSIX ACLs. 
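 * Path-based requests such as this one are also safe to retry wholesale on * -EAGAIN, unlike the handle-based calls elsewhere in this file whose fid * is no longer valid after a reconnect.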
*/ if (rc == -EAGAIN) goto queryAclRetry; return rc; } int cifs_do_set_acl(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *fileName, const struct posix_acl *acl, const int acl_type, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; char *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, byte_count, data_count, param_offset, offset; cifs_dbg(FYI, "In SetPosixACL (Unix) for path %s\n", fileName); setAclRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB size from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; parm_data = ((char *)pSMB) + sizeof(pSMB->hdr.smb_buf_length) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); /* convert to on the wire format for POSIX ACL */ data_count = posix_acl_to_cifs(parm_data, acl, acl_type); if (data_count == 0) { rc = -EOPNOTSUPP; goto setACLerrorExit; } pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "Set POSIX ACL returned %d\n", rc); setACLerrorExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setAclRetry; return rc; } #else int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, struct posix_acl **acl, const int acl_type, const struct nls_table *nls_codepage, int remap) { return -EOPNOTSUPP; } int cifs_do_set_acl(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *fileName, const struct posix_acl *acl, const int acl_type, const struct nls_table *nls_codepage, int remap) { return -EOPNOTSUPP; } #endif /* CONFIG_FS_POSIX_ACL */ int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask) { int rc = 0; struct smb_t2_qfi_req *pSMB = NULL; struct smb_t2_qfi_rsp *pSMBr = NULL; int bytes_returned; __u16 params, byte_count; cifs_dbg(FYI, "In GetExtAttr\n"); if (tcon == NULL) return -ENODEV; GetExtAttrRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2 /* level */ + 2 /* fid */; pSMB->t2.TotalDataCount = 0; pSMB->t2.MaxParameterCount = cpu_to_le16(4); /* BB find exact max data count below from sess structure BB */ pSMB->t2.MaxDataCount = cpu_to_le16(4000); pSMB->t2.MaxSetupCount = 0; pSMB->t2.Reserved = 0; pSMB->t2.Flags = 0; 
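/* * The SMB_QUERY_ATTR_FLAGS reply is a fixed 16-byte file_chattr_info: a * 64-bit word of chattr-style attribute flags plus a 64-bit mask saying * which of those bits are valid, hence the count == 16 check in the decode * path below. */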
pSMB->t2.Timeout = 0; pSMB->t2.Reserved2 = 0; pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, Fid) - 4); pSMB->t2.DataCount = 0; pSMB->t2.DataOffset = 0; pSMB->t2.SetupCount = 1; pSMB->t2.Reserved3 = 0; pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->t2.TotalParameterCount = cpu_to_le16(params); pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS); pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "error %d in GetExtAttr\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) /* If rc should we check for EOPNOTSUPP and disable the srvino flag? or in caller? */ rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_chattr_info *pfinfo; if (count != 16) { cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n"); rc = -EIO; goto GetExtAttrOut; } pfinfo = (struct file_chattr_info *) (data_offset + (char *) &pSMBr->hdr.Protocol); *pExtAttrBits = le64_to_cpu(pfinfo->mode); *pMask = le64_to_cpu(pfinfo->mask); } } GetExtAttrOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto GetExtAttrRetry; return rc; } #endif /* CONFIG_CIFS_POSIX */ /* * Initialize NT TRANSACT SMB into small smb request buffer. This assumes that * all NT TRANSACTS that we init here have total parm and data under about 400 * bytes (to fit in small cifs buffer size), which is the case so far, so it
NB: Setup words themselves and ByteCount MaxSetupCount (size of * returned setup area) and MaxParameterCount (returned parms size) must be set * by caller */ static int smb_init_nttransact(const __u16 sub_command, const int setup_count, const int parm_len, struct cifs_tcon *tcon, void **ret_buf) { int rc; __u32 temp_offset; struct smb_com_ntransact_req *pSMB; rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon, (void **)&pSMB); if (rc) return rc; *ret_buf = (void *)pSMB; pSMB->Reserved = 0; pSMB->TotalParameterCount = cpu_to_le32(parm_len); pSMB->TotalDataCount = 0; pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->DataCount = pSMB->TotalDataCount; temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + (setup_count * 2) - 4 /* for rfc1001 length itself */; pSMB->ParameterOffset = cpu_to_le32(temp_offset); pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len); pSMB->SetupCount = setup_count; /* no need to le convert byte fields */ pSMB->SubCommand = cpu_to_le16(sub_command); return 0; } static int validate_ntransact(char *buf, char **ppparm, char **ppdata, __u32 *pparmlen, __u32 *pdatalen) { char *end_of_smb; __u32 data_count, data_offset, parm_count, parm_offset; struct smb_com_ntransact_rsp *pSMBr; u16 bcc; *pdatalen = 0; *pparmlen = 0; if (buf == NULL) return -EINVAL; pSMBr = (struct smb_com_ntransact_rsp *)buf; bcc = get_bcc(&pSMBr->hdr); end_of_smb = 2 /* sizeof byte count */ + bcc + (char *)&pSMBr->ByteCount; data_offset = le32_to_cpu(pSMBr->DataOffset); data_count = le32_to_cpu(pSMBr->DataCount); parm_offset = le32_to_cpu(pSMBr->ParameterOffset); parm_count = le32_to_cpu(pSMBr->ParameterCount); *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset; *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset; /* should we also check that parm and data areas do not overlap? */ if (*ppparm > end_of_smb) { cifs_dbg(FYI, "parms start after end of smb\n"); return -EINVAL; } else if (parm_count + *ppparm > end_of_smb) { cifs_dbg(FYI, "parm end after end of smb\n"); return -EINVAL; } else if (*ppdata > end_of_smb) { cifs_dbg(FYI, "data starts after end of smb\n"); return -EINVAL; } else if (data_count + *ppdata > end_of_smb) { cifs_dbg(FYI, "data %p + count %d (%p) past smb end %p start %p\n", *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr); return -EINVAL; } else if (parm_count + data_count > bcc) { cifs_dbg(FYI, "parm count and data count larger than SMB\n"); return -EINVAL; } *pdatalen = data_count; *pparmlen = parm_count; return 0; } /* Get Security Descriptor (by handle) from remote server for a file or dir */ int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd **acl_inf, __u32 *pbuflen) { int rc = 0; int buf_type = 0; QUERY_SEC_DESC_REQ *pSMB; struct kvec iov[1]; struct kvec rsp_iov; cifs_dbg(FYI, "GetCifsACL\n"); *pbuflen = 0; *acl_inf = NULL; rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0, 8 /* parm len */, tcon, (void **) &pSMB); if (rc) return rc; pSMB->MaxParameterCount = cpu_to_le32(4); /* BB TEST with big acls that might need to be e.g. 
larger than 16K */ pSMB->MaxSetupCount = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP | CIFS_ACL_DACL); pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */ inc_rfc1001_len(pSMB, 11); iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0, &rsp_iov); cifs_small_buf_release(pSMB); cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get); if (rc) { cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc); } else { /* decode response */ __le32 *parm; __u32 parm_len; __u32 acl_len; struct smb_com_ntransact_rsp *pSMBr; char *pdata; /* validate_nttransact */ rc = validate_ntransact(rsp_iov.iov_base, (char **)&parm, &pdata, &parm_len, pbuflen); if (rc) goto qsec_out; pSMBr = (struct smb_com_ntransact_rsp *)rsp_iov.iov_base; cifs_dbg(FYI, "smb %p parm %p data %p\n", pSMBr, parm, *acl_inf); if (le32_to_cpu(pSMBr->ParameterCount) != 4) { rc = -EIO; /* bad smb */ *pbuflen = 0; goto qsec_out; } /* BB check that data area is minimum length and as big as acl_len */ acl_len = le32_to_cpu(*parm); if (acl_len != *pbuflen) { cifs_dbg(VFS, "acl length %d does not match %d\n", acl_len, *pbuflen); if (*pbuflen > acl_len) *pbuflen = acl_len; } /* check if buffer is big enough for the acl header followed by the smallest SID */ if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) || (*pbuflen >= 64 * 1024)) { cifs_dbg(VFS, "bad acl length %d\n", *pbuflen); rc = -EINVAL; *pbuflen = 0; } else { *acl_inf = kmemdup(pdata, *pbuflen, GFP_KERNEL); if (*acl_inf == NULL) { *pbuflen = 0; rc = -ENOMEM; } } } qsec_out: free_rsp_buf(buf_type, rsp_iov.iov_base); return rc; } int CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd *pntsd, __u32 acllen, int aclflag) { __u16 byte_count, param_count, data_count, param_offset, data_offset; int rc = 0; int bytes_returned = 0; SET_SEC_DESC_REQ *pSMB = NULL; void *pSMBr; setCifsAclRetry: rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr); if (rc) return rc; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; param_count = 8; param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4; data_count = acllen; data_offset = param_offset + param_count; byte_count = 3 /* pad */ + param_count; pSMB->DataCount = cpu_to_le32(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->MaxParameterCount = cpu_to_le32(4); pSMB->MaxDataCount = cpu_to_le32(16384); pSMB->ParameterCount = cpu_to_le32(param_count); pSMB->ParameterOffset = cpu_to_le32(param_offset); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->DataOffset = cpu_to_le32(data_offset); pSMB->SetupCount = 0; pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC); pSMB->ByteCount = cpu_to_le16(byte_count+data_count); pSMB->Fid = fid; /* file handle always le */ pSMB->Reserved2 = 0; pSMB->AclFlags = cpu_to_le32(aclflag); if (pntsd && acllen) { memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) + data_offset, pntsd, acllen); inc_rfc1001_len(pSMB, byte_count + data_count); } else inc_rfc1001_len(pSMB, byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_dbg(FYI, "SetCIFSACL bytes_returned: %d, rc: %d\n", bytes_returned, rc); if (rc) cifs_dbg(FYI, "Set CIFS ACL returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setCifsAclRetry; return (rc); } /* Legacy Query Path Information call for lookup to old 
servers such as Win9x/WinME */ int SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon, const char *search_name, FILE_ALL_INFO *data, const struct nls_table *nls_codepage, int remap) { QUERY_INFORMATION_REQ *pSMB; QUERY_INFORMATION_RSP *pSMBr; int rc = 0; int bytes_returned; int name_len; cifs_dbg(FYI, "In SMBQPath path %s\n", search_name); QInfRetry: rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, search_name); } pSMB->BufferFormat = 0x04; name_len++; /* account for buffer type byte */ inc_rfc1001_len(pSMB, (__u16)name_len); pSMB->ByteCount = cpu_to_le16(name_len); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc); } else if (data) { struct timespec64 ts; __u32 time = le32_to_cpu(pSMBr->last_write_time); /* decode response */ /* BB FIXME - add time zone adjustment BB */ memset(data, 0, sizeof(FILE_ALL_INFO)); ts.tv_nsec = 0; ts.tv_sec = time; /* decode time fields */ data->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts)); data->LastWriteTime = data->ChangeTime; data->LastAccessTime = 0; data->AllocationSize = cpu_to_le64(le32_to_cpu(pSMBr->size)); data->EndOfFile = data->AllocationSize; data->Attributes = cpu_to_le32(le16_to_cpu(pSMBr->attr)); } else rc = -EIO; /* bad buffer passed in */ cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QInfRetry; return rc; } int CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon, u16 netfid, FILE_ALL_INFO *pFindData) { struct smb_t2_qfi_req *pSMB = NULL; struct smb_t2_qfi_rsp *pSMBr = NULL; int rc = 0; int bytes_returned; __u16 params, byte_count; QFileInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2 /* level */ + 2 /* fid */; pSMB->t2.TotalDataCount = 0; pSMB->t2.MaxParameterCount = cpu_to_le16(4); /* BB find exact max data count below from sess structure BB */ pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->t2.MaxSetupCount = 0; pSMB->t2.Reserved = 0; pSMB->t2.Flags = 0; pSMB->t2.Timeout = 0; pSMB->t2.Reserved2 = 0; pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, Fid) - 4); pSMB->t2.DataCount = 0; pSMB->t2.DataOffset = 0; pSMB->t2.SetupCount = 1; pSMB->t2.Reserved3 = 0; pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->t2.TotalParameterCount = cpu_to_le16(params); pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO); pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) /* BB add auto retry on EOPNOTSUPP? 
*/ rc = -EIO; else if (get_bcc(&pSMBr->hdr) < 40) rc = -EIO; /* bad smb */ else if (pFindData) { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); memcpy((char *) pFindData, (char *) &pSMBr->hdr.Protocol + data_offset, sizeof(FILE_ALL_INFO)); } else rc = -ENOMEM; } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFileInfoRetry; return rc; } int CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *search_name, FILE_ALL_INFO *data, int legacy /* old style infolevel */, const struct nls_table *nls_codepage, int remap) { /* level 263 SMB_QUERY_FILE_ALL_INFO */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; /* cifs_dbg(FYI, "In QPathInfo path %s\n", search_name); */ QPathInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, search_name); } params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; if (legacy) pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD); else pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) /* BB add auto retry on EOPNOTSUPP? */ rc = -EIO; else if (!legacy && get_bcc(&pSMBr->hdr) < 40) rc = -EIO; /* bad smb */ else if (legacy && get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* 24 or 26 expected but we do not read last field */ else if (data) { int size; __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); /* * On legacy responses we do not read the last field, * EAsize, fortunately since it varies by subdialect and * also note it differs on Set vs Get, ie two bytes or 4 * bytes depending but we don't care here. 
*/ if (legacy) size = sizeof(FILE_INFO_STANDARD); else size = sizeof(FILE_ALL_INFO); memcpy((char *) data, (char *) &pSMBr->hdr.Protocol + data_offset, size); } else rc = -ENOMEM; } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QPathInfoRetry; return rc; } int CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon, u16 netfid, FILE_UNIX_BASIC_INFO *pFindData) { struct smb_t2_qfi_req *pSMB = NULL; struct smb_t2_qfi_rsp *pSMBr = NULL; int rc = 0; int bytes_returned; __u16 params, byte_count; UnixQFileInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2 /* level */ + 2 /* fid */; pSMB->t2.TotalDataCount = 0; pSMB->t2.MaxParameterCount = cpu_to_le16(4); /* BB find exact max data count below from sess structure BB */ pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->t2.MaxSetupCount = 0; pSMB->t2.Reserved = 0; pSMB->t2.Flags = 0; pSMB->t2.Timeout = 0; pSMB->t2.Reserved2 = 0; pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, Fid) - 4); pSMB->t2.DataCount = 0; pSMB->t2.DataOffset = 0; pSMB->t2.SetupCount = 1; pSMB->t2.Reserved3 = 0; pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->t2.TotalParameterCount = cpu_to_le16(params); pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) { cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. 
Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n"); rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); memcpy((char *) pFindData, (char *) &pSMBr->hdr.Protocol + data_offset, sizeof(FILE_UNIX_BASIC_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto UnixQFileInfoRetry; return rc; } int CIFSSMBUnixQPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, FILE_UNIX_BASIC_INFO *pFindData, const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_FILE_UNIX_BASIC */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned = 0; int name_len; __u16 params, byte_count; cifs_dbg(FYI, "In QPathInfo (Unix) the path %s\n", searchName); UnixQPathInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) { cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. 
Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n"); rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); memcpy((char *) pFindData, (char *) &pSMBr->hdr.Protocol + data_offset, sizeof(FILE_UNIX_BASIC_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto UnixQPathInfoRetry; return rc; } /* xid, tcon, searchName and codepage are input parms, rest are returned */ int CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon, const char *searchName, struct cifs_sb_info *cifs_sb, __u16 *pnetfid, __u16 search_flags, struct cifs_search_info *psrch_inf, bool msearch) { /* level 257 SMB_ */ TRANSACTION2_FFIRST_REQ *pSMB = NULL; TRANSACTION2_FFIRST_RSP *pSMBr = NULL; T2_FFIRST_RSP_PARMS *parms; struct nls_table *nls_codepage; unsigned int lnoff; __u16 params, byte_count; int bytes_returned = 0; int name_len, remap; int rc = 0; cifs_dbg(FYI, "In FindFirst for %s\n", searchName); findFirstRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; nls_codepage = cifs_sb->local_nls; remap = cifs_remap(cifs_sb); if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); /* We cannot add the asterisk earlier in case it got remapped to 0xF03A as if it were part of the directory name instead of a wildcard */ name_len *= 2; if (msearch) { pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); pSMB->FileName[name_len+1] = 0; pSMB->FileName[name_len+2] = '*'; pSMB->FileName[name_len+3] = 0; name_len += 4; /* now the trailing null */ /* null terminate just in case */ pSMB->FileName[name_len] = 0; pSMB->FileName[name_len+1] = 0; name_len += 2; } } else { name_len = copy_path_name(pSMB->FileName, searchName); if (msearch) { if (WARN_ON_ONCE(name_len > PATH_MAX-2)) name_len = PATH_MAX-2; /* overwrite nul byte */ pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb); pSMB->FileName[name_len] = '*'; pSMB->FileName[name_len+1] = 0; name_len += 2; } } params = 12 + name_len /* includes null */ ; pSMB->TotalDataCount = 0; /* no EAs */ pSMB->MaxParameterCount = cpu_to_le16(10); pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */ pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST); pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO)); pSMB->SearchFlags = cpu_to_le16(search_flags); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); /* BB what should we set StorageType to? Does it matter? BB */ pSMB->SearchStorageType = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst); if (rc) { /* * BB: add logic to retry regular search if Unix search rejected * unexpectedly by server.
*/ /* BB: add code to handle unsupported level rc */ cifs_dbg(FYI, "Error in FindFirst = %d\n", rc); cifs_buf_release(pSMB); /* * BB: eventually could optimize out free and realloc of buf for * this case. */ if (rc == -EAGAIN) goto findFirstRetry; return rc; } /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) { cifs_buf_release(pSMB); return rc; } psrch_inf->unicode = !!(pSMBr->hdr.Flags2 & SMBFLG2_UNICODE); psrch_inf->ntwrk_buf_start = (char *)pSMBr; psrch_inf->smallBuf = false; psrch_inf->srch_entries_start = (char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); parms = (T2_FFIRST_RSP_PARMS *)((char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset)); psrch_inf->endOfSearch = !!parms->EndofSearch; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry = 2 /* skip . and .. */ + psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (CIFSMaxBufSize < lnoff) { cifs_dbg(VFS, "ignoring corrupt resume name\n"); psrch_inf->last_entry = NULL; } else { psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; if (pnetfid) *pnetfid = parms->SearchHandle; } return 0; } int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon, __u16 searchHandle, __u16 search_flags, struct cifs_search_info *psrch_inf) { TRANSACTION2_FNEXT_REQ *pSMB = NULL; TRANSACTION2_FNEXT_RSP *pSMBr = NULL; T2_FNEXT_RSP_PARMS *parms; unsigned int name_len; unsigned int lnoff; __u16 params, byte_count; char *response_data; int bytes_returned; int rc = 0; cifs_dbg(FYI, "In FindNext\n"); if (psrch_inf->endOfSearch) return -ENOENT; rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 14; /* includes 2 bytes of null string, converted to LE below*/ byte_count = 0; pSMB->TotalDataCount = 0; /* no EAs */ pSMB->MaxParameterCount = cpu_to_le16(8); pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT); pSMB->SearchHandle = searchHandle; /* always kept as le */ pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO)); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); pSMB->ResumeKey = psrch_inf->resume_key; pSMB->SearchFlags = cpu_to_le16(search_flags); name_len = psrch_inf->resume_name_len; params += name_len; if (name_len < PATH_MAX) { memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len); byte_count += name_len; /* 14 byte parm len above enough for 2 byte null terminator */ pSMB->ResumeFileName[name_len] = 0; pSMB->ResumeFileName[name_len+1] = 0; } else { cifs_buf_release(pSMB); return -EINVAL; } byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext); if (rc) { cifs_buf_release(pSMB); if (rc == -EBADF) { psrch_inf->endOfSearch = true; rc = 0; /* search probably was closed at end of search*/ } else { cifs_dbg(FYI, "FindNext returned = %d\n", rc); } return rc; } /* decode 
response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) { cifs_buf_release(pSMB); return rc; } /* BB fixme add lock for file (srch_info) struct here */ psrch_inf->unicode = !!(pSMBr->hdr.Flags2 & SMBFLG2_UNICODE); response_data = (char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset); parms = (T2_FNEXT_RSP_PARMS *)response_data; response_data = (char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); if (psrch_inf->smallBuf) cifs_small_buf_release(psrch_inf->ntwrk_buf_start); else cifs_buf_release(psrch_inf->ntwrk_buf_start); psrch_inf->srch_entries_start = response_data; psrch_inf->ntwrk_buf_start = (char *)pSMB; psrch_inf->smallBuf = false; psrch_inf->endOfSearch = !!parms->EndofSearch; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry += psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (CIFSMaxBufSize < lnoff) { cifs_dbg(VFS, "ignoring corrupt resume name\n"); psrch_inf->last_entry = NULL; } else { psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; } /* BB fixme add unlock here */ /* * BB: On error, should we leave previous search buf * (and count and last entry fields) intact or free the previous one? * * Note: On -EAGAIN error only caller can retry on handle based calls * since file handle passed in no longer valid. */ return 0; } int CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon, const __u16 searchHandle) { int rc = 0; FINDCLOSE_REQ *pSMB = NULL; cifs_dbg(FYI, "In CIFSSMBFindClose\n"); rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB); /* no sense returning error if session restarted as file handle has been closed */ if (rc == -EAGAIN) return 0; if (rc) return rc; pSMB->FileID = searchHandle; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); if (rc) cifs_dbg(VFS, "Send error in FindClose = %d\n", rc); cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose); /* Since session is dead, search handle closed on server already */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon, const char *search_name, __u64 *inode_number, const struct nls_table *nls_codepage, int remap) { int rc = 0; TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int name_len, bytes_returned; __u16 params, byte_count; cifs_dbg(FYI, "In GetSrvInodeNum for %s\n", search_name); if (tcon == NULL) return -ENODEV; GetInodeNumberRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, search_name); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max data count below from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = 
cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "error %d in QueryInternalInfo\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) /* If rc should we check for EOPNOTSUPP and disable the srvino flag? or in caller? */ rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_internal_info *pfinfo; /* BB Do we need a cast or hash here? */ if (count < 8) { cifs_dbg(FYI, "Invalid size ret in QryIntrnlInf\n"); rc = -EIO; goto GetInodeNumOut; } pfinfo = (struct file_internal_info *) (data_offset + (char *) &pSMBr->hdr.Protocol); *inode_number = le64_to_cpu(pfinfo->UniqueId); } } GetInodeNumOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto GetInodeNumberRetry; return rc; } int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, const char *search_name, struct dfs_info3_param **target_nodes, unsigned int *num_of_nodes, const struct nls_table *nls_codepage, int remap) { /* TRANS2_GET_DFS_REFERRAL */ TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL; TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; *num_of_nodes = 0; *target_nodes = NULL; cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name); if (ses == NULL || ses->tcon_ipc == NULL) return -ENODEV; getDFSRetry: /* * Use smb_init_no_reconnect() instead of smb_init() as * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon(), which * would otherwise cause infinite recursion.
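 * The trade-off is that a session or tcon in need of reconnect surfaces * here as an error returned to the caller rather than being reconnected * transparently.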
*/ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **)&pSMB, (void **)&pSMBr); if (rc) return rc; /* server pointer checked in called function, but should never be null here anyway */ pSMB->hdr.Mid = get_next_mid(ses->server); pSMB->hdr.Tid = ses->tcon_ipc->tid; pSMB->hdr.Uid = ses->Suid; if (ses->capabilities & CAP_STATUS32) pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS; if (ses->capabilities & CAP_DFS) pSMB->hdr.Flags2 |= SMBFLG2_DFS; if (ses->capabilities & CAP_UNICODE) { pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; name_len = cifsConvertToUTF16((__le16 *) pSMB->RequestFileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = copy_path_name(pSMB->RequestFileName, search_name); } if (ses->server->sign) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; params = 2 /* level */ + name_len /*includes null */ ; pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = 0; /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL); byte_count = params + 3 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->MaxReferralLevel = cpu_to_le16(3); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in GetDFSRefer = %d\n", rc); goto GetDFSRefExit; } rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB Also check if enough total bytes returned? 
*/ if (rc || get_bcc(&pSMBr->hdr) < 17) { rc = -EIO; /* bad smb */ goto GetDFSRefExit; } cifs_dbg(FYI, "Decoding GetDFSRefer response BCC: %d Offset %d\n", get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ rc = parse_dfs_referrals(&pSMBr->dfs_data, le16_to_cpu(pSMBr->t2.DataCount), num_of_nodes, target_nodes, nls_codepage, remap, search_name, (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) != 0); GetDFSRefExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto getDFSRetry; return rc; } /* Query File System Info such as free space to old servers such as Win 9x */ int SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ALLOC_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "OldQFSInfo\n"); oldQFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 18) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); cifs_dbg(FYI, "qfsinf resp BCC: %d Offset %d\n", get_bcc(&pSMBr->hdr), data_offset); response_data = (FILE_SYSTEM_ALLOC_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le16_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); /* * much prefer larger but if server doesn't report * a valid size then 4K is a reasonable minimum */ if (FSData->f_bsize < 512) FSData->f_bsize = 4096; FSData->f_blocks = le32_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le32_to_cpu(response_data->FreeAllocationUnits); cifs_dbg(FYI, "Blocks: %lld Free: %lld Block size %ld\n", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto oldQFSInfoRetry; return rc; } int CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSInfo\n"); QFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0;
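/* * Same TRANS2 QUERY_FS_INFORMATION setup as SMBOldQFSInfo() above, but at * level 0x103 (SMB_QUERY_FS_SIZE_INFO), whose reply carries 64-bit * allocation-unit counts rather than the 32-bit ones of SMB_INFO_ALLOCATION. */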
pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); /* * much prefer larger but if server doesn't report * a valid size then 4K is a reasonable minimum */ if (FSData->f_bsize < 512) FSData->f_bsize = 4096; FSData->f_blocks = le64_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le64_to_cpu(response_data->FreeAllocationUnits); cifs_dbg(FYI, "Blocks: %lld Free: %lld Block size %ld\n", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSInfoRetry; return rc; } int CIFSSMBQFSAttributeInfo(const unsigned int xid, struct cifs_tcon *tcon) { /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ATTRIBUTE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSAttributeInfo\n"); QFSAttributeRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(VFS, "Send error in QFSAttributeInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { /* BB also check if enough bytes returned */ rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data =
(FILE_SYSTEM_ATTRIBUTE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsAttrInfo, response_data, sizeof(FILE_SYSTEM_ATTRIBUTE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSAttributeRetry; return rc; } int CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon) { /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_DEVICE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSDeviceInfo\n"); QFSDeviceRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSDeviceInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_SYSTEM_DEVICE_INFO)) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_DEVICE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsDevInfo, response_data, sizeof(FILE_SYSTEM_DEVICE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSDeviceRetry; return rc; } int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon) { /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_UNIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSUnixInfo\n"); QFSUnixRetry: rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { 
cifs_dbg(VFS, "Send error in QFSUnixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_UNIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsUnixInfo, response_data, sizeof(FILE_SYSTEM_UNIX_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSUnixRetry; return rc; } int CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon, __u64 cap) { /* level 0x200 SMB_SET_CIFS_UNIX_INFO */ TRANSACTION2_SETFSI_REQ *pSMB = NULL; TRANSACTION2_SETFSI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cifs_dbg(FYI, "In SETFSUnixInfo\n"); SETFSUnixRetry: /* BB switch to small buf init to save memory */ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 4; /* 2 bytes zero followed by info level. */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum) - 4; offset = param_offset + params; pSMB->MaxParameterCount = cpu_to_le16(4); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION); byte_count = 1 /* pad */ + params + 12; pSMB->DataCount = cpu_to_le16(12); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); /* Params. */ pSMB->FileNum = 0; pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO); /* Data. 
*/ pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION); pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION); pSMB->ClientUnixCap = cpu_to_le64(cap); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(VFS, "Send error in SETFSUnixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) rc = -EIO; /* bad smb */ } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SETFSUnixRetry; return rc; } int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_POSIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSPosixInfo\n"); QFSPosixRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSPosixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_POSIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BlockSize); /* * much prefer larger but if server doesn't report * a valid size then 4K is a reasonable minimum */ if (FSData->f_bsize < 512) FSData->f_bsize = 4096; FSData->f_blocks = le64_to_cpu(response_data->TotalBlocks); FSData->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) { FSData->f_bavail = FSData->f_bfree; } else { FSData->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); } if (response_data->TotalFileNodes != cpu_to_le64(-1)) FSData->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) FSData->f_ffree = le64_to_cpu(response_data->FreeFileNodes); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSPosixRetry; return rc; } /* * We can not use write of zero bytes trick to set file size due to need for * large file support. Also note that this SetPathInfo is preferred to * SetFileInfo based method in next routine which is only needed to work around * a sharing violation bug in Samba which this routine can run into. 
*/ int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon, const char *file_name, __u64 size, struct cifs_sb_info *cifs_sb, bool set_allocation) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct file_end_of_file_info *parm_data; int name_len; int rc = 0; int bytes_returned = 0; int remap = cifs_remap(cifs_sb); __u16 params, byte_count, data_count, param_offset, offset; cifs_dbg(FYI, "In SetEOF\n"); SetEOFRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, file_name); } params = 6 + name_len; data_count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(4100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; if (set_allocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); parm_data->FileSize = cpu_to_le64(size); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (file size) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEOFRetry; return rc; } int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, __u64 size, bool set_allocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct file_end_of_file_info *parm_data; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "SetFileSize (via SetFileInfo) %lld\n", (long long)size); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)cfile->pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(cfile->pid >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; 
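/* single setup word selects TRANS2_SET_FILE_INFORMATION, the handle-based (fid) variant of set-info */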
pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ parm_data = (struct file_end_of_file_info *)(((char *)pSMB) + offset + 4); pSMB->DataOffset = cpu_to_le16(offset); parm_data->FileSize = cpu_to_le64(size); pSMB->Fid = cfile->fid.netfid; if (set_allocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); if (rc) { cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n", rc); } /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these old servers since the only other choice is to go from 100 nanosecond DCE time and resort to the original setpathinfo level which takes the ancient DOS time format with 2 second granularity */ int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "Set Times (via SetFileInfo)\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); 
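/* copy the caller's FILE_BASIC_INFO (timestamps and attribute flags) into the data area computed above */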
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); if (rc) cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon, bool delete_file, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "Set File Disposition (via SetFileInfo)\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ data_offset = (char *)(pSMB) + offset + 4; count = 1; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); *data_offset = delete_file ? 
1 : 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); if (rc) cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc); return rc; } static int CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, struct cifs_sb_info *cifs_sb) { int oplock = 0; struct cifs_open_parms oparms; struct cifs_fid fid; int rc; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = GENERIC_WRITE, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = fileName, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc) goto out; rc = CIFSSMBSetFileInfo(xid, tcon, data, fid.netfid, current->tgid); CIFSSMBClose(xid, tcon, fid.netfid); out: return rc; } int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, struct cifs_sb_info *cifs_sb) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; char *data_offset; __u16 params, param_offset, offset, byte_count, count; int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, "In SetTimes\n"); SetTimesRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (times) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetTimesRetry; if (rc == -EOPNOTSUPP) return CIFSSMBSetPathInfoFB(xid, tcon, fileName, data, nls_codepage, cifs_sb); return rc; } static void cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, const struct cifs_unix_set_info_args *args) { u64 uid = NO_CHANGE_64, gid = NO_CHANGE_64; u64 mode = args->mode; if (uid_valid(args->uid)) uid = from_kuid(&init_user_ns, args->uid); if (gid_valid(args->gid)) gid = from_kgid(&init_user_ns, args->gid); /* 
* Samba server ignores set of file size to zero due to bugs in some * older clients, but we should be precise - we use SetFileSize to * set file size and do not want to truncate file size to zero * accidentally as happened on one Samba server beta by putting * zero instead of -1 here */ data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); data_offset->LastStatusChange = cpu_to_le64(args->ctime); data_offset->LastAccessTime = cpu_to_le64(args->atime); data_offset->LastModificationTime = cpu_to_le64(args->mtime); data_offset->Uid = cpu_to_le64(uid); data_offset->Gid = cpu_to_le64(gid); /* better to leave device as zero when it is */ data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); data_offset->Permissions = cpu_to_le64(mode); if (S_ISREG(mode)) data_offset->Type = cpu_to_le32(UNIX_FILE); else if (S_ISDIR(mode)) data_offset->Type = cpu_to_le32(UNIX_DIR); else if (S_ISLNK(mode)) data_offset->Type = cpu_to_le32(UNIX_SYMLINK); else if (S_ISCHR(mode)) data_offset->Type = cpu_to_le32(UNIX_CHARDEV); else if (S_ISBLK(mode)) data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); else if (S_ISFIFO(mode)) data_offset->Type = cpu_to_le32(UNIX_FIFO); else if (S_ISSOCK(mode)) data_offset->Type = cpu_to_le32(UNIX_SOCKET); } int CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, const struct cifs_unix_set_info_args *args, u16 fid, u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "Set Unix Info (via SetFileInfo)\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); cifs_small_buf_release(pSMB); if (rc) cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *file_name, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int 
rc = 0; int bytes_returned = 0; FILE_UNIX_BASIC_INFO *data_offset; __u16 params, param_offset, offset, count, byte_count; cifs_dbg(FYI, "In SetUID/GID/Mode\n"); setPermsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, file_name); } params = 6 + name_len; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */ data_offset = (FILE_UNIX_BASIC_INFO *)((char *) pSMB + offset + 4); memset(data_offset, 0, count); pSMB->DataOffset = cpu_to_le16(offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->ParameterCount = cpu_to_le16(params); pSMB->DataCount = cpu_to_le16(count); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->TotalDataCount = pSMB->DataCount; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); cifs_fill_unix_set_info(data_offset, args); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (perms) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setPermsRetry; return rc; } #ifdef CONFIG_CIFS_XATTR /* * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common * function used by listxattr and getxattr type calls. When ea_name is set, * it looks for that attribute name and stuffs that value into the EAData * buffer. When ea_name is NULL, it stuffs a list of attribute names into the * buffer. In both cases, the return value is either the length of the * resulting data or a negative error code. If EAData is a NULL pointer then * the data isn't copied to it, but the length is returned. */ ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, const unsigned char *ea_name, char *EAData, size_t buf_size, struct cifs_sb_info *cifs_sb) { /* BB assumes one setup word */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int remap = cifs_remap(cifs_sb); struct nls_table *nls_codepage = cifs_sb->local_nls; int rc = 0; int bytes_returned; int list_len; struct fealist *ea_response_data; struct fea *temp_fea; char *temp_ptr; char *end_of_smb; __u16 params, byte_count, data_offset; unsigned int ea_name_len = ea_name ? 
strlen(ea_name) : 0; cifs_dbg(FYI, "In Query All EAs path %s\n", searchName); QAllEAsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { list_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); list_len++; /* trailing null */ list_len *= 2; } else { list_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QueryAllEAs = %d\n", rc); goto QAllEAsOut; } /* BB also check enough total bytes returned */ /* BB we need to improve the validity checking of these trans2 responses */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 4) { rc = -EIO; /* bad smb */ goto QAllEAsOut; } /* check that length of list is not more than bcc */ /* check that each entry does not go beyond length of list */ /* check that each element of each entry does not go beyond end of list */ /* validate_trans2_offsets() */ /* BB check if start of smb + data_offset > &bcc+ bcc */ data_offset = le16_to_cpu(pSMBr->t2.DataOffset); ea_response_data = (struct fealist *) (((char *) &pSMBr->hdr.Protocol) + data_offset); list_len = le32_to_cpu(ea_response_data->list_len); cifs_dbg(FYI, "ea length %d\n", list_len); if (list_len <= 8) { cifs_dbg(FYI, "empty EA list returned from server\n"); /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; goto QAllEAsOut; } /* make sure list_len doesn't go past end of SMB */ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr); if ((char *)ea_response_data + list_len > end_of_smb) { cifs_dbg(FYI, "EA list appears to go beyond SMB\n"); rc = -EIO; goto QAllEAsOut; } /* account for ea list len */ list_len -= 4; temp_fea = &ea_response_data->list; temp_ptr = (char *)temp_fea; while (list_len > 0) { unsigned int name_len; __u16 value_len; list_len -= 4; temp_ptr += 4; /* make sure we can read name_len and value_len */ if (list_len < 0) { cifs_dbg(FYI, "EA entry goes beyond length of list\n"); rc = -EIO; goto QAllEAsOut; } name_len = temp_fea->name_len; value_len = le16_to_cpu(temp_fea->value_len); list_len -= name_len + 1 + value_len; if (list_len < 0) { cifs_dbg(FYI, "EA entry goes beyond length of list\n"); rc = -EIO; goto QAllEAsOut; } if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, temp_ptr, name_len) == 0) { temp_ptr += name_len + 1; rc = value_len; if (buf_size == 0) goto QAllEAsOut; if ((size_t)value_len > buf_size) { rc = -ERANGE; goto QAllEAsOut; } memcpy(EAData, 
temp_ptr, value_len); goto QAllEAsOut; } } else { /* account for prefix user. and trailing null */ rc += (5 + 1 + name_len); if (rc < (int) buf_size) { memcpy(EAData, "user.", 5); EAData += 5; memcpy(EAData, temp_ptr, name_len); EAData += name_len; /* null terminate name */ *EAData = 0; ++EAData; } else if (buf_size == 0) { /* skip copy - calc size only */ } else { /* stop before overrun buffer */ rc = -ERANGE; break; } } temp_ptr += name_len + 1 + value_len; temp_fea = (struct fea *)temp_ptr; } /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; QAllEAsOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QAllEAsRetry; return (ssize_t)rc; } int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const char *ea_name, const void *ea_value, const __u16 ea_value_len, const struct nls_table *nls_codepage, struct cifs_sb_info *cifs_sb) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct fealist *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, byte_count, offset, count; int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, "In SetEA\n"); SetEARetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; /* done calculating parms using name_len of file name, now use name_len to calculate length of ea name we are going to create in the inode xattrs */ if (ea_name == NULL) name_len = 0; else name_len = strnlen(ea_name, 255); count = sizeof(*parm_data) + 1 + ea_value_len + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_EA); parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); parm_data->list_len = cpu_to_le32(count); parm_data->list.EA_flags = 0; /* we checked above that name len is less than 255 */ parm_data->list.name_len = (__u8)name_len; /* EA names are always ASCII */ if (ea_name) strncpy(parm_data->list.name, ea_name, name_len); parm_data->list.name[name_len] = '\0'; parm_data->list.value_len = cpu_to_le16(ea_value_len); /* caller ensures that ea_value_len is less than 64K but we need to ensure that it fits within the smb */ /*BB add length check to see if it would fit in negotiated SMB buffer size BB */ /* if (ea_value_len > buffer_size - 512 (enough for header)) */ if (ea_value_len) memcpy(parm_data->list.name + name_len + 1, ea_value, ea_value_len); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, 
(struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (EA) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEARetry; return rc; } #endif
linux-master
fs/smb/client/cifssmb.c
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French ([email protected]) * Pavel Shilovsky ([email protected]) 2012 * * Contains the routines for constructing the SMB2 PDUs themselves * */ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */ /* Note that there are handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/task_io_accounting_ops.h> #include <linux/uaccess.h> #include <linux/uuid.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "smb2status.h" #include "smb2glob.h" #include "cifspdu.h" #include "cifs_spnego.h" #include "smbdirect.h" #include "trace.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif #include "cached_dir.h" /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order. */ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ 36, /* SMB2_SESSION_SETUP */ 25, /* SMB2_LOGOFF */ 4, /* SMB2_TREE_CONNECT */ 9, /* SMB2_TREE_DISCONNECT */ 4, /* SMB2_CREATE */ 57, /* SMB2_CLOSE */ 24, /* SMB2_FLUSH */ 24, /* SMB2_READ */ 49, /* SMB2_WRITE */ 49, /* SMB2_LOCK */ 48, /* SMB2_IOCTL */ 57, /* SMB2_CANCEL */ 4, /* SMB2_ECHO */ 4, /* SMB2_QUERY_DIRECTORY */ 33, /* SMB2_CHANGE_NOTIFY */ 32, /* SMB2_QUERY_INFO */ 41, /* SMB2_SET_INFO */ 33, /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ }; int smb3_encryption_required(const struct cifs_tcon *tcon) { if (!tcon || !tcon->ses) return 0; if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) return 1; if (tcon->seal && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) return 1; return 0; } static void smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd, const struct cifs_tcon *tcon, struct TCP_Server_Info *server) { struct smb3_hdr_req *smb3_hdr; shdr->ProtocolId = SMB2_PROTO_NUMBER; shdr->StructureSize = cpu_to_le16(64); shdr->Command = smb2_cmd; if (server) { /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */ if (server->dialect >= SMB30_PROT_ID) { smb3_hdr = (struct smb3_hdr_req *)shdr; /* * if primary channel is not set yet, use default * channel for chan sequence num */ if (SERVER_IS_CHAN(server)) smb3_hdr->ChannelSequence = cpu_to_le16(server->primary_server->channel_sequence_num); else smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num); } spin_lock(&server->req_lock); /* Request up to 10 credits but don't go over the limit. 
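Credits throttle how many requests can be in flight at once; asking for a few spare ones lets us pipeline requests without hogging the connection.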
*/ if (server->credits >= server->max_credits) shdr->CreditRequest = cpu_to_le16(0); else shdr->CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, 10)); spin_unlock(&server->req_lock); } else { shdr->CreditRequest = cpu_to_le16(2); } shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid); if (!tcon) goto out; /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) shdr->CreditCharge = cpu_to_le16(1); /* else CreditCharge MBZ */ shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid); /* Uid is not converted */ if (tcon->ses) shdr->SessionId = cpu_to_le64(tcon->ses->Suid); /* * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have * to pass the path on the Open SMB prefixed by \\server\share. * Not sure when we would need to do the augmented path (if ever) and * setting this flag breaks the SMB2 open operation since it is * illegal to send an empty path name (without \\server\share prefix) * when the DFS flag is set in the SMB open header. We could * consider setting the flag on all operations other than open * but it is safer to not set it for now. */ /* if (tcon->share_flags & SHI1005_FLAGS_DFS) shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ if (server && server->sign && !smb3_encryption_required(tcon)) shdr->Flags |= SMB2_FLAGS_SIGNED; out: return; } static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server) { int rc = 0; struct nls_table *nls_codepage = NULL; struct cifs_ses *ses; /* * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so * check for tcp and smb session status done differently * for those three - in the calling routine. */ if (tcon == NULL) return 0; /* * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in * cifs_tree_connect(). */ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) return 0; spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { /* * only tree disconnect allowed when disconnecting ... */ if (smb2_command != SMB2_TREE_DISCONNECT) { spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb2_command); return -ENODEV; } } spin_unlock(&tcon->tc_lock); ses = tcon->ses; if (!ses) return -EIO; spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { spin_unlock(&ses->ses_lock); return -EIO; } spin_unlock(&ses->ses_lock); if (!ses->server || !server) return -EIO; spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { /* * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE * here since they are implicitly done when session drops. */ switch (smb2_command) { /* * BB Should we keep oplock break and add flush to exceptions? */ case SMB2_TREE_DISCONNECT: case SMB2_CANCEL: case SMB2_CLOSE: case SMB2_OPLOCK_BREAK: spin_unlock(&server->srv_lock); return -EAGAIN; } } spin_unlock(&server->srv_lock); again: rc = cifs_wait_for_server_reconnect(server, tcon->retry); if (rc) return rc; spin_lock(&ses->chan_lock); if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) { spin_unlock(&ses->chan_lock); return 0; } spin_unlock(&ses->chan_lock); cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d", tcon->ses->chans_need_reconnect, tcon->need_reconnect); mutex_lock(&ses->session_mutex); /* * Recheck after acquiring the mutex. 
If another thread is negotiating * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. */ spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { spin_unlock(&server->srv_lock); mutex_unlock(&ses->session_mutex); if (tcon->retry) goto again; rc = -EHOSTDOWN; goto out; } spin_unlock(&server->srv_lock); nls_codepage = ses->local_nls; /* * need to prevent multiple threads trying to simultaneously * reconnect the same SMB session */ spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); if (!cifs_chan_needs_reconnect(ses, server) && ses->ses_status == SES_GOOD) { spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); /* this means that we only need to tree connect */ if (tcon->need_reconnect) goto skip_sess_setup; mutex_unlock(&ses->session_mutex); goto out; } spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); rc = cifs_negotiate_protocol(0, ses, server); if (!rc) { rc = cifs_setup_session(0, ses, server, nls_codepage); if ((rc == -EACCES) && !tcon->retry) { mutex_unlock(&ses->session_mutex); rc = -EHOSTDOWN; goto failed; } else if (rc) { mutex_unlock(&ses->session_mutex); goto out; } } else { mutex_unlock(&ses->session_mutex); goto out; } skip_sess_setup: if (!tcon->need_reconnect) { mutex_unlock(&ses->session_mutex); goto out; } cifs_mark_open_files_invalid(tcon); if (tcon->use_persistent) tcon->need_reopen_files = true; rc = cifs_tree_connect(0, tcon, nls_codepage); mutex_unlock(&ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); if (rc) { /* If sess reconnected but tcon didn't, something strange ... */ cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc); goto out; } if (smb2_command != SMB2_INTERNAL_CMD) mod_delayed_work(cifsiod_wq, &server->reconnect, 0); atomic_inc(&tconInfoReconnectCount); out: /* * Check if handle based operation so we know whether we can continue * or not without returning to caller to reset file handle. */ /* * BB Is flush done by server on drop of tcp session? Should we special * case it and skip above? */ switch (smb2_command) { case SMB2_FLUSH: case SMB2_READ: case SMB2_WRITE: case SMB2_LOCK: case SMB2_QUERY_DIRECTORY: case SMB2_CHANGE_NOTIFY: case SMB2_QUERY_INFO: case SMB2_SET_INFO: rc = -EAGAIN; } failed: return rc; } static void fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server, void *buf, unsigned int *total_len) { struct smb2_pdu *spdu = buf; /* lookup word count ie StructureSize from table */ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; /* * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of * largest operations (Create) */ memset(buf, 0, 256); smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server); spdu->StructureSize2 = cpu_to_le16(parmsize); *total_len = parmsize + sizeof(struct smb2_hdr); } /* * Allocate and return pointer to an SMB request hdr, and set basic * SMB information in the SMB header. If the return code is zero, this * function must have filled in request_buf pointer. */ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server, void **request_buf, unsigned int *total_len) { /* BB eventually switch this to SMB2 specific small buf size */ if (smb2_command == SMB2_SET_INFO) *request_buf = cifs_buf_get(); else *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? 
*/ return -ENOMEM; } fill_small_buf(smb2_command, tcon, server, (struct smb2_hdr *)(*request_buf), total_len); if (tcon != NULL) { uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); cifs_stats_inc(&tcon->num_smbs_sent); } return 0; } static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server, void **request_buf, unsigned int *total_len) { int rc; rc = smb2_reconnect(smb2_command, tcon, server); if (rc) return rc; return __smb2_plain_req_init(smb2_command, tcon, server, request_buf, total_len); } static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon, struct TCP_Server_Info *server, void **request_buf, unsigned int *total_len) { /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) { return __smb2_plain_req_init(SMB2_IOCTL, tcon, server, request_buf, total_len); } return smb2_plain_req_init(SMB2_IOCTL, tcon, server, request_buf, total_len); } /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */ static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(38); pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; } static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(sizeof(struct smb2_compression_capabilities_context) - sizeof(struct smb2_neg_context)); pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3); pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77; pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF; pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1; } static unsigned int build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt) { unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities); unsigned short num_algs = 1; /* number of signing algorithms sent */ pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES; /* * Context Data length must be rounded to multiple of 8 for some servers */ pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) - sizeof(struct smb2_neg_context) + (num_algs * sizeof(u16)), 8)); pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs); pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC); ctxt_len += sizeof(__le16) * num_algs; ctxt_len = ALIGN(ctxt_len, 8); return ctxt_len; /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */ } static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; if (require_gcm_256) { pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */ pneg_ctxt->CipherCount = cpu_to_le16(1); pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM; } else if (enable_gcm_256) { pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */ pneg_ctxt->CipherCount = cpu_to_le16(3); pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM; pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM; } else { pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */ pneg_ctxt->CipherCount = cpu_to_le16(2); 
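/* default: offer AES-128-GCM first (preferred), then AES-128-CCM */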
pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; } } static unsigned int build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname) { struct nls_table *cp = load_nls_default(); pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID; /* copy up to max of first 100 bytes of server name to NetName field */ pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp)); /* context size is DataLength + minimal smb2_neg_context */ return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8); } static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE; pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN); /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ pneg_ctxt->Name[0] = 0x93; pneg_ctxt->Name[1] = 0xAD; pneg_ctxt->Name[2] = 0x25; pneg_ctxt->Name[3] = 0x50; pneg_ctxt->Name[4] = 0x9C; pneg_ctxt->Name[5] = 0xB4; pneg_ctxt->Name[6] = 0x11; pneg_ctxt->Name[7] = 0xE7; pneg_ctxt->Name[8] = 0xB4; pneg_ctxt->Name[9] = 0x23; pneg_ctxt->Name[10] = 0x83; pneg_ctxt->Name[11] = 0xDE; pneg_ctxt->Name[12] = 0x96; pneg_ctxt->Name[13] = 0x8B; pneg_ctxt->Name[14] = 0xCD; pneg_ctxt->Name[15] = 0x7C; } static void assemble_neg_contexts(struct smb2_negotiate_req *req, struct TCP_Server_Info *server, unsigned int *total_len) { unsigned int ctxt_len, neg_context_count; struct TCP_Server_Info *pserver; char *pneg_ctxt; char *hostname; if (*total_len > 200) { /* In case length corrupted don't want to overrun smb buffer */ cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n"); return; } /* * round up total_len of fixed part of SMB3 negotiate request to 8 * byte boundary before adding negotiate contexts */ *total_len = ALIGN(*total_len, 8); pneg_ctxt = (*total_len) + (char *)req; req->NegotiateContextOffset = cpu_to_le32(*total_len); build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8); *total_len += ctxt_len; pneg_ctxt += ctxt_len; build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8); *total_len += ctxt_len; pneg_ctxt += ctxt_len; /* * secondary channels don't have the hostname field populated * use the hostname field in the primary channel instead */ pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; cifs_server_lock(pserver); hostname = pserver->hostname; if (hostname && (hostname[0] != 0)) { ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, hostname); *total_len += ctxt_len; pneg_ctxt += ctxt_len; neg_context_count = 3; } else neg_context_count = 2; cifs_server_unlock(pserver); build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); *total_len += sizeof(struct smb2_posix_neg_context); pneg_ctxt += sizeof(struct smb2_posix_neg_context); neg_context_count++; if (server->compress_algorithm) { build_compression_ctxt((struct smb2_compression_capabilities_context *) pneg_ctxt); ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8); *total_len += ctxt_len; pneg_ctxt += ctxt_len; neg_context_count++; } if (enable_negotiate_signing) { ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *) pneg_ctxt); *total_len += ctxt_len; pneg_ctxt += ctxt_len; neg_context_count++; } /* check for and add transport_capabilities and signing capabilities */ req->NegotiateContextCount = cpu_to_le16(neg_context_count); } /* If invalid preauth context warn but use what we requested, SHA-512 */ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) { unsigned int len = le16_to_cpu(ctxt->DataLength); /* * Caller checked that DataLength remains within SMB boundary. We still * need to confirm that one HashAlgorithms member is accounted for. */ if (len < MIN_PREAUTH_CTXT_DATA_LEN) { pr_warn_once("server sent bad preauth context\n"); return; } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) { pr_warn_once("server sent invalid SaltLength\n"); return; } if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) pr_warn_once("Invalid SMB3 hash algorithm count\n"); if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) pr_warn_once("unknown SMB3 hash algorithm\n"); } static void decode_compress_ctx(struct TCP_Server_Info *server, struct smb2_compression_capabilities_context *ctxt) { unsigned int len = le16_to_cpu(ctxt->DataLength); /* * Caller checked that DataLength remains within SMB boundary. We still * need to confirm that one CompressionAlgorithms member is accounted * for. */ if (len < 10) { pr_warn_once("server sent bad compression cntxt\n"); return; } if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) { pr_warn_once("Invalid SMB3 compress algorithm count\n"); return; } if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) { pr_warn_once("unknown compression algorithm\n"); return; } server->compress_algorithm = ctxt->CompressionAlgorithms[0]; } static int decode_encrypt_ctx(struct TCP_Server_Info *server, struct smb2_encryption_neg_context *ctxt) { unsigned int len = le16_to_cpu(ctxt->DataLength); cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len); /* * Caller checked that DataLength remains within SMB boundary. We still * need to confirm that one Cipher flexible array member is accounted * for. */ if (len < MIN_ENCRYPT_CTXT_DATA_LEN) { pr_warn_once("server sent bad crypto ctxt len\n"); return -EINVAL; } if (le16_to_cpu(ctxt->CipherCount) != 1) { pr_warn_once("Invalid SMB3.11 cipher count\n"); return -EINVAL; } cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0])); if (require_gcm_256) { if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) { cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n"); return -EOPNOTSUPP; } } else if (ctxt->Ciphers[0] == 0) { /* * e.g. 
if server only supported AES256_CCM (very unlikely) * or server supported no encryption types or had all disabled. * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case * in which mount requested encryption ("seal") checks later * on during tree connection will return proper rc, but if * seal not requested by client, since server is allowed to * return 0 to indicate no supported cipher, we can't fail here */ server->cipher_type = 0; server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION; pr_warn_once("Server does not support requested encryption types\n"); return 0; } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) && (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) && (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) { /* server returned a cipher we didn't ask for */ pr_warn_once("Invalid SMB3.11 cipher returned\n"); return -EINVAL; } server->cipher_type = ctxt->Ciphers[0]; server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; return 0; } static void decode_signing_ctx(struct TCP_Server_Info *server, struct smb2_signing_capabilities *pctxt) { unsigned int len = le16_to_cpu(pctxt->DataLength); /* * Caller checked that DataLength remains within SMB boundary. We still * need to confirm that one SigningAlgorithms flexible array member is * accounted for. */ if ((len < 4) || (len > 16)) { pr_warn_once("server sent bad signing negcontext\n"); return; } if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) { pr_warn_once("Invalid signing algorithm count\n"); return; } if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) { pr_warn_once("unknown signing algorithm\n"); return; } server->signing_negotiated = true; server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]); cifs_dbg(FYI, "signing algorithm %d chosen\n", server->signing_algorithm); } static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp, struct TCP_Server_Info *server, unsigned int len_of_smb) { struct smb2_neg_context *pctx; unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset); unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount); unsigned int len_of_ctxts, i; int rc = 0; cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt); if (len_of_smb <= offset) { cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n"); return -EINVAL; } len_of_ctxts = len_of_smb - offset; for (i = 0; i < ctxt_cnt; i++) { int clen; /* check that offset is not beyond end of SMB */ if (len_of_ctxts < sizeof(struct smb2_neg_context)) break; pctx = (struct smb2_neg_context *)(offset + (char *)rsp); clen = sizeof(struct smb2_neg_context) + le16_to_cpu(pctx->DataLength); /* * 2.2.4 SMB2 NEGOTIATE Response * Subsequent negotiate contexts MUST appear at the first 8-byte * aligned offset following the previous negotiate context. 
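* Each context length except the last is therefore rounded up to a multiple of 8 below.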
*/ if (i + 1 != ctxt_cnt) clen = ALIGN(clen, 8); if (clen > len_of_ctxts) break; if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) decode_preauth_context( (struct smb2_preauth_neg_context *)pctx); else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) rc = decode_encrypt_ctx(server, (struct smb2_encryption_neg_context *)pctx); else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) decode_compress_ctx(server, (struct smb2_compression_capabilities_context *)pctx); else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) server->posix_ext_supported = true; else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) decode_signing_ctx(server, (struct smb2_signing_capabilities *)pctx); else cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n", le16_to_cpu(pctx->ContextType)); if (rc) break; offset += clen; len_of_ctxts -= clen; } return rc; } static struct create_posix * create_posix_buf(umode_t mode) { struct create_posix *buf; buf = kzalloc(sizeof(struct create_posix), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct create_posix, Mode)); buf->ccontext.DataLength = cpu_to_le32(4); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct create_posix, Name)); buf->ccontext.NameLength = cpu_to_le16(16); /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ buf->Name[0] = 0x93; buf->Name[1] = 0xAD; buf->Name[2] = 0x25; buf->Name[3] = 0x50; buf->Name[4] = 0x9C; buf->Name[5] = 0xB4; buf->Name[6] = 0x11; buf->Name[7] = 0xE7; buf->Name[8] = 0xB4; buf->Name[9] = 0x23; buf->Name[10] = 0x83; buf->Name[11] = 0xDE; buf->Name[12] = 0x96; buf->Name[13] = 0x8B; buf->Name[14] = 0xCD; buf->Name[15] = 0x7C; buf->Mode = cpu_to_le32(mode); cifs_dbg(FYI, "mode on posix create 0%o\n", mode); return buf; } static int add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode) { unsigned int num = *num_iovec; iov[num].iov_base = create_posix_buf(mode); if (mode == ACL_NO_MODE) cifs_dbg(FYI, "%s: no mode\n", __func__); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_posix); *num_iovec = num + 1; return 0; } /* * * SMB2 Worker functions follow: * * The general structure of the worker functions is: * 1) Call smb2_init (assembles SMB2 header) * 2) Initialize SMB2 command specific fields in fixed length area of SMB * 3) Call smb_sendrcv2 (sends request on socket and waits for response) * 4) Decode SMB2 command specific fields in the fixed length area * 5) Decode variable length data area (if any for this SMB2 command type) * 6) Call free smb buffer * 7) return * */ int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { struct smb_rqst rqst; struct smb2_negotiate_req *req; struct smb2_negotiate_rsp *rsp; struct kvec iov[1]; struct kvec rsp_iov; int rc; int resp_buftype; int blob_offset, blob_length; char *security_blob; int flags = CIFS_NEG_OP; unsigned int total_len; cifs_dbg(FYI, "Negotiate protocol\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server, (void **) &req, &total_len); if (rc) return rc; req->hdr.SessionId = 0; memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); if (strcmp(server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID); 
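/* three __le16 dialect codes were appended above, so the fixed-size request grows by 6 bytes */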
req->DialectCount = cpu_to_le16(3); total_len += 6; } else if (strcmp(server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); req->DialectCount = cpu_to_le16(4); total_len += 8; } else { /* otherwise send specific dialect */ req->Dialects[0] = cpu_to_le16(server->vals->protocol_id); req->DialectCount = cpu_to_le16(1); total_len += 2; } /* only one of SMB2 signing flags may be set in SMB2 request */ if (ses->sign) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else req->SecurityMode = 0; req->Capabilities = cpu_to_le32(server->vals->req_capabilities); if (ses->chan_max > 1) req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL); /* ClientGUID must be zero for SMB2.02 dialect */ if (server->vals->protocol_id == SMB20_PROT_ID) memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); else { memcpy(req->ClientGUID, server->client_guid, SMB2_CLIENT_GUID_SIZE); if ((server->vals->protocol_id == SMB311_PROT_ID) || (strcmp(server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) || (strcmp(server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0)) assemble_neg_contexts(req, server, &total_len); } iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ if (rc == -EOPNOTSUPP) { cifs_server_dbg(VFS, "Dialect not supported by server. 
Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	rc = -EIO;
	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops are set to SMB3.0 by default, so update them */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops are set to SMB3.0 by default, so update them */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
				server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	rc = 0;
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
				server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only one cipher and doesn't have an encryption
	 * negotiate context. Set the cipher type manually.
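	 * (SMB3.1.1 servers instead advertise the cipher through the
	 * encryption negotiate context handled in decode_encrypt_ctx()
	 * above, so nothing needs to be assumed there.)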
*/
	if (server->dialect == SMB30_PROT_ID &&
	    (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for the time being this is our only auth choice so it doesn't
	 * matter. We just found a server which sets blob length to zero
	 * expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	struct validate_negotiate_info_req *pneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
	u32 rsplen;
	u32 inbuflen; /* max of 4 dialects */
	struct TCP_Server_Info *server = tcon->ses->server;

	cifs_dbg(FYI, "validate negotiate\n");

	/* In SMB3.11 preauth integrity supersedes validate negotiate */
	if (server->dialect == SMB311_PROT_ID)
		return 0;

	/*
	 * The validation ioctl must be signed, so there is no point sending
	 * this if we cannot sign it (i.e. we are not a known user). Even if
	 * signing is not required (enabled but not negotiated), we
	 * selectively sign just this, the first and only signed request on
	 * a connection. Having validation of negotiate info helps reduce
	 * attack vectors.
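	 *
	 * e.g. an on-path attacker that stripped SMB3 dialects from the
	 * original negotiate exchange to force a downgrade is caught here:
	 * the signed FSCTL response echoes the server's view of the
	 * negotiated dialect, security mode and capabilities, and any
	 * mismatch below jumps to vneg_out and fails with -EIO.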
*/ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) return 0; /* validation requires signing */ if (tcon->ses->user_name == NULL) { cifs_dbg(FYI, "Can't validate negotiate: null user mount\n"); return 0; /* validation requires signing */ } if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS); if (!pneg_inbuf) return -ENOMEM; pneg_inbuf->Capabilities = cpu_to_le32(server->vals->req_capabilities); if (tcon->ses->chan_max > 1) pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL); memcpy(pneg_inbuf->Guid, server->client_guid, SMB2_CLIENT_GUID_SIZE); if (tcon->ses->sign) pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else pneg_inbuf->SecurityMode = 0; if (strcmp(server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID); pneg_inbuf->DialectCount = cpu_to_le16(3); /* SMB 2.1 not included so subtract one dialect from len */ inbuflen = sizeof(*pneg_inbuf) - (sizeof(pneg_inbuf->Dialects[0])); } else if (strcmp(server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); pneg_inbuf->DialectCount = cpu_to_le16(4); /* structure is big enough for 4 dialects */ inbuflen = sizeof(*pneg_inbuf); } else { /* otherwise specific dialect was requested */ pneg_inbuf->Dialects[0] = cpu_to_le16(server->vals->protocol_id); pneg_inbuf->DialectCount = cpu_to_le16(1); /* structure is big enough for 4 dialects, sending only 1 */ inbuflen = sizeof(*pneg_inbuf) - sizeof(pneg_inbuf->Dialects[0]) * 3; } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { /* * Old Windows versions or Netapp SMB server can return * not supported error. Client should accept it. 
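	 * (That is why -EOPNOTSUPP is converted to rc = 0 below rather than
	 * failing the tree connect.)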
*/ cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n"); rc = 0; goto out_free_inbuf; } else if (rc != 0) { cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); rc = -EIO; goto out_free_inbuf; } rc = -EIO; if (rsplen != sizeof(*pneg_rsp)) { cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n", rsplen); /* relax check since Mac returns max bufsize allowed on ioctl */ if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) goto out_free_rsp; } /* check validate negotiate info response matches what we got earlier */ if (pneg_rsp->Dialect != cpu_to_le16(server->dialect)) goto vneg_out; if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode)) goto vneg_out; /* do not validate server guid because not saved at negprot time yet */ if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND | SMB2_LARGE_FILES) != server->capabilities) goto vneg_out; /* validate negotiate successful */ rc = 0; cifs_dbg(FYI, "validate negotiate info successful\n"); goto out_free_rsp; vneg_out: cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n"); out_free_rsp: kfree(pneg_rsp); out_free_inbuf: kfree(pneg_inbuf); return rc; } enum securityEnum smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (requested) { case Kerberos: case RawNTLMSSP: return requested; case NTLMv2: return RawNTLMSSP; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; default: return Unspecified; } } struct SMB2_sess_data { unsigned int xid; struct cifs_ses *ses; struct TCP_Server_Info *server; struct nls_table *nls_cp; void (*func)(struct SMB2_sess_data *); int result; u64 previous_session; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[2]; }; static int SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; struct smb2_sess_setup_req *req; unsigned int total_len; bool is_binding = false; rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server, (void **) &req, &total_len); if (rc) return rc; spin_lock(&ses->ses_lock); is_binding = (ses->ses_status == SES_GOOD); spin_unlock(&ses->ses_lock); if (is_binding) { req->hdr.SessionId = cpu_to_le64(ses->Suid); req->hdr.Flags |= SMB2_FLAGS_SIGNED; req->PreviousSessionId = 0; req->Flags = SMB2_SESSION_REQ_FLAG_BINDING; cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid); } else { /* First session, not a reauthenticate */ req->hdr.SessionId = 0; /* * if reconnect, we need to send previous sess id * otherwise it is 0 */ req->PreviousSessionId = cpu_to_le64(sess_data->previous_session); req->Flags = 0; /* MBZ */ cifs_dbg(FYI, "Fresh session. 
Previous: %llx\n", sess_data->previous_session); } /* enough to enable echos and oplocks and one max size write */ if (server->credits >= server->max_credits) req->hdr.CreditRequest = cpu_to_le16(0); else req->hdr.CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, 130)); /* only one of SMB2 signing flags may be set in SMB2 request */ if (server->sign) req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; else req->SecurityMode = 0; #ifdef CONFIG_CIFS_DFS_UPCALL req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS); #else req->Capabilities = 0; #endif /* DFS_UPCALL */ req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; /* 1 for pad */ sess_data->iov[0].iov_len = total_len - 1; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. */ sess_data->buf0_type = CIFS_SMALL_BUFFER; return 0; } static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { struct kvec *iov = sess_data->iov; /* iov[1] is already freed by caller */ if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) memzero_explicit(iov[0].iov_base, iov[0].iov_len); free_rsp_buf(sess_data->buf0_type, iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } static int SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) { int rc; struct smb_rqst rqst; struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; struct kvec rsp_iov = { NULL, 0 }; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->SecurityBufferOffset = cpu_to_le16(sizeof(struct smb2_sess_setup_req)); req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = sess_data->iov; rqst.rq_nvec = 2; /* BB add code to build os and lm fields */ rc = cifs_send_recv(sess_data->xid, sess_data->ses, sess_data->server, &rqst, &sess_data->buf0_type, CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; } static int SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; cifs_server_lock(server); if (server->ops->generate_signingkey) { rc = server->ops->generate_signingkey(ses, server); if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); cifs_server_unlock(server); return rc; } } if (!server->session_estab) { server->sequence_number = 0x2; server->session_estab = true; } cifs_server_unlock(server); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); return rc; } #ifdef CONFIG_CIFS_UPCALL static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; struct cifs_spnego_msg *msg; struct key *spnego_key = NULL; struct smb2_sess_setup_rsp *rsp = NULL; bool is_binding = false; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; spnego_key = cifs_get_spnego_key(ses, server); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); if (rc == -ENOKEY) cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } msg = spnego_key->payload.data[0]; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != 
CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } spin_lock(&ses->ses_lock); is_binding = (ses->ses_status == SES_GOOD); spin_unlock(&ses->ses_lock); /* keep session key if binding */ if (!is_binding) { kfree_sensitive(ses->auth_key.response); ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; } sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; /* keep session id and flags if binding */ if (!is_binding) { ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->session_flags = le16_to_cpu(rsp->SessionFlags); } rc = SMB2_sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); if (rc) { kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; ses->auth_key.len = 0; } out: sess_data->result = rc; sess_data->func = NULL; SMB2_sess_free_buffer(sess_data); } #else static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); sess_data->result = -EOPNOTSUPP; sess_data->func = NULL; } #endif static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data); static void SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; struct smb2_sess_setup_rsp *rsp = NULL; unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; bool is_binding = false; /* * If memory allocation is successful, caller of this function * frees it. 
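	 * (On the error path below, out_err releases it with
	 * kfree_sensitive() and clears ses->ntlmssp, so callers only ever
	 * see a live pointer on success.)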
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out_err; } ses->ntlmssp->sesskey_per_smbsess = true; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out_err; rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob, &blob_length, ses, server, sess_data->nls_cp); if (rc) goto out; if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) rc = 0; if (rc) goto out; if (offsetof(struct smb2_sess_setup_rsp, Buffer) != le16_to_cpu(rsp->SecurityBufferOffset)) { cifs_dbg(VFS, "Invalid security buffer offset %d\n", le16_to_cpu(rsp->SecurityBufferOffset)); rc = -EIO; goto out; } rc = decode_ntlmssp_challenge(rsp->Buffer, le16_to_cpu(rsp->SecurityBufferLength), ses); if (rc) goto out; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); spin_lock(&ses->ses_lock); is_binding = (ses->ses_status == SES_GOOD); spin_unlock(&ses->ses_lock); /* keep existing ses id and flags if binding */ if (!is_binding) { ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->session_flags = le16_to_cpu(rsp->SessionFlags); } out: kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate; return; } out_err: kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; struct smb2_sess_setup_req *req; struct smb2_sess_setup_rsp *rsp = NULL; unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; bool is_binding = false; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; req->hdr.SessionId = cpu_to_le64(ses->Suid); rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, server, sess_data->nls_cp); if (rc) { cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); goto out; } if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; spin_lock(&ses->ses_lock); is_binding = (ses->ses_status == SES_GOOD); spin_unlock(&ses->ses_lock); /* keep existing ses id and flags if binding */ if (!is_binding) { ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->session_flags = le16_to_cpu(rsp->SessionFlags); } rc = SMB2_sess_establish_session(sess_data); #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS if (ses->server->dialect < SMB30_PROT_ID) { cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__); /* * The session id is opaque in terms of endianness, so we can't * print it as a long long. 
we dump it as we got it on the wire */ cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), &ses->Suid); cifs_dbg(VFS, "Session Key %*ph\n", SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); cifs_dbg(VFS, "Signing Key %*ph\n", SMB3_SIGN_KEY_SIZE, ses->auth_key.response); } #endif out: kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static int SMB2_select_sec(struct SMB2_sess_data *sess_data) { int type; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; type = smb2_select_sectype(server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); return -EINVAL; } switch (type) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; case RawNTLMSSP: sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", type); return -EOPNOTSUPP; } return 0; } int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server, const struct nls_table *nls_cp) { int rc = 0; struct SMB2_sess_data *sess_data; cifs_dbg(FYI, "Session Setup\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; sess_data->xid = xid; sess_data->ses = ses; sess_data->server = server; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; sess_data->previous_session = ses->Suid; rc = SMB2_select_sec(sess_data); if (rc) goto out; /* * Initialize the session hash with the server one. */ memcpy(ses->preauth_sha_hash, server->preauth_sha_hash, SMB2_PREAUTH_HASH_SIZE); while (sess_data->func) sess_data->func(sess_data); if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) cifs_server_dbg(VFS, "signing requested but authenticated as guest\n"); rc = sess_data->result; out: kfree_sensitive(sess_data); return rc; } int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) { struct smb_rqst rqst; struct smb2_logoff_req *req; /* response is also trivial struct */ int rc = 0; struct TCP_Server_Info *server; int flags = 0; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "disconnect session %p\n", ses); if (ses && (ses->server)) server = ses->server; else return -EIO; /* no need to send SMB logoff if uid already closed due to reconnect */ spin_lock(&ses->chan_lock); if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { spin_unlock(&ses->chan_lock); goto smb2_session_already_dead; } spin_unlock(&ses->chan_lock); rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, (void **) &req, &total_len); if (rc) return rc; /* since no tcon, smb2_init can not do this, so do here */ req->hdr.SessionId = cpu_to_le64(ses->Suid); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; else if (server->sign) req->hdr.Flags |= SMB2_FLAGS_SIGNED; flags |= CIFS_NO_RSP_BUF; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, ses->server, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ smb2_session_already_dead: return rc; } static inline void 
cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) { cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); } #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) /* These are similar values to what Windows uses */ static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) { tcon->max_chunks = 256; tcon->max_bytes_chunk = 1048576; tcon->max_bytes_copy = 16777216; } int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *cp) { struct smb_rqst rqst; struct smb2_tree_connect_req *req; struct smb2_tree_connect_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov = { NULL, 0 }; int rc = 0; int resp_buftype; int unc_path_len; __le16 *unc_path = NULL; int flags = 0; unsigned int total_len; struct TCP_Server_Info *server; /* always use master channel */ server = ses->server; cifs_dbg(FYI, "TCON\n"); if (!server || !tree) return -EIO; unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp); if (unc_path_len <= 0) { kfree(unc_path); return -EINVAL; } unc_path_len *= 2; /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ tcon->tid = 0; atomic_set(&tcon->num_remote_opens, 0); rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server, (void **) &req, &total_len); if (rc) { kfree(unc_path); return rc; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; iov[0].iov_base = (char *)req; /* 1 for pad */ iov[0].iov_len = total_len - 1; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)); req->PathLength = cpu_to_le16(unc_path_len); iov[1].iov_base = unc_path; iov[1].iov_len = unc_path_len; /* * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 * unless it is guest or anonymous user. 
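	 * (e.g. a Kerberos-authenticated, non-guest session on an otherwise
	 * unsigned and unencrypted 3.1.1 mount still gets SMB2_FLAGS_SIGNED
	 * set on this one request by the check below.)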
See MS-SMB2 3.2.5.3.1 * (Samba servers don't always set the flag so also check if null user) */ if ((server->dialect == SMB311_PROT_ID) && !smb3_encryption_required(tcon) && !(ses->session_flags & (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && ((ses->user_name != NULL) || (ses->sectype == Kerberos))) req->hdr.Flags |= SMB2_FLAGS_SIGNED; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 2; /* Need 64 for max size write so ask for more in case not there yet */ if (server->credits >= server->max_credits) req->hdr.CreditRequest = cpu_to_le16(0); else req->hdr.CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, 64)); rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); if ((rc != 0) || (rsp == NULL)) { cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); tcon->need_reconnect = true; goto tcon_error_exit; } switch (rsp->ShareType) { case SMB2_SHARE_TYPE_DISK: cifs_dbg(FYI, "connection to disk share\n"); break; case SMB2_SHARE_TYPE_PIPE: tcon->pipe = true; cifs_dbg(FYI, "connection to pipe share\n"); break; case SMB2_SHARE_TYPE_PRINT: tcon->print = true; cifs_dbg(FYI, "connection to printer\n"); break; default: cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType); rc = -EOPNOTSUPP; goto tcon_error_exit; } tcon->share_flags = le32_to_cpu(rsp->ShareFlags); tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n"); if (tcon->seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n"); init_copy_chunk_defaults(tcon); if (server->ops->validate_negotiate) rc = server->ops->validate_negotiate(xid, tcon); if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */ if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT) server->nosharesock = true; tcon_exit: free_rsp_buf(resp_buftype, rsp); kfree(unc_path); return rc; tcon_error_exit: if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); goto tcon_exit; } int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) { struct smb_rqst rqst; struct smb2_tree_disconnect_req *req; /* response is trivial */ int rc = 0; struct cifs_ses *ses = tcon->ses; int flags = 0; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "Tree Disconnect\n"); if (!ses || !(ses->server)) return -EIO; trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name); spin_lock(&ses->chan_lock); if ((tcon->need_reconnect) || (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) { spin_unlock(&ses->chan_lock); return 0; } spin_unlock(&ses->chan_lock); invalidate_all_cached_dirs(tcon); rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; flags |= CIFS_NO_RSP_BUF; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, ses->server, &rqst, 
&resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc); } trace_smb3_tdis_done(xid, tcon->tid, ses->Suid); return rc; } static struct create_durable * create_durable_buf(void) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'Q'; return buf; } static struct create_durable * create_reconnect_durable_buf(struct cifs_fid *fid) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Data.Fid.PersistentFileId = fid->persistent_fid; buf->Data.Fid.VolatileFileId = fid->volatile_fid; /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'C'; return buf; } static void parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf) { struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc; cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n", pdisk_id->DiskFileId, pdisk_id->VolumeId); buf->IndexNumber = pdisk_id->DiskFileId; } static void parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, struct create_posix_rsp *posix) { int sid_len; u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset); u8 *end = beg + le32_to_cpu(cc->DataLength); u8 *sid; memset(posix, 0, sizeof(*posix)); posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0)); posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4)); posix->mode = le32_to_cpu(*(__le32 *)(beg + 8)); sid = beg + 12; sid_len = posix_info_sid_size(sid, end); if (sid_len < 0) { cifs_dbg(VFS, "bad owner sid in posix create response\n"); return; } memcpy(&posix->owner, sid, sid_len); sid = sid + sid_len; sid_len = posix_info_sid_size(sid, end); if (sid_len < 0) { cifs_dbg(VFS, "bad group sid in posix create response\n"); return; } memcpy(&posix->group, sid, sid_len); cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n", posix->nlink, posix->mode, posix->reparse_tag); } void smb2_parse_contexts(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, unsigned int *epoch, char *lease_key, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix) { char *data_offset; struct create_context *cc; unsigned int next; unsigned int remaining; char *name; static const char smb3_create_tag_posix[] = { 0x93, 0xAD, 0x25, 0x50, 0x9C, 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83, 0xDE, 0x96, 0x8B, 0xCD, 0x7C }; *oplock = 0; data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset); remaining = le32_to_cpu(rsp->CreateContextsLength); cc = (struct create_context *)data_offset; /* Initialize inode number to 0 in case no valid data in qfid context */ if (buf) buf->IndexNumber = 0; while (remaining >= sizeof(struct create_context)) { name = le16_to_cpu(cc->NameOffset) + (char *)cc; if 
(le16_to_cpu(cc->NameLength) == 4 && strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0) *oplock = server->ops->parse_lease_buf(cc, epoch, lease_key); else if (buf && (le16_to_cpu(cc->NameLength) == 4) && strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0) parse_query_id_ctxt(cc, buf); else if ((le16_to_cpu(cc->NameLength) == 16)) { if (posix && memcmp(name, smb3_create_tag_posix, 16) == 0) parse_posix_ctxt(cc, buf, posix); } /* else { cifs_dbg(FYI, "Context not matched with len %d\n", le16_to_cpu(cc->NameLength)); cifs_dump_mem("Cctxt name: ", name, 4); } */ next = le32_to_cpu(cc->Next); if (!next) break; remaining -= next; cc = (struct create_context *)((char *)cc + next); } if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) *oplock = rsp->OplockLevel; return; } static int add_lease_context(struct TCP_Server_Info *server, struct smb2_create_req *req, struct kvec *iov, unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) { unsigned int num = *num_iovec; iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; *num_iovec = num + 1; return 0; } static struct create_durable_v2 * create_durable_v2_buf(struct cifs_open_parms *oparms) { struct cifs_fid *pfid = oparms->fid; struct create_durable_v2 *buf; buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* * NB: Handle timeout defaults to 0, which allows server to choose * (most servers default to 120 seconds) and most clients default to 0. * This can be overridden at mount ("handletimeout=") if the user wants * a different persistent (or resilient) handle timeout for all opens * on a particular SMB3 mount. 
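	 *
	 * Hypothetical usage sketch (assuming the option is taken in
	 * milliseconds, per current mount option semantics):
	 *
	 *	mount -t cifs //srv/share /mnt -o ...,handletimeout=60000
	 *
	 * would ask the server to keep handles reclaimable for one minute.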
*/ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); generate_random_uuid(buf->dcontext.CreateGuid); memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'Q'; return buf; } static struct create_durable_handle_reconnect_v2 * create_reconnect_durable_v2_buf(struct cifs_fid *fid) { struct create_durable_handle_reconnect_v2 *buf; buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, dcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = '2'; buf->Name[3] = 'C'; return buf; } static int add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { unsigned int num = *num_iovec; iov[num].iov_base = create_durable_v2_buf(oparms); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_v2); *num_iovec = num + 1; return 0; } static int add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { unsigned int num = *num_iovec; /* indicate that we don't need to relock the file */ oparms->reconnect = false; iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); *num_iovec = num + 1; return 0; } static int add_durable_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms, bool use_persistent) { unsigned int num = *num_iovec; if (use_persistent) { if (oparms->reconnect) return add_durable_reconnect_v2_context(iov, num_iovec, oparms); else return add_durable_v2_context(iov, num_iovec, oparms); } if (oparms->reconnect) { iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); /* indicate that we don't need to relock the file */ oparms->reconnect = false; } else iov[num].iov_base = create_durable_buf(); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable); *num_iovec = num + 1; return 0; } /* See MS-SMB2 2.2.13.2.7 */ static struct crt_twarp_ctxt * create_twarp_buf(__u64 timewarp) { struct crt_twarp_ctxt *buf; buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct crt_twarp_ctxt, Timestamp)); buf->ccontext.DataLength = cpu_to_le32(8); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct crt_twarp_ctxt, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */ buf->Name[0] = 'T'; buf->Name[1] = 'W'; buf->Name[2] = 'r'; buf->Name[3] = 'p'; buf->Timestamp = cpu_to_le64(timewarp); return buf; } /* See MS-SMB2 2.2.13.2.7 */ static int add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 
timewarp)
{
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_twarp_buf(timewarp);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
	*num_iovec = num + 1;
	return 0;
}

/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
static void setup_owner_group_sids(char *buf)
{
	struct owner_group_sids *sids = (struct owner_group_sids *)buf;

	/* Populate the user ownership fields S-1-5-88-1 */
	sids->owner.Revision = 1;
	sids->owner.NumAuth = 3;
	sids->owner.Authority[5] = 5;
	sids->owner.SubAuthorities[0] = cpu_to_le32(88);
	sids->owner.SubAuthorities[1] = cpu_to_le32(1);
	sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);

	/* Populate the group ownership fields S-1-5-88-2 */
	sids->group.Revision = 1;
	sids->group.NumAuth = 3;
	sids->group.Authority[5] = 5;
	sids->group.SubAuthorities[0] = cpu_to_le32(88);
	sids->group.SubAuthorities[1] = cpu_to_le32(2);
	sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);

	cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n",
		 current_fsuid().val, current_fsgid().val);
}

/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
static struct crt_sd_ctxt *
create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
{
	struct crt_sd_ctxt *buf;
	__u8 *ptr, *aclptr;
	unsigned int acelen, acl_size, ace_count;
	unsigned int owner_offset = 0;
	unsigned int group_offset = 0;
	struct smb3_acl acl = {};

	*len = round_up(sizeof(struct crt_sd_ctxt) +
			(sizeof(struct cifs_ace) * 4), 8);

	if (set_owner) {
		/* sizeof(struct owner_group_sids) is already a multiple of 8 so no need to round */
		*len += sizeof(struct owner_group_sids);
	}

	buf = kzalloc(*len, GFP_KERNEL);
	if (buf == NULL)
		return buf;

	ptr = (__u8 *)&buf[1];
	if (set_owner) {
		/* offset fields are from beginning of security descriptor, not of create context */
		owner_offset = ptr - (__u8 *)&buf->sd;
		buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
		group_offset = owner_offset + offsetof(struct owner_group_sids, group);
		buf->sd.OffsetGroup = cpu_to_le32(group_offset);
		setup_owner_group_sids(ptr);
		ptr += sizeof(struct owner_group_sids);
	} else {
		buf->sd.OffsetOwner = 0;
		buf->sd.OffsetGroup = 0;
	}

	buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	/* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
	buf->Name[0] = 'S';
	buf->Name[1] = 'e';
	buf->Name[2] = 'c';
	buf->Name[3] = 'D';
	buf->sd.Revision = 1;  /* Must be one, see MS-DTYP 2.4.6 */

	/*
	 * ACL is "self relative", i.e. the ACL is stored in a contiguous
	 * block of memory, and "DP", i.e. the DACL is present
	 */
	buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);

	/* offset owner, group and Sbz1 and SACL are all zero */
	buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
	/* Skip over the ACL header for now; we will copy it into buf later. */
	aclptr = ptr;
	ptr += sizeof(struct smb3_acl);

	/* create one ACE to hold the mode embedded in reserved special SID */
	acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
	ptr += acelen;
	acl_size = acelen + sizeof(struct smb3_acl);
	ace_count = 1;

	if (set_owner) {
		/* we do not need to reallocate buffer to add the two more ACEs.
plenty of space */ acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr); ptr += acelen; acl_size += acelen; ace_count += 1; } /* and one more ACE to allow access for authenticated users */ acelen = setup_authusers_ACE((struct cifs_ace *)ptr); ptr += acelen; acl_size += acelen; ace_count += 1; acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ acl.AclSize = cpu_to_le16(acl_size); acl.AceCount = cpu_to_le16(ace_count); /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ memcpy(aclptr, &acl, sizeof(struct smb3_acl)); buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); return buf; } static int add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner) { unsigned int num = *num_iovec; unsigned int len = 0; iov[num].iov_base = create_sd_buf(mode, set_owner, &len); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = len; *num_iovec = num + 1; return 0; } static struct crt_query_id_ctxt * create_query_id_buf(void) { struct crt_query_id_ctxt *buf; buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(0); buf->ccontext.DataLength = cpu_to_le32(0); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct crt_query_id_ctxt, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */ buf->Name[0] = 'Q'; buf->Name[1] = 'F'; buf->Name[2] = 'i'; buf->Name[3] = 'd'; return buf; } /* See MS-SMB2 2.2.13.2.9 */ static int add_query_id_context(struct kvec *iov, unsigned int *num_iovec) { unsigned int num = *num_iovec; iov[num].iov_base = create_query_id_buf(); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct crt_query_id_ctxt); *num_iovec = num + 1; return 0; } static int alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, const char *treename, const __le16 *path) { int treename_len, path_len; struct nls_table *cp; const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; /* * skip leading "\\" */ treename_len = strlen(treename); if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) return -EINVAL; treename += 2; treename_len -= 2; path_len = UniStrnlen((wchar_t *)path, PATH_MAX); /* make room for one path separator only if @path isn't empty */ *out_len = treename_len + (path[0] ? 1 : 0) + path_len; /* * final path needs to be 8-byte aligned as specified in * MS-SMB2 2.2.13 SMB2 CREATE Request. 
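	 *
	 * e.g. a combined tree-plus-path name of 11 UTF-16 code units takes
	 * 22 bytes, so round_up(22, 8) below reserves 24, and the pad bytes
	 * go out as part of the request.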
*/ *out_size = round_up(*out_len * sizeof(__le16), 8); *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); if (!*out_path) return -ENOMEM; cp = load_nls_default(); cifs_strtoUTF16(*out_path, treename, treename_len, cp); /* Do not append the separator if the path is empty */ if (path[0] != cpu_to_le16(0x0000)) { UniStrcat((wchar_t *)*out_path, (wchar_t *)sep); UniStrcat((wchar_t *)*out_path, (wchar_t *)path); } unload_nls(cp); return 0; } int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, umode_t mode, struct cifs_tcon *tcon, const char *full_path, struct cifs_sb_info *cifs_sb) { struct smb_rqst rqst; struct smb2_create_req *req; struct smb2_create_rsp *rsp = NULL; struct cifs_ses *ses = tcon->ses; struct kvec iov[3]; /* make sure at least one for each open context */ struct kvec rsp_iov = {NULL, 0}; int resp_buftype; int uni_path_len; __le16 *copy_path = NULL; int copy_size; int rc = 0; unsigned int n_iov = 2; __u32 file_attributes = 0; char *pc_buf = NULL; int flags = 0; unsigned int total_len; __le16 *utf16_path = NULL; struct TCP_Server_Info *server = cifs_pick_channel(ses); cifs_dbg(FYI, "mkdir\n"); /* resource #1: path allocation */ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); if (!utf16_path) return -ENOMEM; if (!ses || !server) { rc = -EIO; goto err_free_path; } /* resource #2: request */ rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, (void **) &req, &total_len); if (rc) goto err_free_path; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->ImpersonationLevel = IL_IMPERSONATION; req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES); /* File attributes ignored on open (used in create though) */ req->FileAttributes = cpu_to_le32(file_attributes); req->ShareAccess = FILE_SHARE_ALL_LE; req->CreateDisposition = cpu_to_le32(FILE_CREATE); req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE); iov[0].iov_base = (char *)req; /* -1 since last byte is buf[0] which is sent below (path) */ iov[0].iov_len = total_len - 1; req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); /* [MS-SMB2] 2.2.13 NameOffset: * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of * the SMB2 header, the file name includes a prefix that will * be processed during DFS name normalization as specified in * section 3.3.5.9. Otherwise, the file name is relative to * the share that is identified by the TreeId in the SMB2 * header. 
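	 *
	 * e.g. for a DFS operation against tree "\\srv\share", a relative
	 * path of "dir" goes on the wire as "srv\share\dir" -- the leading
	 * backslashes are dropped by alloc_path_with_tree_prefix() above.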
*/
	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
		int name_len;

		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
						 &name_len,
						 tcon->tree_name, utf16_path);
		if (rc)
			goto err_free_req;
		req->NameLength = cpu_to_le16(name_len * 2);
		uni_path_len = copy_size;
		/* free before overwriting resource */
		kfree(utf16_path);
		utf16_path = copy_path;
	} else {
		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
		/* MUST set path len (NameLength) to 0 when opening root of share */
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		if (uni_path_len % 8 != 0) {
			copy_size = roundup(uni_path_len, 8);
			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path) {
				rc = -ENOMEM;
				goto err_free_req;
			}
			memcpy((char *)copy_path, (const char *)utf16_path,
			       uni_path_len);
			uni_path_len = copy_size;
			/* free before overwriting resource */
			kfree(utf16_path);
			utf16_path = copy_path;
		}
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = utf16_path;
	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;

	if (tcon->posix_extensions) {
		/* resource #3: posix buf */
		rc = add_posix_context(iov, &n_iov, mode);
		if (rc)
			goto err_free_req;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) +
			iov[1].iov_len);
		pc_buf = iov[n_iov-1].iov_base;
	}

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = n_iov;

	/* no need to inc num_remote_opens because we close it just below */
	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path,
				     CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
	/* resource #4: response buffer */
	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
					   CREATE_NOT_FILE,
					   FILE_WRITE_ATTRIBUTES, rc);
		goto err_free_rsp_buf;
	}

	/*
	 * Although unlikely to be possible for rsp to be null and rc not set,
	 * adding check below is slightly safer long term (and quiets Coverity
	 * warning)
	 */
	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
	if (rsp == NULL) {
		rc = -EIO;
		kfree(pc_buf);
		goto err_free_req;
	}

	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);

	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);

	/* Eventually save off posix specific response info and timestamps */

err_free_rsp_buf:
	free_rsp_buf(resp_buftype, rsp);
	kfree(pc_buf);
err_free_req:
	cifs_small_buf_release(req);
err_free_path:
	kfree(utf16_path);
	return rc;
}

int SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
		   struct smb_rqst *rqst, __u8 *oplock,
		   struct cifs_open_parms *oparms, __le16 *path)
{
	struct smb2_create_req *req;
	unsigned int n_iov = 2;
	__u32 file_attributes = 0;
	int copy_size;
	int uni_path_len;
	unsigned int total_len;
	struct kvec *iov = rqst->rq_iov;
	__le16 *copy_path;
	int rc;

	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	iov[0].iov_base = (char *)req;
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len = total_len - 1;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;
	if (oparms->create_options & CREATE_OPTION_SPECIAL)
		file_attributes |= ATTR_SYSTEM;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;

	req->CreateDisposition =
cpu_to_le32(oparms->disposition); req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); /* [MS-SMB2] 2.2.13 NameOffset: * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of * the SMB2 header, the file name includes a prefix that will * be processed during DFS name normalization as specified in * section 3.3.5.9. Otherwise, the file name is relative to * the share that is identified by the TreeId in the SMB2 * header. */ if (tcon->share_flags & SHI1005_FLAGS_DFS) { int name_len; req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; rc = alloc_path_with_tree_prefix(&copy_path, &copy_size, &name_len, tcon->tree_name, path); if (rc) return rc; req->NameLength = cpu_to_le16(name_len * 2); uni_path_len = copy_size; path = copy_path; } else { uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; /* MUST set path len (NameLength) to 0 opening root of share */ req->NameLength = cpu_to_le16(uni_path_len - 2); copy_size = round_up(uni_path_len, 8); copy_path = kzalloc(copy_size, GFP_KERNEL); if (!copy_path) return -ENOMEM; memcpy((char *)copy_path, (const char *)path, uni_path_len); uni_path_len = copy_size; path = copy_path; } iov[1].iov_len = uni_path_len; iov[1].iov_base = path; if ((!server->oplocks) || (tcon->no_lease)) *oplock = SMB2_OPLOCK_LEVEL_NONE; if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && (oparms->create_options & CREATE_NOT_FILE)) req->RequestedOplockLevel = *oplock; /* no srv lease support */ else { rc = add_lease_context(server, req, iov, &n_iov, oparms->fid->lease_key, oplock); if (rc) return rc; } if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { rc = add_durable_context(iov, &n_iov, oparms, tcon->use_persistent); if (rc) return rc; } if (tcon->posix_extensions) { rc = add_posix_context(iov, &n_iov, oparms->mode); if (rc) return rc; } if (tcon->snapshot_time) { cifs_dbg(FYI, "adding snapshot context\n"); rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time); if (rc) return rc; } if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) { bool set_mode; bool set_owner; if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) && (oparms->mode != ACL_NO_MODE)) set_mode = true; else { set_mode = false; oparms->mode = ACL_NO_MODE; } if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) set_owner = true; else set_owner = false; if (set_owner | set_mode) { cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner); if (rc) return rc; } } add_query_id_context(iov, &n_iov); if (n_iov > 2) { /* * We have create contexts behind iov[1] (the file * name), point at them from the main create request */ req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) + iov[1].iov_len); req->CreateContextsLength = 0; for (unsigned int i = 2; i < (n_iov-1); i++) { struct kvec *v = &iov[i]; size_t len = v->iov_len; struct create_context *cctx = (struct create_context *)v->iov_base; cctx->Next = cpu_to_le32(len); le32_add_cpu(&req->CreateContextsLength, len); } le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len); } rqst->rq_nvec = n_iov; return 0; } /* rq_iov[0] is the request and is released by cifs_small_buf_release(). * All other vectors are freed by kfree(). 
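	 * The one exception is the shared static pad buffer (smb2_padding),
	 * which is compared against below and deliberately never freed.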
*/ void SMB2_open_free(struct smb_rqst *rqst) { int i; if (rqst && rqst->rq_iov) { cifs_small_buf_release(rqst->rq_iov[0].iov_base); for (i = 1; i < rqst->rq_nvec; i++) if (rqst->rq_iov[i].iov_base != smb2_padding) kfree(rqst->rq_iov[i].iov_base); } } int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix, struct kvec *err_iov, int *buftype) { struct smb_rqst rqst; struct smb2_create_rsp *rsp = NULL; struct cifs_tcon *tcon = oparms->tcon; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); struct kvec iov[SMB2_CREATE_IOV_SIZE]; struct kvec rsp_iov = {NULL, 0}; int resp_buftype = CIFS_NO_BUFFER; int rc = 0; int flags = 0; cifs_dbg(FYI, "create/open\n"); if (!ses || !server) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = SMB2_CREATE_IOV_SIZE; rc = SMB2_open_init(tcon, server, &rqst, oplock, oparms, path); if (rc) goto creat_exit; trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path, oparms->create_options, oparms->desired_access); rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); if (err_iov && rsp) { *err_iov = rsp_iov; *buftype = resp_buftype; resp_buftype = CIFS_NO_BUFFER; rsp = NULL; } trace_smb3_open_err(xid, tcon->tid, ses->Suid, oparms->create_options, oparms->desired_access, rc); if (rc == -EREMCHG) { pr_warn_once("server share %s deleted\n", tcon->tree_name); tcon->need_reconnect = true; } goto creat_exit; } else if (rsp == NULL) /* unlikely to happen, but safer to check */ goto creat_exit; else trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, oparms->create_options, oparms->desired_access); atomic_inc(&tcon->num_remote_opens); oparms->fid->persistent_fid = rsp->PersistentFileId; oparms->fid->volatile_fid = rsp->VolatileFileId; oparms->fid->access = oparms->desired_access; #ifdef CONFIG_CIFS_DEBUG2 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); #endif /* CIFS_DEBUG2 */ if (buf) { buf->CreationTime = rsp->CreationTime; buf->LastAccessTime = rsp->LastAccessTime; buf->LastWriteTime = rsp->LastWriteTime; buf->ChangeTime = rsp->ChangeTime; buf->AllocationSize = rsp->AllocationSize; buf->EndOfFile = rsp->EndofFile; buf->Attributes = rsp->FileAttributes; buf->NumberOfLinks = cpu_to_le32(1); buf->DeletePending = 0; } smb2_parse_contexts(server, rsp, &oparms->fid->epoch, oparms->fid->lease_key, oplock, buf, posix); creat_exit: SMB2_open_free(&rqst); free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, __u32 max_response_size) { struct smb2_ioctl_req *req; struct kvec *iov = rqst->rq_iov; unsigned int total_len; int rc; char *in_data_buf; rc = smb2_ioctl_req_init(opcode, tcon, server, (void **) &req, &total_len); if (rc) return rc; if (indatalen) { /* * indatalen is usually small at a couple of bytes max, so * just allocate through generic pool */ in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS); if (!in_data_buf) { cifs_small_buf_release(req); return -ENOMEM; } } req->CtlCode = cpu_to_le32(opcode); req->PersistentFileId = persistent_fid; req->VolatileFileId = 
volatile_fid; iov[0].iov_base = (char *)req; /* * If no input data, the size of the ioctl struct in the * protocol spec still includes a 1 byte data buffer, * but if input data is passed to the ioctl, we do not * want to double count this, so we do not send * the dummy one byte of data in iovec[0] if sending * input data (in iovec[1]). */ if (indatalen) { req->InputCount = cpu_to_le32(indatalen); /* do not set InputOffset if no input data */ req->InputOffset = cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); rqst->rq_nvec = 2; iov[0].iov_len = total_len - 1; iov[1].iov_base = in_data_buf; iov[1].iov_len = indatalen; } else { rqst->rq_nvec = 1; iov[0].iov_len = total_len; } req->OutputOffset = 0; req->OutputCount = 0; /* MBZ */ /* * In most cases max_response_size is set to 16K (CIFSMaxBufSize). * We could increase the default MaxOutputResponse, but that could require * more credits. Windows typically sets this smaller, but for some * ioctls it may be useful to allow the server to send more. No point * limiting what the server can send as long as it fits in one credit. * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want * to raise this limit in the future. * Note that for snapshot queries, servers like Azure expect the * first query to be of minimal size (just used to get the number/size * of previous versions), so the response size must be specified as EXACTLY * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple * of eight bytes. Currently that is the only case where we set max * response size smaller. */ req->MaxOutputResponse = cpu_to_le32(max_response_size); req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), SMB2_MAX_BUFFER_SIZE)); /* always an FSCTL (for now) */ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) req->hdr.Flags |= SMB2_FLAGS_SIGNED; return 0; } void SMB2_ioctl_free(struct smb_rqst *rqst) { int i; if (rqst && rqst->rq_iov) { cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ for (i = 1; i < rqst->rq_nvec; i++) if (rqst->rq_iov[i].iov_base != smb2_padding) kfree(rqst->rq_iov[i].iov_base); } } /* * SMB2 IOCTL is used for both IOCTLs and FSCTLs */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, u32 max_out_data_len, char **out_data, u32 *plen /* returned data len */) { struct smb_rqst rqst; struct smb2_ioctl_rsp *rsp = NULL; struct cifs_ses *ses; struct TCP_Server_Info *server; struct kvec iov[SMB2_IOCTL_IOV_SIZE]; struct kvec rsp_iov = {NULL, 0}; int resp_buftype = CIFS_NO_BUFFER; int rc = 0; int flags = 0; cifs_dbg(FYI, "SMB2 IOCTL\n"); if (out_data != NULL) *out_data = NULL; /* zero out returned data len, in case of error */ if (plen) *plen = 0; if (!tcon) return -EIO; ses = tcon->ses; if (!ses) return -EIO; server = cifs_pick_channel(ses); if (!server) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst, persistent_fid, volatile_fid, opcode, in_data, indatalen, max_out_data_len); if (rc) goto ioctl_exit; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; if (rc != 0) trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid, ses->Suid, 0,
opcode, rc); if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } else if (rc == -EINVAL) { if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && (opcode != FSCTL_SRV_COPYCHUNK)) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } } else if (rc == -E2BIG) { if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) { cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); goto ioctl_exit; } } /* check if caller wants to look at return data or just return rc */ if ((plen == NULL) || (out_data == NULL)) goto ioctl_exit; /* * Although unlikely to be possible for rsp to be null and rc not set, * adding check below is slightly safer long term (and quiets Coverity * warning) */ if (rsp == NULL) { rc = -EIO; goto ioctl_exit; } *plen = le32_to_cpu(rsp->OutputCount); /* We check for obvious errors in the output buffer length and offset */ if (*plen == 0) goto ioctl_exit; /* server returned no data */ else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); *plen = 0; rc = -EIO; goto ioctl_exit; } if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) { cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, le32_to_cpu(rsp->OutputOffset)); *plen = 0; rc = -EIO; goto ioctl_exit; } *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset), *plen, GFP_KERNEL); if (*out_data == NULL) { rc = -ENOMEM; goto ioctl_exit; } ioctl_exit: SMB2_ioctl_free(&rqst); free_rsp_buf(resp_buftype, rsp); return rc; } /* * Individual callers to ioctl worker function follow */ int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { int rc; struct compress_ioctl fsctl_input; char *ret_data = NULL; fsctl_input.CompressionState = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SET_COMPRESSION, (char *)&fsctl_input /* data input */, 2 /* in data len */, CIFSMaxBufSize /* max out data */, &ret_data /* out data */, NULL); cifs_dbg(FYI, "set compression rc %d\n", rc); return rc; } int SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, bool query_attrs) { struct smb2_close_req *req; struct kvec *iov = rqst->rq_iov; unsigned int total_len; int rc; rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; if (query_attrs) req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; else req->Flags = 0; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; return 0; } void SMB2_close_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ } int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_network_open_info *pbuf) { struct smb_rqst rqst; struct smb2_close_rsp *rsp = NULL; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); struct kvec iov[1]; struct kvec rsp_iov; int resp_buftype = CIFS_NO_BUFFER; int rc = 0; int flags = 0; bool query_attrs = false; cifs_dbg(FYI, "Close\n"); if (!ses || !server) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = 1; /* check if need to ask server to return timestamps in close 
response */ if (pbuf) query_attrs = true; trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid); rc = SMB2_close_init(tcon, server, &rqst, persistent_fid, volatile_fid, query_attrs); if (rc) goto close_exit; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid, rc); goto close_exit; } else { trace_smb3_close_done(xid, persistent_fid, tcon->tid, ses->Suid); /* * Note that we have to subtract 4 since struct network_open_info * has a final 4 byte pad that the close response does not have */ if (pbuf) memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4); } atomic_dec(&tcon->num_remote_opens); close_exit: SMB2_close_free(&rqst); free_rsp_buf(resp_buftype, rsp); /* retry close in a worker thread if this one is interrupted */ if (is_interrupt_error(rc)) { int tmp_rc; tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid); if (tmp_rc) cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", persistent_fid, tmp_rc); } return rc; } int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL); } int smb2_validate_iov(unsigned int offset, unsigned int buffer_length, struct kvec *iov, unsigned int min_buf_size) { unsigned int smb_len = iov->iov_len; char *end_of_smb = smb_len + (char *)iov->iov_base; char *begin_of_buf = offset + (char *)iov->iov_base; char *end_of_buf = begin_of_buf + buffer_length; if (buffer_length < min_buf_size) { cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n", buffer_length, min_buf_size); return -EINVAL; } /* check if beyond RFC1001 maximum length */ if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", buffer_length, smb_len); return -EINVAL; } if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { cifs_dbg(VFS, "Invalid server response, bad offset to data\n"); return -EINVAL; } return 0; } /* * If SMB buffer fields are valid, copy into temporary buffer to hold result. * Caller must free buffer.
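 * Typical call, as in query_info() below:
 *   rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
 *                                   le32_to_cpu(rsp->OutputBufferLength),
 *                                   &rsp_iov, min_len, *data);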
*/ int smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length, struct kvec *iov, unsigned int minbufsize, char *data) { char *begin_of_buf = offset + (char *)iov->iov_base; int rc; if (!data) return -EINVAL; rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize); if (rc) return rc; memcpy(data, begin_of_buf, minbufsize); return 0; } int SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, u32 additional_info, size_t output_len, size_t input_len, void *input) { struct smb2_query_info_req *req; struct kvec *iov = rqst->rq_iov; unsigned int total_len; int rc; rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->InfoType = info_type; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; req->AdditionalInformation = cpu_to_le32(additional_info); req->OutputBufferLength = cpu_to_le32(output_len); if (input_len) { req->InputBufferLength = cpu_to_le32(input_len); /* total_len for smb query request never close to le16 max */ req->InputBufferOffset = cpu_to_le16(total_len - 1); memcpy(req->Buffer, input, input_len); } iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1 + input_len; return 0; } void SMB2_query_info_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ } static int query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, u32 additional_info, size_t output_len, size_t min_len, void **data, u32 *dlen) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov[1]; struct kvec rsp_iov; int rc = 0; int resp_buftype = CIFS_NO_BUFFER; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server; int flags = 0; bool allocated = false; cifs_dbg(FYI, "Query Info\n"); if (!ses) return -EIO; server = cifs_pick_channel(ses); if (!server) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = SMB2_query_info_init(tcon, server, &rqst, persistent_fid, volatile_fid, info_class, info_type, additional_info, output_len, 0, NULL); if (rc) goto qinf_exit; trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid, ses->Suid, info_class, (__u32)info_type); rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); trace_smb3_query_info_err(xid, persistent_fid, tcon->tid, ses->Suid, info_class, (__u32)info_type, rc); goto qinf_exit; } trace_smb3_query_info_done(xid, persistent_fid, tcon->tid, ses->Suid, info_class, (__u32)info_type); if (dlen) { *dlen = le32_to_cpu(rsp->OutputBufferLength); if (!*data) { *data = kmalloc(*dlen, GFP_KERNEL); if (!*data) { cifs_tcon_dbg(VFS, "Error %d allocating memory for acl\n", rc); *dlen = 0; rc = -ENOMEM; goto qinf_exit; } allocated = true; } } rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, dlen ? 
*dlen : min_len, *data); if (rc && allocated) { kfree(*data); *data = NULL; *dlen = 0; } qinf_exit: SMB2_query_info_free(&rqst); free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, sizeof(struct smb2_file_all_info) + PATH_MAX * 2, sizeof(struct smb2_file_all_info), (void **)&data, NULL); } #if 0 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */ int SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen) { size_t output_len = sizeof(struct smb311_posix_qinfo *) + (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2); *plen = 0; return query_info(xid, tcon, persistent_fid, volatile_fid, SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen); /* Note caller must free "data" (passed in above). It may be allocated in query_info call */ } #endif int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, void **data, u32 *plen, u32 extra_info) { __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | extra_info; *plen = 0; return query_info(xid, tcon, persistent_fid, volatile_fid, 0, SMB2_O_INFO_SECURITY, additional_info, SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); } int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0, sizeof(struct smb2_file_internal_info), sizeof(struct smb2_file_internal_info), (void **)&uniqueid, NULL); } /* * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory * See MS-SMB2 2.2.35 and 2.2.36 */ static int SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, struct cifs_tcon *tcon, struct TCP_Server_Info *server, u64 persistent_fid, u64 volatile_fid, u32 completion_filter, bool watch_tree) { struct smb2_change_notify_req *req; struct kvec *iov = rqst->rq_iov; unsigned int total_len; int rc; rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* See note 354 of MS-SMB2, 64K max */ req->OutputBufferLength = cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); req->CompletionFilter = cpu_to_le32(completion_filter); if (watch_tree) req->Flags = cpu_to_le16(SMB2_WATCH_TREE); else req->Flags = 0; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; return 0; } int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, bool watch_tree, u32 completion_filter, u32 max_out_data_len, char **out_data, u32 *plen /* returned data len */) { struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); struct smb_rqst rqst; struct smb2_change_notify_rsp *smb_rsp; struct kvec iov[1]; struct kvec rsp_iov = {NULL, 0}; int resp_buftype = CIFS_NO_BUFFER; int flags = 0; int rc = 0; cifs_dbg(FYI, "change notify\n"); if (!ses || !server) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, 
sizeof(iov)); if (plen) *plen = 0; rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = SMB2_notify_init(xid, &rqst, tcon, server, persistent_fid, volatile_fid, completion_filter, watch_tree); if (rc) goto cnotify_exit; trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid, (u8)watch_tree, completion_filter); rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE); trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid, (u8)watch_tree, completion_filter, rc); } else { trace_smb3_notify_done(xid, persistent_fid, tcon->tid, ses->Suid, (u8)watch_tree, completion_filter); /* validate that notify information is plausible */ if ((rsp_iov.iov_base == NULL) || (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1)) goto cnotify_exit; smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, sizeof(struct file_notify_information)); *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); if (*out_data == NULL) { rc = -ENOMEM; goto cnotify_exit; } else if (plen) *plen = le32_to_cpu(smb_rsp->OutputBufferLength); } cnotify_exit: if (rqst.rq_iov) cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */ free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } /* * This is a no-op for now. We're not really interested in the reply, but * rather in the fact that the server sent one and that server->lstrp * gets updated. * * FIXME: maybe we should consider checking that the reply matches request? */ static void smb2_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; struct cifs_credits credits = { .value = 0, .instance = 0 }; if (mid->mid_state == MID_RESPONSE_RECEIVED || mid->mid_state == MID_RESPONSE_MALFORMED) { credits.value = le16_to_cpu(rsp->hdr.CreditRequest); credits.instance = server->reconnect_instance; } release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } void smb2_reconnect_server(struct work_struct *work) { struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, reconnect.work); struct TCP_Server_Info *pserver; struct cifs_ses *ses, *ses2; struct cifs_tcon *tcon, *tcon2; struct list_head tmp_list, tmp_ses_list; bool tcon_exist = false, ses_exist = false; bool tcon_selected = false; int rc; bool resched = false; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ mutex_lock(&pserver->reconnect_mutex); INIT_LIST_HEAD(&tmp_list); INIT_LIST_HEAD(&tmp_ses_list); cifs_dbg(FYI, "Reconnecting tcons and channels\n"); spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { spin_unlock(&ses->ses_lock); continue; } spin_unlock(&ses->ses_lock); tcon_selected = false; list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->need_reconnect || tcon->need_reopen_files) { tcon->tc_count++; list_add_tail(&tcon->rlist, &tmp_list); tcon_selected = tcon_exist = true; } } /* * IPC has the same lifetime as its session and uses its * refcount. 
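 * Hence a session reference, not a tcon reference, is taken below when
 * queueing the IPC tcon for reconnect, and it is dropped again via
 * cifs_put_smb_ses() once the reconnect attempt has run.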
*/ if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); tcon_selected = tcon_exist = true; cifs_smb_ses_inc_refcount(ses); } /* * handle the case where channel needs to reconnect * binding session, but tcon is healthy (some other channel * is active) */ spin_lock(&ses->chan_lock); if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) { list_add_tail(&ses->rlist, &tmp_ses_list); ses_exist = true; cifs_smb_ses_inc_refcount(ses); } spin_unlock(&ses->chan_lock); } /* * Get the reference to server struct to be sure that the last call of * cifs_put_tcon() in the loop below won't release the server pointer. */ if (tcon_exist || ses_exist) server->srv_count++; spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server); if (!rc) cifs_reopen_persistent_handles(tcon); else resched = true; list_del_init(&tcon->rlist); if (tcon->ipc) cifs_put_smb_ses(tcon->ses); else cifs_put_tcon(tcon); } if (!ses_exist) goto done; /* allocate a dummy tcon struct used for reconnect */ tcon = tcon_info_alloc(false); if (!tcon) { resched = true; list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { list_del_init(&ses->rlist); cifs_put_smb_ses(ses); } goto done; } tcon->status = TID_GOOD; tcon->retry = false; tcon->need_reconnect = false; /* now reconnect sessions for necessary channels */ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { tcon->ses = ses; rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server); if (rc) resched = true; list_del_init(&ses->rlist); cifs_put_smb_ses(ses); } tconInfoFree(tcon); done: cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); if (resched) queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); mutex_unlock(&pserver->reconnect_mutex); /* now we can safely release srv struct */ if (tcon_exist || ses_exist) cifs_put_tcp_session(server, 1); } int SMB2_echo(struct TCP_Server_Info *server) { struct smb2_echo_req *req; int rc = 0; struct kvec iov[1]; struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 1 }; unsigned int total_len; cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { spin_unlock(&server->srv_lock); /* No need to send echo on newly established connections */ mod_delayed_work(cifsiod_wq, &server->reconnect, 0); return rc; } spin_unlock(&server->srv_lock); rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, (void **)&req, &total_len); if (rc) return rc; req->hdr.CreditRequest = cpu_to_le16(1); iov[0].iov_len = total_len; iov[0].iov_base = (char *)req; rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, server, CIFS_ECHO_OP, NULL); if (rc) cifs_dbg(FYI, "Echo request failed: %d\n", rc); cifs_small_buf_release(req); return rc; } void SMB2_flush_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ } int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, struct cifs_tcon *tcon, struct TCP_Server_Info *server, u64 persistent_fid, u64 volatile_fid) { struct smb2_flush_req *req; struct kvec *iov = rqst->rq_iov; unsigned int total_len; int rc; rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; return 0; } int SMB2_flush(const unsigned int xid, 
struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct cifs_ses *ses = tcon->ses; struct smb_rqst rqst; struct kvec iov[1]; struct kvec rsp_iov = {NULL, 0}; struct TCP_Server_Info *server = cifs_pick_channel(ses); int resp_buftype = CIFS_NO_BUFFER; int flags = 0; int rc = 0; cifs_dbg(FYI, "flush\n"); if (!ses || !(ses->server)) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = SMB2_flush_init(xid, &rqst, tcon, server, persistent_fid, volatile_fid); if (rc) goto flush_exit; trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid); rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid, rc); } else trace_smb3_flush_done(xid, persistent_fid, tcon->tid, ses->Suid); flush_exit: SMB2_flush_free(&rqst); free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } #ifdef CONFIG_CIFS_SMB_DIRECT static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms) { struct TCP_Server_Info *server = io_parms->server; struct cifs_tcon *tcon = io_parms->tcon; /* we can only offload if we're connected */ if (!server || !tcon) return false; /* we can only offload on an rdma connection */ if (!server->rdma || !server->smbd_conn) return false; /* we don't support signed offload yet */ if (server->sign) return false; /* we don't support encrypted offload yet */ if (smb3_encryption_required(tcon)) return false; /* offload also has its overhead, so only do it if desired */ if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold) return false; return true; } #endif /* CONFIG_CIFS_SMB_DIRECT */ /* * To form a chain of read requests, any read requests after the first should * have the end_of_chain boolean set to true. 
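 *
 * Each chained request advertises the 8-byte aligned offset of the
 * next PDU in hdr.NextCommand (zero terminates the chain), and a
 * RELATED_REQUEST reuses the session, tree and file ids of the
 * previous request in the compound.
 */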
*/ static int smb2_new_read_req(void **buf, unsigned int *total_len, struct cifs_io_parms *io_parms, struct cifs_readdata *rdata, unsigned int remaining_bytes, int request_type) { int rc = -EACCES; struct smb2_read_req *req = NULL; struct smb2_hdr *shdr; struct TCP_Server_Info *server = io_parms->server; rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server, (void **) &req, total_len); if (rc) return rc; if (server == NULL) return -ECONNABORTED; shdr = &req->hdr; shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->ReadChannelInfoOffset = 0; /* reserved */ req->ReadChannelInfoLength = 0; /* reserved */ req->Channel = 0; /* reserved */ req->MinimumCount = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); trace_smb3_read_enter(0 /* xid */, io_parms->persistent_fid, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length); #ifdef CONFIG_CIFS_SMB_DIRECT /* * If we want to do a RDMA write, fill in and append * smbd_buffer_descriptor_v1 to the end of read request */ if (smb3_use_rdma_offload(io_parms)) { struct smbd_buffer_descriptor_v1 *v1; bool need_invalidate = server->dialect == SMB30_PROT_ID; rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->iter, true, need_invalidate); if (!rdata->mr) return -EAGAIN; req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; if (need_invalidate) req->Channel = SMB2_CHANNEL_RDMA_V1; req->ReadChannelInfoOffset = cpu_to_le16(offsetof(struct smb2_read_req, Buffer)); req->ReadChannelInfoLength = cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; v1->offset = cpu_to_le64(rdata->mr->mr->iova); v1->token = cpu_to_le32(rdata->mr->mr->rkey); v1->length = cpu_to_le32(rdata->mr->mr->length); *total_len += sizeof(*v1) - 1; } #endif if (request_type & CHAINED_REQUEST) { if (!(request_type & END_OF_CHAIN)) { /* next 8-byte aligned request */ *total_len = ALIGN(*total_len, 8); shdr->NextCommand = cpu_to_le32(*total_len); } else /* END_OF_CHAIN */ shdr->NextCommand = 0; if (request_type & RELATED_REQUEST) { shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; /* * Related requests use info from previous read request * in chain. 
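 * Per MS-SMB2, an all-0xFF SessionId/TreeId/file id in a related
 * compound request tells the server to substitute the values from the
 * previous request, which is what the sentinel values below do.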
*/ shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); req->PersistentFileId = (u64)-1; req->VolatileFileId = (u64)-1; } } if (remaining_bytes > io_parms->length) req->RemainingBytes = cpu_to_le32(remaining_bytes); else req->RemainingBytes = 0; *buf = req; return rc; } static void smb2_readv_callback(struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct TCP_Server_Info *server = rdata->server; struct smb2_hdr *shdr = (struct smb2_hdr *)rdata->iov[0].iov_base; struct cifs_credits credits = { .value = 0, .instance = 0 }; struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 }; if (rdata->got_bytes) { rqst.rq_iter = rdata->iter; rqst.rq_iter_size = iov_iter_count(&rdata->iter); } WARN_ONCE(rdata->server != mid->server, "rdata server %p != mid server %p", rdata->server, mid->server); cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", __func__, mid->mid, mid->mid_state, rdata->result, rdata->bytes); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits.value = le16_to_cpu(shdr->CreditRequest); credits.instance = server->reconnect_instance; /* result already set, check signature */ if (server->sign && !mid->decrypted) { int rc; iov_iter_revert(&rqst.rq_iter, rdata->got_bytes); iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); rc = smb2_verify_signature(&rqst, server); if (rc) cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: rdata->result = -EAGAIN; if (server->sign && rdata->got_bytes) /* reset bytes number since we can not check a sign */ rdata->got_bytes = 0; /* FIXME: should this be counted toward the initiating task? 
*/ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(shdr->CreditRequest); credits.instance = server->reconnect_instance; fallthrough; default: rdata->result = -EIO; } #ifdef CONFIG_CIFS_SMB_DIRECT /* * If this rdata has a memory region registered, the MR can be freed. * MRs need to be freed as soon as I/O finishes to prevent deadlock, * because they are limited in number and are reused for future I/Os. */ if (rdata->mr) { smbd_deregister_mr(rdata->mr); rdata->mr = NULL; } #endif if (rdata->result && rdata->result != -ENODATA) { cifs_stats_fail_inc(tcon, SMB2_READ_HE); trace_smb3_read_err(0 /* xid */, rdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, rdata->offset, rdata->bytes, rdata->result); } else trace_smb3_read_done(0 /* xid */, rdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, rdata->offset, rdata->got_bytes); queue_work(cifsiod_wq, &rdata->work); release_mid(mid); add_credits(server, &credits, 0); } /* smb2_async_readv - send an async read, and set up mid to handle result */ int smb2_async_readv(struct cifs_readdata *rdata) { int rc, flags = 0; char *buf; struct smb2_hdr *shdr; struct cifs_io_parms io_parms; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 1 }; struct TCP_Server_Info *server; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); unsigned int total_len; int credit_request; cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", __func__, rdata->offset, rdata->bytes); if (!rdata->server) rdata->server = cifs_pick_channel(tcon->ses); io_parms.tcon = tlink_tcon(rdata->cfile->tlink); io_parms.server = server = rdata->server; io_parms.offset = rdata->offset; io_parms.length = rdata->bytes; io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; io_parms.pid = rdata->pid; rc = smb2_new_read_req( (void **) &buf, &total_len, &io_parms, rdata, 0, 0); if (rc) return rc; if (smb3_encryption_required(io_parms.tcon)) flags |= CIFS_TRANSFORM_REQ; rdata->iov[0].iov_base = buf; rdata->iov[0].iov_len = total_len; shdr = (struct smb2_hdr *)buf; if (rdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); credit_request = le16_to_cpu(shdr->CreditCharge) + 8; if (server->credits >= server->max_credits) shdr->CreditRequest = cpu_to_le16(0); else shdr->CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, credit_request)); rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (rc) goto async_readv_out; flags |= CIFS_HAS_CREDITS; } kref_get(&rdata->refcount); rc = cifs_call_async(server, &rqst, cifs_readv_receive, smb2_readv_callback, smb3_handle_read_data, rdata, flags, &rdata->credits); if (rc) { kref_put(&rdata->refcount, cifs_readdata_release); cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, io_parms.tcon->tid, io_parms.tcon->ses->Suid, io_parms.offset, io_parms.length, rc); } async_readv_out: cifs_small_buf_release(buf); return rc; } int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *buf_type) { struct smb_rqst rqst; int resp_buftype, rc; struct smb2_read_req *req = NULL; struct smb2_read_rsp *rsp = NULL; struct kvec iov[1]; struct kvec rsp_iov; unsigned int total_len; int flags = CIFS_LOG_ERROR; struct cifs_ses *ses = io_parms->tcon->ses; if (!io_parms->server) io_parms->server =
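/* (on a multichannel mount, cifs_pick_channel() selects one of the
 * session's established channels to carry this request) */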
cifs_pick_channel(io_parms->tcon->ses); *nbytes = 0; rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); if (rc) return rc; if (smb3_encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, io_parms->server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; if (rc) { if (rc != -ENODATA) { cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); trace_smb3_read_err(xid, req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length, rc); } else trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, 0); free_rsp_buf(resp_buftype, rsp_iov.iov_base); cifs_small_buf_release(req); return rc == -ENODATA ? 0 : rc; } else trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length); cifs_small_buf_release(req); *nbytes = le32_to_cpu(rsp->DataLength); if ((*nbytes > CIFS_MAX_MSGSIZE) || (*nbytes > io_parms->length)) { cifs_dbg(FYI, "bad length %d for count %d\n", *nbytes, io_parms->length); rc = -EIO; *nbytes = 0; } if (*buf) { memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); free_rsp_buf(resp_buftype, rsp_iov.iov_base); } else if (resp_buftype != CIFS_NO_BUFFER) { *buf = rsp_iov.iov_base; if (resp_buftype == CIFS_SMALL_BUFFER) *buf_type = CIFS_SMALL_BUFFER; else if (resp_buftype == CIFS_LARGE_BUFFER) *buf_type = CIFS_LARGE_BUFFER; } return rc; } /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. */ static void smb2_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = wdata->server; unsigned int written; struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; struct cifs_credits credits = { .value = 0, .instance = 0 }; WARN_ONCE(wdata->server != mid->server, "wdata server %p != mid server %p", wdata->server, mid->server); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits.value = le16_to_cpu(rsp->hdr.CreditRequest); credits.instance = server->reconnect_instance; wdata->result = smb2_check_receive(mid, server, 0); if (wdata->result != 0) break; written = le32_to_cpu(rsp->DataLength); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. */ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(rsp->hdr.CreditRequest); credits.instance = server->reconnect_instance; fallthrough; default: wdata->result = -EIO; break; } #ifdef CONFIG_CIFS_SMB_DIRECT /* * If this wdata has a memory registered, the MR can be freed * The number of MRs available is limited, it's important to recover * used MR as soon as I/O is finished. 
Hold MR longer in the later * I/O process can possibly result in I/O deadlock due to lack of MR * to send request on I/O retry */ if (wdata->mr) { smbd_deregister_mr(wdata->mr); wdata->mr = NULL; } #endif if (wdata->result) { cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); trace_smb3_write_err(0 /* no xid */, wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes, wdata->result); if (wdata->result == -ENOSPC) pr_warn_once("Out of space writing to %s\n", tcon->tree_name); } else trace_smb3_write_done(0 /* no xid */, wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes); queue_work(cifsiod_wq, &wdata->work); release_mid(mid); add_credits(server, &credits, 0); } /* smb2_async_writev - send an async write, and set up mid to handle result */ int smb2_async_writev(struct cifs_writedata *wdata, void (*release)(struct kref *kref)) { int rc = -EACCES, flags = 0; struct smb2_write_req *req = NULL; struct smb2_hdr *shdr; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = wdata->server; struct kvec iov[1]; struct smb_rqst rqst = { }; unsigned int total_len; struct cifs_io_parms _io_parms; struct cifs_io_parms *io_parms = NULL; int credit_request; if (!wdata->server) server = wdata->server = cifs_pick_channel(tcon->ses); /* * in future we may get cifs_io_parms passed in from the caller, * but for now we construct it here... */ _io_parms = (struct cifs_io_parms) { .tcon = tcon, .server = server, .offset = wdata->offset, .length = wdata->bytes, .persistent_fid = wdata->cfile->fid.persistent_fid, .volatile_fid = wdata->cfile->fid.volatile_fid, .pid = wdata->pid, }; io_parms = &_io_parms; rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; shdr = (struct smb2_hdr *)req; shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = SMB2_CHANNEL_NONE; req->Offset = cpu_to_le64(io_parms->offset); req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer)); req->RemainingBytes = 0; trace_smb3_write_enter(0 /* xid */, io_parms->persistent_fid, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length); #ifdef CONFIG_CIFS_SMB_DIRECT /* * If we want to do a server RDMA read, fill in and append * smbd_buffer_descriptor_v1 to the end of write request */ if (smb3_use_rdma_offload(io_parms)) { struct smbd_buffer_descriptor_v1 *v1; size_t data_size = iov_iter_count(&wdata->iter); bool need_invalidate = server->dialect == SMB30_PROT_ID; wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->iter, false, need_invalidate); if (!wdata->mr) { rc = -EAGAIN; goto async_writev_out; } req->Length = 0; req->DataOffset = 0; req->RemainingBytes = cpu_to_le32(data_size); req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; if (need_invalidate) req->Channel = SMB2_CHANNEL_RDMA_V1; req->WriteChannelInfoOffset = cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); req->WriteChannelInfoLength = cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; v1->offset = cpu_to_le64(wdata->mr->mr->iova); v1->token = cpu_to_le32(wdata->mr->mr->rkey); v1->length = cpu_to_le32(wdata->mr->mr->length); } #endif iov[0].iov_len = total_len - 1; iov[0].iov_base = (char *)req; 
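/*
 * iov[0] stops at total_len - 1 so the single placeholder byte of
 * req->Buffer is not sent; the payload travels separately via
 * rqst.rq_iter (or, for RDMA offload, is only described by the
 * smbd_buffer_descriptor_v1 appended above, in which case
 * iov[0].iov_len is extended again below).
 */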
rqst.rq_iov = iov; rqst.rq_nvec = 1; rqst.rq_iter = wdata->iter; rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter); #ifdef CONFIG_CIFS_SMB_DIRECT if (wdata->mr) iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); #endif cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter)); #ifdef CONFIG_CIFS_SMB_DIRECT /* For RDMA read, I/O size is in RemainingBytes not in Length */ if (!wdata->mr) req->Length = cpu_to_le32(io_parms->length); #else req->Length = cpu_to_le32(io_parms->length); #endif if (wdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); credit_request = le16_to_cpu(shdr->CreditCharge) + 8; if (server->credits >= server->max_credits) shdr->CreditRequest = cpu_to_le16(0); else shdr->CreditRequest = cpu_to_le16( min_t(int, server->max_credits - server->credits, credit_request)); rc = adjust_credits(server, &wdata->credits, io_parms->length); if (rc) goto async_writev_out; flags |= CIFS_HAS_CREDITS; } kref_get(&wdata->refcount); rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, wdata, flags, &wdata->credits); if (rc) { trace_smb3_write_err(0 /* no xid */, io_parms->persistent_fid, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length, rc); kref_put(&wdata->refcount, release); cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); } async_writev_out: cifs_small_buf_release(req); return rc; } /* * SMB2_write function gets iov pointer to kvec array with n_vec as a length. * The length field from io_parms must be at least 1 and indicates a number of * elements with data to write that begins with position 1 in iov array. All * data length is specified by count. */ int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { struct smb_rqst rqst; int rc = 0; struct smb2_write_req *req = NULL; struct smb2_write_rsp *rsp = NULL; int resp_buftype; struct kvec rsp_iov; int flags = 0; unsigned int total_len; struct TCP_Server_Info *server; *nbytes = 0; if (n_vec < 1) return rc; if (!io_parms->server) io_parms->server = cifs_pick_channel(io_parms->tcon->ses); server = io_parms->server; if (server == NULL) return -ECONNABORTED; rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer)); req->RemainingBytes = 0; trace_smb3_write_enter(xid, io_parms->persistent_fid, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length); iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = n_vec + 1; rc = cifs_send_recv(xid, io_parms->tcon->ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; if (rc) { trace_smb3_write_err(xid, req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length, rc); cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); 
cifs_dbg(VFS, "Send error in write = %d\n", rc); } else { *nbytes = le32_to_cpu(rsp->DataLength); trace_smb3_write_done(xid, req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, *nbytes); } cifs_small_buf_release(req); free_rsp_buf(resp_buftype, rsp); return rc; } int posix_info_sid_size(const void *beg, const void *end) { size_t subauth; int total; if (beg + 1 > end) return -1; subauth = *(u8 *)(beg+1); if (subauth < 1 || subauth > 15) return -1; total = 1 + 1 + 6 + 4*subauth; if (beg + total > end) return -1; return total; } int posix_info_parse(const void *beg, const void *end, struct smb2_posix_info_parsed *out) { int total_len = 0; int owner_len, group_len; int name_len; const void *owner_sid; const void *group_sid; const void *name; /* if no end bound given, assume payload to be correct */ if (!end) { const struct smb2_posix_info *p = beg; end = beg + le32_to_cpu(p->NextEntryOffset); /* last element will have a 0 offset, pick a sensible bound */ if (end == beg) end += 0xFFFF; } /* check base buf */ if (beg + sizeof(struct smb2_posix_info) > end) return -1; total_len = sizeof(struct smb2_posix_info); /* check owner sid */ owner_sid = beg + total_len; owner_len = posix_info_sid_size(owner_sid, end); if (owner_len < 0) return -1; total_len += owner_len; /* check group sid */ group_sid = beg + total_len; group_len = posix_info_sid_size(group_sid, end); if (group_len < 0) return -1; total_len += group_len; /* check name len */ if (beg + total_len + 4 > end) return -1; name_len = le32_to_cpu(*(__le32 *)(beg + total_len)); if (name_len < 1 || name_len > 0xFFFF) return -1; total_len += 4; /* check name */ name = beg + total_len; if (name + name_len > end) return -1; total_len += name_len; if (out) { out->base = beg; out->size = total_len; out->name_len = name_len; out->name = name; memcpy(&out->owner, owner_sid, owner_len); memcpy(&out->group, group_sid, group_len); } return total_len; } static int posix_info_extra_size(const void *beg, const void *end) { int len = posix_info_parse(beg, end, NULL); if (len < 0) return -1; return len - sizeof(struct smb2_posix_info); } static unsigned int num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry, size_t size) { int len; unsigned int entrycount = 0; unsigned int next_offset = 0; char *entryptr; FILE_DIRECTORY_INFO *dir_info; if (bufstart == NULL) return 0; entryptr = bufstart; while (1) { if (entryptr + next_offset < entryptr || entryptr + next_offset > end_of_buf || entryptr + next_offset + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } entryptr = entryptr + next_offset; dir_info = (FILE_DIRECTORY_INFO *)entryptr; if (infotype == SMB_FIND_FILE_POSIX_INFO) len = posix_info_extra_size(entryptr, end_of_buf); else len = le32_to_cpu(dir_info->FileNameLength); if (len < 0 || entryptr + len < entryptr || entryptr + len > end_of_buf || entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } *lastentry = entryptr; entrycount++; next_offset = le32_to_cpu(dir_info->NextEntryOffset); if (!next_offset) break; } return entrycount; } /* * Readdir/FindFirst */ int SMB2_query_directory_init(const unsigned int xid, struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, int index, int info_level) { struct smb2_query_directory_req *req; unsigned char *bufptr; __le16 asteriks = cpu_to_le16('*'); unsigned int output_size = 
CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE; unsigned int total_len; struct kvec *iov = rqst->rq_iov; int len, rc; rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server, (void **) &req, &total_len); if (rc) return rc; switch (info_level) { case SMB_FIND_FILE_DIRECTORY_INFO: req->FileInformationClass = FILE_DIRECTORY_INFORMATION; break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; break; case SMB_FIND_FILE_POSIX_INFO: req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; break; default: cifs_tcon_dbg(VFS, "info level %u isn't supported\n", info_level); return -EINVAL; } req->FileIndex = cpu_to_le32(index); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; len = 0x2; bufptr = req->Buffer; memcpy(bufptr, &asteriks, len); req->FileNameOffset = cpu_to_le16(sizeof(struct smb2_query_directory_req)); req->FileNameLength = cpu_to_le16(len); /* * BB could be 30 bytes or so longer if we used SMB2 specific * buffer lengths, but this is safe and close enough. */ output_size = min_t(unsigned int, output_size, server->maxBuf); output_size = min_t(unsigned int, output_size, 2 << 15); req->OutputBufferLength = cpu_to_le32(output_size); iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; iov[1].iov_base = (char *)(req->Buffer); iov[1].iov_len = len; trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, output_size); return 0; } void SMB2_query_directory_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) { cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ } } int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov, int resp_buftype, struct cifs_search_info *srch_inf) { struct smb2_query_directory_rsp *rsp; size_t info_buf_size; char *end_of_smb; int rc; rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base; switch (srch_inf->info_level) { case SMB_FIND_FILE_DIRECTORY_INFO: info_buf_size = sizeof(FILE_DIRECTORY_INFO); break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO); break; case SMB_FIND_FILE_POSIX_INFO: /* note that posix payload are variable size */ info_buf_size = sizeof(struct smb2_posix_info); break; default: cifs_tcon_dbg(VFS, "info level %u isn't supported\n", srch_inf->info_level); return -EINVAL; } rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), rsp_iov, info_buf_size); if (rc) { cifs_tcon_dbg(VFS, "bad info payload"); return rc; } srch_inf->unicode = true; if (srch_inf->ntwrk_buf_start) { if (srch_inf->smallBuf) cifs_small_buf_release(srch_inf->ntwrk_buf_start); else cifs_buf_release(srch_inf->ntwrk_buf_start); } srch_inf->ntwrk_buf_start = (char *)rsp; srch_inf->srch_entries_start = srch_inf->last_entry = (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); end_of_smb = rsp_iov->iov_len + (char *)rsp; srch_inf->entries_in_buffer = num_entries( srch_inf->info_level, srch_inf->srch_entries_start, end_of_smb, &srch_inf->last_entry, info_buf_size); srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, srch_inf->srch_entries_start, srch_inf->last_entry); if (resp_buftype == CIFS_LARGE_BUFFER) srch_inf->smallBuf = false; else if (resp_buftype == CIFS_SMALL_BUFFER) srch_inf->smallBuf = true; else cifs_tcon_dbg(VFS, "Invalid search buffer type\n"); 
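/*
 * On success, ownership of the response buffer has moved to
 * srch_inf->ntwrk_buf_start; the caller resets resp_buftype to
 * CIFS_NO_BUFFER so it is not freed twice (see SMB2_query_directory()).
 */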
return 0; } int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int index, struct cifs_search_info *srch_inf) { struct smb_rqst rqst; struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; struct smb2_query_directory_rsp *rsp = NULL; int resp_buftype = CIFS_NO_BUFFER; struct kvec rsp_iov; int rc = 0; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); int flags = 0; if (!ses || !(ses->server)) return -EIO; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); memset(&iov, 0, sizeof(iov)); rqst.rq_iov = iov; rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; rc = SMB2_query_directory_init(xid, tcon, server, &rqst, persistent_fid, volatile_fid, index, srch_inf->info_level); if (rc) goto qdir_exit; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; if (rc) { if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) { trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0); srch_inf->endOfSearch = true; rc = 0; } else { trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0, rc); cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); } goto qdir_exit; } rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype, srch_inf); if (rc) { trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0, rc); goto qdir_exit; } resp_buftype = CIFS_NO_BUFFER; trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, srch_inf->entries_in_buffer); qdir_exit: SMB2_query_directory_free(&rqst); free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class, u8 info_type, u32 additional_info, void **data, unsigned int *size) { struct smb2_set_info_req *req; struct kvec *iov = rqst->rq_iov; unsigned int i, total_len; int rc; rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); req->InfoType = info_type; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; req->AdditionalInformation = cpu_to_le32(additional_info); req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req)); req->BufferLength = cpu_to_le32(*size); memcpy(req->Buffer, *data, *size); total_len += *size; iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; for (i = 1; i < rqst->rq_nvec; i++) { le32_add_cpu(&req->BufferLength, size[i]); iov[i].iov_base = (char *)data[i]; iov[i].iov_len = size[i]; } return 0; } void SMB2_set_info_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ } static int send_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class, u8 info_type, u32 additional_info, unsigned int num, void **data, unsigned int *size) { struct smb_rqst rqst; struct smb2_set_info_rsp *rsp = NULL; struct kvec *iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); int flags = 0; if (!ses || !server) return -EIO; if (!num) return -EINVAL; if (smb3_encryption_required(tcon)) flags |= 
CIFS_TRANSFORM_REQ; iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL); if (!iov) return -ENOMEM; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = num; rc = SMB2_set_info_init(tcon, server, &rqst, persistent_fid, volatile_fid, pid, info_class, info_type, additional_info, data, size); if (rc) { kfree(iov); return rc; } rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); SMB2_set_info_free(&rqst); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); trace_smb3_set_info_err(xid, persistent_fid, tcon->tid, ses->Suid, info_class, (__u32)info_type, rc); } free_rsp_buf(resp_buftype, rsp); kfree(iov); return rc; } int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, __le64 *eof) { struct smb2_file_eof_info info; void *data; unsigned int size; info.EndOfFile = *eof; data = &info; size = sizeof(struct smb2_file_eof_info); trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof)); return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, 1, &data, &size); } int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct cifs_ntsd *pnntsd, int pacllen, int aclflag) { return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, 1, (void **)&pnntsd, &pacllen); } int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_full_ea_info *buf, int len) { return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, 1, (void **)&buf, &len); } int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, __u8 oplock_level) { struct smb_rqst rqst; int rc; struct smb2_oplock_break *req = NULL; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); int flags = CIFS_OBREAK_OP; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "SMB2_oplock_break\n"); rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->VolatileFid = volatile_fid; req->PersistentFid = persistent_fid; req->OplockLevel = oplock_level; req->hdr.CreditRequest = cpu_to_le16(1); flags |= CIFS_NO_RSP_BUF; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc); } return rc; } void smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_bfree = kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } static void copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(response_data->BlockSize); kst->f_blocks = 
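/* (FS_POSIX_INFORMATION reports capacity in units of BlockSize, so
 * the block counts map directly onto the statfs fields) */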
le64_to_cpu(response_data->TotalBlocks); kst->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) kst->f_bavail = kst->f_bfree; else kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); if (response_data->TotalFileNodes != cpu_to_le64(-1)) kst->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes); return; } static int build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, struct TCP_Server_Info *server, int level, int outbuf_len, u64 persistent_fid, u64 volatile_fid) { int rc; struct smb2_query_info_req *req; unsigned int total_len; cifs_dbg(FYI, "Query FSInfo level %d\n", level); if ((tcon->ses == NULL) || server == NULL) return -EIO; rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, (void **) &req, &total_len); if (rc) return rc; req->InfoType = SMB2_O_INFO_FILESYSTEM; req->FileInfoClass = level; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 1 for pad */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req)); req->OutputBufferLength = cpu_to_le32( outbuf_len + sizeof(struct smb2_query_info_rsp)); iov->iov_base = (char *)req; iov->iov_len = total_len; return 0; } int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); FILE_SYSTEM_POSIX_INFO *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, server, FS_POSIX_INFORMATION, sizeof(FILE_SYSTEM_POSIX_INFO), persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto posix_qfsinf_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (FILE_SYSTEM_POSIX_INFO *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(FILE_SYSTEM_POSIX_INFO)); if (!rc) copy_posix_fs_info_to_kstatfs(info, fsdata); posix_qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); struct smb2_fs_full_size_info *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, server, FS_FULL_SIZE_INFORMATION, sizeof(struct smb2_fs_full_size_info), persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsinf_exit; } rsp = 
(struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (struct smb2_fs_full_size_info *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(struct smb2_fs_full_size_info)); if (!rc) smb2_copy_fs_info_to_kstatfs(info, fsdata); qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int level) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype, max_len, min_len; struct cifs_ses *ses = tcon->ses; struct TCP_Server_Info *server = cifs_pick_channel(ses); unsigned int rsp_len, offset; int flags = 0; if (level == FS_DEVICE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); min_len = sizeof(FILE_SYSTEM_DEVICE_INFO); } else if (level == FS_ATTRIBUTE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO); min_len = MIN_FS_ATTR_INFO_SIZE; } else if (level == FS_SECTOR_SIZE_INFORMATION) { max_len = sizeof(struct smb3_fs_ss_info); min_len = sizeof(struct smb3_fs_ss_info); } else if (level == FS_VOLUME_INFORMATION) { max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN; min_len = sizeof(struct smb3_fs_vol_info); } else { cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); return -EINVAL; } rc = build_qfs_info_req(&iov, tcon, server, level, max_len, persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsattr_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; rsp_len = le32_to_cpu(rsp->OutputBufferLength); offset = le16_to_cpu(rsp->OutputBufferOffset); rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len); if (rc) goto qfsattr_exit; if (level == FS_ATTRIBUTE_INFORMATION) memcpy(&tcon->fsAttrInfo, offset + (char *)rsp, min_t(unsigned int, rsp_len, max_len)); else if (level == FS_DEVICE_INFORMATION) memcpy(&tcon->fsDevInfo, offset + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO)); else if (level == FS_SECTOR_SIZE_INFORMATION) { struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *) (offset + (char *)rsp); tcon->ss_flags = le32_to_cpu(ss_info->Flags); tcon->perf_sector_size = le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); } else if (level == FS_VOLUME_INFORMATION) { struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *) (offset + (char *)rsp); tcon->vol_serial_number = vol_info->VolumeSerialNumber; tcon->vol_create_time = vol_info->VolumeCreationTime; } qfsattr_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u32 num_lock, struct smb2_lock_element *buf) { struct smb_rqst rqst; int rc = 0; struct smb2_lock_req *req = NULL; struct kvec iov[2]; struct kvec rsp_iov; int resp_buf_type; unsigned int count; int flags = CIFS_NO_RSP_BUF; unsigned int total_len; struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses); cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); rc = smb2_plain_req_init(SMB2_LOCK, tcon, server, (void **) &req, &total_len); if (rc) return rc; if 
(smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); req->LockCount = cpu_to_le16(num_lock); req->PersistentFileId = persist_fid; req->VolatileFileId = volatile_fid; count = num_lock * sizeof(struct smb2_lock_element); iov[0].iov_base = (char *)req; iov[0].iov_len = total_len - sizeof(struct smb2_lock_element); iov[1].iov_base = (char *)buf; iov[1].iov_len = count; cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 2; rc = cifs_send_recv(xid, tcon->ses, server, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); trace_smb3_lock_err(xid, persist_fid, tcon->tid, tcon->ses->Suid, rc); } return rc; } int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u64 length, const __u64 offset, const __u32 lock_flags, const bool wait) { struct smb2_lock_element lock; lock.Offset = cpu_to_le64(offset); lock.Length = cpu_to_le64(length); lock.Flags = cpu_to_le32(lock_flags); if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK) lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY); return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock); } int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, __u8 *lease_key, const __le32 lease_state) { struct smb_rqst rqst; int rc; struct smb2_lease_ack *req = NULL; struct cifs_ses *ses = tcon->ses; int flags = CIFS_OBREAK_OP; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; __u64 *please_key_high; __u64 *please_key_low; struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses); cifs_dbg(FYI, "SMB2_lease_break\n"); rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->hdr.CreditRequest = cpu_to_le16(1); req->StructureSize = cpu_to_le16(36); total_len += 12; memcpy(req->LeaseKey, lease_key, 16); req->LeaseState = lease_state; flags |= CIFS_NO_RSP_BUF; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); please_key_low = (__u64 *)lease_key; please_key_high = (__u64 *)(lease_key+8); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, ses->Suid, *please_key_low, *please_key_high, rc); cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc); } else trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid, ses->Suid, *please_key_low, *please_key_high); return rc; }
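/*
 * Illustration (not part of the kernel source): how the fields of an SMB2
 * FS_FULL_SIZE_INFORMATION reply map onto statfs-style numbers, mirroring
 * the arithmetic in smb2_copy_fs_info_to_kstatfs() earlier in this file.
 * This is a minimal standalone userspace sketch; the struct below is a
 * local stand-in for the on-the-wire layout, with endianness conversion
 * assumed to have been applied already.
 */
#include <stdint.h>
#include <stdio.h>

struct fs_full_size_example {		/* hypothetical, host-endian copy */
	uint64_t total_allocation_units;
	uint64_t caller_available_units;
	uint32_t sectors_per_allocation_unit;
	uint32_t bytes_per_sector;
};

int main(void)
{
	struct fs_full_size_example info = {
		.total_allocation_units      = 1048576,	/* 1M clusters */
		.caller_available_units      = 262144,
		.sectors_per_allocation_unit = 8,
		.bytes_per_sector            = 512,
	};
	/* f_bsize is the cluster size: sector size times sectors per cluster */
	uint64_t bsize  = (uint64_t)info.bytes_per_sector *
			  info.sectors_per_allocation_unit;
	uint64_t blocks = info.total_allocation_units;	/* f_blocks */
	uint64_t bfree  = info.caller_available_units;	/* f_bfree == f_bavail */

	printf("block size %llu, %llu blocks, %llu free\n",
	       (unsigned long long)bsize, (unsigned long long)blocks,
	       (unsigned long long)bfree);
	return 0;
}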
linux-master
fs/smb/client/smb2pdu.c
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2007
 *   Author(s): Steve French ([email protected])
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   Operations related to support for exporting files via NFSD
 *
 */

 /*
  * See Documentation/filesystems/nfs/exporting.rst
  * and examples in fs/exportfs
  *
  * Since cifs is a network file system, an "fsid" must be included for
  * any nfs exports file entries which refer to cifs paths.  In addition
  * the cifs mount must be mounted with the "serverino" option (i.e. use
  * stable server inode numbers instead of locally generated temporary ones).
  * Although cifs inodes do not use generation numbers (they have a generation
  * number of zero), the inode number alone should be good enough for simple
  * cases in which users want to export cifs shares with NFS. The decode and
  * encode could be improved by using a new routine which expects 64 bit
  * inode numbers instead of the default 32 bit routines in fs/exportfs.
  *
  */

#include <linux/fs.h>
#include <linux/exportfs.h>
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cifsfs.h"

#ifdef CONFIG_CIFS_NFSD_EXPORT
static struct dentry *cifs_get_parent(struct dentry *dentry)
{
	/* BB need to add code here eventually to enable export via NFSD */
	cifs_dbg(FYI, "get parent for %p\n", dentry);
	return ERR_PTR(-EACCES);
}

const struct export_operations cifs_export_ops = {
	.get_parent = cifs_get_parent,
/*	Following five export operations are unneeded so far and can default:
	.get_dentry =
	.get_name =
	.find_exported_dentry =
	.decode_fh =
	.encode_fh =  */
};

#endif /* CONFIG_CIFS_NFSD_EXPORT */
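/*
 * Illustration (not kernel code): the comment above notes that the NFS
 * file-handle encode/decode for cifs could be improved by a routine that
 * carries 64-bit server inode numbers rather than the default 32-bit
 * fs/exportfs helpers. A minimal userspace sketch of such an encoding,
 * assuming a handle payload of ino64 + generation; all names here are
 * hypothetical, not an existing kernel API.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fh64 {			/* hypothetical handle payload */
	uint64_t ino;
	uint32_t generation;	/* cifs would store 0 here */
};

static void encode_fh64(unsigned char *buf, uint64_t ino, uint32_t gen)
{
	struct fh64 fh = { .ino = ino, .generation = gen };
	memcpy(buf, &fh, sizeof(fh));
}

static int decode_fh64(const unsigned char *buf, size_t len,
		       uint64_t *ino, uint32_t *gen)
{
	struct fh64 fh;

	if (len < sizeof(fh))
		return -1;		/* handle too short */
	memcpy(&fh, buf, sizeof(fh));
	*ino = fh.ino;
	*gen = fh.generation;
	return 0;
}

int main(void)
{
	unsigned char buf[sizeof(struct fh64)];
	uint64_t ino;
	uint32_t gen;

	encode_fh64(buf, 0x1122334455667788ULL, 0);
	if (!decode_fh64(buf, sizeof(buf), &ino, &gen))
		printf("ino=%llx gen=%u\n", (unsigned long long)ino, gen);
	return 0;
}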
linux-master
fs/smb/client/export.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020, Microsoft Corporation.
 *
 * Author(s): Steve French <[email protected]>
 *            Suresh Jayaraman <[email protected]>
 *            Jeff Layton <[email protected]>
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/ctype.h>
#include "cifsglob.h"
#include "cifsproto.h"

/* extract the host portion of the UNC string */
char *extract_hostname(const char *unc)
{
	const char *src;
	char *dst, *delim;
	unsigned int len;

	/* skip double chars at beginning of string */
	/* BB: check validity of these bytes? */
	if (strlen(unc) < 3)
		return ERR_PTR(-EINVAL);
	for (src = unc; *src && *src == '\\'; src++)
		;
	if (!*src)
		return ERR_PTR(-EINVAL);

	/* delimiter between hostname and sharename is always '\\' now */
	delim = strchr(src, '\\');
	if (!delim)
		return ERR_PTR(-EINVAL);

	len = delim - src;
	dst = kmalloc((len + 1), GFP_KERNEL);
	if (dst == NULL)
		return ERR_PTR(-ENOMEM);
	memcpy(dst, src, len);
	dst[len] = '\0';

	return dst;
}

char *extract_sharename(const char *unc)
{
	const char *src;
	char *delim, *dst;

	/* skip double chars at the beginning */
	src = unc + 2;

	/* share name is always preceded by '\\' now */
	delim = strchr(src, '\\');
	if (!delim)
		return ERR_PTR(-EINVAL);
	delim++;

	/* caller has to free the memory */
	dst = kstrdup(delim, GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	return dst;
}
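/*
 * Illustration (not kernel code): what extract_hostname() and
 * extract_sharename() above produce for a typical UNC string. This is a
 * standalone userspace re-implementation of the same parsing rules for
 * demonstration only; the kernel versions return ERR_PTR() codes and
 * allocate with kmalloc()/kstrdup() instead.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* copy the host part of "\\host\share\..." into a fresh string */
static char *hostname_of(const char *unc)
{
	const char *src = unc;
	const char *delim;

	while (*src == '\\')		/* skip leading backslashes */
		src++;
	delim = strchr(src, '\\');	/* host ends at the next '\' */
	if (!*src || !delim)
		return NULL;
	return strndup(src, (size_t)(delim - src));
}

int main(void)
{
	const char *unc = "\\\\fileserver\\public\\docs";
	char *host = hostname_of(unc);
	/* share name is everything after the '\' that ends the host,
	 * as in extract_sharename() */
	const char *share = strchr(unc + 2, '\\');

	printf("host:  %s\n", host ? host : "?");	/* "fileserver" */
	printf("share: %s\n", share ? share + 1 : "?");	/* "public\docs" */
	free(host);
	return 0;
}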
linux-master
fs/smb/client/unc.c
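/*
 * Illustration (not kernel code) for the port-matching rule used by the
 * connection-sharing logic in fs/smb/client/connect.c below (match_port):
 * when the mount request specifies no port, an existing connection matches
 * if it uses 445 (the direct-SMB port) or, failing that, 139 (the RFC 1001
 * port). Standalone userspace sketch; the function name is hypothetical
 * and the SMBDirect special case is omitted.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB_PORT	445
#define RFC1001_PORT	139

/* both ports in network byte order; 0 means "no port requested" */
static bool ports_match(uint16_t requested_port, uint16_t server_port)
{
	if (!requested_port) {
		if (server_port == htons(SMB_PORT))
			return true;			/* prefer 445 */
		requested_port = htons(RFC1001_PORT);	/* then try 139 */
	}
	return requested_port == server_port;
}

int main(void)
{
	printf("%d\n", ports_match(0, htons(445)));		/* 1 */
	printf("%d\n", ports_match(0, htons(139)));		/* 1 */
	printf("%d\n", ports_match(htons(445), htons(139)));	/* 0 */
	return 0;
}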
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2011 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/net.h> #include <linux/string.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/ctype.h> #include <linux/utsname.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/pagevec.h> #include <linux/freezer.h> #include <linux/namei.h> #include <linux/uuid.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <linux/inet.h> #include <linux/module.h> #include <keys/user-type.h> #include <net/ipv6.h> #include <linux/parser.h> #include <linux/bvec.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" #include "fscache.h" #include "smb2proto.h" #include "smbdirect.h" #include "dns_resolve.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs.h" #include "dfs_cache.h" #endif #include "fs_context.h" #include "cifs_swn.h" extern mempool_t *cifs_req_poolp; extern bool disable_legacy_dialects; /* FIXME: should these be tunable? */ #define TLINK_ERROR_EXPIRE (1 * HZ) #define TLINK_IDLE_EXPIRE (600 * HZ) /* Drop the connection to not overload the server */ #define MAX_STATUS_IO_TIMEOUT 5 static int ip_connect(struct TCP_Server_Info *server); static int generic_ip_connect(struct TCP_Server_Info *server); static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); static void cifs_prune_tlinks(struct work_struct *work); /* * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may * get their ip addresses changed at some point. * * This should be called with server->srv_mutex held. */ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) { int rc; int len; char *unc; struct sockaddr_storage ss; if (!server->hostname) return -EINVAL; /* if server hostname isn't populated, there's nothing to do here */ if (server->hostname[0] == '\0') return 0; len = strlen(server->hostname) + 3; unc = kmalloc(len, GFP_KERNEL); if (!unc) { cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__); return -ENOMEM; } scnprintf(unc, len, "\\\\%s", server->hostname); spin_lock(&server->srv_lock); ss = server->dstaddr; spin_unlock(&server->srv_lock); rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL); kfree(unc); if (rc < 0) { cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n", __func__, server->hostname, rc); } else { spin_lock(&server->srv_lock); memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr)); spin_unlock(&server->srv_lock); rc = 0; } return rc; } static void smb2_query_server_interfaces(struct work_struct *work) { int rc; struct cifs_tcon *tcon = container_of(work, struct cifs_tcon, query_interfaces.work); /* * query server network interfaces, in case they change */ rc = SMB3_request_interfaces(0, tcon, false); if (rc) { cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n", __func__, rc); } queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, (SMB_INTERFACE_POLL_INTERVAL * HZ)); } /* * Update the tcpStatus for the server. * This is used to signal the cifsd thread to call cifs_reconnect * ONLY cifsd thread should call cifs_reconnect. 
For any other * thread, use this function * * @server: the tcp ses for which reconnect is needed * @all_channels: if this needs to be done for all channels */ void cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server, bool all_channels) { struct TCP_Server_Info *pserver; struct cifs_ses *ses; int i; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; spin_lock(&pserver->srv_lock); if (!all_channels) { pserver->tcpStatus = CifsNeedReconnect; spin_unlock(&pserver->srv_lock); return; } spin_unlock(&pserver->srv_lock); spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { spin_lock(&ses->chan_lock); for (i = 0; i < ses->chan_count; i++) { spin_lock(&ses->chans[i].server->srv_lock); ses->chans[i].server->tcpStatus = CifsNeedReconnect; spin_unlock(&ses->chans[i].server->srv_lock); } spin_unlock(&ses->chan_lock); } spin_unlock(&cifs_tcp_ses_lock); } /* * Mark all sessions and tcons for reconnect. * IMPORTANT: make sure that this gets called only from * cifsd thread. For any other thread, use * cifs_signal_cifsd_for_reconnect * * @server: the tcp ses for which reconnect is needed * @server needs to be previously set to CifsNeedReconnect. * @mark_smb_session: whether even sessions need to be marked */ void cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { struct TCP_Server_Info *pserver; struct cifs_ses *ses, *nses; struct cifs_tcon *tcon; /* * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they * are not used until reconnected. */ cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__); /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { /* check if iface is still active */ if (!cifs_chan_is_iface_active(ses, server)) cifs_chan_update_iface(ses, server); spin_lock(&ses->chan_lock); if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) { spin_unlock(&ses->chan_lock); continue; } if (mark_smb_session) CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses); else cifs_chan_set_need_reconnect(ses, server); cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n", __func__, ses->chans_need_reconnect); /* If all channels need reconnect, then tcon needs reconnect */ if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { spin_unlock(&ses->chan_lock); continue; } spin_unlock(&ses->chan_lock); spin_lock(&ses->ses_lock); ses->ses_status = SES_NEED_RECON; spin_unlock(&ses->ses_lock); list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { tcon->need_reconnect = true; spin_lock(&tcon->tc_lock); tcon->status = TID_NEED_RECON; spin_unlock(&tcon->tc_lock); } if (ses->tcon_ipc) { ses->tcon_ipc->need_reconnect = true; spin_lock(&ses->tcon_ipc->tc_lock); ses->tcon_ipc->status = TID_NEED_RECON; spin_unlock(&ses->tcon_ipc->tc_lock); } } spin_unlock(&cifs_tcp_ses_lock); } static void cifs_abort_connection(struct TCP_Server_Info *server) { struct mid_q_entry *mid, *nmid; struct list_head retry_list; server->maxBuf = 0; server->max_read = 0; /* do not want to be sending data on a socket we are freeing */ cifs_dbg(FYI, "%s: tearing down socket\n", __func__); cifs_server_lock(server); if (server->ssocket) { cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); kernel_sock_shutdown(server->ssocket, SHUT_WR); cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state, server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; } server->sequence_number = 0; server->session_estab = false; kfree_sensitive(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; server->lstrp = jiffies; /* mark submitted MIDs for retry and issue callback */ INIT_LIST_HEAD(&retry_list); cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); spin_lock(&server->mid_lock); list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { kref_get(&mid->refcount); if (mid->mid_state == MID_REQUEST_SUBMITTED) mid->mid_state = MID_RETRY_NEEDED; list_move(&mid->qhead, &retry_list); mid->mid_flags |= MID_DELETED; } spin_unlock(&server->mid_lock); cifs_server_unlock(server); cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { list_del_init(&mid->qhead); mid->callback(mid); release_mid(mid); } if (cifs_rdma_enabled(server)) { cifs_server_lock(server); smbd_destroy(server); cifs_server_unlock(server); } } static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) { spin_lock(&server->srv_lock); server->nr_targets = num_targets; if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ spin_unlock(&server->srv_lock); wake_up(&server->response_q); return false; } cifs_dbg(FYI, "Mark tcp session as need reconnect\n"); trace_smb3_reconnect(server->CurrentMid, server->conn_id, server->hostname); server->tcpStatus = CifsNeedReconnect; spin_unlock(&server->srv_lock); return true; } /* * cifs tcp session reconnection * * mark tcp session as reconnecting so temporarily locked * mark 
all smb sessions as reconnecting for tcp session * reconnect tcp session * wake up waiters on reconnection? - (not needed currently) * * if mark_smb_session is passed as true, unconditionally mark * the smb session (and tcon) for reconnect as well. This value * doesn't really matter for non-multichannel scenario. * */ static int __cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { int rc = 0; if (!cifs_tcp_ses_needs_reconnect(server, 1)) return 0; cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session); cifs_abort_connection(server); do { try_to_freeze(); cifs_server_lock(server); if (!cifs_swn_set_server_dstaddr(server)) { /* resolve the hostname again to make sure that IP address is up-to-date */ rc = reconn_set_ipaddr_from_hostname(server); cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc); } if (cifs_rdma_enabled(server)) rc = smbd_reconnect(server); else rc = generic_ip_connect(server); if (rc) { cifs_server_unlock(server); cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); } } while (server->tcpStatus == CifsNeedReconnect); spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; } #ifdef CONFIG_CIFS_DFS_UPCALL static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target) { int rc; char *hostname; if (!cifs_swn_set_server_dstaddr(server)) { if (server->hostname != target) { hostname = extract_hostname(target); if (!IS_ERR(hostname)) { spin_lock(&server->srv_lock); kfree(server->hostname); server->hostname = hostname; spin_unlock(&server->srv_lock); } else { cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n", __func__, PTR_ERR(hostname)); cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__, server->hostname); } } /* resolve the hostname again to make sure that IP address is up-to-date. */ rc = reconn_set_ipaddr_from_hostname(server); cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc); } /* Reconnect the socket */ if (cifs_rdma_enabled(server)) rc = smbd_reconnect(server); else rc = generic_ip_connect(server); return rc; } static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl, struct dfs_cache_tgt_iterator **target_hint) { int rc; struct dfs_cache_tgt_iterator *tit; *target_hint = NULL; /* If dfs target list is empty, then reconnect to last server */ tit = dfs_cache_get_tgt_iterator(tl); if (!tit) return __reconnect_target_unlocked(server, server->hostname); /* Otherwise, try every dfs target in @tl */ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) { rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit)); if (!rc) { *target_hint = tit; break; } } return rc; } static int reconnect_dfs_server(struct TCP_Server_Info *server) { struct dfs_cache_tgt_iterator *target_hint = NULL; DFS_CACHE_TGT_LIST(tl); int num_targets = 0; int rc = 0; /* * Determine the number of dfs targets the referral path in @cifs_sb resolves to. 
* * smb2_reconnect() needs to know how long it should wait based upon the number of dfs * targets (server->nr_targets). It's also possible that the cached referral was cleared * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after * refreshing the referral, so, in this case, default it to 1. */ mutex_lock(&server->refpath_lock); if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl)) num_targets = dfs_cache_get_nr_tgts(&tl); mutex_unlock(&server->refpath_lock); if (!num_targets) num_targets = 1; if (!cifs_tcp_ses_needs_reconnect(server, num_targets)) return 0; /* * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a * different server or share during failover. It could be improved by adding some logic to * only do that in case it connects to a different server or share, though. */ cifs_mark_tcp_ses_conns_for_reconnect(server, true); cifs_abort_connection(server); do { try_to_freeze(); cifs_server_lock(server); rc = reconnect_target_unlocked(server, &tl, &target_hint); if (rc) { /* Failed to reconnect socket */ cifs_server_unlock(server); cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc); msleep(3000); continue; } /* * Socket was created. Update tcp session status to CifsNeedNegotiate so that a * process waiting for reconnect will know it needs to re-establish session and tcon * through the reconnected target server. */ atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); } while (server->tcpStatus == CifsNeedReconnect); mutex_lock(&server->refpath_lock); dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint); mutex_unlock(&server->refpath_lock); dfs_cache_free_tgts(&tl); /* Need to set up echo worker again once connection has been established */ spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; } int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { mutex_lock(&server->refpath_lock); if (!server->leaf_fullpath) { mutex_unlock(&server->refpath_lock); return __cifs_reconnect(server, mark_smb_session); } mutex_unlock(&server->refpath_lock); return reconnect_dfs_server(server); } #else int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { return __cifs_reconnect(server, mark_smb_session); } #endif static void cifs_echo_request(struct work_struct *work) { int rc; struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); /* * We cannot send an echo if it is disabled. * Also, no need to ping if we got a response recently. */ if (server->tcpStatus == CifsNeedReconnect || server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || (server->ops->can_echo && !server->ops->can_echo(server)) || time_before(jiffies, server->lstrp + server->echo_interval - HZ)) goto requeue_echo; rc = server->ops->echo ? 
server->ops->echo(server) : -ENOSYS; cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc); /* Check witness registrations */ cifs_swn_check(); requeue_echo: queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval); } static bool allocate_buffers(struct TCP_Server_Info *server) { if (!server->bigbuf) { server->bigbuf = (char *)cifs_buf_get(); if (!server->bigbuf) { cifs_server_dbg(VFS, "No memory for large SMB response\n"); msleep(3000); /* retry will check if exiting */ return false; } } else if (server->large_buf) { /* we are reusing a dirty large buf, clear its start */ memset(server->bigbuf, 0, HEADER_SIZE(server)); } if (!server->smallbuf) { server->smallbuf = (char *)cifs_small_buf_get(); if (!server->smallbuf) { cifs_server_dbg(VFS, "No memory for SMB response\n"); msleep(1000); /* retry will check if exiting */ return false; } /* beginning of smb buffer is cleared in our buf_get */ } else { /* if existing small buf clear beginning */ memset(server->smallbuf, 0, HEADER_SIZE(server)); } return true; } static bool server_unresponsive(struct TCP_Server_Info *server) { /* * We need to wait 3 echo intervals to make sure we handle such * situations right: * 1s client sends a normal SMB request * 2s client gets a response * 30s echo workqueue job pops, and decides we got a response recently * and don't need to send another * ... * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ spin_lock(&server->srv_lock); if ((server->tcpStatus == CifsGood || server->tcpStatus == CifsNeedNegotiate) && (!server->ops->can_echo || server->ops->can_echo(server)) && time_after(jiffies, server->lstrp + 3 * server->echo_interval)) { spin_unlock(&server->srv_lock); cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n", (3 * server->echo_interval) / HZ); cifs_reconnect(server, false); return true; } spin_unlock(&server->srv_lock); return false; } static inline bool zero_credits(struct TCP_Server_Info *server) { int val; spin_lock(&server->req_lock); val = server->credits + server->echo_credits + server->oplock_credits; if (server->in_flight == 0 && val == 0) { spin_unlock(&server->req_lock); return true; } spin_unlock(&server->req_lock); return false; } static int cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) { int length = 0; int total_read; for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); /* reconnect if no credits and no requests in flight */ if (zero_credits(server)) { cifs_reconnect(server, false); return -ECONNABORTED; } if (server_unresponsive(server)) return -ECONNABORTED; if (cifs_rdma_enabled(server) && server->smbd_conn) length = smbd_recv(server->smbd_conn, smb_msg); else length = sock_recvmsg(server->ssocket, smb_msg, 0); spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { spin_unlock(&server->srv_lock); return -ESHUTDOWN; } if (server->tcpStatus == CifsNeedReconnect) { spin_unlock(&server->srv_lock); cifs_reconnect(server, false); return -ECONNABORTED; } spin_unlock(&server->srv_lock); if (length == -ERESTARTSYS || length == -EAGAIN || length == -EINTR) { /* * Minimum sleep to prevent looping, allowing socket * to clear and app threads to set tcpStatus * CifsNeedReconnect if server hung. 
*/ usleep_range(1000, 2000); length = 0; continue; } if (length <= 0) { cifs_dbg(FYI, "Received no data or error: %d\n", length); cifs_reconnect(server, false); return -ECONNABORTED; } } return total_read; } int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { struct msghdr smb_msg = {}; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) { struct msghdr smb_msg = {}; /* * iov_iter_discard already sets smb_msg.type and count and iov_offset * and cifs_readv_from_socket sets msg_control and msg_controllen * so little to initialize in struct msghdr */ iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read); return cifs_readv_from_socket(server, &smb_msg); } int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, unsigned int to_read) { struct msghdr smb_msg = {}; struct bio_vec bv; bvec_set_page(&bv, page, to_read, page_offset); iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } int cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter, unsigned int to_read) { struct msghdr smb_msg = { .msg_iter = *iter }; int ret; iov_iter_truncate(&smb_msg.msg_iter, to_read); ret = cifs_readv_from_socket(server, &smb_msg); if (ret > 0) iov_iter_advance(iter, ret); return ret; } static bool is_smb_response(struct TCP_Server_Info *server, unsigned char type) { /* * The first byte big endian of the length field, * is actually not part of the length but the type * with the most common, zero, as regular data. */ switch (type) { case RFC1002_SESSION_MESSAGE: /* Regular SMB response */ return true; case RFC1002_SESSION_KEEP_ALIVE: cifs_dbg(FYI, "RFC 1002 session keep alive\n"); break; case RFC1002_POSITIVE_SESSION_RESPONSE: cifs_dbg(FYI, "RFC 1002 positive session response\n"); break; case RFC1002_NEGATIVE_SESSION_RESPONSE: /* * We get this from Windows 98 instead of an error on * SMB negprot response. */ cifs_dbg(FYI, "RFC 1002 negative session response\n"); /* give server a second to clean up */ msleep(1000); /* * Always try 445 first on reconnect since we get NACK * on some if we ever connected to port 139 (the NACK * is since we do not begin with RFC1001 session * initialize frame). */ cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT); cifs_reconnect(server, true); break; default: cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type); cifs_reconnect(server, true); } return false; } void dequeue_mid(struct mid_q_entry *mid, bool malformed) { #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif spin_lock(&mid->server->mid_lock); if (!malformed) mid->mid_state = MID_RESPONSE_RECEIVED; else mid->mid_state = MID_RESPONSE_MALFORMED; /* * Trying to handle/dequeue a mid after the send_recv() * function has finished processing it is a bug. */ if (mid->mid_flags & MID_DELETED) { spin_unlock(&mid->server->mid_lock); pr_warn_once("trying to dequeue a deleted mid\n"); } else { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; spin_unlock(&mid->server->mid_lock); } } static unsigned int smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buffer; /* * SMB1 does not use credits. 
*/ if (is_smb1(server)) return 0; return le16_to_cpu(shdr->CreditRequest); } static void handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) { if (server->ops->check_trans2 && server->ops->check_trans2(mid, server, buf, malformed)) return; mid->credits_received = smb2_get_credits_from_hdr(buf, server); mid->resp_buf = buf; mid->large_buf = server->large_buf; /* Was previous buf put in mpx struct for multi-rsp? */ if (!mid->multiRsp) { /* smb buffer will be freed by user thread */ if (server->large_buf) server->bigbuf = NULL; else server->smallbuf = NULL; } dequeue_mid(mid, malformed); } int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) { bool srv_sign_required = server->sec_mode & server->vals->signing_required; bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; bool mnt_sign_enabled; /* * Is signing required by mnt options? If not then check * global_secflags to see if it is there. */ if (!mnt_sign_required) mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN); /* * If signing is required then it's automatically enabled too, * otherwise, check to see if the secflags allow it. */ mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : (global_secflags & CIFSSEC_MAY_SIGN); /* If server requires signing, does client allow it? */ if (srv_sign_required) { if (!mnt_sign_enabled) { cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); return -EOPNOTSUPP; } server->sign = true; } /* If client requires signing, does server allow it? */ if (mnt_sign_required) { if (!srv_sign_enabled) { cifs_dbg(VFS, "Server does not support signing!\n"); return -EOPNOTSUPP; } server->sign = true; } if (cifs_rdma_enabled(server) && server->sign) cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); return 0; } static noinline_for_stack void clean_demultiplex_info(struct TCP_Server_Info *server) { int length; /* take it off the list, if it's not already */ spin_lock(&server->srv_lock); list_del_init(&server->tcp_ses_list); spin_unlock(&server->srv_lock); cancel_delayed_work_sync(&server->echo); spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; spin_unlock(&server->srv_lock); wake_up_all(&server->response_q); /* check if we have blocked requests that need to free */ spin_lock(&server->req_lock); if (server->credits <= 0) server->credits = 1; spin_unlock(&server->req_lock); /* * Although there should not be any requests blocked on this queue it * can not hurt to be paranoid and try to wake up requests that may * haven been blocked when more than 50 at time were on the wire to the * same server - they now will see the session is in exit state and get * out of SendReceive. 
*/ wake_up_all(&server->request_q); /* give those requests time to exit */ msleep(125); if (cifs_rdma_enabled(server)) smbd_destroy(server); if (server->ssocket) { sock_release(server->ssocket); server->ssocket = NULL; } if (!list_empty(&server->pending_mid_q)) { struct list_head dispose_list; struct mid_q_entry *mid_entry; struct list_head *tmp, *tmp2; INIT_LIST_HEAD(&dispose_list); spin_lock(&server->mid_lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid); kref_get(&mid_entry->refcount); mid_entry->mid_state = MID_SHUTDOWN; list_move(&mid_entry->qhead, &dispose_list); mid_entry->mid_flags |= MID_DELETED; } spin_unlock(&server->mid_lock); /* now walk dispose list and issue callbacks */ list_for_each_safe(tmp, tmp2, &dispose_list) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); release_mid(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); } if (!list_empty(&server->pending_mid_q)) { /* * mpx threads have not exited yet give them at least the smb * send timeout time for long ops. * * Due to delays on oplock break requests, we need to wait at * least 45 seconds before giving up on a request getting a * response and going ahead and killing cifsd. */ cifs_dbg(FYI, "Wait for exit from demultiplex thread\n"); msleep(46000); /* * If threads still have not exited they are probably never * coming home not much else we can do but free the memory. */ } kfree(server->leaf_fullpath); kfree(server); length = atomic_dec_return(&tcpSesAllocCount); if (length > 0) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); } static int standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) { int length; char *buf = server->smallbuf; unsigned int pdu_length = server->pdu_size; /* make sure this will fit in a large buffer */ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - HEADER_PREAMBLE_SIZE(server)) { cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); cifs_reconnect(server, true); return -ECONNABORTED; } /* switch to large buffer if too big for a small one */ if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { server->large_buf = true; memcpy(server->bigbuf, buf, server->total_read); buf = server->bigbuf; } /* now read the rest */ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, pdu_length - MID_HEADER_SIZE(server)); if (length < 0) return length; server->total_read += length; dump_smb(buf, server->total_read); return cifs_handle_standard(server, mid); } int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) { char *buf = server->large_buf ? server->bigbuf : server->smallbuf; int rc; /* * We know that we received enough to get to the MID as we * checked the pdu_length earlier. Now check to see * if the rest of the header is OK. * * 48 bytes is enough to display the header and a little bit * into the payload for debugging purposes. 
*/ rc = server->ops->check_message(buf, server->total_read, server); if (rc) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); if (server->ops->is_session_expired && server->ops->is_session_expired(buf)) { cifs_reconnect(server, true); return -1; } if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server)) return -1; if (!mid) return rc; handle_mid(mid, server, buf, rc); return 0; } static void smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buffer; int scredits, in_flight; /* * SMB1 does not use credits. */ if (is_smb1(server)) return; if (shdr->CreditRequest) { spin_lock(&server->req_lock); server->credits += le16_to_cpu(shdr->CreditRequest); scredits = server->credits; in_flight = server->in_flight; spin_unlock(&server->req_lock); wake_up(&server->request_q); trace_smb3_hdr_credits(server->CurrentMid, server->conn_id, server->hostname, scredits, le16_to_cpu(shdr->CreditRequest), in_flight); cifs_server_dbg(FYI, "%s: added %u credits total=%d\n", __func__, le16_to_cpu(shdr->CreditRequest), scredits); } } static int cifs_demultiplex_thread(void *p) { int i, num_mids, length; struct TCP_Server_Info *server = p; unsigned int pdu_length; unsigned int next_offset; char *buf = NULL; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mids[MAX_COMPOUND]; char *bufs[MAX_COMPOUND]; unsigned int noreclaim_flag, num_io_timeout = 0; bool pending_reconnect = false; noreclaim_flag = memalloc_noreclaim_save(); cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current)); length = atomic_inc_return(&tcpSesAllocCount); if (length > 1) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); allow_kernel_signal(SIGKILL); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; if (!allocate_buffers(server)) continue; server->large_buf = false; buf = server->smallbuf; pdu_length = 4; /* enough to get RFC1001 header */ length = cifs_read_from_socket(server, buf, pdu_length); if (length < 0) continue; if (is_smb1(server)) server->total_read = length; else server->total_read = 0; /* * The right amount was read from socket - 4 bytes, * so we can now interpret the length field. 
*/ pdu_length = get_rfc1002_length(buf); cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length); if (!is_smb_response(server, buf[0])) continue; pending_reconnect = false; next_pdu: server->pdu_size = pdu_length; /* make sure we have enough to get to the MID */ if (server->pdu_size < MID_HEADER_SIZE(server)) { cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n", server->pdu_size); cifs_reconnect(server, true); continue; } /* read down to the MID */ length = cifs_read_from_socket(server, buf + HEADER_PREAMBLE_SIZE(server), MID_HEADER_SIZE(server)); if (length < 0) continue; server->total_read += length; if (server->ops->next_header) { next_offset = server->ops->next_header(buf); if (next_offset) server->pdu_size = next_offset; } memset(mids, 0, sizeof(mids)); memset(bufs, 0, sizeof(bufs)); num_mids = 0; if (server->ops->is_transform_hdr && server->ops->receive_transform && server->ops->is_transform_hdr(buf)) { length = server->ops->receive_transform(server, mids, bufs, &num_mids); } else { mids[0] = server->ops->find_mid(server, buf); bufs[0] = buf; num_mids = 1; if (!mids[0] || !mids[0]->receive) length = standard_receive3(server, mids[0]); else length = mids[0]->receive(server, mids[0]); } if (length < 0) { for (i = 0; i < num_mids; i++) if (mids[i]) release_mid(mids[i]); continue; } if (server->ops->is_status_io_timeout && server->ops->is_status_io_timeout(buf)) { num_io_timeout++; if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) { cifs_server_dbg(VFS, "Number of request timeouts exceeded %d. Reconnecting", MAX_STATUS_IO_TIMEOUT); pending_reconnect = true; num_io_timeout = 0; } } server->lstrp = jiffies; for (i = 0; i < num_mids; i++) { if (mids[i] != NULL) { mids[i]->resp_buf_size = server->pdu_size; if (bufs[i] != NULL) { if (server->ops->is_network_name_deleted && server->ops->is_network_name_deleted(bufs[i], server)) { cifs_server_dbg(FYI, "Share deleted. Reconnect needed"); } } if (!mids[i]->multiRsp || mids[i]->multiEnd) mids[i]->callback(mids[i]); release_mid(mids[i]); } else if (server->ops->is_oplock_break && server->ops->is_oplock_break(bufs[i], server)) { smb2_add_credits_from_hdr(bufs[i], server); cifs_dbg(FYI, "Received oplock break\n"); } else { cifs_server_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", atomic_read(&mid_count)); cifs_dump_mem("Received Data is: ", bufs[i], HEADER_SIZE(server)); smb2_add_credits_from_hdr(bufs[i], server); #ifdef CONFIG_CIFS_DEBUG2 if (server->ops->dump_detail) server->ops->dump_detail(bufs[i], server); cifs_dump_mids(server); #endif /* CIFS_DEBUG2 */ } } if (pdu_length > server->pdu_size) { if (!allocate_buffers(server)) continue; pdu_length -= server->pdu_size; server->total_read = 0; server->large_buf = false; buf = server->smallbuf; goto next_pdu; } /* do this reconnect at the very end after processing all MIDs */ if (pending_reconnect) cifs_reconnect(server, true); } /* end while !EXITING */ /* buffer usually freed in free_mid - need to free it here on exit */ cifs_buf_release(server->bigbuf); if (server->smallbuf) /* no sense logging a debug message if NULL */ cifs_small_buf_release(server->smallbuf); task_to_wake = xchg(&server->tsk, NULL); clean_demultiplex_info(server); /* if server->tsk was NULL then wait for a signal before exiting */ if (!task_to_wake) { set_current_state(TASK_INTERRUPTIBLE); while (!signal_pending(current)) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); } memalloc_noreclaim_restore(noreclaim_flag); module_put_and_kthread_exit(0); } int cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs) { struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; switch (srcaddr->sa_family) { case AF_UNSPEC: switch (rhs->sa_family) { case AF_UNSPEC: return 0; case AF_INET: case AF_INET6: return 1; default: return -1; } case AF_INET: { switch (rhs->sa_family) { case AF_UNSPEC: return -1; case AF_INET: return memcmp(saddr4, vaddr4, sizeof(struct sockaddr_in)); case AF_INET6: return 1; default: return -1; } } case AF_INET6: { switch (rhs->sa_family) { case AF_UNSPEC: case AF_INET: return -1; case AF_INET6: return memcmp(saddr6, vaddr6, sizeof(struct sockaddr_in6)); default: return -1; } } default: return -1; /* don't expect to be here */ } } /* * Returns true if srcaddr isn't specified and rhs isn't specified, or * if srcaddr is specified and matches the IP address of the rhs argument */ bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs) { switch (srcaddr->sa_family) { case AF_UNSPEC: return (rhs->sa_family == AF_UNSPEC); case AF_INET: { struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr); } case AF_INET6: { struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr) && saddr6->sin6_scope_id == vaddr6->sin6_scope_id); } default: WARN_ON(1); return false; /* don't expect to be here */ } } /* * If no port is specified in addr structure, we try to match with 445 port * and if it fails - with 139 ports. It should be called only if address * families of server and addr are equal. 
*/ static bool match_port(struct TCP_Server_Info *server, struct sockaddr *addr) { __be16 port, *sport; /* SMBDirect manages its own ports, don't match it here */ if (server->rdma) return true; switch (addr->sa_family) { case AF_INET: sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port; port = ((struct sockaddr_in *) addr)->sin_port; break; case AF_INET6: sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port; port = ((struct sockaddr_in6 *) addr)->sin6_port; break; default: WARN_ON(1); return false; } if (!port) { port = htons(CIFS_PORT); if (port == *sport) return true; port = htons(RFC1001_PORT); } return port == *sport; } static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr) { if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr)) return false; return true; } static bool match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { /* * The select_sectype function should either return the ctx->sectype * that was specified, or "Unspecified" if that sectype was not * compatible with the given NEGOTIATE request. */ if (server->ops->select_sectype(server, ctx->sectype) == Unspecified) return false; /* * Now check if signing mode is acceptable. No need to check * global_secflags at this point since if MUST_SIGN is set then * the server->sign had better be too. */ if (ctx->sign && !server->sign) return false; return true; } /* this function must be called with srv_lock held */ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx, bool match_super) { struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; lockdep_assert_held(&server->srv_lock); if (ctx->nosharesock) return 0; /* this server does not share socket */ if (server->nosharesock) return 0; /* If multidialect negotiation see if existing sessions match one */ if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { if (server->vals->protocol_id < SMB30_PROT_ID) return 0; } else if (strcmp(ctx->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { if (server->vals->protocol_id < SMB21_PROT_ID) return 0; } else if ((server->vals != ctx->vals) || (server->ops != ctx->ops)) return 0; if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) return 0; if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr, (struct sockaddr *)&server->srcaddr)) return 0; /* * When matching cifs.ko superblocks (@match_super == true), we can't * really match either @server->leaf_fullpath or @server->dstaddr * directly since this @server might belong to a completely different * server -- in case of domain-based DFS referrals or DFS links -- as * provided earlier by mount(2) through 'source' and 'ip' options. * * Otherwise, match the DFS referral in @server->leaf_fullpath or the * destination address in @server->dstaddr. * * When using 'nodfs' mount option, we avoid sharing it with DFS * connections as they might failover. */ if (!match_super) { if (!ctx->nodfs) { if (server->leaf_fullpath) { if (!ctx->leaf_fullpath || strcasecmp(server->leaf_fullpath, ctx->leaf_fullpath)) return 0; } else if (ctx->leaf_fullpath) { return 0; } } else if (server->leaf_fullpath) { return 0; } } /* * Match for a regular connection (address/hostname/port) which has no * DFS referrals set. 
*/ if (!server->leaf_fullpath && (strcasecmp(server->hostname, ctx->server_hostname) || !match_server_address(server, addr) || !match_port(server, addr))) return 0; if (!match_security(server, ctx)) return 0; if (server->echo_interval != ctx->echo_interval * HZ) return 0; if (server->rdma != ctx->rdma) return 0; if (server->ignore_signature != ctx->ignore_signature) return 0; if (server->min_offload != ctx->min_offload) return 0; return 1; } struct TCP_Server_Info * cifs_find_tcp_session(struct smb3_fs_context *ctx) { struct TCP_Server_Info *server; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { spin_lock(&server->srv_lock); /* * Skip ses channels since they're only handled in lower layers * (e.g. cifs_send_recv). */ if (SERVER_IS_CHAN(server) || !match_server(server, ctx, false)) { spin_unlock(&server->srv_lock); continue; } spin_unlock(&server->srv_lock); ++server->srv_count; spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Existing tcp session with server found\n"); return server; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } void cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) { struct task_struct *task; spin_lock(&cifs_tcp_ses_lock); if (--server->srv_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } /* srv_count can never go negative */ WARN_ON(server->srv_count < 0); put_net(cifs_net_ns(server)); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); /* For secondary channels, we pick up ref-count on the primary server */ if (SERVER_IS_CHAN(server)) cifs_put_tcp_session(server->primary_server, from_reconnect); cancel_delayed_work_sync(&server->echo); if (from_reconnect) /* * Avoid deadlock here: reconnect work calls * cifs_put_tcp_session() at its end. Need to be sure * that reconnect work does nothing with server pointer after * that step. 
*/ cancel_delayed_work(&server->reconnect); else cancel_delayed_work_sync(&server->reconnect); spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; spin_unlock(&server->srv_lock); cifs_crypto_secmech_release(server); kfree_sensitive(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; kfree(server->hostname); server->hostname = NULL; task = xchg(&server->tsk, NULL); if (task) send_sig(SIGKILL, task, 1); } struct TCP_Server_Info * cifs_get_tcp_session(struct smb3_fs_context *ctx, struct TCP_Server_Info *primary_server) { struct TCP_Server_Info *tcp_ses = NULL; int rc; cifs_dbg(FYI, "UNC: %s\n", ctx->UNC); /* see if we already have a matching tcp_ses */ tcp_ses = cifs_find_tcp_session(ctx); if (tcp_ses) return tcp_ses; tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); if (!tcp_ses) { rc = -ENOMEM; goto out_err; } tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL); if (!tcp_ses->hostname) { rc = -ENOMEM; goto out_err; } if (ctx->leaf_fullpath) { tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL); if (!tcp_ses->leaf_fullpath) { rc = -ENOMEM; goto out_err; } } if (ctx->nosharesock) tcp_ses->nosharesock = true; tcp_ses->ops = ctx->ops; tcp_ses->vals = ctx->vals; cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId); tcp_ses->noblockcnt = ctx->rootfs; tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs; tcp_ses->noautotune = ctx->noautotune; tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay; tcp_ses->rdma = ctx->rdma; tcp_ses->in_flight = 0; tcp_ses->max_in_flight = 0; tcp_ses->credits = 1; if (primary_server) { spin_lock(&cifs_tcp_ses_lock); ++primary_server->srv_count; spin_unlock(&cifs_tcp_ses_lock); tcp_ses->primary_server = primary_server; } init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); mutex_init(&tcp_ses->_srv_mutex); memcpy(tcp_ses->workstation_RFC1001_name, ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); memcpy(tcp_ses->server_RFC1001_name, ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */ tcp_ses->reconnect_instance = 1; tcp_ses->lstrp = jiffies; tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression); spin_lock_init(&tcp_ses->req_lock); spin_lock_init(&tcp_ses->srv_lock); spin_lock_init(&tcp_ses->mid_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); mutex_init(&tcp_ses->reconnect_mutex); #ifdef CONFIG_CIFS_DFS_UPCALL mutex_init(&tcp_ses->refpath_lock); #endif memcpy(&tcp_ses->srcaddr, &ctx->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &ctx->dstaddr, sizeof(tcp_ses->dstaddr)); if (ctx->use_client_guid) memcpy(tcp_ses->client_guid, ctx->client_guid, SMB2_CLIENT_GUID_SIZE); else generate_random_uuid(tcp_ses->client_guid); /* * at this point we are the only ones with the pointer * to the struct since the kernel thread not created yet * no need to spinlock this init of tcpStatus or srv_count */ tcp_ses->tcpStatus = CifsNew; ++tcp_ses->srv_count; if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN && ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX) tcp_ses->echo_interval = ctx->echo_interval * HZ; else tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * 
HZ; if (tcp_ses->rdma) { #ifndef CONFIG_CIFS_SMB_DIRECT cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n"); rc = -ENOENT; goto out_err_crypto_release; #endif tcp_ses->smbd_conn = smbd_get_connection( tcp_ses, (struct sockaddr *)&ctx->dstaddr); if (tcp_ses->smbd_conn) { cifs_dbg(VFS, "RDMA transport established\n"); rc = 0; goto smbd_connected; } else { rc = -ENOENT; goto out_err_crypto_release; } } rc = ip_connect(tcp_ses); if (rc < 0) { cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n"); goto out_err_crypto_release; } smbd_connected: /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get(). */ __module_get(THIS_MODULE); tcp_ses->tsk = kthread_run(cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); cifs_dbg(VFS, "error %d creating cifsd thread\n", rc); module_put(THIS_MODULE); goto out_err_crypto_release; } tcp_ses->min_offload = ctx->min_offload; /* * the demultiplex thread is running by now and may inspect * tcpStatus, so this update must be protected by srv_lock */ spin_lock(&tcp_ses->srv_lock); tcp_ses->tcpStatus = CifsNeedNegotiate; spin_unlock(&tcp_ses->srv_lock); if ((ctx->max_credits < 20) || (ctx->max_credits > 60000)) tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE; else tcp_ses->max_credits = ctx->max_credits; tcp_ses->nr_targets = 1; tcp_ses->ignore_signature = ctx->ignore_signature; /* thread spawned, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval); return tcp_ses; out_err_crypto_release: cifs_crypto_secmech_release(tcp_ses); put_net(cifs_net_ns(tcp_ses)); out_err: if (tcp_ses) { if (SERVER_IS_CHAN(tcp_ses)) cifs_put_tcp_session(tcp_ses->primary_server, false); kfree(tcp_ses->hostname); kfree(tcp_ses->leaf_fullpath); if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); kfree(tcp_ses); } return ERR_PTR(rc); } /* this function must be called with ses_lock and chan_lock held */ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) { if (ctx->sectype != Unspecified && ctx->sectype != ses->sectype) return 0; /* * If an existing session is limited to fewer channels than * requested, it should not be reused */ if (ses->chan_max < ctx->max_channels) return 0; switch (ses->sectype) { case Kerberos: if (!uid_eq(ctx->cred_uid, ses->cred_uid)) return 0; break; default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { if (!ctx->nullauth) return 0; break; } /* anything else takes username/password */ if (strncmp(ses->user_name, ctx->username ? ctx->username : "", CIFS_MAX_USERNAME_LEN)) return 0; if ((ctx->username && strlen(ctx->username) != 0) && ses->password != NULL && strncmp(ses->password, ctx->password ? ctx->password : "", CIFS_MAX_PASSWORD_LEN)) return 0; } if (strcmp(ctx->local_nls->charset, ses->local_nls->charset)) return 0; return 1; } /** * cifs_setup_ipc - helper to setup the IPC tcon for the session * @ses: smb session to issue the request on * @ctx: the superblock configuration context to use for building the * new tree connection for the IPC (interprocess communication RPC) * * A new IPC connection is made and stored in the session * tcon_ipc. The IPC tcon has the same lifetime as the session.
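* For a server named "srv1" (illustrative), the tree connect below goes to * \\srv1\IPC$ (built by scnprintf() from server->hostname); DFS referral * requests are among the RPCs later issued over this tcon.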
*/ static int cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) { int rc = 0, xid; struct cifs_tcon *tcon; char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; bool seal = false; struct TCP_Server_Info *server = ses->server; /* * If the mount request that resulted in the creation of the * session requires encryption, force IPC to be encrypted too. */ if (ctx->seal) { if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) seal = true; else { cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n"); return -EOPNOTSUPP; } } /* no need to setup directory caching on IPC share, so pass in false */ tcon = tcon_info_alloc(false); if (tcon == NULL) return -ENOMEM; spin_lock(&server->srv_lock); scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); spin_unlock(&server->srv_lock); xid = get_xid(); tcon->ses = ses; tcon->ipc = true; tcon->seal = seal; rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls); free_xid(xid); if (rc) { cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); tconInfoFree(tcon); goto out; } cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); spin_lock(&tcon->tc_lock); tcon->status = TID_GOOD; spin_unlock(&tcon->tc_lock); ses->tcon_ipc = tcon; out: return rc; } /** * cifs_free_ipc - helper to release the session IPC tcon * @ses: smb session to unmount the IPC from * * Needs to be called every time a session is destroyed. * * On session close, the IPC is closed and the server must release all tcons of the session. * No need to send a tree disconnect here. * * Besides, it makes the server not close durable and resilient files on session close, as * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request. */ static int cifs_free_ipc(struct cifs_ses *ses) { struct cifs_tcon *tcon = ses->tcon_ipc; if (tcon == NULL) return 0; tconInfoFree(tcon); ses->tcon_ipc = NULL; return 0; } static struct cifs_ses * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { struct cifs_ses *ses, *ret = NULL; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { spin_unlock(&ses->ses_lock); continue; } spin_lock(&ses->chan_lock); if (match_session(ses, ctx)) { spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); ret = ses; break; } spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); } if (ret) cifs_smb_ses_inc_refcount(ret); spin_unlock(&cifs_tcp_ses_lock); return ret; } void __cifs_put_smb_ses(struct cifs_ses *ses) { unsigned int rc, xid; unsigned int chan_count; struct TCP_Server_Info *server = ses->server; spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { spin_unlock(&ses->ses_lock); return; } spin_unlock(&ses->ses_lock); cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ?
ses->tcon_ipc->tree_name : "NONE"); spin_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } spin_lock(&ses->ses_lock); if (ses->ses_status == SES_GOOD) ses->ses_status = SES_EXITING; spin_unlock(&ses->ses_lock); spin_unlock(&cifs_tcp_ses_lock); /* ses_count can never go negative */ WARN_ON(ses->ses_count < 0); spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING && server->ops->logoff) { spin_unlock(&ses->ses_lock); cifs_free_ipc(ses); xid = get_xid(); rc = server->ops->logoff(xid, ses); if (rc) cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n", __func__, rc); _free_xid(xid); } else { spin_unlock(&ses->ses_lock); cifs_free_ipc(ses); } spin_lock(&cifs_tcp_ses_lock); list_del_init(&ses->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); chan_count = ses->chan_count; /* close any extra channels */ if (chan_count > 1) { int i; for (i = 1; i < chan_count; i++) { if (ses->chans[i].iface) { kref_put(&ses->chans[i].iface->refcount, release_iface); ses->chans[i].iface = NULL; } cifs_put_tcp_session(ses->chans[i].server, 0); ses->chans[i].server = NULL; } } sesInfoFree(ses); cifs_put_tcp_session(server, 0); } #ifdef CONFIG_KEYS /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) /* Populate username and pw fields from keyring if possible */ static int cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) { int rc = 0; int is_domain = 0; const char *delim, *payload; char *desc; ssize_t len; struct key *key; struct TCP_Server_Info *server = ses->server; struct sockaddr_in *sa; struct sockaddr_in6 *sa6; const struct user_key_payload *upayload; desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); if (!desc) return -ENOMEM; /* try to find an address key first */ switch (server->dstaddr.ss_family) { case AF_INET: sa = (struct sockaddr_in *)&server->dstaddr; sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: sa6 = (struct sockaddr_in6 *)&server->dstaddr; sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); break; default: cifs_dbg(FYI, "Bad ss_family (%hu)\n", server->dstaddr.ss_family); rc = -EINVAL; goto out_err; } cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { if (!ses->domainName) { cifs_dbg(FYI, "domainName is NULL\n"); rc = PTR_ERR(key); goto out_err; } /* didn't work, try to find a domain key */ sprintf(desc, "cifs:d:%s", ses->domainName); cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); key = request_key(&key_type_logon, desc, ""); if (IS_ERR(key)) { rc = PTR_ERR(key); goto out_err; } is_domain = 1; } down_read(&key->sem); upayload = user_key_payload_locked(key); if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? 
PTR_ERR(upayload) : -EINVAL; goto out_key_put; } /* find first : in payload */ payload = upayload->data; delim = strnchr(payload, upayload->datalen, ':'); cifs_dbg(FYI, "payload=%s\n", payload); if (!delim) { cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", upayload->datalen); rc = -EINVAL; goto out_key_put; } len = delim - payload; if (len > CIFS_MAX_USERNAME_LEN || len <= 0) { cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", len); rc = -EINVAL; goto out_key_put; } ctx->username = kstrndup(payload, len, GFP_KERNEL); if (!ctx->username) { cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", len); rc = -ENOMEM; goto out_key_put; } cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username); len = key->datalen - (len + 1); if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) { cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); rc = -EINVAL; kfree(ctx->username); ctx->username = NULL; goto out_key_put; } ++delim; ctx->password = kstrndup(delim, len, GFP_KERNEL); if (!ctx->password) { cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", len); rc = -ENOMEM; kfree(ctx->username); ctx->username = NULL; goto out_key_put; } /* * If we have a domain key then we must also set the domainName for * the request. */ if (is_domain && ses->domainName) { ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL); if (!ctx->domainname) { cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n", len); rc = -ENOMEM; kfree(ctx->username); ctx->username = NULL; kfree_sensitive(ctx->password); ctx->password = NULL; goto out_key_put; } } strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name)); out_key_put: up_read(&key->sem); key_put(key); out_err: kfree(desc); cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); return rc; } #else /* ! CONFIG_KEYS */ static inline int cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), struct cifs_ses *ses __attribute__((unused))) { return -ENOSYS; } #endif /* CONFIG_KEYS */ /** * cifs_get_smb_ses - get a session matching @ctx data from @server * @server: server to setup the session to * @ctx: superblock configuration context to use to setup the session * * This function assumes it is being called from cifs_mount() where we * already got a server reference (server refcount +1). See * cifs_get_tcon() for refcount explanations.
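* If a matching session is found, the caller's extra server reference is * dropped (the existing session already holds one); otherwise the new * session keeps the server reference taken by the caller.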
*/ struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { int rc = 0; unsigned int xid; struct cifs_ses *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; xid = get_xid(); ses = cifs_find_smb_ses(server, ctx); if (ses) { cifs_dbg(FYI, "Existing smb sess found (status=%d)\n", ses->ses_status); spin_lock(&ses->chan_lock); if (cifs_chan_needs_reconnect(ses, server)) { spin_unlock(&ses->chan_lock); cifs_dbg(FYI, "Session needs reconnect\n"); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses, server); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our ses reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } rc = cifs_setup_session(xid, ses, server, ctx->local_nls); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our reference */ cifs_put_smb_ses(ses); free_xid(xid); return ERR_PTR(rc); } mutex_unlock(&ses->session_mutex); spin_lock(&ses->chan_lock); } spin_unlock(&ses->chan_lock); /* existing SMB ses has a server reference already */ cifs_put_tcp_session(server, 0); free_xid(xid); return ses; } rc = -ENOMEM; cifs_dbg(FYI, "Existing smb sess not found\n"); ses = sesInfoAlloc(); if (ses == NULL) goto get_ses_fail; /* new SMB session uses our server ref */ ses->server = server; if (server->dstaddr.ss_family == AF_INET6) sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr); else sprintf(ses->ip_addr, "%pI4", &addr->sin_addr); if (ctx->username) { ses->user_name = kstrdup(ctx->username, GFP_KERNEL); if (!ses->user_name) goto get_ses_fail; } /* ctx->password freed at unmount */ if (ctx->password) { ses->password = kstrdup(ctx->password, GFP_KERNEL); if (!ses->password) goto get_ses_fail; } if (ctx->domainname) { ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL); if (!ses->domainName) goto get_ses_fail; } strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name)); if (ctx->domainauto) ses->domainAuto = ctx->domainauto; ses->cred_uid = ctx->cred_uid; ses->linux_uid = ctx->linux_uid; ses->sectype = ctx->sectype; ses->sign = ctx->sign; ses->local_nls = load_nls(ctx->local_nls->charset); /* add server as first channel */ spin_lock(&ses->chan_lock); ses->chans[0].server = server; ses->chan_count = 1; ses->chan_max = ctx->multichannel ? ctx->max_channels:1; ses->chans_need_reconnect = 1; spin_unlock(&ses->chan_lock); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses, server); if (!rc) rc = cifs_setup_session(xid, ses, server, ctx->local_nls); mutex_unlock(&ses->session_mutex); /* each channel uses a different signing key */ spin_lock(&ses->chan_lock); memcpy(ses->chans[0].signkey, ses->smb3signingkey, sizeof(ses->smb3signingkey)); spin_unlock(&ses->chan_lock); if (rc) goto get_ses_fail; /* * success, put it on the list and add it as first channel * note: the session becomes active soon after this. So you'll * need to lock before changing something in the session. 
*/ spin_lock(&cifs_tcp_ses_lock); ses->dfs_root_ses = ctx->dfs_root_ses; if (ses->dfs_root_ses) ses->dfs_root_ses->ses_count++; list_add(&ses->smb_ses_list, &server->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); cifs_setup_ipc(ses, ctx); free_xid(xid); return ses; get_ses_fail: sesInfoFree(ses); free_xid(xid); return ERR_PTR(rc); } /* this function must be called with tc_lock held */ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; if (tcon->status == TID_EXITING) return 0; if (tcon->origin_fullpath) { if (!ctx->source || !dfs_src_pathname_equal(ctx->source, tcon->origin_fullpath)) return 0; } else if (!server->leaf_fullpath && strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) { return 0; } if (tcon->seal != ctx->seal) return 0; if (tcon->snapshot_time != ctx->snapshot_time) return 0; if (tcon->handle_timeout != ctx->handle_timeout) return 0; if (tcon->no_lease != ctx->no_lease) return 0; if (tcon->nodelete != ctx->nodelete) return 0; return 1; } static struct cifs_tcon * cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) { struct cifs_tcon *tcon; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->tc_lock); if (!match_tcon(tcon, ctx)) { spin_unlock(&tcon->tc_lock); continue; } ++tcon->tc_count; spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return tcon; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } void cifs_put_tcon(struct cifs_tcon *tcon) { unsigned int xid; struct cifs_ses *ses; /* * IPC tcon share the lifetime of their session and are * destroyed in the session put function */ if (tcon == NULL || tcon->ipc) return; ses = tcon->ses; cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); spin_lock(&tcon->tc_lock); if (--tcon->tc_count > 0) { spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return; } /* tc_count can never go negative */ WARN_ON(tcon->tc_count < 0); list_del_init(&tcon->tcon_list); tcon->status = TID_EXITING; spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); /* cancel polling of interfaces */ cancel_delayed_work_sync(&tcon->query_interfaces); #ifdef CONFIG_CIFS_DFS_UPCALL cancel_delayed_work_sync(&tcon->dfs_cache_work); #endif if (tcon->use_witness) { int rc; rc = cifs_swn_unregister(tcon); if (rc < 0) { cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n", __func__, rc); } } xid = get_xid(); if (ses->server->ops->tree_disconnect) ses->server->ops->tree_disconnect(xid, tcon); _free_xid(xid); cifs_fscache_release_super_cookie(tcon); tconInfoFree(tcon); cifs_put_smb_ses(ses); } /** * cifs_get_tcon - get a tcon matching @ctx data from @ses * @ses: smb session to issue the request on * @ctx: the superblock configuration context to use for building the * * - tcon refcount is the number of mount points using the tcon. * - ses refcount is the number of tcon using the session. * * 1. This function assumes it is being called from cifs_mount() where * we already got a session reference (ses refcount +1). * * 2. Since we're in the context of adding a mount point, the end * result should be either: * * a) a new tcon already allocated with refcount=1 (1 mount point) and * its session refcount incremented (1 new tcon). This +1 was * already done in (1). * * b) an existing tcon with refcount+1 (add a mount point to it) and * identical ses refcount (no new tcon). Because of (1) we need to * decrement the ses refcount. 
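* Illustration (hypothetical): mounting //srv/share twice with the same * credentials takes case (a) on the first mount (new tcon, tc_count=1) and * case (b) on the second (tc_count=2, extra ses reference dropped).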
*/ static struct cifs_tcon * cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) { int rc, xid; struct cifs_tcon *tcon; tcon = cifs_find_tcon(ses, ctx); if (tcon) { /* * tcon has refcount already incremented but we need to * decrement extra ses reference gotten by caller (case b) */ cifs_dbg(FYI, "Found match on UNC path\n"); cifs_put_smb_ses(ses); return tcon; } if (!ses->server->ops->tree_connect) { rc = -ENOSYS; goto out_fail; } if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) tcon = tcon_info_alloc(true); else tcon = tcon_info_alloc(false); if (tcon == NULL) { rc = -ENOMEM; goto out_fail; } if (ctx->snapshot_time) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "Use SMB2 or later for snapshot mount option\n"); rc = -EOPNOTSUPP; goto out_fail; } else tcon->snapshot_time = ctx->snapshot_time; } if (ctx->handle_timeout) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "Use SMB2.1 or later for handle timeout option\n"); rc = -EOPNOTSUPP; goto out_fail; } else tcon->handle_timeout = ctx->handle_timeout; } tcon->ses = ses; if (ctx->password) { tcon->password = kstrdup(ctx->password, GFP_KERNEL); if (!tcon->password) { rc = -ENOMEM; goto out_fail; } } if (ctx->seal) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "SMB3 or later required for encryption\n"); rc = -EOPNOTSUPP; goto out_fail; } else if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) tcon->seal = true; else { cifs_dbg(VFS, "Encryption is not supported on share\n"); rc = -EOPNOTSUPP; goto out_fail; } } if (ctx->linux_ext) { if (ses->server->posix_ext_supported) { tcon->posix_extensions = true; pr_warn_once("SMB3.11 POSIX Extensions are experimental\n"); } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) || (strcmp(ses->server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) || (strcmp(ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0)) { cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n"); rc = -EOPNOTSUPP; goto out_fail; } else { cifs_dbg(VFS, "Check vers= mount option. 
SMB3.11 " "disabled but required for POSIX extensions\n"); rc = -EOPNOTSUPP; goto out_fail; } } xid = get_xid(); rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon, ctx->local_nls); free_xid(xid); cifs_dbg(FYI, "Tcon rc = %d\n", rc); if (rc) goto out_fail; tcon->use_persistent = false; /* check if SMB2 or later, CIFS does not support persistent handles */ if (ctx->persistent) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "SMB3 or later required for persistent handles\n"); rc = -EOPNOTSUPP; goto out_fail; } else if (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) tcon->use_persistent = true; else /* persistent handles requested but not supported */ { cifs_dbg(VFS, "Persistent handles not supported on share\n"); rc = -EOPNOTSUPP; goto out_fail; } } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) && (ctx->nopersistent == false)) { cifs_dbg(FYI, "enabling persistent handles\n"); tcon->use_persistent = true; } else if (ctx->resilient) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "SMB2.1 or later required for resilient handles\n"); rc = -EOPNOTSUPP; goto out_fail; } tcon->use_resilient = true; } tcon->use_witness = false; if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) { if (ses->server->vals->protocol_id >= SMB30_PROT_ID) { if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) { /* * Set witness in use flag in first place * to retry registration in the echo task */ tcon->use_witness = true; /* And try to register immediately */ rc = cifs_swn_register(tcon); if (rc < 0) { cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc); goto out_fail; } } else { /* TODO: try to extend for non-cluster uses (eg multichannel) */ cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n"); rc = -EOPNOTSUPP; goto out_fail; } } else { cifs_dbg(VFS, "SMB3 or later required for witness option\n"); rc = -EOPNOTSUPP; goto out_fail; } } /* If the user really knows what they are doing they can override */ if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) { if (ctx->cache_ro) cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n"); else if (ctx->cache_rw) cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n"); } if (ctx->no_lease) { if (ses->server->vals->protocol_id == 0) { cifs_dbg(VFS, "SMB2 or later required for nolease option\n"); rc = -EOPNOTSUPP; goto out_fail; } else tcon->no_lease = ctx->no_lease; } /* * We can have only one retry value for a connection to a share so for * resources mounted more than once to the same server share the last * value passed in for the retry flag is used. 
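* (match_tcon() does not compare this flag, so mounts that differ only in * retry/noretry still share one tcon.)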
*/ tcon->retry = ctx->retry; tcon->nocase = ctx->nocase; tcon->broken_sparse_sup = ctx->no_sparse; tcon->max_cached_dirs = ctx->max_cached_dirs; if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) tcon->nohandlecache = ctx->nohandlecache; else tcon->nohandlecache = true; tcon->nodelete = ctx->nodelete; tcon->local_lease = ctx->local_lease; INIT_LIST_HEAD(&tcon->pending_opens); tcon->status = TID_GOOD; INIT_DELAYED_WORK(&tcon->query_interfaces, smb2_query_server_interfaces); if (ses->server->dialect >= SMB30_PROT_ID && (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { /* schedule query interfaces poll */ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, (SMB_INTERFACE_POLL_INTERVAL * HZ)); } #ifdef CONFIG_CIFS_DFS_UPCALL INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh); #endif spin_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &ses->tcon_list); spin_unlock(&cifs_tcp_ses_lock); return tcon; out_fail: tconInfoFree(tcon); return ERR_PTR(rc); } void cifs_put_tlink(struct tcon_link *tlink) { if (!tlink || IS_ERR(tlink)) return; if (!atomic_dec_and_test(&tlink->tl_count) || test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) { tlink->tl_time = jiffies; return; } if (!IS_ERR(tlink_tcon(tlink))) cifs_put_tcon(tlink_tcon(tlink)); kfree(tlink); return; } static int compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) { struct cifs_sb_info *old = CIFS_SB(sb); struct cifs_sb_info *new = mnt_data->cifs_sb; unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK; unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK; if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) return 0; if (old->mnt_cifs_serverino_autodisabled) newflags &= ~CIFS_MOUNT_SERVER_INUM; if (oldflags != newflags) return 0; /* * We want to share sb only if we don't specify an r/wsize or * specified r/wsize is greater than or equal to existing one. 
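* e.g. an existing sb mounted with wsize=65536 can be shared with a new * mount requesting wsize >= 65536, while a smaller request forces a * separate superblock (values illustrative).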
*/ if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize) return 0; if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize) return 0; if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) || !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid)) return 0; if (old->ctx->file_mode != new->ctx->file_mode || old->ctx->dir_mode != new->ctx->dir_mode) return 0; if (strcmp(old->local_nls->charset, new->local_nls->charset)) return 0; if (old->ctx->acregmax != new->ctx->acregmax) return 0; if (old->ctx->acdirmax != new->ctx->acdirmax) return 0; if (old->ctx->closetimeo != new->ctx->closetimeo) return 0; return 1; } static int match_prepath(struct super_block *sb, struct cifs_tcon *tcon, struct cifs_mnt_data *mnt_data) { struct smb3_fs_context *ctx = mnt_data->ctx; struct cifs_sb_info *old = CIFS_SB(sb); struct cifs_sb_info *new = mnt_data->cifs_sb; bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && old->prepath; bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && new->prepath; if (tcon->origin_fullpath && dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source)) return 1; if (old_set && new_set && !strcmp(new->prepath, old->prepath)) return 1; else if (!old_set && !new_set) return 1; return 0; } int cifs_match_super(struct super_block *sb, void *data) { struct cifs_mnt_data *mnt_data = data; struct smb3_fs_context *ctx; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *tcp_srv; struct cifs_ses *ses; struct cifs_tcon *tcon; struct tcon_link *tlink; int rc = 0; spin_lock(&cifs_tcp_ses_lock); cifs_sb = CIFS_SB(sb); /* We do not want to use a superblock that has been shutdown */ if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) { spin_unlock(&cifs_tcp_ses_lock); return 0; } tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); if (IS_ERR_OR_NULL(tlink)) { pr_warn_once("%s: skip super matching due to bad tlink(%p)\n", __func__, tlink); spin_unlock(&cifs_tcp_ses_lock); return 0; } tcon = tlink_tcon(tlink); ses = tcon->ses; tcp_srv = ses->server; ctx = mnt_data->ctx; spin_lock(&tcp_srv->srv_lock); spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); spin_lock(&tcon->tc_lock); if (!match_server(tcp_srv, ctx, true) || !match_session(ses, ctx) || !match_tcon(tcon, ctx) || !match_prepath(sb, tcon, mnt_data)) { rc = 0; goto out; } rc = compare_mount_options(sb, mnt_data); out: spin_unlock(&tcon->tc_lock); spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); spin_unlock(&tcp_srv->srv_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_put_tlink(tlink); return rc; } #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key cifs_key[2]; static struct lock_class_key cifs_slock_key[2]; static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } #else static inline void cifs_reclassify_socket4(struct socket *sock) { } static inline void cifs_reclassify_socket6(struct socket *sock) { } #endif /* See RFC1001 section 14 on representation of Netbios names */ static void rfc1002mangle(char *target, char *source, unsigned int length) { unsigned int i, j; for (i = 0, j = 0; i < (length); i++) { /* mask a nibble at a time and encode */ 
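/* e.g. the source byte 'A' (0x41) encodes as "EB": 'A' + 0x4, then 'A' + 0x1 */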
target[j] = 'A' + (0x0F & (source[i] >> 4)); target[j+1] = 'A' + (0x0F & source[i]); j += 2; } } static int bind_socket(struct TCP_Server_Info *server) { int rc = 0; if (server->srcaddr.ss_family != AF_UNSPEC) { /* Bind to the specified local IP address */ struct socket *socket = server->ssocket; rc = socket->ops->bind(socket, (struct sockaddr *) &server->srcaddr, sizeof(server->srcaddr)); if (rc < 0) { struct sockaddr_in *saddr4; struct sockaddr_in6 *saddr6; saddr4 = (struct sockaddr_in *)&server->srcaddr; saddr6 = (struct sockaddr_in6 *)&server->srcaddr; if (saddr6->sin6_family == AF_INET6) cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n", &saddr6->sin6_addr, rc); else cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n", &saddr4->sin_addr.s_addr, rc); } } return rc; } static int ip_rfc1001_connect(struct TCP_Server_Info *server) { int rc = 0; /* * some servers require RFC1001 sessinit before sending * negprot - BB check reconnection in case where second * sessinit is sent but no second negprot */ struct rfc1002_session_packet req = {}; struct smb_hdr *smb_buf = (struct smb_hdr *)&req; unsigned int len; req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name); if (server->server_RFC1001_name[0] != 0) rfc1002mangle(req.trailer.session_req.called_name, server->server_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(req.trailer.session_req.called_name, DEFAULT_CIFS_CALLED_NAME, RFC1001_NAME_LEN_WITH_NULL); req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name); /* calling name ends in null (byte 16) from old smb convention */ if (server->workstation_RFC1001_name[0] != 0) rfc1002mangle(req.trailer.session_req.calling_name, server->workstation_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(req.trailer.session_req.calling_name, "LINUX_CIFS_CLNT", RFC1001_NAME_LEN_WITH_NULL); /* * As per rfc1002, @len must be the number of bytes that follows the * length field of a rfc1002 session request payload. */ len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); rc = smb_send(server, smb_buf, len); /* * RFC1001 layer in at least one server requires very short break before * negprot presumably because not expecting negprot to follow so fast. 
* This is a simple solution that works without complicating the code * and causes no significant slowing down on mount for everyone else */ usleep_range(1000, 2000); return rc; } static int generic_ip_connect(struct TCP_Server_Info *server) { struct sockaddr *saddr; struct socket *socket; int slen, sfamily; __be16 sport; int rc = 0; saddr = (struct sockaddr *) &server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) { struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr; sport = ipv6->sin6_port; slen = sizeof(struct sockaddr_in6); sfamily = AF_INET6; cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr, ntohs(sport)); } else { struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr; sport = ipv4->sin_port; slen = sizeof(struct sockaddr_in); sfamily = AF_INET; cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr, ntohs(sport)); } if (server->ssocket) { socket = server->ssocket; } else { rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket, 1); if (rc < 0) { cifs_server_dbg(VFS, "Error %d creating socket\n", rc); return rc; } /* BB other socket options to set KEEPALIVE, NODELAY? */ cifs_dbg(FYI, "Socket created\n"); socket = server->ssocket; socket->sk->sk_allocation = GFP_NOFS; socket->sk->sk_use_task_frag = false; if (sfamily == AF_INET6) cifs_reclassify_socket6(socket); else cifs_reclassify_socket4(socket); } rc = bind_socket(server); if (rc < 0) return rc; /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ if (server->noautotune) { if (socket->sk->sk_sndbuf < (200 * 1024)) socket->sk->sk_sndbuf = 200 * 1024; if (socket->sk->sk_rcvbuf < (140 * 1024)) socket->sk->sk_rcvbuf = 140 * 1024; } if (server->tcp_nodelay) tcp_sock_set_nodelay(socket->sk); cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); rc = socket->ops->connect(socket, saddr, slen, server->noblockcnt ? O_NONBLOCK : 0); /* * When mounting SMB root file systems, we do not want to block in * connect. Otherwise bail out and then let cifs_reconnect() perform * reconnect failover - if possible. 
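* A pending non-blocking connect returns -EINPROGRESS, which is treated as * success below; the TCP handshake then completes (or fails) asynchronously.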
*/ if (server->noblockcnt && rc == -EINPROGRESS) rc = 0; if (rc < 0) { cifs_dbg(FYI, "Error %d connecting to server\n", rc); trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc); sock_release(socket); server->ssocket = NULL; return rc; } trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr); if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server); return rc; } static int ip_connect(struct TCP_Server_Info *server) { __be16 *sport; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) sport = &addr6->sin6_port; else sport = &addr->sin_port; if (*sport == 0) { int rc; /* try with 445 port at first */ *sport = htons(CIFS_PORT); rc = generic_ip_connect(server); if (rc >= 0) return rc; /* if it failed, try with 139 port */ *sport = htons(RFC1001_PORT); } return generic_ip_connect(server); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) { /* * If we are reconnecting then should we check to see if * any requested capabilities changed locally e.g. via * remount but we can not do much about it here * if they have (even if we could detect it by the following) * Perhaps we could add a backpointer to array of sb from tcon * or if we change to make all sb to same share the same * sb as NFS - then we only have one backpointer to sb. * What if we wanted to mount the server share twice once with * and once without posixacls or posix paths? */ __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); if (ctx && ctx->no_linux_ext) { tcon->fsUnixInfo.Capability = 0; tcon->unix_ext = 0; /* Unix Extensions disabled */ cifs_dbg(FYI, "Linux protocol extensions disabled\n"); return; } else if (ctx) tcon->unix_ext = 1; /* Unix Extensions supported */ if (!tcon->unix_ext) { cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n"); return; } if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); /* * check for reconnect case in which we do not * want to change the mount behavior if we can avoid it */ if (ctx == NULL) { /* * turn off POSIX ACL and PATHNAMES if not set * originally at mount time */ if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(VFS, "POSIXPATH support change\n"); cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { cifs_dbg(VFS, "possible reconnect error\n"); cifs_dbg(VFS, "server disabled POSIX path support\n"); } } if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(VFS, "per-share encryption not supported yet\n"); cap &= CIFS_UNIX_CAP_MASK; if (ctx && ctx->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cifs_dbg(FYI, "negotiated posix acl support\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIXACL; } if (ctx && ctx->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cifs_dbg(FYI, "negotiate posix pathnames\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); #ifdef CONFIG_CIFS_DEBUG2 if (cap & CIFS_UNIX_FCNTL_CAP) 
cifs_dbg(FYI, "FCNTL cap\n"); if (cap & CIFS_UNIX_EXTATTR_CAP) cifs_dbg(FYI, "EXTATTR cap\n"); if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(FYI, "POSIX path cap\n"); if (cap & CIFS_UNIX_XATTR_CAP) cifs_dbg(FYI, "XATTR cap\n"); if (cap & CIFS_UNIX_POSIX_ACL_CAP) cifs_dbg(FYI, "POSIX ACL cap\n"); if (cap & CIFS_UNIX_LARGE_READ_CAP) cifs_dbg(FYI, "very large read cap\n"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cifs_dbg(FYI, "very large write cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) cifs_dbg(FYI, "transport encryption cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(FYI, "mandatory transport encryption cap\n"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (ctx == NULL) cifs_dbg(FYI, "resetting capabilities failed\n"); else cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); } } } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) { struct smb3_fs_context *ctx = cifs_sb->ctx; INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); spin_lock_init(&cifs_sb->tlink_tree_lock); cifs_sb->tlink_tree = RB_ROOT; cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", ctx->file_mode, ctx->dir_mode); /* this is needed for ASCII cp to Unicode converts */ if (ctx->iocharset == NULL) { /* load_nls_default cannot return null */ cifs_sb->local_nls = load_nls_default(); } else { cifs_sb->local_nls = load_nls(ctx->iocharset); if (cifs_sb->local_nls == NULL) { cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n", ctx->iocharset); return -ELIBACC; } } ctx->local_nls = cifs_sb->local_nls; smb3_update_mnt_flags(cifs_sb); if (ctx->direct_io) cifs_dbg(FYI, "mounting share using direct i/o\n"); if (ctx->cache_ro) { cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE; } else if (ctx->cache_rw) { cifs_dbg(VFS, "mounting share in single client RW caching mode. 
Ensure that no other systems will be accessing the share.\n"); cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE); } if ((ctx->cifs_acl) && (ctx->dynperm)) cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); if (ctx->prepath) { cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL); if (cifs_sb->prepath == NULL) return -ENOMEM; cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; } return 0; } /* Release all succeeded connections */ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) { if (mnt_ctx->tcon) cifs_put_tcon(mnt_ctx->tcon); else if (mnt_ctx->ses) cifs_put_smb_ses(mnt_ctx->ses); else if (mnt_ctx->server) cifs_put_tcp_session(mnt_ctx->server, 0); mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS; free_xid(mnt_ctx->xid); } int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx) { struct TCP_Server_Info *server = NULL; struct smb3_fs_context *ctx; struct cifs_ses *ses = NULL; unsigned int xid; int rc = 0; xid = get_xid(); if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) { rc = -EINVAL; goto out; } ctx = mnt_ctx->fs_ctx; /* get a reference to a tcp session */ server = cifs_get_tcp_session(ctx, NULL); if (IS_ERR(server)) { rc = PTR_ERR(server); server = NULL; goto out; } /* get a reference to a SMB session */ ses = cifs_get_smb_ses(server, ctx); if (IS_ERR(ses)) { rc = PTR_ERR(ses); ses = NULL; goto out; } if ((ctx->persistent == true) && (!(ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) { cifs_server_dbg(VFS, "persistent handles not supported by server\n"); rc = -EOPNOTSUPP; } out: mnt_ctx->xid = xid; mnt_ctx->server = server; mnt_ctx->ses = ses; mnt_ctx->tcon = NULL; return rc; } int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) { struct TCP_Server_Info *server; struct cifs_sb_info *cifs_sb; struct smb3_fs_context *ctx; struct cifs_tcon *tcon = NULL; int rc = 0; if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx || !mnt_ctx->cifs_sb)) { rc = -EINVAL; goto out; } server = mnt_ctx->server; ctx = mnt_ctx->fs_ctx; cifs_sb = mnt_ctx->cifs_sb; /* search for existing tcon to this server share */ tcon = cifs_get_tcon(mnt_ctx->ses, ctx); if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; goto out; } /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ if (tcon->posix_extensions) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* tell server which Unix caps we support */ if (cap_unix(tcon->ses)) { /* * reset of caps checks the mount to see if unix extensions were disabled * for just this mount.
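* (reset_cifs_unix_caps() may e.g. clear the POSIX pathnames or POSIX ACL * capability bits that QFSUnixInfo reported, depending on mount options.)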
*/ reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx); spin_lock(&tcon->ses->server->srv_lock); if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && (le64_to_cpu(tcon->fsUnixInfo.Capability) & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { spin_unlock(&tcon->ses->server->srv_lock); rc = -EACCES; goto out; } spin_unlock(&tcon->ses->server->srv_lock); } else #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ tcon->unix_ext = 0; /* server does not support them */ /* do not care if the following call succeeds - informational */ if (!tcon->pipe && server->ops->qfs_tcon) { server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) { if (tcon->fsDevInfo.DeviceCharacteristics & cpu_to_le32(FILE_READ_ONLY_DEVICE)) cifs_dbg(VFS, "mounted to read only share\n"); else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) == 0) cifs_dbg(VFS, "read only mount of RW share\n"); /* no need to log a RW mount of a typical RW share */ } } /* * Clamp the rsize/wsize mount arguments if they are too big for the server * and set the rsize/wsize to the negotiated values if not passed in by * the user on mount */ if ((cifs_sb->ctx->wsize == 0) || (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx); if ((cifs_sb->ctx->rsize == 0) || (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); /* * The cookie is initialized from volume info returned above. * Inside cifs_fscache_get_super_cookie it checks * that we do not get super cookie twice. */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) cifs_fscache_get_super_cookie(tcon); out: mnt_ctx->tcon = tcon; return rc; } static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, struct cifs_tcon *tcon) { struct tcon_link *tlink; /* hang the tcon off of the superblock */ tlink = kzalloc(sizeof(*tlink), GFP_KERNEL); if (tlink == NULL) return -ENOMEM; tlink->tl_uid = ses->linux_uid; tlink->tl_tcon = tcon; tlink->tl_time = jiffies; set_bit(TCON_LINK_MASTER, &tlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); cifs_sb->master_tlink = tlink; spin_lock(&cifs_sb->tlink_tree_lock); tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); return 0; } static int cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, char *full_path, int added_treename) { int rc; char *s; char sep, tmp; int skip = added_treename ? 1 : 0; sep = CIFS_DIR_SEP(cifs_sb); s = full_path; rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); while (rc == 0) { /* skip separators */ while (*s == sep) s++; if (!*s) break; /* next separator */ while (*s && *s != sep) s++; /* * if the treename is added, we then have to skip the first * part within the separators */ if (skip) { skip = 0; continue; } /* * temporarily null-terminate the path at the end of * the current component */ tmp = *s; *s = 0; rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, full_path); *s = tmp; } return rc; } /* * Check if path is remote (i.e. a DFS share). * * Return -EREMOTE if it is, otherwise 0 or -errno.
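* e.g. when //srv/share/dir has "dir" as a DFS link, is_path_accessible() * returns -EREMOTE; DFS-enabled builds then chase the referral, while builds * without CONFIG_CIFS_DFS_UPCALL fail the mount with -EOPNOTSUPP (see * cifs_mount() below).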
*/ int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx) { int rc; struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; struct TCP_Server_Info *server = mnt_ctx->server; unsigned int xid = mnt_ctx->xid; struct cifs_tcon *tcon = mnt_ctx->tcon; struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; char *full_path; if (!server->ops->is_path_accessible) return -EOPNOTSUPP; /* * cifs_build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon, tcon->Flags & SMB_SHARE_IS_IN_DFS); if (full_path == NULL) return -ENOMEM; cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, full_path); if (rc != 0 && rc != -EREMOTE) goto out; if (rc != -EREMOTE) { rc = cifs_are_all_path_components_accessible(server, xid, tcon, cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS); if (rc != 0) { cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; rc = 0; } } out: kfree(full_path); return rc; } #ifdef CONFIG_CIFS_DFS_UPCALL int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) { struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; bool isdfs; int rc; INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); rc = dfs_mount_share(&mnt_ctx, &isdfs); if (rc) goto error; if (!isdfs) goto out; /* * After reconnecting to a different server, unique ids won't match anymore, so we disable * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE). */ cifs_autodisable_serverino(cifs_sb); /* * Force the use of prefix path to support failover on DFS paths that resolve to targets * that have different prefix paths. */ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; kfree(cifs_sb->prepath); cifs_sb->prepath = ctx->prepath; ctx->prepath = NULL; out: cifs_try_adding_channels(cifs_sb, mnt_ctx.ses); rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); if (rc) goto error; free_xid(mnt_ctx.xid); return rc; error: dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); cifs_mount_put_conns(&mnt_ctx); return rc; } #else int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) { int rc = 0; struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; rc = cifs_mount_get_session(&mnt_ctx); if (rc) goto error; rc = cifs_mount_get_tcon(&mnt_ctx); if (rc) goto error; rc = cifs_is_path_remote(&mnt_ctx); if (rc == -EREMOTE) rc = -EOPNOTSUPP; if (rc) goto error; rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); if (rc) goto error; free_xid(mnt_ctx.xid); return rc; error: cifs_mount_put_conns(&mnt_ctx); return rc; } #endif /* * Issue a TREE_CONNECT request.
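* This is the legacy SMB1 TREE_CONNECT_ANDX encoder; SMB2+ connections * reach the server through the server->ops->tree_connect operation instead.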
*/ int CIFSTCon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length; __u16 bytes_left, count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = get_next_mid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ if (ses->server->sign) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; } if (ses->capabilities & CAP_DFS) { smb_buffer->Flags2 |= SMBFLG2_DFS; } if (ses->capabilities & CAP_UNICODE) { smb_buffer->Flags2 |= SMBFLG2_UNICODE; length = cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 6 /* max utf8 char length in bytes */ * (/* server len*/ + 256 /* share len */), nls_codepage); bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ bcc_ptr += 2; /* skip trailing null */ } else { /* ASCII */ strcpy(bcc_ptr, tree); bcc_ptr += strlen(tree) + 1; } strcpy(bcc_ptr, "?????"); bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; be32_add_cpu(&pSMB->hdr.smb_buf_length, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0); /* above now done in SendReceive */ if (rc == 0) { bool is_unicode; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); bytes_left = get_bcc(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* skip service field (NB: this field is always ASCII) */ if (length == 3) { if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && (bcc_ptr[2] == 'C')) { cifs_dbg(FYI, "IPC connection\n"); tcon->ipc = true; tcon->pipe = true; } } else if (length == 2) { if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { /* the most common case */ cifs_dbg(FYI, "disk share connection\n"); } } bcc_ptr += length + 1; bytes_left -= (length + 1); strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, bytes_left, is_unicode, nls_codepage); cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); if ((smb_buffer_response->WordCount == 3) || (smb_buffer_response->WordCount == 7)) /* field is in same location */ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); else tcon->Flags = 0; cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); } cifs_buf_release(smb_buffer); return rc; } static void delayed_free(struct rcu_head *p) { struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu); unload_nls(cifs_sb->local_nls); smb3_cleanup_fs_context(cifs_sb->ctx); kfree(cifs_sb); } void cifs_umount(struct cifs_sb_info *cifs_sb) { struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; struct tcon_link *tlink; 
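/* stop the tlink pruner first so it cannot requeue itself or race with the teardown below */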
cancel_delayed_work_sync(&cifs_sb->prune_tlinks); spin_lock(&cifs_sb->tlink_tree_lock); while ((node = rb_first(root))) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); cifs_get_tlink(tlink); clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); rb_erase(node, root); spin_unlock(&cifs_sb->tlink_tree_lock); cifs_put_tlink(tlink); spin_lock(&cifs_sb->tlink_tree_lock); } spin_unlock(&cifs_sb->tlink_tree_lock); kfree(cifs_sb->prepath); call_rcu(&cifs_sb->rcu, delayed_free); } int cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { int rc = 0; if (!server->ops->need_neg || !server->ops->negotiate) return -ENOSYS; /* only send once per connect */ spin_lock(&server->srv_lock); if (server->tcpStatus != CifsGood && server->tcpStatus != CifsNew && server->tcpStatus != CifsNeedNegotiate) { spin_unlock(&server->srv_lock); return -EHOSTDOWN; } if (!server->ops->need_neg(server) && server->tcpStatus == CifsGood) { spin_unlock(&server->srv_lock); return 0; } server->tcpStatus = CifsInNegotiate; spin_unlock(&server->srv_lock); rc = server->ops->negotiate(xid, ses, server); if (rc == 0) { spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; spin_unlock(&server->srv_lock); } else { spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&server->srv_lock); } return rc; } int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server, struct nls_table *nls_info) { int rc = -ENOSYS; struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr; bool is_binding = false; spin_lock(&ses->ses_lock); cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n", __func__, ses->chans_need_reconnect); if (ses->ses_status != SES_GOOD && ses->ses_status != SES_NEW && ses->ses_status != SES_NEED_RECON) { spin_unlock(&ses->ses_lock); return -EHOSTDOWN; } /* only send once per connect */ spin_lock(&ses->chan_lock); if (CIFS_ALL_CHANS_GOOD(ses)) { if (ses->ses_status == SES_NEED_RECON) ses->ses_status = SES_GOOD; spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); return 0; } cifs_chan_set_in_reconnect(ses, server); is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses); spin_unlock(&ses->chan_lock); if (!is_binding) ses->ses_status = SES_IN_SETUP; spin_unlock(&ses->ses_lock); /* update ses ip_addr only for primary chan */ if (server == pserver) { if (server->dstaddr.ss_family == AF_INET6) scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); else scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr); } if (!is_binding) { ses->capabilities = server->capabilities; if (!linuxExtEnabled) ses->capabilities &= (~server->vals->cap_unix); if (ses->auth_key.response) { cifs_dbg(FYI, "Free previous auth_key.response = %p\n", ses->auth_key.response); kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; ses->auth_key.len = 0; } } cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", server->sec_mode, server->capabilities, server->timeAdj); if (server->ops->sess_setup) rc = server->ops->sess_setup(xid, ses, server, nls_info); if (rc) { cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc); spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = 
SES_NEED_RECON; spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); } else { spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = SES_GOOD; spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); cifs_chan_clear_need_reconnect(ses, server); spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); } return rc; } static int cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses) { ctx->sectype = ses->sectype; /* krb5 is special, since we don't need username or pw */ if (ctx->sectype == Kerberos) return 0; return cifs_set_cifscreds(ctx, ses); } static struct cifs_tcon * cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) { int rc; struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); struct cifs_ses *ses; struct cifs_tcon *tcon = NULL; struct smb3_fs_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); ctx->local_nls = cifs_sb->local_nls; ctx->linux_uid = fsuid; ctx->cred_uid = fsuid; ctx->UNC = master_tcon->tree_name; ctx->retry = master_tcon->retry; ctx->nocase = master_tcon->nocase; ctx->nohandlecache = master_tcon->nohandlecache; ctx->local_lease = master_tcon->local_lease; ctx->no_lease = master_tcon->no_lease; ctx->resilient = master_tcon->use_resilient; ctx->persistent = master_tcon->use_persistent; ctx->handle_timeout = master_tcon->handle_timeout; ctx->no_linux_ext = !master_tcon->unix_ext; ctx->linux_ext = master_tcon->posix_extensions; ctx->sectype = master_tcon->ses->sectype; ctx->sign = master_tcon->ses->sign; ctx->seal = master_tcon->seal; ctx->witness = master_tcon->use_witness; rc = cifs_set_vol_auth(ctx, master_tcon->ses); if (rc) { tcon = ERR_PTR(rc); goto out; } /* get a reference for the same TCP session */ spin_lock(&cifs_tcp_ses_lock); ++master_tcon->ses->server->srv_count; spin_unlock(&cifs_tcp_ses_lock); ses = cifs_get_smb_ses(master_tcon->ses->server, ctx); if (IS_ERR(ses)) { tcon = (struct cifs_tcon *)ses; cifs_put_tcp_session(master_tcon->ses->server, 0); goto out; } tcon = cifs_get_tcon(ses, ctx); if (IS_ERR(tcon)) { cifs_put_smb_ses(ses); goto out; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, ctx); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ out: kfree(ctx->username); kfree_sensitive(ctx->password); kfree(ctx); return tcon; } struct cifs_tcon * cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) { return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); } /* find and return a tlink with given uid */ static struct tcon_link * tlink_rb_search(struct rb_root *root, kuid_t uid) { struct rb_node *node = root->rb_node; struct tcon_link *tlink; while (node) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); if (uid_gt(tlink->tl_uid, uid)) node = node->rb_left; else if (uid_lt(tlink->tl_uid, uid)) node = node->rb_right; else return tlink; } return NULL; } /* insert a tcon_link into the tree */ static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct tcon_link *tlink; while (*new) { tlink = rb_entry(*new, struct tcon_link, tl_rbnode); parent = *new; if (uid_gt(tlink->tl_uid, new_tlink->tl_uid)) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_tlink->tl_rbnode, parent, new); rb_insert_color(&new_tlink->tl_rbnode, root); } /* * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the * current 
task. * * If the superblock doesn't refer to a multiuser mount, then just return * the master tcon for the mount. * * First, search the rbtree for an existing tcon for this fsuid. If one * exists, then check to see if it's pending construction. If it is then wait * for construction to complete. Once it's no longer pending, check to see if * it failed and either return an error or retry construction, depending on * the timeout. * * If one doesn't exist then insert a new tcon_link struct into the tree and * try to construct a new one. */ struct tcon_link * cifs_sb_tlink(struct cifs_sb_info *cifs_sb) { int ret; kuid_t fsuid = current_fsuid(); struct tcon_link *tlink, *newtlink; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); spin_lock(&cifs_sb->tlink_tree_lock); tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); if (tlink == NULL) { newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); if (newtlink == NULL) return ERR_PTR(-ENOMEM); newtlink->tl_uid = fsuid; newtlink->tl_tcon = ERR_PTR(-EACCES); set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); cifs_get_tlink(newtlink); spin_lock(&cifs_sb->tlink_tree_lock); /* was one inserted after previous search? */ tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) { cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); kfree(newtlink); goto wait_for_construction; } tlink = newtlink; tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); } else { wait_for_construction: ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, TASK_INTERRUPTIBLE); if (ret) { cifs_put_tlink(tlink); return ERR_PTR(-ERESTARTSYS); } /* if it's good, return it */ if (!IS_ERR(tlink->tl_tcon)) return tlink; /* return error if we tried this already recently */ if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags)) goto wait_for_construction; } tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid); clear_bit(TCON_LINK_PENDING, &tlink->tl_flags); wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING); if (IS_ERR(tlink->tl_tcon)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } return tlink; } /* * periodic workqueue job that scans tcon_tree for a superblock and closes * out tcons. */ static void cifs_prune_tlinks(struct work_struct *work) { struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, prune_tlinks.work); struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; struct rb_node *tmp; struct tcon_link *tlink; /* * Because we drop the spinlock in the loop in order to put the tlink * it's not guarded against removal of links from the tree. The only * places that remove entries from the tree are this function and * umounts. Because this function is non-reentrant and is canceled * before umount can proceed, this is safe. 
*/ spin_lock(&cifs_sb->tlink_tree_lock); node = rb_first(root); while (node != NULL) { tmp = node; node = rb_next(tmp); tlink = rb_entry(tmp, struct tcon_link, tl_rbnode); if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) || atomic_read(&tlink->tl_count) != 0 || time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies)) continue; cifs_get_tlink(tlink); clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); rb_erase(tmp, root); spin_unlock(&cifs_sb->tlink_tree_lock); cifs_put_tlink(tlink); spin_lock(&cifs_sb->tlink_tree_lock); } spin_unlock(&cifs_sb->tlink_tree_lock); queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); } #ifndef CONFIG_CIFS_DFS_UPCALL int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc) { int rc; const struct smb_version_operations *ops = tcon->ses->server->ops; /* only send once per connect */ spin_lock(&tcon->tc_lock); if (tcon->status == TID_GOOD) { spin_unlock(&tcon->tc_lock); return 0; } if (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON) { spin_unlock(&tcon->tc_lock); return -EHOSTDOWN; } tcon->status = TID_IN_TCON; spin_unlock(&tcon->tc_lock); rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc); if (rc) { spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; spin_unlock(&tcon->tc_lock); } else { spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; tcon->need_reconnect = false; spin_unlock(&tcon->tc_lock); } return rc; } #endif
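/*
 * Illustrative sketch (not part of the upstream file): the typical consumer
 * pattern for cifs_sb_tlink() above on multiuser mounts.  A caller takes a
 * tlink reference keyed by the current fsuid, extracts the tcon for the
 * actual SMB operation, then drops the reference so cifs_prune_tlinks() can
 * eventually reap idle links.  The helper name example_op_via_tlink() is an
 * assumption for illustration only; it compiles only in-tree, against the
 * headers this file already includes.
 */
static int example_op_via_tlink(struct cifs_sb_info *cifs_sb)
{
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	int rc = 0;

	tlink = cifs_sb_tlink(cifs_sb);	/* may construct a per-user tcon */
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	/* ... issue the SMB request against tcon here ... */

	cifs_put_tlink(tlink);		/* balance cifs_sb_tlink() */
	return rc;
}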
linux-master
fs/smb/client/connect.c
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French ([email protected]) * * Common Internet FileSystem (CIFS) client * */ /* Note that BB means BUGBUG (ie something to fix eventually) */ #include <linux/module.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/seq_file.h> #include <linux/vfs.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/namei.h> #include <linux/random.h> #include <linux/uuid.h> #include <linux/xattr.h> #include <uapi/linux/magic.h> #include <net/ipv6.h> #include "cifsfs.h" #include "cifspdu.h" #define DECLARE_GLOBALS_HERE #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include <linux/mm.h> #include <linux/key-type.h> #include "cifs_spnego.h" #include "fscache.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif #ifdef CONFIG_CIFS_SWN_UPCALL #include "netlink.h" #endif #include "fs_context.h" #include "cached_dir.h" /* * DOS dates from 1980/1/1 through 2107/12/31 * Protocol specifications indicate the range should be to 119, which * limits maximum year to 2099. But this range has not been checked. */ #define SMB_DATE_MAX (127<<9 | 12<<5 | 31) #define SMB_DATE_MIN (0<<9 | 1<<5 | 1) #define SMB_TIME_MAX (23<<11 | 59<<5 | 29) int cifsFYI = 0; bool traceSMB; bool enable_oplocks = true; bool linuxExtEnabled = true; bool lookupCacheEnabled = true; bool disable_legacy_dialects; /* false by default */ bool enable_gcm_256 = true; bool require_gcm_256; /* false by default */ bool enable_negotiate_signing; /* false by default */ unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ unsigned int sign_CIFS_PDUs = 1; /* * Global transaction id (XID) information */ unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ /* * Global counters, updated atomically */ atomic_t sesInfoAllocCount; atomic_t tconInfoAllocCount; atomic_t tcpSesNextId; atomic_t tcpSesAllocCount; atomic_t tcpSesReconnectCount; atomic_t tconInfoReconnectCount; atomic_t mid_count; atomic_t buf_alloc_count; atomic_t small_buf_alloc_count; #ifdef CONFIG_CIFS_STATS2 atomic_t total_buf_alloc_count; atomic_t total_small_buf_alloc_count; #endif/* STATS2 */ struct list_head cifs_tcp_ses_list; spinlock_t cifs_tcp_ses_lock; static const struct super_operations cifs_super_ops; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; module_param(CIFSMaxBufSize, uint, 0444); MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) " "for CIFS requests. " "Default: 16384 Range: 8192 to 130048"); unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL; module_param(cifs_min_rcv, uint, 0444); MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: " "1 to 64"); unsigned int cifs_min_small = 30; module_param(cifs_min_small, uint, 0444); MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. 
Default: 30 " "Range: 2 to 256"); unsigned int cifs_max_pending = CIFS_MAX_REQ; module_param(cifs_max_pending, uint, 0444); MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for " "CIFS/SMB1 dialect (N/A for SMB3) " "Default: 32767 Range: 2 to 32767."); unsigned int dir_cache_timeout = 30; module_param(dir_cache_timeout, uint, 0644); MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 " "Range: 1 to 65000 seconds, 0 to disable caching dir contents"); #ifdef CONFIG_CIFS_STATS2 unsigned int slow_rsp_threshold = 1; module_param(slow_rsp_threshold, uint, 0644); MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait " "before logging that a response is delayed. " "Default: 1 (if set to 0 disables msg)."); #endif /* STATS2 */ module_param(enable_oplocks, bool, 0644); MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1"); module_param(enable_gcm_256, bool, 0644); MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0"); module_param(require_gcm_256, bool, 0644); MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0"); module_param(enable_negotiate_signing, bool, 0644); MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0"); module_param(disable_legacy_dialects, bool, 0644); MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be " "helpful to restrict the ability to " "override the default dialects (SMB2.1, " "SMB3 and SMB3.02) on mount with old " "dialects (CIFS/SMB1 and SMB2) since " "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker" " and less secure. Default: n/N/0"); extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; struct workqueue_struct *cifsiod_wq; struct workqueue_struct *decrypt_wq; struct workqueue_struct *fileinfo_put_wq; struct workqueue_struct *cifsoplockd_wq; struct workqueue_struct *deferredclose_wq; __u32 cifs_lock_secret; /* * Bumps refcount for cifs super block. * Note that it should only be called if a reference to the VFS super block is * already held, e.g. in open-type syscalls context. Otherwise it can race with * atomic_dec_and_test in deactivate_locked_super. */ void cifs_sb_active(struct super_block *sb) { struct cifs_sb_info *server = CIFS_SB(sb); if (atomic_inc_return(&server->active) == 1) atomic_inc(&sb->s_active); } void cifs_sb_deactive(struct super_block *sb) { struct cifs_sb_info *server = CIFS_SB(sb); if (atomic_dec_and_test(&server->active)) deactivate_super(sb); } static int cifs_read_super(struct super_block *sb) { struct inode *inode; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct timespec64 ts; int rc = 0; cifs_sb = CIFS_SB(sb); tcon = cifs_sb_master_tcon(cifs_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) sb->s_flags |= SB_POSIXACL; if (tcon->snapshot_time) sb->s_flags |= SB_RDONLY; if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files) sb->s_maxbytes = MAX_LFS_FILESIZE; else sb->s_maxbytes = MAX_NON_LFS; /* * Some very old servers like DOS and OS/2 used 2 second granularity * (while all current servers use 100ns granularity - see MS-DTYP) * but 1 second is the maximum allowed granularity for the VFS * so for old servers set time granularity to 1 second while for * everything else (current servers) set it to 100ns. 
*/ if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) && ((tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find) == 0) && !tcon->unix_ext) { sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */ ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0); sb->s_time_min = ts.tv_sec; ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX), cpu_to_le16(SMB_TIME_MAX), 0); sb->s_time_max = ts.tv_sec; } else { /* * Almost every server, including all SMB2+, uses DCE TIME * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC */ sb->s_time_gran = 100; ts = cifs_NTtimeToUnix(0); sb->s_time_min = ts.tv_sec; ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX)); sb->s_time_max = ts.tv_sec; } sb->s_magic = CIFS_SUPER_MAGIC; sb->s_op = &cifs_super_ops; sb->s_xattr = cifs_xattr_handlers; rc = super_setup_bdi(sb); if (rc) goto out_no_root; /* tune readahead according to rsize if readahead size not set on mount */ if (cifs_sb->ctx->rsize == 0) cifs_sb->ctx->rsize = tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx); if (cifs_sb->ctx->rasize) sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE; else sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE); sb->s_blocksize = CIFS_MAX_MSGSIZE; sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ inode = cifs_root_iget(sb); if (IS_ERR(inode)) { rc = PTR_ERR(inode); goto out_no_root; } if (tcon->nocase) sb->s_d_op = &cifs_ci_dentry_ops; else sb->s_d_op = &cifs_dentry_ops; sb->s_root = d_make_root(inode); if (!sb->s_root) { rc = -ENOMEM; goto out_no_root; } #ifdef CONFIG_CIFS_NFSD_EXPORT if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifs_dbg(FYI, "export ops supported\n"); sb->s_export_op = &cifs_export_ops; } #endif /* CONFIG_CIFS_NFSD_EXPORT */ return 0; out_no_root: cifs_dbg(VFS, "%s: get root inode failed\n", __func__); return rc; } static void cifs_kill_sb(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); /* * We need to release all dentries for the cached directories * before we kill the sb. 
*/ if (cifs_sb->root) { close_all_cached_dirs(cifs_sb); /* finally release root dentry */ dput(cifs_sb->root); cifs_sb->root = NULL; } kill_anon_super(sb); cifs_umount(cifs_sb); } static int cifs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct TCP_Server_Info *server = tcon->ses->server; unsigned int xid; int rc = 0; xid = get_xid(); if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0) buf->f_namelen = le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength); else buf->f_namelen = PATH_MAX; buf->f_fsid.val[0] = tcon->vol_serial_number; /* are using part of create time for more randomness, see man statfs */ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time); buf->f_files = 0; /* undefined */ buf->f_ffree = 0; /* unlimited */ if (server->ops->queryfs) rc = server->ops->queryfs(xid, tcon, cifs_sb, buf); free_xid(xid); return rc; } static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) { struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct TCP_Server_Info *server = tcon->ses->server; if (server->ops->fallocate) return server->ops->fallocate(file, tcon, mode, off, len); return -EOPNOTSUPP; } static int cifs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { struct cifs_sb_info *cifs_sb; cifs_sb = CIFS_SB(inode->i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { if ((mask & MAY_EXEC) && !execute_ok(inode)) return -EACCES; else return 0; } else /* file mode might have been restricted at mount time on the client (above and beyond ACL on servers) for servers which do not support setting and viewing mode bits, so allowing client to check permissions is useful */ return generic_permission(&nop_mnt_idmap, inode, mask); } static struct kmem_cache *cifs_inode_cachep; static struct kmem_cache *cifs_req_cachep; static struct kmem_cache *cifs_mid_cachep; static struct kmem_cache *cifs_sm_req_cachep; mempool_t *cifs_sm_req_poolp; mempool_t *cifs_req_poolp; mempool_t *cifs_mid_poolp; static struct inode * cifs_alloc_inode(struct super_block *sb) { struct cifsInodeInfo *cifs_inode; cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL); if (!cifs_inode) return NULL; cifs_inode->cifsAttrs = 0x20; /* default */ cifs_inode->time = 0; /* * Until the file is open and we have gotten oplock info back from the * server, can not assume caching of file data or metadata. */ cifs_set_oplock_level(cifs_inode, 0); cifs_inode->flags = 0; spin_lock_init(&cifs_inode->writers_lock); cifs_inode->writers = 0; cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; cifs_inode->uniqueid = 0; cifs_inode->createtime = 0; cifs_inode->epoch = 0; spin_lock_init(&cifs_inode->open_file_lock); generate_random_uuid(cifs_inode->lease_key); cifs_inode->symlink_target = NULL; /* * Can not set i_flags here - they get immediately overwritten to zero * by the VFS. 
*/ /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */ INIT_LIST_HEAD(&cifs_inode->openFileList); INIT_LIST_HEAD(&cifs_inode->llist); INIT_LIST_HEAD(&cifs_inode->deferred_closes); spin_lock_init(&cifs_inode->deferred_lock); return &cifs_inode->netfs.inode; } static void cifs_free_inode(struct inode *inode) { struct cifsInodeInfo *cinode = CIFS_I(inode); if (S_ISLNK(inode->i_mode)) kfree(cinode->symlink_target); kmem_cache_free(cifs_inode_cachep, cinode); } static void cifs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); if (inode->i_state & I_PINNING_FSCACHE_WB) cifs_fscache_unuse_inode_cookie(inode, true); cifs_fscache_release_inode_cookie(inode); clear_inode(inode); } static void cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) { struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; seq_puts(s, ",addr="); switch (server->dstaddr.ss_family) { case AF_INET: seq_printf(s, "%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr); if (sa6->sin6_scope_id) seq_printf(s, "%%%u", sa6->sin6_scope_id); break; default: seq_puts(s, "(unknown)"); } if (server->rdma) seq_puts(s, ",rdma"); } static void cifs_show_security(struct seq_file *s, struct cifs_ses *ses) { if (ses->sectype == Unspecified) { if (ses->user_name == NULL) seq_puts(s, ",sec=none"); return; } seq_puts(s, ",sec="); switch (ses->sectype) { case NTLMv2: seq_puts(s, "ntlmv2"); break; case Kerberos: seq_puts(s, "krb5"); break; case RawNTLMSSP: seq_puts(s, "ntlmssp"); break; default: /* shouldn't ever happen */ seq_puts(s, "unknown"); break; } if (ses->sign) seq_puts(s, "i"); if (ses->sectype == Kerberos) seq_printf(s, ",cruid=%u", from_kuid_munged(&init_user_ns, ses->cred_uid)); } static void cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb) { seq_puts(s, ",cache="); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) seq_puts(s, "strict"); else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) seq_puts(s, "none"); else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) seq_puts(s, "singleclient"); /* assume only one client access */ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) seq_puts(s, "ro"); /* read only caching assumed */ else seq_puts(s, "loose"); } /* * cifs_show_devname() is used so we show the mount device name with correct * format (e.g. forward slashes vs. back slashes) in /proc/mounts */ static int cifs_show_devname(struct seq_file *m, struct dentry *root) { struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL); if (devname == NULL) seq_puts(m, "none"); else { convert_delimiter(devname, '/'); /* escape all spaces in share names */ seq_escape(m, devname, " \t"); kfree(devname); } return 0; } /* * cifs_show_options() is for displaying mount options in /proc/mounts. * Not all settable options are displayed but most of the important * ones are. 
*/ static int cifs_show_options(struct seq_file *s, struct dentry *root) { struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct sockaddr *srcaddr; srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; seq_show_option(s, "vers", tcon->ses->server->vals->version_string); cifs_show_security(s, tcon->ses); cifs_show_cache_flavor(s, cifs_sb); if (tcon->no_lease) seq_puts(s, ",nolease"); if (cifs_sb->ctx->multiuser) seq_puts(s, ",multiuser"); else if (tcon->ses->user_name) seq_show_option(s, "username", tcon->ses->user_name); if (tcon->ses->domainName && tcon->ses->domainName[0] != 0) seq_show_option(s, "domain", tcon->ses->domainName); if (srcaddr->sa_family != AF_UNSPEC) { struct sockaddr_in *saddr4; struct sockaddr_in6 *saddr6; saddr4 = (struct sockaddr_in *)srcaddr; saddr6 = (struct sockaddr_in6 *)srcaddr; if (srcaddr->sa_family == AF_INET6) seq_printf(s, ",srcaddr=%pI6c", &saddr6->sin6_addr); else if (srcaddr->sa_family == AF_INET) seq_printf(s, ",srcaddr=%pI4", &saddr4->sin_addr.s_addr); else seq_printf(s, ",srcaddr=BAD-AF:%i", (int)(srcaddr->sa_family)); } seq_printf(s, ",uid=%u", from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid)); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) seq_puts(s, ",forceuid"); else seq_puts(s, ",noforceuid"); seq_printf(s, ",gid=%u", from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid)); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) seq_puts(s, ",forcegid"); else seq_puts(s, ",noforcegid"); cifs_show_address(s, tcon->ses->server); if (!tcon->unix_ext) seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho", cifs_sb->ctx->file_mode, cifs_sb->ctx->dir_mode); if (cifs_sb->ctx->iocharset) seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset); if (tcon->seal) seq_puts(s, ",seal"); else if (tcon->ses->server->ignore_signature) seq_puts(s, ",signloosely"); if (tcon->nocase) seq_puts(s, ",nocase"); if (tcon->nodelete) seq_puts(s, ",nodelete"); if (cifs_sb->ctx->no_sparse) seq_puts(s, ",nosparse"); if (tcon->local_lease) seq_puts(s, ",locallease"); if (tcon->retry) seq_puts(s, ",hard"); else seq_puts(s, ",soft"); if (tcon->use_persistent) seq_puts(s, ",persistenthandles"); else if (tcon->use_resilient) seq_puts(s, ",resilienthandles"); if (tcon->posix_extensions) seq_puts(s, ",posix"); else if (tcon->unix_ext) seq_puts(s, ",unix"); else seq_puts(s, ",nounix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) seq_puts(s, ",nodfs"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) seq_puts(s, ",posixpaths"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) seq_puts(s, ",setuids"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) seq_puts(s, ",idsfromsid"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) seq_puts(s, ",serverino"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) seq_puts(s, ",rwpidforward"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) seq_puts(s, ",forcemand"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) seq_puts(s, ",nouser_xattr"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) seq_puts(s, ",mapchars"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) seq_puts(s, ",mapposix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) seq_puts(s, ",sfu"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) seq_puts(s, ",nobrl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE) seq_puts(s, ",nohandlecache"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) seq_puts(s, ",modefromsid"); if (cifs_sb->mnt_cifs_flags & 
CIFS_MOUNT_CIFS_ACL) seq_puts(s, ",cifsacl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) seq_puts(s, ",dynperm"); if (root->d_sb->s_flags & SB_POSIXACL) seq_puts(s, ",acl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) seq_puts(s, ",mfsymlinks"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) seq_puts(s, ",fsc"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC) seq_puts(s, ",nostrictsync"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) seq_puts(s, ",noperm"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) seq_printf(s, ",backupuid=%u", from_kuid_munged(&init_user_ns, cifs_sb->ctx->backupuid)); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) seq_printf(s, ",backupgid=%u", from_kgid_munged(&init_user_ns, cifs_sb->ctx->backupgid)); seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize); seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize); seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize); if (cifs_sb->ctx->rasize) seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize); if (tcon->ses->server->min_offload) seq_printf(s, ",esize=%u", tcon->ses->server->min_offload); seq_printf(s, ",echo_interval=%lu", tcon->ses->server->echo_interval / HZ); /* Only display the following if overridden on mount */ if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE) seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits); if (tcon->ses->server->tcp_nodelay) seq_puts(s, ",tcpnodelay"); if (tcon->ses->server->noautotune) seq_puts(s, ",noautotune"); if (tcon->ses->server->noblocksnd) seq_puts(s, ",noblocksend"); if (tcon->ses->server->nosharesock) seq_puts(s, ",nosharesock"); if (tcon->snapshot_time) seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); if (tcon->handle_timeout) seq_printf(s, ",handletimeout=%u", tcon->handle_timeout); if (tcon->max_cached_dirs != MAX_CACHED_FIDS) seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs); /* * Display file and directory attribute timeout in seconds. * If file and directory attribute timeout the same then actimeo * was likely specified on mount */ if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax) seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ); else { seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ); seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ); } seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ); if (tcon->ses->chan_max > 1) seq_printf(s, ",multichannel,max_channels=%zu", tcon->ses->chan_max); if (tcon->use_witness) seq_puts(s, ",witness"); return 0; } static void cifs_umount_begin(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon; if (cifs_sb == NULL) return; tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&cifs_tcp_ses_lock); spin_lock(&tcon->tc_lock); if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { /* we have other mounts to same share or we have already tried to umount this and woken up all waiting network requests, nothing to do */ spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return; } /* * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will * fail later (e.g. due to open files). 
TID_EXITING will be set just before tdis req sent */ spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_close_all_deferred_files(tcon); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ /* cancel_notify_requests(tcon); */ if (tcon->ses && tcon->ses->server) { cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n"); wake_up_all(&tcon->ses->server->request_q); wake_up_all(&tcon->ses->server->response_q); msleep(1); /* yield */ /* we have to kick the requests once more */ wake_up_all(&tcon->ses->server->response_q); msleep(1); } return; } static int cifs_freeze(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon; if (cifs_sb == NULL) return 0; tcon = cifs_sb_master_tcon(cifs_sb); cifs_close_all_deferred_files(tcon); return 0; } #ifdef CONFIG_CIFS_STATS2 static int cifs_show_stats(struct seq_file *s, struct dentry *root) { /* BB FIXME */ return 0; } #endif static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc) { fscache_unpin_writeback(wbc, cifs_inode_cookie(inode)); return 0; } static int cifs_drop_inode(struct inode *inode) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); /* no serverino => unconditional eviction */ return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) || generic_drop_inode(inode); } static const struct super_operations cifs_super_ops = { .statfs = cifs_statfs, .alloc_inode = cifs_alloc_inode, .write_inode = cifs_write_inode, .free_inode = cifs_free_inode, .drop_inode = cifs_drop_inode, .evict_inode = cifs_evict_inode, /* .show_path = cifs_show_path, */ /* Would we ever need show path? */ .show_devname = cifs_show_devname, /* .delete_inode = cifs_delete_inode, */ /* Do not need above function unless later we add lazy close of inodes or unless the kernel forgets to call us with the same number of releases (closes) as opens */ .show_options = cifs_show_options, .umount_begin = cifs_umount_begin, .freeze_fs = cifs_freeze, #ifdef CONFIG_CIFS_STATS2 .show_stats = cifs_show_stats, #endif }; /* * Get root dentry from superblock according to prefix path mount option. * Return dentry with refcount + 1 on success and NULL otherwise. 
*/ static struct dentry * cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb) { struct dentry *dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); char *full_path = NULL; char *s, *p; char sep; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) return dget(sb->s_root); full_path = cifs_build_path_to_root(ctx, cifs_sb, cifs_sb_master_tcon(cifs_sb), 0); if (full_path == NULL) return ERR_PTR(-ENOMEM); cifs_dbg(FYI, "Get root dentry for %s\n", full_path); sep = CIFS_DIR_SEP(cifs_sb); dentry = dget(sb->s_root); s = full_path; do { struct inode *dir = d_inode(dentry); struct dentry *child; if (!S_ISDIR(dir->i_mode)) { dput(dentry); dentry = ERR_PTR(-ENOTDIR); break; } /* skip separators */ while (*s == sep) s++; if (!*s) break; p = s++; /* next separator */ while (*s && *s != sep) s++; child = lookup_positive_unlocked(p, dentry, s - p); dput(dentry); dentry = child; } while (!IS_ERR(dentry)); kfree(full_path); return dentry; } static int cifs_set_super(struct super_block *sb, void *data) { struct cifs_mnt_data *mnt_data = data; sb->s_fs_info = mnt_data->cifs_sb; return set_anon_super(sb, NULL); } struct dentry * cifs_smb3_do_mount(struct file_system_type *fs_type, int flags, struct smb3_fs_context *old_ctx) { struct cifs_mnt_data mnt_data; struct cifs_sb_info *cifs_sb; struct super_block *sb; struct dentry *root; int rc; if (cifsFYI) { cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__, old_ctx->source, flags); } else { cifs_info("Attempting to mount %s\n", old_ctx->source); } cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); if (!cifs_sb) return ERR_PTR(-ENOMEM); cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (!cifs_sb->ctx) { root = ERR_PTR(-ENOMEM); goto out; } rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx); if (rc) { root = ERR_PTR(rc); goto out; } rc = cifs_setup_cifs_sb(cifs_sb); if (rc) { root = ERR_PTR(rc); goto out; } rc = cifs_mount(cifs_sb, cifs_sb->ctx); if (rc) { if (!(flags & SB_SILENT)) cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n", rc); root = ERR_PTR(rc); goto out; } mnt_data.ctx = cifs_sb->ctx; mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; /* BB should we make this contingent on mount parm? */ flags |= SB_NODIRATIME | SB_NOATIME; sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); if (IS_ERR(sb)) { cifs_umount(cifs_sb); return ERR_CAST(sb); } if (sb->s_root) { cifs_dbg(FYI, "Use existing superblock\n"); cifs_umount(cifs_sb); cifs_sb = NULL; } else { rc = cifs_read_super(sb); if (rc) { root = ERR_PTR(rc); goto out_super; } sb->s_flags |= SB_ACTIVE; } root = cifs_get_root(cifs_sb ? 
cifs_sb->ctx : old_ctx, sb); if (IS_ERR(root)) goto out_super; if (cifs_sb) cifs_sb->root = dget(root); cifs_dbg(FYI, "dentry root is: %p\n", root); return root; out_super: deactivate_locked_super(sb); return root; out: kfree(cifs_sb->prepath); smb3_cleanup_fs_context(cifs_sb->ctx); kfree(cifs_sb); return root; } static ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) { ssize_t rc; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_DIRECT) return cifs_user_readv(iocb, iter); rc = cifs_revalidate_mapping(inode); if (rc) return rc; return generic_file_read_iter(iocb, iter); } static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); struct cifsInodeInfo *cinode = CIFS_I(inode); ssize_t written; int rc; if (iocb->ki_filp->f_flags & O_DIRECT) { written = cifs_user_writev(iocb, from); if (written > 0 && CIFS_CACHE_READ(cinode)) { cifs_zap_mapping(inode); cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n", inode); cinode->oplock = 0; } return written; } written = cifs_get_writer(cinode); if (written) return written; written = generic_file_write_iter(iocb, from); if (CIFS_CACHE_WRITE(CIFS_I(inode))) goto out; rc = filemap_fdatawrite(inode->i_mapping); if (rc) cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n", rc, inode); out: cifs_put_writer(cinode); return written; } static loff_t cifs_llseek(struct file *file, loff_t offset, int whence) { struct cifsFileInfo *cfile = file->private_data; struct cifs_tcon *tcon; /* * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate * the cached file length */ if (whence != SEEK_SET && whence != SEEK_CUR) { int rc; struct inode *inode = file_inode(file); /* * We need to be sure that all dirty pages are written and the * server has the newest file length. */ if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping && inode->i_mapping->nrpages != 0) { rc = filemap_fdatawait(inode->i_mapping); if (rc) { mapping_set_error(inode->i_mapping, rc); return rc; } } /* * Some applications poll for the file length in this strange * way so we must seek to end on non-oplocked files by * setting the revalidate time to zero. */ CIFS_I(inode)->time = 0; rc = cifs_revalidate_file_attr(file); if (rc < 0) return (loff_t)rc; } if (cfile && cfile->tlink) { tcon = tlink_tcon(cfile->tlink); if (tcon->ses->server->ops->llseek) return tcon->ses->server->ops->llseek(file, tcon, offset, whence); } return generic_file_llseek(file, offset, whence); } static int cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv) { /* * Note that this is called by vfs setlease with i_lock held to * protect *lease from going away. */ struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; if (!(S_ISREG(inode->i_mode))) return -EINVAL; /* Check if file is oplocked if this is request for new lease */ if (arg == F_UNLCK || ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) || ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode)))) return generic_setlease(file, arg, lease, priv); else if (tlink_tcon(cfile->tlink)->local_lease && !CIFS_CACHE_READ(CIFS_I(inode))) /* * If the server claims to support oplock on this file, then we * still need to check oplock even if the local_lease mount * option is set, but there are servers which do not support * oplock for which this mount option may be useful if the user * knows that the file won't be changed on the server by anyone * else. 
*/ return generic_setlease(file, arg, lease, priv); else return -EAGAIN; } struct file_system_type cifs_fs_type = { .owner = THIS_MODULE, .name = "cifs", .init_fs_context = smb3_init_fs_context, .parameters = smb3_fs_parameters, .kill_sb = cifs_kill_sb, .fs_flags = FS_RENAME_DOES_D_MOVE, }; MODULE_ALIAS_FS("cifs"); struct file_system_type smb3_fs_type = { .owner = THIS_MODULE, .name = "smb3", .init_fs_context = smb3_init_fs_context, .parameters = smb3_fs_parameters, .kill_sb = cifs_kill_sb, .fs_flags = FS_RENAME_DOES_D_MOVE, }; MODULE_ALIAS_FS("smb3"); MODULE_ALIAS("smb3"); const struct inode_operations cifs_dir_inode_ops = { .create = cifs_create, .atomic_open = cifs_atomic_open, .lookup = cifs_lookup, .getattr = cifs_getattr, .unlink = cifs_unlink, .link = cifs_hardlink, .mkdir = cifs_mkdir, .rmdir = cifs_rmdir, .rename = cifs_rename2, .permission = cifs_permission, .setattr = cifs_setattr, .symlink = cifs_symlink, .mknod = cifs_mknod, .listxattr = cifs_listxattr, .get_acl = cifs_get_acl, .set_acl = cifs_set_acl, }; const struct inode_operations cifs_file_inode_ops = { .setattr = cifs_setattr, .getattr = cifs_getattr, .permission = cifs_permission, .listxattr = cifs_listxattr, .fiemap = cifs_fiemap, .get_acl = cifs_get_acl, .set_acl = cifs_set_acl, }; const char *cifs_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { char *target_path; target_path = kmalloc(PATH_MAX, GFP_KERNEL); if (!target_path) return ERR_PTR(-ENOMEM); spin_lock(&inode->i_lock); if (likely(CIFS_I(inode)->symlink_target)) { strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX); } else { kfree(target_path); target_path = ERR_PTR(-EOPNOTSUPP); } spin_unlock(&inode->i_lock); if (!IS_ERR(target_path)) set_delayed_call(done, kfree_link, target_path); return target_path; } const struct inode_operations cifs_symlink_inode_ops = { .get_link = cifs_get_link, .permission = cifs_permission, .listxattr = cifs_listxattr, }; static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, struct file *dst_file, loff_t destoff, loff_t len, unsigned int remap_flags) { struct inode *src_inode = file_inode(src_file); struct inode *target_inode = file_inode(dst_file); struct cifsFileInfo *smb_file_src = src_file->private_data; struct cifsFileInfo *smb_file_target; struct cifs_tcon *target_tcon; unsigned int xid; int rc; if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) return -EINVAL; cifs_dbg(FYI, "clone range\n"); xid = get_xid(); if (!src_file->private_data || !dst_file->private_data) { rc = -EBADF; cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); goto out; } smb_file_target = dst_file->private_data; target_tcon = tlink_tcon(smb_file_target->tlink); /* * Note: cifs case is easier than btrfs since server responsible for * checks for proper open modes and file type and if it wants * server could even support copy of range where source = target */ lock_two_nondirectories(target_inode, src_inode); if (len == 0) len = src_inode->i_size - off; cifs_dbg(FYI, "about to flush pages\n"); /* should we flush first and last page first */ truncate_inode_pages_range(&target_inode->i_data, destoff, PAGE_ALIGN(destoff + len)-1); if (target_tcon->ses->server->ops->duplicate_extents) rc = target_tcon->ses->server->ops->duplicate_extents(xid, smb_file_src, smb_file_target, off, len, destoff); else rc = -EOPNOTSUPP; /* force revalidate of size and timestamps of target file now that target is updated on the server */ CIFS_I(target_inode)->time = 0; /* although unlocking in the reverse 
order from locking is not strictly necessary here it is a little cleaner to be consistent */ unlock_two_nondirectories(src_inode, target_inode); out: free_xid(xid); return rc < 0 ? rc : len; } ssize_t cifs_file_copychunk_range(unsigned int xid, struct file *src_file, loff_t off, struct file *dst_file, loff_t destoff, size_t len, unsigned int flags) { struct inode *src_inode = file_inode(src_file); struct inode *target_inode = file_inode(dst_file); struct cifsFileInfo *smb_file_src; struct cifsFileInfo *smb_file_target; struct cifs_tcon *src_tcon; struct cifs_tcon *target_tcon; ssize_t rc; cifs_dbg(FYI, "copychunk range\n"); if (!src_file->private_data || !dst_file->private_data) { rc = -EBADF; cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); goto out; } rc = -EXDEV; smb_file_target = dst_file->private_data; smb_file_src = src_file->private_data; src_tcon = tlink_tcon(smb_file_src->tlink); target_tcon = tlink_tcon(smb_file_target->tlink); if (src_tcon->ses != target_tcon->ses) { cifs_dbg(VFS, "source and target of copy not on same server\n"); goto out; } rc = -EOPNOTSUPP; if (!target_tcon->ses->server->ops->copychunk_range) goto out; /* * Note: cifs case is easier than btrfs since server responsible for * checks for proper open modes and file type and if it wants * server could even support copy of range where source = target */ lock_two_nondirectories(target_inode, src_inode); cifs_dbg(FYI, "about to flush pages\n"); rc = filemap_write_and_wait_range(src_inode->i_mapping, off, off + len - 1); if (rc) goto unlock; /* should we flush first and last page first */ truncate_inode_pages(&target_inode->i_data, 0); rc = file_modified(dst_file); if (!rc) rc = target_tcon->ses->server->ops->copychunk_range(xid, smb_file_src, smb_file_target, off, len, destoff); file_accessed(src_file); /* force revalidate of size and timestamps of target file now * that target is updated on the server */ CIFS_I(target_inode)->time = 0; unlock: /* although unlocking in the reverse order from locking is not * strictly necessary here it is a little cleaner to be consistent */ unlock_two_nondirectories(src_inode, target_inode); out: return rc; } /* * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync() * is a dummy operation. 
*/ static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) { cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n", file, datasync); return 0; } static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, struct file *dst_file, loff_t destoff, size_t len, unsigned int flags) { unsigned int xid = get_xid(); ssize_t rc; struct cifsFileInfo *cfile = dst_file->private_data; if (cfile->swapfile) { rc = -EOPNOTSUPP; free_xid(xid); return rc; } rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, len, flags); free_xid(xid); if (rc == -EOPNOTSUPP || rc == -EXDEV) rc = generic_copy_file_range(src_file, off, dst_file, destoff, len, flags); return rc; } const struct file_operations cifs_file_ops = { .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .flock = cifs_flock, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_strict_ops = { .read_iter = cifs_strict_readv, .write_iter = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .flock = cifs_flock, .fsync = cifs_strict_fsync, .flush = cifs_flush, .mmap = cifs_file_strict_mmap, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_direct_ops = { .read_iter = cifs_direct_readv, .write_iter = cifs_direct_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .flock = cifs_flock, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_nobrl_ops = { .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, .open = cifs_open, .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_strict_nobrl_ops = { .read_iter = cifs_strict_readv, .write_iter = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .fsync = cifs_strict_fsync, .flush = cifs_flush, .mmap = cifs_file_strict_mmap, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_direct_nobrl_ops = { .read_iter = cifs_direct_readv, .write_iter = cifs_direct_writev, .open = cifs_open, .release = cifs_close, .fsync = 
cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_dir_ops = { .iterate_shared = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, .remap_file_range = cifs_remap_file_range, .llseek = generic_file_llseek, .fsync = cifs_dir_fsync, }; static void cifs_init_once(void *inode) { struct cifsInodeInfo *cifsi = inode; inode_init_once(&cifsi->netfs.inode); init_rwsem(&cifsi->lock_sem); } static int __init cifs_init_inodecache(void) { cifs_inode_cachep = kmem_cache_create("cifs_inode_cache", sizeof(struct cifsInodeInfo), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), cifs_init_once); if (cifs_inode_cachep == NULL) return -ENOMEM; return 0; } static void cifs_destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(cifs_inode_cachep); } static int cifs_init_request_bufs(void) { /* * SMB2 maximum header size is bigger than CIFS one - no problems to * allocate some more bytes for CIFS. */ size_t max_hdr_size = MAX_SMB2_HDR_SIZE; if (CIFSMaxBufSize < 8192) { /* Buffer size can not be smaller than 2 * PATH_MAX since maximum Unicode path name has to fit in any SMB/CIFS path based frames */ CIFSMaxBufSize = 8192; } else if (CIFSMaxBufSize > 1024*127) { CIFSMaxBufSize = 1024 * 127; } else { CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/ } /* cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n", CIFSMaxBufSize, CIFSMaxBufSize); */ cifs_req_cachep = kmem_cache_create_usercopy("cifs_request", CIFSMaxBufSize + max_hdr_size, 0, SLAB_HWCACHE_ALIGN, 0, CIFSMaxBufSize + max_hdr_size, NULL); if (cifs_req_cachep == NULL) return -ENOMEM; if (cifs_min_rcv < 1) cifs_min_rcv = 1; else if (cifs_min_rcv > 64) { cifs_min_rcv = 64; cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n"); } cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, cifs_req_cachep); if (cifs_req_poolp == NULL) { kmem_cache_destroy(cifs_req_cachep); return -ENOMEM; } /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and almost all handle based requests (but not write response, nor is it sufficient for path based requests). 
A smaller size would have been more efficient (compacting multiple slab items on one 4k page) for the case in which debug was on, but this larger size allows more SMBs to use small buffer alloc and is still much more efficient to alloc 1 per page off the slab compared to 17K (5page) alloc of large cifs buffers even when page debugging is on */ cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq", MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL); if (cifs_sm_req_cachep == NULL) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); return -ENOMEM; } if (cifs_min_small < 2) cifs_min_small = 2; else if (cifs_min_small > 256) { cifs_min_small = 256; cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n"); } cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, cifs_sm_req_cachep); if (cifs_sm_req_poolp == NULL) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); kmem_cache_destroy(cifs_sm_req_cachep); return -ENOMEM; } return 0; } static void cifs_destroy_request_bufs(void) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); mempool_destroy(cifs_sm_req_poolp); kmem_cache_destroy(cifs_sm_req_cachep); } static int init_mids(void) { cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", sizeof(struct mid_q_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (cifs_mid_cachep == NULL) return -ENOMEM; /* 3 is a reasonable minimum number of simultaneous operations */ cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep); if (cifs_mid_poolp == NULL) { kmem_cache_destroy(cifs_mid_cachep); return -ENOMEM; } return 0; } static void destroy_mids(void) { mempool_destroy(cifs_mid_poolp); kmem_cache_destroy(cifs_mid_cachep); } static int __init init_cifs(void) { int rc = 0; cifs_proc_init(); INIT_LIST_HEAD(&cifs_tcp_ses_list); /* * Initialize Global counters */ atomic_set(&sesInfoAllocCount, 0); atomic_set(&tconInfoAllocCount, 0); atomic_set(&tcpSesNextId, 0); atomic_set(&tcpSesAllocCount, 0); atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); atomic_set(&buf_alloc_count, 0); atomic_set(&small_buf_alloc_count, 0); #ifdef CONFIG_CIFS_STATS2 atomic_set(&total_buf_alloc_count, 0); atomic_set(&total_small_buf_alloc_count, 0); if (slow_rsp_threshold < 1) cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); else if (slow_rsp_threshold > 32767) cifs_dbg(VFS, "slow response threshold set higher than recommended (0 to 32767)\n"); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&mid_count, 0); GlobalCurrentXid = 0; GlobalTotalActiveXid = 0; GlobalMaxActiveXid = 0; spin_lock_init(&cifs_tcp_ses_lock); spin_lock_init(&GlobalMid_Lock); cifs_lock_secret = get_random_u32(); if (cifs_max_pending < 2) { cifs_max_pending = 2; cifs_dbg(FYI, "cifs_max_pending set to min of 2\n"); } else if (cifs_max_pending > CIFS_MAX_REQ) { cifs_max_pending = CIFS_MAX_REQ; cifs_dbg(FYI, "cifs_max_pending set to max of %u\n", CIFS_MAX_REQ); } /* Limit max to about 18 hours, and setting to zero disables directory entry caching */ if (dir_cache_timeout > 65000) { dir_cache_timeout = 65000; cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n"); } cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!cifsiod_wq) { rc = -ENOMEM; goto out_clean_proc; } /* * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3) * so that we don't launch too many worker threads but * Documentation/core-api/workqueue.rst recommends setting it to 0 */ /* WQ_UNBOUND allows decrypt tasks to 
run on any CPU */ decrypt_wq = alloc_workqueue("smb3decryptd", WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!decrypt_wq) { rc = -ENOMEM; goto out_destroy_cifsiod_wq; } fileinfo_put_wq = alloc_workqueue("cifsfileinfoput", WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!fileinfo_put_wq) { rc = -ENOMEM; goto out_destroy_decrypt_wq; } cifsoplockd_wq = alloc_workqueue("cifsoplockd", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!cifsoplockd_wq) { rc = -ENOMEM; goto out_destroy_fileinfo_put_wq; } deferredclose_wq = alloc_workqueue("deferredclose", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!deferredclose_wq) { rc = -ENOMEM; goto out_destroy_cifsoplockd_wq; } rc = cifs_init_inodecache(); if (rc) goto out_destroy_deferredclose_wq; rc = init_mids(); if (rc) goto out_destroy_inodecache; rc = cifs_init_request_bufs(); if (rc) goto out_destroy_mids; #ifdef CONFIG_CIFS_DFS_UPCALL rc = dfs_cache_init(); if (rc) goto out_destroy_request_bufs; #endif /* CONFIG_CIFS_DFS_UPCALL */ #ifdef CONFIG_CIFS_UPCALL rc = init_cifs_spnego(); if (rc) goto out_destroy_dfs_cache; #endif /* CONFIG_CIFS_UPCALL */ #ifdef CONFIG_CIFS_SWN_UPCALL rc = cifs_genl_init(); if (rc) goto out_register_key_type; #endif /* CONFIG_CIFS_SWN_UPCALL */ rc = init_cifs_idmap(); if (rc) goto out_cifs_swn_init; rc = register_filesystem(&cifs_fs_type); if (rc) goto out_init_cifs_idmap; rc = register_filesystem(&smb3_fs_type); if (rc) { unregister_filesystem(&cifs_fs_type); goto out_init_cifs_idmap; } return 0; out_init_cifs_idmap: exit_cifs_idmap(); out_cifs_swn_init: #ifdef CONFIG_CIFS_SWN_UPCALL cifs_genl_exit(); out_register_key_type: #endif #ifdef CONFIG_CIFS_UPCALL exit_cifs_spnego(); out_destroy_dfs_cache: #endif #ifdef CONFIG_CIFS_DFS_UPCALL dfs_cache_destroy(); out_destroy_request_bufs: #endif cifs_destroy_request_bufs(); out_destroy_mids: destroy_mids(); out_destroy_inodecache: cifs_destroy_inodecache(); out_destroy_deferredclose_wq: destroy_workqueue(deferredclose_wq); out_destroy_cifsoplockd_wq: destroy_workqueue(cifsoplockd_wq); out_destroy_fileinfo_put_wq: destroy_workqueue(fileinfo_put_wq); out_destroy_decrypt_wq: destroy_workqueue(decrypt_wq); out_destroy_cifsiod_wq: destroy_workqueue(cifsiod_wq); out_clean_proc: cifs_proc_clean(); return rc; } static void __exit exit_cifs(void) { cifs_dbg(NOISY, "exit_smb3\n"); unregister_filesystem(&cifs_fs_type); unregister_filesystem(&smb3_fs_type); cifs_release_automount_timer(); exit_cifs_idmap(); #ifdef CONFIG_CIFS_SWN_UPCALL cifs_genl_exit(); #endif #ifdef CONFIG_CIFS_UPCALL exit_cifs_spnego(); #endif #ifdef CONFIG_CIFS_DFS_UPCALL dfs_cache_destroy(); #endif cifs_destroy_request_bufs(); destroy_mids(); cifs_destroy_inodecache(); destroy_workqueue(deferredclose_wq); destroy_workqueue(cifsoplockd_wq); destroy_workqueue(decrypt_wq); destroy_workqueue(fileinfo_put_wq); destroy_workqueue(cifsiod_wq); cifs_proc_clean(); } MODULE_AUTHOR("Steve French"); MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */ MODULE_DESCRIPTION ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and " "also older servers complying with the SNIA CIFS Specification)"); MODULE_VERSION(CIFS_VERSION); MODULE_SOFTDEP("ecb"); MODULE_SOFTDEP("hmac"); MODULE_SOFTDEP("md5"); MODULE_SOFTDEP("nls"); MODULE_SOFTDEP("aes"); MODULE_SOFTDEP("cmac"); MODULE_SOFTDEP("sha256"); MODULE_SOFTDEP("sha512"); MODULE_SOFTDEP("aead2"); MODULE_SOFTDEP("ccm"); MODULE_SOFTDEP("gcm"); module_init(init_cifs) module_exit(exit_cifs)
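/*
 * Illustrative sketch (standalone user-space demo, not kernel code):
 * decoding the packed DOS date/time limits SMB_DATE_MAX and SMB_TIME_MAX
 * defined near the top of this file.  Dates pack years-since-1980 in bits
 * 15:9, month in bits 8:5 and day in bits 4:0; times pack hours in bits
 * 15:11, minutes in bits 10:5 and two-second units in bits 4:0, which is
 * why 127<<9 corresponds to the year 2107 mentioned in the comment there.
 */
#include <stdio.h>

int main(void)
{
	unsigned int date_max = 127 << 9 | 12 << 5 | 31;	/* SMB_DATE_MAX */
	unsigned int time_max = 23 << 11 | 59 << 5 | 29;	/* SMB_TIME_MAX */

	printf("max DOS date: %u-%02u-%02u\n",
	       1980 + (date_max >> 9),	/* 2107 */
	       (date_max >> 5) & 0xf,	/* 12 */
	       date_max & 0x1f);	/* 31 */
	printf("max DOS time: %02u:%02u:%02u\n",
	       time_max >> 11,		/* 23 */
	       (time_max >> 5) & 0x3f,	/* 59 */
	       (time_max & 0x1f) * 2);	/* 58 */
	return 0;
}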
linux-master
fs/smb/client/cifsfs.c
// SPDX-License-Identifier: LGPL-2.1 /* * * vfs operations that deal with files * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French ([email protected]) * Jeremy Allison ([email protected]) * */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/backing-dev.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/task_io_accounting_ops.h> #include <linux/delay.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/mm.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "fscache.h" #include "smbdirect.h" #include "fs_context.h" #include "cifs_ioctl.h" #include "cached_dir.h" /* * Remove the dirty flags from a span of pages. */ static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len) { struct address_space *mapping = inode->i_mapping; struct folio *folio; pgoff_t end; XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); rcu_read_lock(); end = (start + len - 1) / PAGE_SIZE; xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) { if (xas_retry(&xas, folio)) continue; xas_pause(&xas); rcu_read_unlock(); folio_lock(folio); folio_clear_dirty_for_io(folio); folio_unlock(folio); rcu_read_lock(); } rcu_read_unlock(); } /* * Completion of write to server. */ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len) { struct address_space *mapping = inode->i_mapping; struct folio *folio; pgoff_t end; XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); if (!len) return; rcu_read_lock(); end = (start + len - 1) / PAGE_SIZE; xas_for_each(&xas, folio, end) { if (xas_retry(&xas, folio)) continue; if (!folio_test_writeback(folio)) { WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", len, start, folio_index(folio), end); continue; } folio_detach_private(folio); folio_end_writeback(folio); } rcu_read_unlock(); } /* * Failure of write to server. */ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len) { struct address_space *mapping = inode->i_mapping; struct folio *folio; pgoff_t end; XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); if (!len) return; rcu_read_lock(); end = (start + len - 1) / PAGE_SIZE; xas_for_each(&xas, folio, end) { if (xas_retry(&xas, folio)) continue; if (!folio_test_writeback(folio)) { WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", len, start, folio_index(folio), end); continue; } folio_set_error(folio); folio_end_writeback(folio); } rcu_read_unlock(); } /* * Redirty pages after a temporary failure. */ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len) { struct address_space *mapping = inode->i_mapping; struct folio *folio; pgoff_t end; XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); if (!len) return; rcu_read_lock(); end = (start + len - 1) / PAGE_SIZE; xas_for_each(&xas, folio, end) { if (!folio_test_writeback(folio)) { WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", len, start, folio_index(folio), end); continue; } filemap_dirty_folio(folio->mapping, folio); folio_end_writeback(folio); } rcu_read_unlock(); } /* * Mark as invalid, all open files on tree connections since they * were closed when session to server was lost. 
*/ void cifs_mark_open_files_invalid(struct cifs_tcon *tcon) { struct cifsFileInfo *open_file = NULL; struct list_head *tmp; struct list_head *tmp1; /* only send once per connect */ spin_lock(&tcon->tc_lock); if (tcon->status != TID_NEED_RECON) { spin_unlock(&tcon->tc_lock); return; } tcon->status = TID_IN_FILES_INVALIDATE; spin_unlock(&tcon->tc_lock); /* list all files open on tree connection and mark them invalid */ spin_lock(&tcon->open_file_lock); list_for_each_safe(tmp, tmp1, &tcon->openFileList) { open_file = list_entry(tmp, struct cifsFileInfo, tlist); open_file->invalidHandle = true; open_file->oplock_break_cancelled = true; } spin_unlock(&tcon->open_file_lock); invalidate_all_cached_dirs(tcon); spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_FILES_INVALIDATE) tcon->status = TID_NEED_TCON; spin_unlock(&tcon->tc_lock); /* * BB Add call to invalidate_inodes(sb) for all superblocks mounted * to this tcon. */ } static inline int cifs_convert_flags(unsigned int flags) { if ((flags & O_ACCMODE) == O_RDONLY) return GENERIC_READ; else if ((flags & O_ACCMODE) == O_WRONLY) return GENERIC_WRITE; else if ((flags & O_ACCMODE) == O_RDWR) { /* GENERIC_ALL is too much permission to request; it can cause unnecessary access-denied errors on create */ /* return GENERIC_ALL; */ return (GENERIC_READ | GENERIC_WRITE); } return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static u32 cifs_posix_convert_flags(unsigned int flags) { u32 posix_flags = 0; if ((flags & O_ACCMODE) == O_RDONLY) posix_flags = SMB_O_RDONLY; else if ((flags & O_ACCMODE) == O_WRONLY) posix_flags = SMB_O_WRONLY; else if ((flags & O_ACCMODE) == O_RDWR) posix_flags = SMB_O_RDWR; if (flags & O_CREAT) { posix_flags |= SMB_O_CREAT; if (flags & O_EXCL) posix_flags |= SMB_O_EXCL; } else if (flags & O_EXCL) cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. 
Ignoring O_EXCL\n", current->comm, current->tgid); if (flags & O_TRUNC) posix_flags |= SMB_O_TRUNC; /* be safe and imply O_SYNC for O_DSYNC */ if (flags & O_DSYNC) posix_flags |= SMB_O_SYNC; if (flags & O_DIRECTORY) posix_flags |= SMB_O_DIRECTORY; if (flags & O_NOFOLLOW) posix_flags |= SMB_O_NOFOLLOW; if (flags & O_DIRECT) posix_flags |= SMB_O_DIRECT; return posix_flags; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static inline int cifs_get_disposition(unsigned int flags) { if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) return FILE_CREATE; else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) return FILE_OVERWRITE_IF; else if ((flags & O_CREAT) == O_CREAT) return FILE_OPEN_IF; else if ((flags & O_TRUNC) == O_TRUNC) return FILE_OVERWRITE; else return FILE_OPEN; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_posix_open(const char *full_path, struct inode **pinode, struct super_block *sb, int mode, unsigned int f_flags, __u32 *poplock, __u16 *pnetfid, unsigned int xid) { int rc; FILE_UNIX_BASIC_INFO *presp_data; __u32 posix_flags = 0; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fattr fattr; struct tcon_link *tlink; struct cifs_tcon *tcon; cifs_dbg(FYI, "posix open %s\n", full_path); presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); if (presp_data == NULL) return -ENOMEM; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto posix_open_ret; } tcon = tlink_tcon(tlink); mode &= ~current_umask(); posix_flags = cifs_posix_convert_flags(f_flags); rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, poplock, full_path, cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_put_tlink(tlink); if (rc) goto posix_open_ret; if (presp_data->Type == cpu_to_le32(-1)) goto posix_open_ret; /* open ok, caller does qpathinfo */ if (!pinode) goto posix_open_ret; /* caller does not need info */ cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb); /* get new inode and set it up */ if (*pinode == NULL) { cifs_fill_uniqueid(sb, &fattr); *pinode = cifs_iget(sb, &fattr); if (!*pinode) { rc = -ENOMEM; goto posix_open_ret; } } else { cifs_revalidate_mapping(*pinode); rc = cifs_fattr_to_inode(*pinode, &fattr); } posix_open_ret: kfree(presp_data); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock, struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf) { int rc; int desired_access; int disposition; int create_options = CREATE_NOT_DIR; struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; if (!server->ops->open) return -ENOSYS; desired_access = cifs_convert_flags(f_flags); /********************************************************************* * open flag mapping table: * * POSIX Flag CIFS Disposition * ---------- ---------------- * O_CREAT FILE_OPEN_IF * O_CREAT | O_EXCL FILE_CREATE * O_CREAT | O_TRUNC FILE_OVERWRITE_IF * O_TRUNC FILE_OVERWRITE * none of the above FILE_OPEN * * Note that there is not a direct match between disposition * FILE_SUPERSEDE (ie create whether or not file exists although * O_CREAT | O_TRUNC is similar but truncates the existing * file rather than creating a new file as FILE_SUPERSEDE does * (which uses the attributes / metadata passed in on open call) *? *? O_SYNC is a reasonable match to CIFS writethrough flag *? and the read write flags match reasonably. O_LARGEFILE *? 
is irrelevant because largefile support is always used *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation *********************************************************************/ disposition = cifs_get_disposition(f_flags); /* BB pass O_SYNC flag through on file attributes .. BB */ /* O_SYNC also has bit for O_DSYNC so following check picks up either */ if (f_flags & O_SYNC) create_options |= CREATE_WRITE_THROUGH; if (f_flags & O_DIRECT) create_options |= CREATE_NO_BUFFER; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = desired_access, .create_options = cifs_create_options(cifs_sb, create_options), .disposition = disposition, .path = full_path, .fid = fid, }; rc = server->ops->open(xid, &oparms, oplock, buf); if (rc) return rc; /* TODO: Add support for calling posix query info but with passing in fid */ if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, xid, fid); if (rc) { server->ops->close(xid, tcon, fid); if (rc == -ESTALE) rc = -EOPENSTALE; } return rc; } static bool cifs_has_mand_locks(struct cifsInodeInfo *cinode) { struct cifs_fid_locks *cur; bool has_locks = false; down_read(&cinode->lock_sem); list_for_each_entry(cur, &cinode->llist, llist) { if (!list_empty(&cur->locks)) { has_locks = true; break; } } up_read(&cinode->lock_sem); return has_locks; } void cifs_down_write(struct rw_semaphore *sem) { while (!down_write_trylock(sem)) msleep(10); } static void cifsFileInfo_put_work(struct work_struct *work); struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock, const char *symlink_target) { struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifsFileInfo *cfile; struct cifs_fid_locks *fdlocks; struct cifs_tcon *tcon = tlink_tcon(tlink); struct TCP_Server_Info *server = tcon->ses->server; cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (cfile == NULL) return cfile; fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL); if (!fdlocks) { kfree(cfile); return NULL; } if (symlink_target) { cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL); if (!cfile->symlink_target) { kfree(fdlocks); kfree(cfile); return NULL; } } INIT_LIST_HEAD(&fdlocks->locks); fdlocks->cfile = cfile; cfile->llist = fdlocks; cfile->count = 1; cfile->pid = current->tgid; cfile->uid = current_fsuid(); cfile->dentry = dget(dentry); cfile->f_flags = file->f_flags; cfile->invalidHandle = false; cfile->deferred_close_scheduled = false; cfile->tlink = cifs_get_tlink(tlink); INIT_WORK(&cfile->oplock_break, cifs_oplock_break); INIT_WORK(&cfile->put, cifsFileInfo_put_work); INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close); mutex_init(&cfile->fh_mutex); spin_lock_init(&cfile->file_info_lock); cifs_sb_active(inode->i_sb); /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. 
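Stepping back to the open path: cifs_get_disposition() collapses the POSIX creation flags into an SMB create disposition exactly as the mapping table above describes. An editor-added userspace check of that ladder; the FILE_* values are stand-ins, only the flag logic mirrors the driver:

#include <stdio.h>
#include <fcntl.h>

/* Stand-in values; the real FILE_* dispositions live in the SMB headers. */
enum { FILE_OPEN, FILE_CREATE, FILE_OPEN_IF, FILE_OVERWRITE, FILE_OVERWRITE_IF };

static const char *names[] = {
	"FILE_OPEN", "FILE_CREATE", "FILE_OPEN_IF",
	"FILE_OVERWRITE", "FILE_OVERWRITE_IF",
};

/* Same decision ladder as cifs_get_disposition() in the listing above. */
static int get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if (flags & O_CREAT)
		return FILE_OPEN_IF;
	else if (flags & O_TRUNC)
		return FILE_OVERWRITE;
	return FILE_OPEN;
}

int main(void)
{
	printf("O_CREAT|O_EXCL  -> %s\n", names[get_disposition(O_CREAT | O_EXCL)]);
	printf("O_CREAT|O_TRUNC -> %s\n", names[get_disposition(O_CREAT | O_TRUNC)]);
	printf("O_CREAT         -> %s\n", names[get_disposition(O_CREAT)]);
	printf("O_TRUNC         -> %s\n", names[get_disposition(O_TRUNC)]);
	printf("(none)          -> %s\n", names[get_disposition(0)]);
	return 0;
}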
*/ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n"); oplock = 0; } cifs_down_write(&cinode->lock_sem); list_add(&fdlocks->llist, &cinode->llist); up_write(&cinode->lock_sem); spin_lock(&tcon->open_file_lock); if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) oplock = fid->pending_open->oplock; list_del(&fid->pending_open->olist); fid->purge_cache = false; server->ops->set_fid(cfile, fid, oplock); list_add(&cfile->tlist, &tcon->openFileList); atomic_inc(&tcon->num_local_opens); /* if readable file instance put first in list*/ spin_lock(&cinode->open_file_lock); if (file->f_mode & FMODE_READ) list_add(&cfile->flist, &cinode->openFileList); else list_add_tail(&cfile->flist, &cinode->openFileList); spin_unlock(&cinode->open_file_lock); spin_unlock(&tcon->open_file_lock); if (fid->purge_cache) cifs_zap_mapping(inode); file->private_data = cfile; return cfile; } struct cifsFileInfo * cifsFileInfo_get(struct cifsFileInfo *cifs_file) { spin_lock(&cifs_file->file_info_lock); cifsFileInfo_get_locked(cifs_file); spin_unlock(&cifs_file->file_info_lock); return cifs_file; } static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file) { struct inode *inode = d_inode(cifs_file->dentry); struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifsLockInfo *li, *tmp; struct super_block *sb = inode->i_sb; /* * Delete any outstanding lock records. We'll lose them when the file * is closed anyway. */ cifs_down_write(&cifsi->lock_sem); list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); } list_del(&cifs_file->llist->llist); kfree(cifs_file->llist); up_write(&cifsi->lock_sem); cifs_put_tlink(cifs_file->tlink); dput(cifs_file->dentry); cifs_sb_deactive(sb); kfree(cifs_file->symlink_target); kfree(cifs_file); } static void cifsFileInfo_put_work(struct work_struct *work) { struct cifsFileInfo *cifs_file = container_of(work, struct cifsFileInfo, put); cifsFileInfo_put_final(cifs_file); } /** * cifsFileInfo_put - release a reference of file priv data * * Always potentially wait for oplock handler. See _cifsFileInfo_put(). * * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file */ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) { _cifsFileInfo_put(cifs_file, true, true); } /** * _cifsFileInfo_put - release a reference of file priv data * * This may involve closing the filehandle @cifs_file out on the * server. Must be called without holding tcon->open_file_lock, * cinode->open_file_lock and cifs_file->file_info_lock. * * If @wait_for_oplock_handler is true and we are releasing the last * reference, wait for any running oplock break handler of the file * and cancel any pending one. 
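The get/put pair around this point keeps a cifsFileInfo alive through a count protected by file_info_lock, and routes the final release either inline or onto fileinfo_put_wq. A stripped-down userspace model of that last-reference decision (C11 atomics stand in for the spinlocked counter; the demo_* names are invented):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>
#include <stdbool.h>

struct demo_file_info {
	atomic_int count;	/* like cifsFileInfo->count under file_info_lock */
};

static void final_release(struct demo_file_info *f, bool offload)
{
	/* In cifs.ko this is cifsFileInfo_put_final(), either called
	 * directly or queued on fileinfo_put_wq when offload is set. */
	printf("last ref dropped, releasing %s\n",
	       offload ? "via workqueue" : "inline");
	free(f);
}

static void demo_get(struct demo_file_info *f)
{
	atomic_fetch_add(&f->count, 1);
}

static void demo_put(struct demo_file_info *f, bool offload)
{
	if (atomic_fetch_sub(&f->count, 1) == 1)
		final_release(f, offload);
}

int main(void)
{
	struct demo_file_info *f = malloc(sizeof(*f));

	atomic_init(&f->count, 1);	/* creation ref, as in cifs_new_fileinfo() */
	demo_get(f);			/* extra user */
	demo_put(f, false);		/* still one ref left */
	demo_put(f, true);		/* final ref: offloaded release */
	return 0;
}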
* * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file * @wait_oplock_handler: must be false if called from oplock_break_handler * @offload: not offloaded on close and oplock breaks * */ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler, bool offload) { struct inode *inode = d_inode(cifs_file->dentry); struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); struct super_block *sb = inode->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fid fid = {}; struct cifs_pending_open open; bool oplock_break_cancelled; spin_lock(&tcon->open_file_lock); spin_lock(&cifsi->open_file_lock); spin_lock(&cifs_file->file_info_lock); if (--cifs_file->count > 0) { spin_unlock(&cifs_file->file_info_lock); spin_unlock(&cifsi->open_file_lock); spin_unlock(&tcon->open_file_lock); return; } spin_unlock(&cifs_file->file_info_lock); if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); /* store open in pending opens to make sure we don't miss lease break */ cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open); /* remove it from the lists */ list_del(&cifs_file->flist); list_del(&cifs_file->tlist); atomic_dec(&tcon->num_local_opens); if (list_empty(&cifsi->openFileList)) { cifs_dbg(FYI, "closing last open instance for inode %p\n", d_inode(cifs_file->dentry)); /* * In strict cache mode we need invalidate mapping on the last * close because it may cause a error when we open this file * again and get at least level II oplock. */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); cifs_set_oplock_level(cifsi, 0); } spin_unlock(&cifsi->open_file_lock); spin_unlock(&tcon->open_file_lock); oplock_break_cancelled = wait_oplock_handler ? 
cancel_work_sync(&cifs_file->oplock_break) : false; if (!tcon->need_reconnect && !cifs_file->invalidHandle) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int xid; xid = get_xid(); if (server->ops->close_getattr) server->ops->close_getattr(xid, tcon, cifs_file); else if (server->ops->close) server->ops->close(xid, tcon, &cifs_file->fid); _free_xid(xid); } if (oplock_break_cancelled) cifs_done_oplock_break(cifsi); cifs_del_pending_open(&open); if (offload) queue_work(fileinfo_put_wq, &cifs_file->put); else cifsFileInfo_put_final(cifs_file); } int cifs_open(struct inode *inode, struct file *file) { int rc = -EACCES; unsigned int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *server; struct cifs_tcon *tcon; struct tcon_link *tlink; struct cifsFileInfo *cfile = NULL; void *page; const char *full_path; bool posix_open_ok = false; struct cifs_fid fid = {}; struct cifs_pending_open open; struct cifs_open_info_data data = {}; xid = get_xid(); cifs_sb = CIFS_SB(inode->i_sb); if (unlikely(cifs_forced_shutdown(cifs_sb))) { free_xid(xid); return -EIO; } tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { free_xid(xid); return PTR_ERR(tlink); } tcon = tlink_tcon(tlink); server = tcon->ses->server; page = alloc_dentry_path(); full_path = build_path_from_dentry(file_dentry(file), page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", inode, file->f_flags, full_path); if (file->f_flags & O_DIRECT && cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) file->f_op = &cifs_file_direct_nobrl_ops; else file->f_op = &cifs_file_direct_ops; } /* Get the cached handle as SMB2 close is deferred */ rc = cifs_get_readable_path(tcon, full_path, &cfile); if (rc == 0) { if (file->f_flags == cfile->f_flags) { file->private_data = cfile; spin_lock(&CIFS_I(inode)->deferred_lock); cifs_del_deferred_close(cfile); spin_unlock(&CIFS_I(inode)->deferred_lock); goto use_cache; } else { _cifsFileInfo_put(cfile, true, false); } } if (server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (!tcon->broken_posix_open && tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, &inode, inode->i_sb, cifs_sb->ctx->file_mode /* ignored */, file->f_flags, &oplock, &fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix open succeeded\n"); posix_open_ok = true; } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { if (tcon->ses->serverNOS) cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n", tcon->ses->ip_addr, tcon->ses->serverNOS); tcon->broken_posix_open = true; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP)) /* path not found or net err */ goto out; /* * Else fallthrough to retry open the old way on network i/o * or DFS errors. 
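Worth highlighting from cifs_open() above: a handle parked by deferred close is recycled only when the new open's f_flags match the cached handle's exactly; any mismatch drops the cached handle and performs a fresh open. A compact sketch of just that decision (types and names simplified for illustration, not driver code):

#include <stdio.h>
#include <stdbool.h>

struct demo_handle {
	unsigned int f_flags;	/* flags the handle was originally opened with */
};

/* Reuse a deferred-close handle only on an exact flags match, as in
 * cifs_open(): a mismatch (say O_RDONLY vs O_RDWR) must reopen. */
static bool can_reuse(const struct demo_handle *cached, unsigned int new_flags)
{
	return cached && cached->f_flags == new_flags;
}

int main(void)
{
	struct demo_handle cached = { .f_flags = 0 /* e.g. O_RDONLY on Linux */ };

	printf("same flags:  %s\n", can_reuse(&cached, 0) ? "reuse" : "reopen");
	printf("other flags: %s\n", can_reuse(&cached, 2) ? "reuse" : "reopen");
	return 0;
}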
*/ } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); cifs_add_pending_open(&fid, tlink, &open); if (!posix_open_ok) { if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid, xid, &data); if (rc) { cifs_del_pending_open(&open); goto out; } } cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target); if (cfile == NULL) { if (server->ops->close) server->ops->close(xid, tcon, &fid); cifs_del_pending_open(&open); rc = -ENOMEM; goto out; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { /* * Time to set mode which we can not set earlier due to * problems creating new read-only files. */ struct cifs_unix_set_info_args args = { .mode = inode->i_mode, .uid = INVALID_UID, /* no change */ .gid = INVALID_GID, /* no change */ .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, cfile->pid); } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ use_cache: fscache_use_cookie(cifs_inode_cookie(file_inode(file)), file->f_mode & FMODE_WRITE); if (file->f_flags & O_DIRECT && (!((file->f_flags & O_ACCMODE) != O_RDONLY) || file->f_flags & O_APPEND)) cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE); out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); cifs_free_open_info(&data); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * Try to reacquire byte range locks that were released when session * to server was lost. 
*/ static int cifs_relock_file(struct cifsFileInfo *cfile) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); if (cinode->can_cache_brlcks) { /* can cache locks - no need to relock */ up_read(&cinode->lock_sem); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); up_read(&cinode->lock_sem); return rc; } static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) { int rc = -EACCES; unsigned int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsInodeInfo *cinode; struct inode *inode; void *page; const char *full_path; int desired_access; int disposition = FILE_OPEN; int create_options = CREATE_NOT_DIR; struct cifs_open_parms oparms; xid = get_xid(); mutex_lock(&cfile->fh_mutex); if (!cfile->invalidHandle) { mutex_unlock(&cfile->fh_mutex); free_xid(xid); return 0; } inode = d_inode(cfile->dentry); cifs_sb = CIFS_SB(inode->i_sb); tcon = tlink_tcon(cfile->tlink); server = tcon->ses->server; /* * Can not grab rename sem here because various ops, including those * that already have the rename sem can end up causing writepage to get * called and if the server was down that means we end up here, and we * can never tell if the caller already has the rename_sem. */ page = alloc_dentry_path(); full_path = build_path_from_dentry(cfile->dentry, page); if (IS_ERR(full_path)) { mutex_unlock(&cfile->fh_mutex); free_dentry_path(page); free_xid(xid); return PTR_ERR(full_path); } cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n", inode, cfile->f_flags, full_path); if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* * O_CREAT, O_EXCL and O_TRUNC already had their effect on the * original open. Must mask them off for a reopen. 
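The masking that follows is the important detail: replaying O_TRUNC on a reconnect-driven reopen would empty a file the application already wrote, and O_CREAT | O_EXCL would make every reopen of an existing file fail. A tiny editor-added illustration of the mask:

#include <stdio.h>
#include <fcntl.h>

int main(void)
{
	unsigned int f_flags = O_RDWR | O_CREAT | O_EXCL | O_TRUNC;
	/* Same expression as the reopen path: the creation/truncation flags
	 * already did their work on the original open. */
	unsigned int oflags = f_flags & ~(O_CREAT | O_EXCL | O_TRUNC);

	printf("open flags 0x%x -> reopen flags 0x%x\n", f_flags, oflags);
	return 0;
}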
*/ unsigned int oflags = cfile->f_flags & ~(O_CREAT | O_EXCL | O_TRUNC); rc = cifs_posix_open(full_path, NULL, inode->i_sb, cifs_sb->ctx->file_mode /* ignored */, oflags, &oplock, &cfile->fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix reopen succeeded\n"); oparms.reconnect = true; goto reopen_success; } /* * fallthrough to retry open the old way on errors, especially * in the reconnect path it is important to retry hard */ } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = cifs_convert_flags(cfile->f_flags); /* O_SYNC also has bit for O_DSYNC so following check picks up either */ if (cfile->f_flags & O_SYNC) create_options |= CREATE_WRITE_THROUGH; if (cfile->f_flags & O_DIRECT) create_options |= CREATE_NO_BUFFER; if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &cfile->fid); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = desired_access, .create_options = cifs_create_options(cifs_sb, create_options), .disposition = disposition, .path = full_path, .fid = &cfile->fid, .reconnect = true, }; /* * Can not refresh inode by passing in file_info buf to be returned by * ops->open and then calling get_inode_info with returned buf since * file might have write behind data that needs to be flushed and server * version of file size can be stale. If we knew for sure that inode was * not dirty locally we could do this. */ rc = server->ops->open(xid, &oparms, &oplock, NULL); if (rc == -ENOENT && oparms.reconnect == false) { /* durable handle timeout is expired - open the file again */ rc = server->ops->open(xid, &oparms, &oplock, NULL); /* indicate that we need to relock the file */ oparms.reconnect = true; } if (rc) { mutex_unlock(&cfile->fh_mutex); cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc); cifs_dbg(FYI, "oplock: %d\n", oplock); goto reopen_error_exit; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY reopen_success: #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ cfile->invalidHandle = false; mutex_unlock(&cfile->fh_mutex); cinode = CIFS_I(inode); if (can_flush) { rc = filemap_write_and_wait(inode->i_mapping); if (!is_interrupt_error(rc)) mapping_set_error(inode->i_mapping, rc); if (tcon->posix_extensions) rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid); else if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, xid, NULL); } /* * Else we are writing out data to server already and could deadlock if * we tried to flush data, and since we do not know if we have data that * would invalidate the current end of file on the server we can not go * to the server to get the new inode info. */ /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. 
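One subtlety in the reopen above: the first ops->open asks the server to resume a durable handle, and when that handle has timed out the driver retries once as a plain open and then remembers that locks must be re-pushed. The sketch below is a hypothetical model of that contract; demo_server_open() and its expiry behaviour are invented to mirror what the retry relies on:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct demo_oparms {
	bool reconnect;		/* ask server to resume a durable handle */
};

/* Hypothetical server open: resuming an expired durable handle fails with
 * -ENOENT and drops the reconnect hint, which is what the retry checks. */
static int demo_server_open(struct demo_oparms *op, bool handle_expired)
{
	if (op->reconnect && handle_expired) {
		op->reconnect = false;
		return -ENOENT;
	}
	return 0;
}

int main(void)
{
	struct demo_oparms op = { .reconnect = true };
	int rc = demo_server_open(&op, true);

	if (rc == -ENOENT && !op.reconnect) {
		rc = demo_server_open(&op, true);	/* plain open, no resume */
		op.reconnect = true;			/* locks must be re-pushed */
	}
	printf("rc=%d, relock needed: %s\n", rc, op.reconnect ? "yes" : "no");
	return 0;
}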
*/ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n"); oplock = 0; } server->ops->set_fid(cfile, &cfile->fid, oplock); if (oparms.reconnect) cifs_relock_file(cfile); reopen_error_exit: free_dentry_path(page); free_xid(xid); return rc; } void smb2_deferred_work_close(struct work_struct *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, deferred.work); spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); cifs_del_deferred_close(cfile); cfile->deferred_close_scheduled = false; spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); _cifsFileInfo_put(cfile, true, false); } int cifs_close(struct inode *inode, struct file *file) { struct cifsFileInfo *cfile; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_deferred_close *dclose; cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE); if (file->private_data != NULL) { cfile = file->private_data; file->private_data = NULL; dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL); if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG) && cinode->lease_granted && !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) && dclose) { if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { inode->i_mtime = inode_set_ctime_current(inode); } spin_lock(&cinode->deferred_lock); cifs_add_deferred_close(cfile, dclose); if (cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) { /* * If there is no pending work, mod_delayed_work queues new work. * So, Increase the ref count to avoid use-after-free. */ if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, cifs_sb->ctx->closetimeo)) cifsFileInfo_get(cfile); } else { /* Deferred close for files */ queue_delayed_work(deferredclose_wq, &cfile->deferred, cifs_sb->ctx->closetimeo); cfile->deferred_close_scheduled = true; spin_unlock(&cinode->deferred_lock); return 0; } spin_unlock(&cinode->deferred_lock); _cifsFileInfo_put(cfile, true, false); } else { _cifsFileInfo_put(cfile, true, false); kfree(dclose); } } /* return code from the ->release op is always ignored */ return 0; } void cifs_reopen_persistent_handles(struct cifs_tcon *tcon) { struct cifsFileInfo *open_file, *tmp; struct list_head tmp_list; if (!tcon->use_persistent || !tcon->need_reopen_files) return; tcon->need_reopen_files = false; cifs_dbg(FYI, "Reopen persistent handles\n"); INIT_LIST_HEAD(&tmp_list); /* list all files open on tree connection, reopen resilient handles */ spin_lock(&tcon->open_file_lock); list_for_each_entry(open_file, &tcon->openFileList, tlist) { if (!open_file->invalidHandle) continue; cifsFileInfo_get(open_file); list_add_tail(&open_file->rlist, &tmp_list); } spin_unlock(&tcon->open_file_lock); list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { if (cifs_reopen_file(open_file, false /* do not flush */)) tcon->need_reopen_files = true; list_del_init(&open_file->rlist); cifsFileInfo_put(open_file); } } int cifs_closedir(struct inode *inode, struct file *file) { int rc = 0; unsigned int xid; struct cifsFileInfo *cfile = file->private_data; struct cifs_tcon *tcon; struct TCP_Server_Info *server; char *buf; cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode); if (cfile == NULL) return rc; xid = get_xid(); tcon = tlink_tcon(cfile->tlink); server = tcon->ses->server; cifs_dbg(FYI, "Freeing private data in close dir\n"); spin_lock(&cfile->file_info_lock); if 
(server->ops->dir_needs_close(cfile)) { cfile->invalidHandle = true; spin_unlock(&cfile->file_info_lock); if (server->ops->close_dir) rc = server->ops->close_dir(xid, tcon, &cfile->fid); else rc = -ENOSYS; cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc); /* not much we can do if it fails anyway, ignore rc */ rc = 0; } else spin_unlock(&cfile->file_info_lock); buf = cfile->srch_inf.ntwrk_buf_start; if (buf) { cifs_dbg(FYI, "closedir free smb buf in srch struct\n"); cfile->srch_inf.ntwrk_buf_start = NULL; if (cfile->srch_inf.smallBuf) cifs_small_buf_release(buf); else cifs_buf_release(buf); } cifs_put_tlink(cfile->tlink); kfree(file->private_data); file->private_data = NULL; /* BB can we lock the filestruct while this is going on? */ free_xid(xid); return rc; } static struct cifsLockInfo * cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags) { struct cifsLockInfo *lock = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); if (!lock) return lock; lock->offset = offset; lock->length = length; lock->type = type; lock->pid = current->tgid; lock->flags = flags; INIT_LIST_HEAD(&lock->blist); init_waitqueue_head(&lock->block_q); return lock; } void cifs_del_lock_waiters(struct cifsLockInfo *lock) { struct cifsLockInfo *li, *tmp; list_for_each_entry_safe(li, tmp, &lock->blist, blist) { list_del_init(&li->blist); wake_up(&li->block_q); } } #define CIFS_LOCK_OP 0 #define CIFS_READ_OP 1 #define CIFS_WRITE_OP 2 /* @rw_check : 0 - no op, 1 - read, 2 - write */ static bool cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset, __u64 length, __u8 type, __u16 flags, struct cifsFileInfo *cfile, struct cifsLockInfo **conf_lock, int rw_check) { struct cifsLockInfo *li; struct cifsFileInfo *cur_cfile = fdlocks->cfile; struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; list_for_each_entry(li, &fdlocks->locks, llist) { if (offset + length <= li->offset || offset >= li->offset + li->length) continue; if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid && server->ops->compare_fids(cfile, cur_cfile)) { /* shared lock prevents write op through the same fid */ if (!(li->type & server->vals->shared_lock_type) || rw_check != CIFS_WRITE_OP) continue; } if ((type & server->vals->shared_lock_type) && ((server->ops->compare_fids(cfile, cur_cfile) && current->tgid == li->pid) || type == li->type)) continue; if (rw_check == CIFS_LOCK_OP && (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) && server->ops->compare_fids(cfile, cur_cfile)) continue; if (conf_lock) *conf_lock = li; return true; } return false; } bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u8 type, __u16 flags, struct cifsLockInfo **conf_lock, int rw_check) { bool rc = false; struct cifs_fid_locks *cur; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); list_for_each_entry(cur, &cinode->llist, llist) { rc = cifs_find_fid_lock_conflict(cur, offset, length, type, flags, cfile, conf_lock, rw_check); if (rc) break; } return rc; } /* * Check if there is another lock that prevents us to set the lock (mandatory * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise. 
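The conflict scan in cifs_find_fid_lock_conflict() above begins with the classic interval test: [off1, off1+len1) and [off2, off2+len2) overlap unless one range ends at or before the other begins. An editor-added check of that predicate (the driver's continue statement is the same test, negated):

#include <stdio.h>
#include <stdbool.h>

/* Same test as the first 'continue' in cifs_find_fid_lock_conflict(),
 * inverted: ranges overlap unless one is entirely before the other. */
static bool ranges_overlap(unsigned long long off1, unsigned long long len1,
			   unsigned long long off2, unsigned long long len2)
{
	return !(off1 + len1 <= off2 || off1 >= off2 + len2);
}

int main(void)
{
	printf("[0,100) vs [100,50): %d\n", ranges_overlap(0, 100, 100, 50)); /* 0 */
	printf("[0,100) vs [99,1):   %d\n", ranges_overlap(0, 100, 99, 1));   /* 1 */
	printf("[50,10) vs [0,100):  %d\n", ranges_overlap(50, 10, 0, 100));  /* 1 */
	return 0;
}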
*/ static int cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u8 type, struct file_lock *flock) { int rc = 0; struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; bool exist; down_read(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, offset, length, type, flock->fl_flags, &conf_lock, CIFS_LOCK_OP); if (exist) { flock->fl_start = conf_lock->offset; flock->fl_end = conf_lock->offset + conf_lock->length - 1; flock->fl_pid = conf_lock->pid; if (conf_lock->type & server->vals->shared_lock_type) flock->fl_type = F_RDLCK; else flock->fl_type = F_WRLCK; } else if (!cinode->can_cache_brlcks) rc = 1; else flock->fl_type = F_UNLCK; up_read(&cinode->lock_sem); return rc; } static void cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); cifs_down_write(&cinode->lock_sem); list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); } /* * Set the byte-range lock (mandatory style). Returns: * 1) 0, if we set the lock and don't need to request to the server; * 2) 1, if no locks prevent us but we need to request to the server; * 3) -EACCES, if there is a lock that prevents us and wait is false. */ static int cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, bool wait) { struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); bool exist; int rc = 0; try_again: exist = false; cifs_down_write(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, lock->type, lock->flags, &conf_lock, CIFS_LOCK_OP); if (!exist && cinode->can_cache_brlcks) { list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); return rc; } if (!exist) rc = 1; else if (!wait) rc = -EACCES; else { list_add_tail(&lock->blist, &conf_lock->blist); up_write(&cinode->lock_sem); rc = wait_event_interruptible(lock->block_q, (lock->blist.prev == &lock->blist) && (lock->blist.next == &lock->blist)); if (!rc) goto try_again; cifs_down_write(&cinode->lock_sem); list_del_init(&lock->blist); } up_write(&cinode->lock_sem); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Check if there is another lock that prevents us to set the lock (posix * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise. */ static int cifs_posix_lock_test(struct file *file, struct file_lock *flock) { int rc = 0; struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); unsigned char saved_type = flock->fl_type; if ((flock->fl_flags & FL_POSIX) == 0) return 1; down_read(&cinode->lock_sem); posix_test_lock(file, flock); if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) { flock->fl_type = saved_type; rc = 1; } up_read(&cinode->lock_sem); return rc; } /* * Set the byte-range lock (posix style). Returns: * 1) <0, if the error occurs while setting the lock; * 2) 0, if we set the lock and don't need to request to the server; * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server. 
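cifs_setlk() further below consumes this four-way contract with a single comparison: any result at or below FILE_LOCK_DEFERRED is final, and only the "FILE_LOCK_DEFERRED + 1" sentinel proceeds to the server. A small editor-added model of that routing (the constant's value here is a stand-in):

#include <stdio.h>

#define DEMO_FILE_LOCK_DEFERRED 1	/* stand-in for the VFS constant */

static const char *route(int rc)
{
	/* Mirrors: if (rc <= FILE_LOCK_DEFERRED) return rc; else ask server */
	return rc <= DEMO_FILE_LOCK_DEFERRED ? "handled locally / propagate"
					     : "send lock to server";
}

int main(void)
{
	printf("rc=-13 (error):       %s\n", route(-13));
	printf("rc=0   (cached):      %s\n", route(0));
	printf("rc=1   (deferred):    %s\n", route(DEMO_FILE_LOCK_DEFERRED));
	printf("rc=2   (need server): %s\n", route(DEMO_FILE_LOCK_DEFERRED + 1));
	return 0;
}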
*/ static int cifs_posix_lock_set(struct file *file, struct file_lock *flock) { struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); int rc = FILE_LOCK_DEFERRED + 1; if ((flock->fl_flags & FL_POSIX) == 0) return rc; cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; } rc = posix_lock_file(file, flock, NULL); up_write(&cinode->lock_sem); return rc; } int cifs_push_mandatory_locks(struct cifsFileInfo *cfile) { unsigned int xid; int rc = 0, stored_rc; struct cifsLockInfo *li, *tmp; struct cifs_tcon *tcon; unsigned int num, max_num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; static const int types[] = { LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES }; int i; xid = get_xid(); tcon = tlink_tcon(cfile->tlink); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it before using. */ max_buf = tcon->ses->server->maxBuf; if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { free_xid(xid); return -EINVAL; } BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > PAGE_SIZE); max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), PAGE_SIZE); max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) { free_xid(xid); return -ENOMEM; } for (i = 0; i < 2; i++) { cur = buf; num = 0; list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (li->type != types[i]) continue; cur->Pid = cpu_to_le16(li->pid); cur->LengthLow = cpu_to_le32((u32)li->length); cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); cur->OffsetLow = cpu_to_le32((u32)li->offset); cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); if (++num == max_num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, (__u8)li->type, 0, num, buf); if (stored_rc) rc = stored_rc; cur = buf; num = 0; } else cur++; } if (num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, (__u8)types[i], 0, num, buf); if (stored_rc) rc = stored_rc; } } kfree(buf); free_xid(xid); return rc; } static __u32 hash_lockowner(fl_owner_t owner) { return cifs_lock_secret ^ hash32_ptr((const void *)owner); } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct lock_to_push { struct list_head llist; __u64 offset; __u64 length; __u32 pid; __u16 netfid; __u8 type; }; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { struct inode *inode = d_inode(cfile->dentry); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock; struct file_lock_context *flctx = locks_inode_context(inode); unsigned int count = 0, i; int rc = 0, xid, type; struct list_head locks_to_send, *el; struct lock_to_push *lck, *tmp; __u64 length; xid = get_xid(); if (!flctx) goto out; spin_lock(&flctx->flc_lock); list_for_each(el, &flctx->flc_posix) { count++; } spin_unlock(&flctx->flc_lock); INIT_LIST_HEAD(&locks_to_send); /* * Allocating count locks is enough because no FL_POSIX locks can be * added to the list while we are holding cinode->lock_sem that * protects locking operations of this inode. */ for (i = 0; i < count; i++) { lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); if (!lck) { rc = -ENOMEM; goto err_out; } list_add_tail(&lck->llist, &locks_to_send); } el = locks_to_send.next; spin_lock(&flctx->flc_lock); list_for_each_entry(flock, &flctx->flc_posix, fl_list) { if (el == &locks_to_send) { /* * The list ended. 
We don't have enough allocated * structures - something is really wrong. */ cifs_dbg(VFS, "Can't push all brlocks!\n"); break; } length = cifs_flock_len(flock); if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) type = CIFS_RDLCK; else type = CIFS_WRLCK; lck = list_entry(el, struct lock_to_push, llist); lck->pid = hash_lockowner(flock->fl_owner); lck->netfid = cfile->fid.netfid; lck->length = length; lck->type = type; lck->offset = flock->fl_start; } spin_unlock(&flctx->flc_lock); list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { int stored_rc; stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid, lck->offset, lck->length, NULL, lck->type, 0); if (stored_rc) rc = stored_rc; list_del(&lck->llist); kfree(lck); } out: free_xid(xid); return rc; err_out: list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { list_del(&lck->llist); kfree(lck); } goto out; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_push_locks(struct cifsFileInfo *cfile) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* we are going to update can_cache_brlcks here - need a write access */ cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); cinode->can_cache_brlcks = false; up_write(&cinode->lock_sem); return rc; } static void cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, bool *wait_flag, struct TCP_Server_Info *server) { if (flock->fl_flags & FL_POSIX) cifs_dbg(FYI, "Posix\n"); if (flock->fl_flags & FL_FLOCK) cifs_dbg(FYI, "Flock\n"); if (flock->fl_flags & FL_SLEEP) { cifs_dbg(FYI, "Blocking lock\n"); *wait_flag = true; } if (flock->fl_flags & FL_ACCESS) cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n"); if (flock->fl_flags & FL_LEASE) cifs_dbg(FYI, "Lease on file - not implemented yet\n"); if (flock->fl_flags & (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK))) cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags); *type = server->vals->large_lock_type; if (flock->fl_type == F_WRLCK) { cifs_dbg(FYI, "F_WRLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; } else if (flock->fl_type == F_UNLCK) { cifs_dbg(FYI, "F_UNLCK\n"); *type |= server->vals->unlock_lock_type; *unlock = 1; /* Check if unlock includes more than one lock range */ } else if (flock->fl_type == F_RDLCK) { cifs_dbg(FYI, "F_RDLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; } else if (flock->fl_type == F_EXLCK) { cifs_dbg(FYI, "F_EXLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; } else if (flock->fl_type == F_SHLCK) { cifs_dbg(FYI, "F_SHLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; } else cifs_dbg(FYI, "Unknown type of lock\n"); } static int cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, bool wait_flag, bool posix_lck, unsigned int xid) { int rc = 0; __u64 length = cifs_flock_len(flock); struct cifsFileInfo *cfile = (struct 
cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY __u16 netfid = cfile->fid.netfid; if (posix_lck) { int posix_lock_type; rc = cifs_posix_lock_test(file, flock); if (!rc) return rc; if (type & server->vals->shared_lock_type) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; rc = CIFSSMBPosixLock(xid, tcon, netfid, hash_lockowner(flock->fl_owner), flock->fl_start, length, flock, posix_lock_type, wait_flag); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); if (!rc) return rc; /* BB we could chain these into one lock request BB */ rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 1, 0, false); if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 0, 1, false); flock->fl_type = F_UNLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); return 0; } if (type & server->vals->shared_lock_type) { flock->fl_type = F_WRLCK; return 0; } type &= ~server->vals->exclusive_lock_type; rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type | server->vals->shared_lock_type, 1, 0, false); if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type | server->vals->shared_lock_type, 0, 1, false); flock->fl_type = F_RDLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); } else flock->fl_type = F_WRLCK; return 0; } void cifs_move_llist(struct list_head *source, struct list_head *dest) { struct list_head *li, *tmp; list_for_each_safe(li, tmp, source) list_move(li, dest); } void cifs_free_llist(struct list_head *llist) { struct cifsLockInfo *li, *tmp; list_for_each_entry_safe(li, tmp, llist, llist) { cifs_del_lock_waiters(li); list_del(&li->llist); kfree(li); } } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int xid) { int rc = 0, stored_rc; static const int types[] = { LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES }; unsigned int i; unsigned int max_num, num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifsLockInfo *li, *tmp; __u64 length = cifs_flock_len(flock); struct list_head tmp_llist; INIT_LIST_HEAD(&tmp_llist); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it before using. */ max_buf = tcon->ses->server->maxBuf; if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) return -EINVAL; BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > PAGE_SIZE); max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), PAGE_SIZE); max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) return -ENOMEM; cifs_down_write(&cinode->lock_sem); for (i = 0; i < 2; i++) { cur = buf; num = 0; list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset || (flock->fl_start + length) < (li->offset + li->length)) continue; if (current->tgid != li->pid) continue; if (types[i] != li->type) continue; if (cinode->can_cache_brlcks) { /* * We can cache brlock requests - simply remove * a lock from the file's list. 
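Both cifs_push_mandatory_locks() earlier and cifs_unlock_range() here size their LOCKING_ANDX_RANGE batches the same way: reject a maxBuf too small to carry even one range, clamp to a page, and divide what remains by the per-range record size (the driver subtracts the header on both sides of the clamp, keeping some headroom). An editor-added version of the arithmetic with stand-in sizes:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_SMB_HDR   32u	/* stand-in for sizeof(struct smb_hdr) */
#define DEMO_RANGE     24u	/* stand-in for sizeof(LOCKING_ANDX_RANGE) */

static unsigned int lock_batch_size(unsigned int max_buf)
{
	if (max_buf < DEMO_SMB_HDR + DEMO_RANGE)
		return 0;			/* caller returns -EINVAL */
	max_buf = max_buf - DEMO_SMB_HDR;
	if (max_buf > DEMO_PAGE_SIZE)
		max_buf = DEMO_PAGE_SIZE;	/* min_t(..., PAGE_SIZE) */
	return (max_buf - DEMO_SMB_HDR) / DEMO_RANGE;
}

int main(void)
{
	printf("maxBuf=16384 -> %u ranges per request\n", lock_batch_size(16384));
	printf("maxBuf=200   -> %u ranges per request\n", lock_batch_size(200));
	printf("maxBuf=40    -> %u ranges per request\n", lock_batch_size(40));
	return 0;
}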
*/ list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); continue; } cur->Pid = cpu_to_le16(li->pid); cur->LengthLow = cpu_to_le32((u32)li->length); cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); cur->OffsetLow = cpu_to_le32((u32)li->offset); cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); /* * We need to save a lock here to let us add it again to * the file's list if the unlock range request fails on * the server. */ list_move(&li->llist, &tmp_llist); if (++num == max_num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, li->type, num, 0, buf); if (stored_rc) { /* * We failed on the unlock range * request - add all locks from the tmp * list to the head of the file's list. */ cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else /* * The unlock range request succeed - * free the tmp list. */ cifs_free_llist(&tmp_llist); cur = buf; num = 0; } else cur++; } if (num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, types[i], num, 0, buf); if (stored_rc) { cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else cifs_free_llist(&tmp_llist); } } up_write(&cinode->lock_sem); kfree(buf); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, bool wait_flag, bool posix_lck, int lock, int unlock, unsigned int xid) { int rc = 0; __u64 length = cifs_flock_len(flock); struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct inode *inode = d_inode(cfile->dentry); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (posix_lck) { int posix_lock_type; rc = cifs_posix_lock_set(file, flock); if (rc <= FILE_LOCK_DEFERRED) return rc; if (type & server->vals->shared_lock_type) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; if (unlock == 1) posix_lock_type = CIFS_UNLCK; rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, hash_lockowner(flock->fl_owner), flock->fl_start, length, NULL, posix_lock_type, wait_flag); goto out; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (lock) { struct cifsLockInfo *lock; lock = cifs_lock_init(flock->fl_start, length, type, flock->fl_flags); if (!lock) return -ENOMEM; rc = cifs_lock_add_if(cfile, lock, wait_flag); if (rc < 0) { kfree(lock); return rc; } if (!rc) goto out; /* * Windows 7 server can delay breaking lease from read to None * if we set a byte-range lock on a file - break it explicitly * before sending the lock to the server to be sure the next * read won't conflict with non-overlapted locks due to * pagereading. 
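All of this lock machinery is driven by ordinary application byte-range locks: on a CIFS/SMB3 mount, the fcntl() in the self-contained example below is serviced by cifs_lock() and ends up in cifs_setlk(). Any path on such a mount works; the file name is arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* On a CIFS/SMB3 mount this F_SETLK is serviced by cifs_lock(). */
	int fd = open("demo.lock", O_RDWR | O_CREAT, 0644);
	struct flock fl = {
		.l_type = F_WRLCK,	/* exclusive byte-range lock */
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 4096,		/* first 4 KiB of the file */
	};

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("lock");
		return EXIT_FAILURE;
	}
	puts("byte-range lock acquired");

	fl.l_type = F_UNLCK;		/* reaches the cifs_setlk() unlock path */
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return EXIT_SUCCESS;
}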
*/ if (!CIFS_CACHE_WRITE(CIFS_I(inode)) && CIFS_CACHE_READ(CIFS_I(inode))) { cifs_zap_mapping(inode); cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n", inode); CIFS_I(inode)->oplock = 0; } rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 1, 0, wait_flag); if (rc) { kfree(lock); return rc; } cifs_lock_add(cfile, lock); } else if (unlock) rc = server->ops->mand_unlock_range(cfile, flock, xid); out: if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) { /* * If this is a request to remove all locks because we * are closing the file, it doesn't matter if the * unlocking failed as both cifs.ko and the SMB server * remove the lock on file close */ if (rc) { cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); if (!(flock->fl_flags & FL_CLOSE)) return rc; } rc = locks_lock_file_wait(file, flock); } return rc; } int cifs_flock(struct file *file, int cmd, struct file_lock *fl) { int rc, xid; int lock = 0, unlock = 0; bool wait_flag = false; bool posix_lck = false; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct cifsFileInfo *cfile; __u32 type; xid = get_xid(); if (!(fl->fl_flags & FL_FLOCK)) { rc = -ENOLCK; free_xid(xid); return rc; } cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag, tcon->ses->server); cifs_sb = CIFS_FILE_SB(file); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) posix_lck = true; if (!lock && !unlock) { /* * if no lock or unlock then nothing to do since we do not * know what it is */ rc = -EOPNOTSUPP; free_xid(xid); return rc; } rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, xid); free_xid(xid); return rc; } int cifs_lock(struct file *file, int cmd, struct file_lock *flock) { int rc, xid; int lock = 0, unlock = 0; bool wait_flag = false; bool posix_lck = false; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct cifsFileInfo *cfile; __u32 type; rc = -EACCES; xid = get_xid(); cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, flock->fl_flags, flock->fl_type, (long long)flock->fl_start, (long long)flock->fl_end); cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, tcon->ses->server); cifs_sb = CIFS_FILE_SB(file); set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) posix_lck = true; /* * BB add code here to normalize offset and length to account for * negative length which we can not accept over the wire. */ if (IS_GETLK(cmd)) { rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid); free_xid(xid); return rc; } if (!lock && !unlock) { /* * if no lock or unlock then nothing to do since we do not * know what it is */ free_xid(xid); return -EOPNOTSUPP; } rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, xid); free_xid(xid); return rc; } /* * update the file size (if needed) after a write. 
Should be called with * the inode->i_lock held */ void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, unsigned int bytes_written) { loff_t end_of_write = offset + bytes_written; if (end_of_write > cifsi->server_eof) cifsi->server_eof = end_of_write; } static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, size_t write_size, loff_t *offset) { int rc = 0; unsigned int bytes_written = 0; unsigned int total_written; struct cifs_tcon *tcon; struct TCP_Server_Info *server; unsigned int xid; struct dentry *dentry = open_file->dentry; struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry)); struct cifs_io_parms io_parms = {0}; cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n", write_size, *offset, dentry); tcon = tlink_tcon(open_file->tlink); server = tcon->ses->server; if (!server->ops->sync_write) return -ENOSYS; xid = get_xid(); for (total_written = 0; write_size > total_written; total_written += bytes_written) { rc = -EAGAIN; while (rc == -EAGAIN) { struct kvec iov[2]; unsigned int len; if (open_file->invalidHandle) { /* we could deadlock if we called filemap_fdatawait from here so tell reopen_file not to flush data to server now */ rc = cifs_reopen_file(open_file, false); if (rc != 0) break; } len = min(server->ops->wp_retry_size(d_inode(dentry)), (unsigned int)write_size - total_written); /* iov[0] is reserved for smb header */ iov[1].iov_base = (char *)write_data + total_written; iov[1].iov_len = len; io_parms.pid = pid; io_parms.tcon = tcon; io_parms.offset = *offset; io_parms.length = len; rc = server->ops->sync_write(xid, &open_file->fid, &io_parms, &bytes_written, iov, 1); } if (rc || (bytes_written == 0)) { if (total_written) break; else { free_xid(xid); return rc; } } else { spin_lock(&d_inode(dentry)->i_lock); cifs_update_eof(cifsi, *offset, bytes_written); spin_unlock(&d_inode(dentry)->i_lock); *offset += bytes_written; } } cifs_stats_bytes_written(tcon, total_written); if (total_written > 0) { spin_lock(&d_inode(dentry)->i_lock); if (*offset > d_inode(dentry)->i_size) { i_size_write(d_inode(dentry), *offset); d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9; } spin_unlock(&d_inode(dentry)->i_lock); } mark_inode_dirty_sync(d_inode(dentry)); free_xid(xid); return total_written; } struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) { struct cifsFileInfo *open_file = NULL; struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) fsuid_only = false; spin_lock(&cifs_inode->open_file_lock); /* we could simply get the first_list_entry since write-only entries are always at the end of the list but since the first entry might have a close pending, we go through the whole list */ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { if ((!open_file->invalidHandle)) { /* found a good file */ /* lock it so it will not be closed on us */ cifsFileInfo_get(open_file); spin_unlock(&cifs_inode->open_file_lock); return open_file; } /* else might as well continue, and look for another, or simply have the caller reopen it again rather than trying to fix this handle */ } else /* write only file */ break; /* write only files are last so must be done */ } spin_unlock(&cifs_inode->open_file_lock); return NULL; } /* Return -EBADF if no handle is found and general rc 
otherwise */ int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, struct cifsFileInfo **ret_file) { struct cifsFileInfo *open_file, *inv_file = NULL; struct cifs_sb_info *cifs_sb; bool any_available = false; int rc = -EBADF; unsigned int refind = 0; bool fsuid_only = flags & FIND_WR_FSUID_ONLY; bool with_delete = flags & FIND_WR_WITH_DELETE; *ret_file = NULL; /* * Having a null inode here (because mapping->host was set to zero by * the VFS or MM) should not happen but we had reports of on oops (due * to it being zero) during stress testcases so we need to check for it */ if (cifs_inode == NULL) { cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n"); dump_stack(); return rc; } cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) fsuid_only = false; spin_lock(&cifs_inode->open_file_lock); refind_writable: if (refind > MAX_REOPEN_ATT) { spin_unlock(&cifs_inode->open_file_lock); return rc; } list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (!any_available && open_file->pid != current->tgid) continue; if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; if (with_delete && !(open_file->fid.access & DELETE)) continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { if (!open_file->invalidHandle) { /* found a good writable file */ cifsFileInfo_get(open_file); spin_unlock(&cifs_inode->open_file_lock); *ret_file = open_file; return 0; } else { if (!inv_file) inv_file = open_file; } } } /* couldn't find useable FH with same pid, try any available */ if (!any_available) { any_available = true; goto refind_writable; } if (inv_file) { any_available = false; cifsFileInfo_get(inv_file); } spin_unlock(&cifs_inode->open_file_lock); if (inv_file) { rc = cifs_reopen_file(inv_file, false); if (!rc) { *ret_file = inv_file; return 0; } spin_lock(&cifs_inode->open_file_lock); list_move_tail(&inv_file->flist, &cifs_inode->openFileList); spin_unlock(&cifs_inode->open_file_lock); cifsFileInfo_put(inv_file); ++refind; inv_file = NULL; spin_lock(&cifs_inode->open_file_lock); goto refind_writable; } return rc; } struct cifsFileInfo * find_writable_file(struct cifsInodeInfo *cifs_inode, int flags) { struct cifsFileInfo *cfile; int rc; rc = cifs_get_writable_file(cifs_inode, flags, &cfile); if (rc) cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc); return cfile; } int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, int flags, struct cifsFileInfo **ret_file) { struct cifsFileInfo *cfile; void *page = alloc_dentry_path(); *ret_file = NULL; spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { struct cifsInodeInfo *cinode; const char *full_path = build_path_from_dentry(cfile->dentry, page); if (IS_ERR(full_path)) { spin_unlock(&tcon->open_file_lock); free_dentry_path(page); return PTR_ERR(full_path); } if (strcmp(full_path, name)) continue; cinode = CIFS_I(d_inode(cfile->dentry)); spin_unlock(&tcon->open_file_lock); free_dentry_path(page); return cifs_get_writable_file(cinode, flags, ret_file); } spin_unlock(&tcon->open_file_lock); free_dentry_path(page); return -ENOENT; } int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, struct cifsFileInfo **ret_file) { struct cifsFileInfo *cfile; void *page = alloc_dentry_path(); *ret_file = NULL; spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { struct cifsInodeInfo *cinode; const char *full_path = 
build_path_from_dentry(cfile->dentry, page); if (IS_ERR(full_path)) { spin_unlock(&tcon->open_file_lock); free_dentry_path(page); return PTR_ERR(full_path); } if (strcmp(full_path, name)) continue; cinode = CIFS_I(d_inode(cfile->dentry)); spin_unlock(&tcon->open_file_lock); free_dentry_path(page); *ret_file = find_readable_file(cinode, 0); return *ret_file ? 0 : -ENOENT; } spin_unlock(&tcon->open_file_lock); free_dentry_path(page); return -ENOENT; } void cifs_writedata_release(struct kref *refcount) { struct cifs_writedata *wdata = container_of(refcount, struct cifs_writedata, refcount); #ifdef CONFIG_CIFS_SMB_DIRECT if (wdata->mr) { smbd_deregister_mr(wdata->mr); wdata->mr = NULL; } #endif if (wdata->cfile) cifsFileInfo_put(wdata->cfile); kfree(wdata); } /* * Write failed with a retryable error. Resend the write request. It's also * possible that the page was redirtied so re-clean the page. */ static void cifs_writev_requeue(struct cifs_writedata *wdata) { int rc = 0; struct inode *inode = d_inode(wdata->cfile->dentry); struct TCP_Server_Info *server; unsigned int rest_len = wdata->bytes; loff_t fpos = wdata->offset; server = tlink_tcon(wdata->cfile->tlink)->ses->server; do { struct cifs_writedata *wdata2; unsigned int wsize, cur_len; wsize = server->ops->wp_retry_size(inode); if (wsize < rest_len) { if (wsize < PAGE_SIZE) { rc = -EOPNOTSUPP; break; } cur_len = min(round_down(wsize, PAGE_SIZE), rest_len); } else { cur_len = rest_len; } wdata2 = cifs_writedata_alloc(cifs_writev_complete); if (!wdata2) { rc = -ENOMEM; break; } wdata2->sync_mode = wdata->sync_mode; wdata2->offset = fpos; wdata2->bytes = cur_len; wdata2->iter = wdata->iter; iov_iter_advance(&wdata2->iter, fpos - wdata->offset); iov_iter_truncate(&wdata2->iter, wdata2->bytes); if (iov_iter_is_xarray(&wdata2->iter)) /* Check for pages having been redirtied and clean * them. We can do this by walking the xarray. If * it's not an xarray, then it's a DIO and we shouldn't * be mucking around with the page bits. 
*/ cifs_undirty_folios(inode, fpos, cur_len); rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &wdata2->cfile); if (!wdata2->cfile) { cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", rc); if (!is_retryable_error(rc)) rc = -EBADF; } else { wdata2->pid = wdata2->cfile->pid; rc = server->ops->async_writev(wdata2, cifs_writedata_release); } kref_put(&wdata2->refcount, cifs_writedata_release); if (rc) { if (is_retryable_error(rc)) continue; fpos += cur_len; rest_len -= cur_len; break; } fpos += cur_len; rest_len -= cur_len; } while (rest_len > 0); /* Clean up remaining pages from the original wdata */ if (iov_iter_is_xarray(&wdata->iter)) cifs_pages_write_failed(inode, fpos, rest_len); if (rc != 0 && !is_retryable_error(rc)) mapping_set_error(inode->i_mapping, rc); kref_put(&wdata->refcount, cifs_writedata_release); } void cifs_writev_complete(struct work_struct *work) { struct cifs_writedata *wdata = container_of(work, struct cifs_writedata, work); struct inode *inode = d_inode(wdata->cfile->dentry); if (wdata->result == 0) { spin_lock(&inode->i_lock); cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); spin_unlock(&inode->i_lock); cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), wdata->bytes); } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) return cifs_writev_requeue(wdata); if (wdata->result == -EAGAIN) cifs_pages_write_redirty(inode, wdata->offset, wdata->bytes); else if (wdata->result < 0) cifs_pages_write_failed(inode, wdata->offset, wdata->bytes); else cifs_pages_written_back(inode, wdata->offset, wdata->bytes); if (wdata->result != -EAGAIN) mapping_set_error(inode->i_mapping, wdata->result); kref_put(&wdata->refcount, cifs_writedata_release); } struct cifs_writedata *cifs_writedata_alloc(work_func_t complete) { struct cifs_writedata *wdata; wdata = kzalloc(sizeof(*wdata), GFP_NOFS); if (wdata != NULL) { kref_init(&wdata->refcount); INIT_LIST_HEAD(&wdata->list); init_completion(&wdata->done); INIT_WORK(&wdata->work, complete); } return wdata; } static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; loff_t offset = (loff_t)page->index << PAGE_SHIFT; char *write_data; int rc = -EFAULT; int bytes_written = 0; struct inode *inode; struct cifsFileInfo *open_file; if (!mapping || !mapping->host) return -EFAULT; inode = page->mapping->host; offset += (loff_t)from; write_data = kmap(page); write_data += from; if ((to > PAGE_SIZE) || (from > to)) { kunmap(page); return -EIO; } /* racing with truncate? */ if (offset > mapping->host->i_size) { kunmap(page); return 0; /* don't care */ } /* check to make sure that we are not extending the file */ if (mapping->host->i_size - offset < (loff_t)to) to = (unsigned)(mapping->host->i_size - offset); rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY, &open_file); if (!rc) { bytes_written = cifs_write(open_file, open_file->pid, write_data, to - from, &offset); cifsFileInfo_put(open_file); /* Does mm or vfs already set times? */ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); if ((bytes_written > 0) && (offset)) rc = 0; else if (bytes_written < 0) rc = bytes_written; else rc = -EFAULT; } else { cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc); if (!is_retryable_error(rc)) rc = -EIO; } kunmap(page); return rc; } /* * Extend the region to be written back to include subsequent contiguously * dirty pages if possible, but don't sleep while doing so. 
*/ static void cifs_extend_writeback(struct address_space *mapping, long *_count, loff_t start, int max_pages, size_t max_len, unsigned int *_len) { struct folio_batch batch; struct folio *folio; unsigned int psize, nr_pages; size_t len = *_len; pgoff_t index = (start + len) / PAGE_SIZE; bool stop = true; unsigned int i; XA_STATE(xas, &mapping->i_pages, index); folio_batch_init(&batch); do { /* Firstly, we gather up a batch of contiguous dirty pages * under the RCU read lock - but we can't clear the dirty flags * there if any of those pages are mapped. */ rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { stop = true; if (xas_retry(&xas, folio)) continue; if (xa_is_value(folio)) break; if (folio_index(folio) != index) break; if (!folio_try_get_rcu(folio)) { xas_reset(&xas); continue; } nr_pages = folio_nr_pages(folio); if (nr_pages > max_pages) break; /* Has the page moved or been split? */ if (unlikely(folio != xas_reload(&xas))) { folio_put(folio); break; } if (!folio_trylock(folio)) { folio_put(folio); break; } if (!folio_test_dirty(folio) || folio_test_writeback(folio)) { folio_unlock(folio); folio_put(folio); break; } max_pages -= nr_pages; psize = folio_size(folio); len += psize; stop = false; if (max_pages <= 0 || len >= max_len || *_count <= 0) stop = true; index += nr_pages; if (!folio_batch_add(&batch, folio)) break; if (stop) break; } if (!stop) xas_pause(&xas); rcu_read_unlock(); /* Now, if we obtained any pages, we can shift them to being * writable and mark them for caching. */ if (!folio_batch_count(&batch)) break; for (i = 0; i < folio_batch_count(&batch); i++) { folio = batch.folios[i]; /* The folio should be locked, dirty and not undergoing * writeback from the loop above. */ if (!folio_clear_dirty_for_io(folio)) WARN_ON(1); if (folio_start_writeback(folio)) WARN_ON(1); *_count -= folio_nr_pages(folio); folio_unlock(folio); } folio_batch_release(&batch); cond_resched(); } while (!stop); *_len = len; } /* * Write back the locked page and any subsequent non-locked dirty pages. */ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping, struct writeback_control *wbc, struct folio *folio, loff_t start, loff_t end) { struct inode *inode = mapping->host; struct TCP_Server_Info *server; struct cifs_writedata *wdata; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_credits credits_on_stack; struct cifs_credits *credits = &credits_on_stack; struct cifsFileInfo *cfile = NULL; unsigned int xid, wsize, len; loff_t i_size = i_size_read(inode); size_t max_len; long count = wbc->nr_to_write; int rc; /* The folio should be locked, dirty and not undergoing writeback. 
*/ if (folio_start_writeback(folio)) WARN_ON(1); count -= folio_nr_pages(folio); len = folio_size(folio); xid = get_xid(); server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses); rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); if (rc) { cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc); goto err_xid; } rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize, &wsize, credits); if (rc != 0) goto err_close; wdata = cifs_writedata_alloc(cifs_writev_complete); if (!wdata) { rc = -ENOMEM; goto err_uncredit; } wdata->sync_mode = wbc->sync_mode; wdata->offset = folio_pos(folio); wdata->pid = cfile->pid; wdata->credits = credits_on_stack; wdata->cfile = cfile; wdata->server = server; cfile = NULL; /* Find all consecutive lockable dirty pages, stopping when we find a * page that is not immediately lockable, is not dirty or is missing, * or we reach the end of the range. */ if (start < i_size) { /* Trim the write to the EOF; the extra data is ignored. Also * put an upper limit on the size of a single storedata op. */ max_len = wsize; max_len = min_t(unsigned long long, max_len, end - start + 1); max_len = min_t(unsigned long long, max_len, i_size - start); if (len < max_len) { int max_pages = INT_MAX; #ifdef CONFIG_CIFS_SMB_DIRECT if (server->smbd_conn) max_pages = server->smbd_conn->max_frmr_depth; #endif max_pages -= folio_nr_pages(folio); if (max_pages > 0) cifs_extend_writeback(mapping, &count, start, max_pages, max_len, &len); } len = min_t(loff_t, len, max_len); } wdata->bytes = len; /* We now have a contiguous set of dirty pages, each with writeback * set; the first page is still locked at this point, but all the rest * have been unlocked. */ folio_unlock(folio); if (start < i_size) { iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages, start, len); rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes); if (rc) goto err_wdata; if (wdata->cfile->invalidHandle) rc = -EAGAIN; else rc = wdata->server->ops->async_writev(wdata, cifs_writedata_release); if (rc >= 0) { kref_put(&wdata->refcount, cifs_writedata_release); goto err_close; } } else { /* The dirty region was entirely beyond the EOF. 
*/ cifs_pages_written_back(inode, start, len); rc = 0; } err_wdata: kref_put(&wdata->refcount, cifs_writedata_release); err_uncredit: add_credits_and_wake_if(server, credits, 0); err_close: if (cfile) cifsFileInfo_put(cfile); err_xid: free_xid(xid); if (rc == 0) { wbc->nr_to_write = count; rc = len; } else if (is_retryable_error(rc)) { cifs_pages_write_redirty(inode, start, len); } else { cifs_pages_write_failed(inode, start, len); mapping_set_error(mapping, rc); } /* Indication to update ctime and mtime as close is deferred */ set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); return rc; } /* * write a region of pages back to the server */ static int cifs_writepages_region(struct address_space *mapping, struct writeback_control *wbc, loff_t start, loff_t end, loff_t *_next) { struct folio_batch fbatch; int skips = 0; folio_batch_init(&fbatch); do { int nr; pgoff_t index = start / PAGE_SIZE; nr = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY, &fbatch); if (!nr) break; for (int i = 0; i < nr; i++) { ssize_t ret; struct folio *folio = fbatch.folios[i]; redo_folio: start = folio_pos(folio); /* May regress with THPs */ /* At this point we hold neither the i_pages lock nor the * page lock: the page may be truncated or invalidated * (changing page->mapping to NULL), or even swizzled * back from swapper_space to tmpfs file mapping */ if (wbc->sync_mode != WB_SYNC_NONE) { ret = folio_lock_killable(folio); if (ret < 0) goto write_error; } else { if (!folio_trylock(folio)) goto skip_write; } if (folio_mapping(folio) != mapping || !folio_test_dirty(folio)) { start += folio_size(folio); folio_unlock(folio); continue; } if (folio_test_writeback(folio) || folio_test_fscache(folio)) { folio_unlock(folio); if (wbc->sync_mode == WB_SYNC_NONE) goto skip_write; folio_wait_writeback(folio); #ifdef CONFIG_CIFS_FSCACHE folio_wait_fscache(folio); #endif goto redo_folio; } if (!folio_clear_dirty_for_io(folio)) /* We hold the page lock - it should've been dirty. */ WARN_ON(1); ret = cifs_write_back_from_locked_folio(mapping, wbc, folio, start, end); if (ret < 0) goto write_error; start += ret; continue; write_error: folio_batch_release(&fbatch); *_next = start; return ret; skip_write: /* * Too many skipped writes, or need to reschedule? * Treat it as a write error without an error code. */ if (skips >= 5 || need_resched()) { ret = 0; goto write_error; } /* Otherwise, just skip that folio and go on to the next */ skips++; start += folio_size(folio); continue; } folio_batch_release(&fbatch); cond_resched(); } while (wbc->nr_to_write > 0); *_next = start; return 0; } /* * Write some of the pending data back to the server */ static int cifs_writepages(struct address_space *mapping, struct writeback_control *wbc) { loff_t start, next; int ret; /* We have to be careful as we can end up racing with setattr() * truncating the pagecache since the caller doesn't take a lock here * to prevent it. 
*/ if (wbc->range_cyclic) { start = mapping->writeback_index * PAGE_SIZE; ret = cifs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); if (ret == 0) { mapping->writeback_index = next / PAGE_SIZE; if (start > 0 && wbc->nr_to_write > 0) { ret = cifs_writepages_region(mapping, wbc, 0, start, &next); if (ret == 0) mapping->writeback_index = next / PAGE_SIZE; } } } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { ret = cifs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); if (wbc->nr_to_write > 0 && ret == 0) mapping->writeback_index = next / PAGE_SIZE; } else { ret = cifs_writepages_region(mapping, wbc, wbc->range_start, wbc->range_end, &next); } return ret; } static int cifs_writepage_locked(struct page *page, struct writeback_control *wbc) { int rc; unsigned int xid; xid = get_xid(); /* BB add check for wbc flags */ get_page(page); if (!PageUptodate(page)) cifs_dbg(FYI, "ppw - page not up to date\n"); /* * Set the "writeback" flag, and clear "dirty" in the radix tree. * * A writepage() implementation always needs to do either this, * or re-dirty the page with "redirty_page_for_writepage()" in * the case of a failure. * * Just unlocking the page will cause the radix tree tag-bits * to fail to update with the state of the page correctly. */ set_page_writeback(page); retry_write: rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); if (is_retryable_error(rc)) { if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) goto retry_write; redirty_page_for_writepage(wbc, page); } else if (rc != 0) { SetPageError(page); mapping_set_error(page->mapping, rc); } else { SetPageUptodate(page); } end_page_writeback(page); put_page(page); free_xid(xid); return rc; } static int cifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int rc; struct inode *inode = mapping->host; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct folio *folio = page_folio(page); __u32 pid; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = cfile->pid; else pid = current->tgid; cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n", page, pos, copied); if (folio_test_checked(folio)) { if (copied == len) folio_mark_uptodate(folio); folio_clear_checked(folio); } else if (!folio_test_uptodate(folio) && copied == PAGE_SIZE) folio_mark_uptodate(folio); if (!folio_test_uptodate(folio)) { char *page_data; unsigned offset = pos & (PAGE_SIZE - 1); unsigned int xid; xid = get_xid(); /* this is probably better than directly calling partialpage_write since in this function the file handle is known which we might as well leverage */ /* BB check if anything else missing out of ppw such as updating last write time */ page_data = kmap(page); rc = cifs_write(cfile, pid, page_data + offset, copied, &pos); /* if (rc < 0) should we set writebehind rc? 
*/ kunmap(page); free_xid(xid); } else { rc = copied; pos += copied; set_page_dirty(page); } if (rc > 0) { spin_lock(&inode->i_lock); if (pos > inode->i_size) { i_size_write(inode, pos); inode->i_blocks = (512 - 1 + pos) >> 9; } spin_unlock(&inode->i_lock); } unlock_page(page); put_page(page); /* Indication to update ctime and mtime as close is deferred */ set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); return rc; } int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, int datasync) { unsigned int xid; int rc = 0; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsFileInfo *smbfile = file->private_data; struct inode *inode = file_inode(file); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); rc = file_write_and_wait_range(file, start, end); if (rc) { trace_cifs_fsync_err(inode->i_ino, rc); return rc; } xid = get_xid(); cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", file, datasync); if (!CIFS_CACHE_READ(CIFS_I(inode))) { rc = cifs_zap_mapping(inode); if (rc) { cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc); rc = 0; /* don't care about it in fsync */ } } tcon = tlink_tcon(smbfile->tlink); if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { server = tcon->ses->server; if (server->ops->flush == NULL) { rc = -ENOSYS; goto strict_fsync_exit; } if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); if (smbfile) { rc = server->ops->flush(xid, tcon, &smbfile->fid); cifsFileInfo_put(smbfile); } else cifs_dbg(FYI, "ignore fsync for file not open for write\n"); } else rc = server->ops->flush(xid, tcon, &smbfile->fid); } strict_fsync_exit: free_xid(xid); return rc; } int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { unsigned int xid; int rc = 0; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsFileInfo *smbfile = file->private_data; struct inode *inode = file_inode(file); struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); rc = file_write_and_wait_range(file, start, end); if (rc) { trace_cifs_fsync_err(file_inode(file)->i_ino, rc); return rc; } xid = get_xid(); cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", file, datasync); tcon = tlink_tcon(smbfile->tlink); if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { server = tcon->ses->server; if (server->ops->flush == NULL) { rc = -ENOSYS; goto fsync_exit; } if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); if (smbfile) { rc = server->ops->flush(xid, tcon, &smbfile->fid); cifsFileInfo_put(smbfile); } else cifs_dbg(FYI, "ignore fsync for file not open for write\n"); } else rc = server->ops->flush(xid, tcon, &smbfile->fid); } fsync_exit: free_xid(xid); return rc; } /* * As file closes, flush all cached write data for this inode checking * for write behind errors. 
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (rc) {
		/* get more nuanced writeback errors */
		rc = filemap_check_wb_err(file->f_mapping, 0);
		trace_cifs_flush_err(inode->i_ino, rc);
	}
	return rc;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
		  struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server = wdata->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
							   &wsize, &credits);
			if (rc)
				goto fail;

			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				if (wdata->mr) {
					wdata->mr->need_invalidate = true;
					smbd_deregister_mr(wdata->mr);
					wdata->mr = NULL;
				}
#endif
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
			}
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}
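/*
 * Editor's illustration (not part of the original source): a worked example
 * for cifs_limit_bvec_subset(), defined just below. Suppose the iterator
 * covers three 4 KiB bio_vecs with iov_offset = 1024 (so count = 11264),
 * and the caller passes max_size = 6144 and max_segs = 2. The first loop
 * consumes the 1024-byte skip inside bvec 0; the second loop then takes
 * min3(11264, 4096 - 1024, 6144) = 3072 bytes from bvec 0 and
 * min3(8192, 4096, 3072) = 3072 bytes from bvec 1, at which point max_size
 * reaches 0. The function reports span = 6144 with *_nsegs = 2.
 */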
/*
 * Select span of a bvec iterator we're going to use. Limit it by both maximum
 * size and maximum number of segments.
 */
static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_size,
				     size_t max_segs, unsigned int *_nsegs)
{
	const struct bio_vec *bvecs = iter->bvec;
	unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
	size_t len, span = 0, n = iter->count;
	size_t skip = iter->iov_offset;

	if (WARN_ON(!iov_iter_is_bvec(iter)) || n == 0)
		return 0;

	while (n && ix < nbv && skip) {
		len = bvecs[ix].bv_len;
		if (skip < len)
			break;
		skip -= len;
		n -= len;
		ix++;
	}

	while (n && ix < nbv) {
		len = min3(n, bvecs[ix].bv_len - skip, max_size);
		span += len;
		max_size -= len;
		nsegs++;
		ix++;
		if (max_size == 0 || nsegs >= max_segs)
			break;
		skip = 0;
		n -= len;
	}

	*_nsegs = nsegs;
	return span;
}

static int
cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len, max_len;
	struct cifs_writedata *wdata;
	pid_t pid;
	struct TCP_Server_Info *server;
	unsigned int xid, max_segs = INT_MAX;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	xid = get_xid();

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		max_segs = server->smbd_conn->max_frmr_depth;
#endif

	do {
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		unsigned int wsize, nsegs = 0;

		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc)
			break;

		max_len = min_t(const size_t, len, wsize);
		if (!max_len) {
			rc = -EAGAIN;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs);
		cifs_dbg(FYI, "write_from_iter len=%zx/%zx nsegs=%u/%lu/%u\n",
			 cur_len, max_len, nsegs, from->nr_segs, max_segs);
		if (cur_len == 0) {
			rc = -EIO;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		wdata = cifs_writedata_alloc(cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->offset	= (__u64)fpos;
		wdata->cfile	= cifsFileInfo_get(open_file);
		wdata->server	= server;
		wdata->pid	= pid;
		wdata->bytes	= cur_len;
		wdata->credits	= credits_on_stack;
		wdata->iter	= *from;
		wdata->ctx	= ctx;
		kref_get(&ctx->refcount);

		iov_iter_truncate(&wdata->iter, cur_len);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		iov_iter_advance(from, cur_len);
		fpos += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	ssize_t rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
increasing offset. Once an error is hit, then return without waiting * for any more replies. */ restart_loop: list_for_each_entry_safe(wdata, tmp, &ctx->list, list) { if (!rc) { if (!try_wait_for_completion(&wdata->done)) { mutex_unlock(&ctx->aio_mutex); return; } if (wdata->result) rc = wdata->result; else ctx->total_len += wdata->bytes; /* resend call if it's a retryable error */ if (rc == -EAGAIN) { struct list_head tmp_list; struct iov_iter tmp_from = ctx->iter; INIT_LIST_HEAD(&tmp_list); list_del_init(&wdata->list); if (ctx->direct_io) rc = cifs_resend_wdata( wdata, &tmp_list, ctx); else { iov_iter_advance(&tmp_from, wdata->offset - ctx->pos); rc = cifs_write_from_iter(wdata->offset, wdata->bytes, &tmp_from, ctx->cfile, cifs_sb, &tmp_list, ctx); kref_put(&wdata->refcount, cifs_uncached_writedata_release); } list_splice(&tmp_list, &ctx->list); goto restart_loop; } } list_del_init(&wdata->list); kref_put(&wdata->refcount, cifs_uncached_writedata_release); } cifs_stats_bytes_written(tcon, ctx->total_len); set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); ctx->rc = (rc == 0) ? ctx->total_len : rc; mutex_unlock(&ctx->aio_mutex); if (ctx->iocb && ctx->iocb->ki_complete) ctx->iocb->ki_complete(ctx->iocb, ctx->rc); else complete(&ctx->done); } static ssize_t __cifs_writev( struct kiocb *iocb, struct iov_iter *from, bool direct) { struct file *file = iocb->ki_filp; ssize_t total_written = 0; struct cifsFileInfo *cfile; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb; struct cifs_aio_ctx *ctx; int rc; rc = generic_write_checks(iocb, from); if (rc <= 0) return rc; cifs_sb = CIFS_FILE_SB(file); cfile = file->private_data; tcon = tlink_tcon(cfile->tlink); if (!tcon->ses->server->ops->async_writev) return -ENOSYS; ctx = cifs_aio_ctx_alloc(); if (!ctx) return -ENOMEM; ctx->cfile = cifsFileInfo_get(cfile); if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; ctx->pos = iocb->ki_pos; ctx->direct_io = direct; ctx->nr_pinned_pages = 0; if (user_backed_iter(from)) { /* * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as * they contain references to the calling process's virtual * memory layout which won't be available in an async worker * thread. This also takes a pin on every folio involved. */ rc = netfs_extract_user_iter(from, iov_iter_count(from), &ctx->iter, 0); if (rc < 0) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return rc; } ctx->nr_pinned_pages = rc; ctx->bv = (void *)ctx->iter.bvec; ctx->bv_need_unpin = iov_iter_extract_will_pin(from); } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) && !is_sync_kiocb(iocb)) { /* * If the op is asynchronous, we need to copy the list attached * to a BVEC/KVEC-type iterator, but we assume that the storage * will be pinned by the caller; in any case, we may or may not * be able to pin the pages, so we don't try. */ ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL); if (!ctx->bv) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return -ENOMEM; } } else { /* * Otherwise, we just pass the iterator down as-is and rely on * the caller to make sure the pages referred to by the * iterator don't evaporate. */ ctx->iter = *from; } ctx->len = iov_iter_count(&ctx->iter); /* grab a lock here due to read response handlers can access ctx */ mutex_lock(&ctx->aio_mutex); rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &ctx->iter, cfile, cifs_sb, &ctx->list, ctx); /* * If at least one write was successfully sent, then discard any rc * value from the later writes. 
If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;

	cifs_revalidate_mapping(file->f_inode);
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
*/ cifs_zap_mapping(inode); cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n", inode); cinode->oplock = 0; } out: cifs_put_writer(cinode); return written; } static struct cifs_readdata *cifs_readdata_alloc(work_func_t complete) { struct cifs_readdata *rdata; rdata = kzalloc(sizeof(*rdata), GFP_KERNEL); if (rdata) { kref_init(&rdata->refcount); INIT_LIST_HEAD(&rdata->list); init_completion(&rdata->done); INIT_WORK(&rdata->work, complete); } return rdata; } void cifs_readdata_release(struct kref *refcount) { struct cifs_readdata *rdata = container_of(refcount, struct cifs_readdata, refcount); if (rdata->ctx) kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release); #ifdef CONFIG_CIFS_SMB_DIRECT if (rdata->mr) { smbd_deregister_mr(rdata->mr); rdata->mr = NULL; } #endif if (rdata->cfile) cifsFileInfo_put(rdata->cfile); kfree(rdata); } static void collect_uncached_read_data(struct cifs_aio_ctx *ctx); static void cifs_uncached_readv_complete(struct work_struct *work) { struct cifs_readdata *rdata = container_of(work, struct cifs_readdata, work); complete(&rdata->done); collect_uncached_read_data(rdata->ctx); /* the below call can possibly free the last ref to aio ctx */ kref_put(&rdata->refcount, cifs_readdata_release); } static int cifs_resend_rdata(struct cifs_readdata *rdata, struct list_head *rdata_list, struct cifs_aio_ctx *ctx) { unsigned int rsize; struct cifs_credits credits; int rc; struct TCP_Server_Info *server; /* XXX: should we pick a new channel here? */ server = rdata->server; do { if (rdata->cfile->invalidHandle) { rc = cifs_reopen_file(rdata->cfile, true); if (rc == -EAGAIN) continue; else if (rc) break; } /* * Wait for credits to resend this rdata. * Note: we are attempting to resend the whole rdata not in * segments */ do { rc = server->ops->wait_mtu_credits(server, rdata->bytes, &rsize, &credits); if (rc) goto fail; if (rsize < rdata->bytes) { add_credits_and_wake_if(server, &credits, 0); msleep(1000); } } while (rsize < rdata->bytes); rdata->credits = credits; rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (!rc) { if (rdata->cfile->invalidHandle) rc = -EAGAIN; else { #ifdef CONFIG_CIFS_SMB_DIRECT if (rdata->mr) { rdata->mr->need_invalidate = true; smbd_deregister_mr(rdata->mr); rdata->mr = NULL; } #endif rc = server->ops->async_readv(rdata); } } /* If the read was successfully sent, we are done */ if (!rc) { /* Add to aio pending list */ list_add_tail(&rdata->list, rdata_list); return 0; } /* Roll back credits and retry if needed */ add_credits_and_wake_if(server, &rdata->credits, 0); } while (rc == -EAGAIN); fail: kref_put(&rdata->refcount, cifs_readdata_release); return rc; } static int cifs_send_async_read(loff_t fpos, size_t len, struct cifsFileInfo *open_file, struct cifs_sb_info *cifs_sb, struct list_head *rdata_list, struct cifs_aio_ctx *ctx) { struct cifs_readdata *rdata; unsigned int rsize, nsegs, max_segs = INT_MAX; struct cifs_credits credits_on_stack; struct cifs_credits *credits = &credits_on_stack; size_t cur_len, max_len; int rc; pid_t pid; struct TCP_Server_Info *server; server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->smbd_conn) max_segs = server->smbd_conn->max_frmr_depth; #endif if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; do { if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); if (rc == -EAGAIN) continue; else if (rc) break; } if (cifs_sb->ctx->rsize == 0) cifs_sb->ctx->rsize = 
server->ops->negotiate_rsize(tlink_tcon(open_file->tlink), cifs_sb->ctx); rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, credits); if (rc) break; max_len = min_t(size_t, len, rsize); cur_len = cifs_limit_bvec_subset(&ctx->iter, max_len, max_segs, &nsegs); cifs_dbg(FYI, "read-to-iter len=%zx/%zx nsegs=%u/%lu/%u\n", cur_len, max_len, nsegs, ctx->iter.nr_segs, max_segs); if (cur_len == 0) { rc = -EIO; add_credits_and_wake_if(server, credits, 0); break; } rdata = cifs_readdata_alloc(cifs_uncached_readv_complete); if (!rdata) { add_credits_and_wake_if(server, credits, 0); rc = -ENOMEM; break; } rdata->server = server; rdata->cfile = cifsFileInfo_get(open_file); rdata->offset = fpos; rdata->bytes = cur_len; rdata->pid = pid; rdata->credits = credits_on_stack; rdata->ctx = ctx; kref_get(&ctx->refcount); rdata->iter = ctx->iter; iov_iter_truncate(&rdata->iter, cur_len); rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (!rc) { if (rdata->cfile->invalidHandle) rc = -EAGAIN; else rc = server->ops->async_readv(rdata); } if (rc) { add_credits_and_wake_if(server, &rdata->credits, 0); kref_put(&rdata->refcount, cifs_readdata_release); if (rc == -EAGAIN) continue; break; } list_add_tail(&rdata->list, rdata_list); iov_iter_advance(&ctx->iter, cur_len); fpos += cur_len; len -= cur_len; } while (len > 0); return rc; } static void collect_uncached_read_data(struct cifs_aio_ctx *ctx) { struct cifs_readdata *rdata, *tmp; struct cifs_sb_info *cifs_sb; int rc; cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb); mutex_lock(&ctx->aio_mutex); if (list_empty(&ctx->list)) { mutex_unlock(&ctx->aio_mutex); return; } rc = ctx->rc; /* the loop below should proceed in the order of increasing offsets */ again: list_for_each_entry_safe(rdata, tmp, &ctx->list, list) { if (!rc) { if (!try_wait_for_completion(&rdata->done)) { mutex_unlock(&ctx->aio_mutex); return; } if (rdata->result == -EAGAIN) { /* resend call if it's a retryable error */ struct list_head tmp_list; unsigned int got_bytes = rdata->got_bytes; list_del_init(&rdata->list); INIT_LIST_HEAD(&tmp_list); if (ctx->direct_io) { /* * Re-use rdata as this is a * direct I/O */ rc = cifs_resend_rdata( rdata, &tmp_list, ctx); } else { rc = cifs_send_async_read( rdata->offset + got_bytes, rdata->bytes - got_bytes, rdata->cfile, cifs_sb, &tmp_list, ctx); kref_put(&rdata->refcount, cifs_readdata_release); } list_splice(&tmp_list, &ctx->list); goto again; } else if (rdata->result) rc = rdata->result; /* if there was a short read -- discard anything left */ if (rdata->got_bytes && rdata->got_bytes < rdata->bytes) rc = -ENODATA; ctx->total_len += rdata->got_bytes; } list_del_init(&rdata->list); kref_put(&rdata->refcount, cifs_readdata_release); } /* mask nodata case */ if (rc == -ENODATA) rc = 0; ctx->rc = (rc == 0) ? 
(ssize_t)ctx->total_len : rc; mutex_unlock(&ctx->aio_mutex); if (ctx->iocb && ctx->iocb->ki_complete) ctx->iocb->ki_complete(ctx->iocb, ctx->rc); else complete(&ctx->done); } static ssize_t __cifs_readv( struct kiocb *iocb, struct iov_iter *to, bool direct) { size_t len; struct file *file = iocb->ki_filp; struct cifs_sb_info *cifs_sb; struct cifsFileInfo *cfile; struct cifs_tcon *tcon; ssize_t rc, total_read = 0; loff_t offset = iocb->ki_pos; struct cifs_aio_ctx *ctx; len = iov_iter_count(to); if (!len) return 0; cifs_sb = CIFS_FILE_SB(file); cfile = file->private_data; tcon = tlink_tcon(cfile->tlink); if (!tcon->ses->server->ops->async_readv) return -ENOSYS; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cifs_dbg(FYI, "attempting read on write only file instance\n"); ctx = cifs_aio_ctx_alloc(); if (!ctx) return -ENOMEM; ctx->pos = offset; ctx->direct_io = direct; ctx->len = len; ctx->cfile = cifsFileInfo_get(cfile); ctx->nr_pinned_pages = 0; if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; if (user_backed_iter(to)) { /* * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as * they contain references to the calling process's virtual * memory layout which won't be available in an async worker * thread. This also takes a pin on every folio involved. */ rc = netfs_extract_user_iter(to, iov_iter_count(to), &ctx->iter, 0); if (rc < 0) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return rc; } ctx->nr_pinned_pages = rc; ctx->bv = (void *)ctx->iter.bvec; ctx->bv_need_unpin = iov_iter_extract_will_pin(to); ctx->should_dirty = true; } else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) && !is_sync_kiocb(iocb)) { /* * If the op is asynchronous, we need to copy the list attached * to a BVEC/KVEC-type iterator, but we assume that the storage * will be retained by the caller; in any case, we may or may * not be able to pin the pages, so we don't try. */ ctx->bv = (void *)dup_iter(&ctx->iter, to, GFP_KERNEL); if (!ctx->bv) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return -ENOMEM; } } else { /* * Otherwise, we just pass the iterator down as-is and rely on * the caller to make sure the pages referred to by the * iterator don't evaporate. 
*/ ctx->iter = *to; } if (direct) { rc = filemap_write_and_wait_range(file->f_inode->i_mapping, offset, offset + len - 1); if (rc) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return -EAGAIN; } } /* grab a lock here due to read response handlers can access ctx */ mutex_lock(&ctx->aio_mutex); rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx); /* if at least one read request send succeeded, then reset rc */ if (!list_empty(&ctx->list)) rc = 0; mutex_unlock(&ctx->aio_mutex); if (rc) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return rc; } if (!is_sync_kiocb(iocb)) { kref_put(&ctx->refcount, cifs_aio_ctx_release); return -EIOCBQUEUED; } rc = wait_for_completion_killable(&ctx->done); if (rc) { mutex_lock(&ctx->aio_mutex); ctx->rc = rc = -EINTR; total_read = ctx->total_len; mutex_unlock(&ctx->aio_mutex); } else { rc = ctx->rc; total_read = ctx->total_len; } kref_put(&ctx->refcount, cifs_aio_ctx_release); if (total_read) { iocb->ki_pos += total_read; return total_read; } return rc; } ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to) { return __cifs_readv(iocb, to, true); } ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) { return __cifs_readv(iocb, to, false); } ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsFileInfo *cfile = (struct cifsFileInfo *) iocb->ki_filp->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = -EACCES; /* * In strict cache mode we need to read from the server all the time * if we don't have level II oplock because the server can delay mtime * change - so we can't make a decision about inode invalidating. * And we can also fail with pagereading if there are mandatory locks * on pages affected by this read but not on the region from pos to * pos+len-1. */ if (!CIFS_CACHE_READ(cinode)) return cifs_user_readv(iocb, to); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) return generic_file_read_iter(iocb, to); /* * We need to hold the sem to be sure nobody modifies lock list * with a brlock that prevents reading. 
*/ down_read(&cinode->lock_sem); if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to), tcon->ses->server->vals->shared_lock_type, 0, NULL, CIFS_READ_OP)) rc = generic_file_read_iter(iocb, to); up_read(&cinode->lock_sem); return rc; } static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) { int rc = -EACCES; unsigned int bytes_read = 0; unsigned int total_read; unsigned int current_read_size; unsigned int rsize; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct TCP_Server_Info *server; unsigned int xid; char *cur_offset; struct cifsFileInfo *open_file; struct cifs_io_parms io_parms = {0}; int buf_type = CIFS_NO_BUFFER; __u32 pid; xid = get_xid(); cifs_sb = CIFS_FILE_SB(file); /* FIXME: set up handlers for larger reads and/or convert to async */ rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize); if (file->private_data == NULL) { rc = -EBADF; free_xid(xid); return rc; } open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); server = cifs_pick_channel(tcon->ses); if (!server->ops->sync_read) { free_xid(xid); return -ENOSYS; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cifs_dbg(FYI, "attempting read on write only file instance\n"); for (total_read = 0, cur_offset = read_data; read_size > total_read; total_read += bytes_read, cur_offset += bytes_read) { do { current_read_size = min_t(uint, read_size - total_read, rsize); /* * For windows me and 9x we do not want to request more * than it negotiated since it will refuse the read * then. */ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)) { current_read_size = min_t(uint, current_read_size, CIFSMaxBufSize); } if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); if (rc != 0) break; } io_parms.pid = pid; io_parms.tcon = tcon; io_parms.offset = *offset; io_parms.length = current_read_size; io_parms.server = server; rc = server->ops->sync_read(xid, &open_file->fid, &io_parms, &bytes_read, &cur_offset, &buf_type); } while (rc == -EAGAIN); if (rc || (bytes_read == 0)) { if (total_read) { break; } else { free_xid(xid); return rc; } } else { cifs_stats_bytes_read(tcon, total_read); *offset += bytes_read; } } free_xid(xid); return total_read; } /* * If the page is mmap'ed into a process' page tables, then we need to make * sure that it doesn't change while being written back. */ static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); /* Wait for the folio to be written to the cache before we allow it to * be modified. We then assume the entire folio will need writing back. 
 */
#ifdef CONFIG_CIFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		return VM_FAULT_RETRY;
#endif

	folio_wait_writeback(folio);

	if (folio_lock_killable(folio) < 0)
		return VM_FAULT_RETRY;
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int xid, rc = 0;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode)))
		rc = cifs_zap_mapping(inode);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();

	rc = cifs_revalidate_file(file);
	if (rc)
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

/*
 * Unlock a bunch of folios in the pagecache.
 */
static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first,
			       pgoff_t last)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, first);

	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		folio_unlock(folio);
	}
	rcu_read_unlock();
}

static void cifs_readahead_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						   struct cifs_readdata, work);
	struct folio *folio;
	pgoff_t last;
	bool good = rdata->result == 0 ||
		    (rdata->result == -EAGAIN && rdata->got_bytes);

	XA_STATE(xas, &rdata->mapping->i_pages, rdata->offset / PAGE_SIZE);

	if (good)
		cifs_readahead_to_fscache(rdata->mapping->host,
					  rdata->offset, rdata->bytes);

	if (iov_iter_count(&rdata->iter) > 0)
		iov_iter_zero(iov_iter_count(&rdata->iter), &rdata->iter);

	last = (rdata->offset + rdata->bytes - 1) / PAGE_SIZE;

	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		if (good) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}
		folio_unlock(folio);
	}
	rcu_read_unlock();

	kref_put(&rdata->refcount, cifs_readdata_release);
}

static void cifs_readahead(struct readahead_control *ractl)
{
	struct cifsFileInfo *open_file = ractl->file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
	struct TCP_Server_Info *server;
	unsigned int xid, nr_pages, cache_nr_pages = 0;
	unsigned int ra_pages;
	pgoff_t next_cached = ULONG_MAX, ra_index;
	bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
	bool check_cache = caching;
	pid_t pid;
	int rc = 0;

	/* Note that readahead_count() lags behind our dequeuing of pages from
	 * the ractl, so we have to keep track for ourselves.
	 */
	ra_pages = readahead_count(ractl);
	ra_index = readahead_index(ractl);

	xid = get_xid();

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, ractl->file, ractl->mapping, ra_pages);

	/*
	 * Chop the readahead request up into rsize-sized read requests.
	 */
	while ((nr_pages = ra_pages)) {
		unsigned int i, rsize;
		struct cifs_readdata *rdata;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		struct folio *folio;
		pgoff_t fsize;

		/*
		 * Find out if we have anything cached in the range of
		 * interest, and if so, where the next chunk of cached data is.
*/ if (caching) { if (check_cache) { rc = cifs_fscache_query_occupancy( ractl->mapping->host, ra_index, nr_pages, &next_cached, &cache_nr_pages); if (rc < 0) caching = false; check_cache = false; } if (ra_index == next_cached) { /* * TODO: Send a whole batch of pages to be read * by the cache. */ folio = readahead_folio(ractl); fsize = folio_nr_pages(folio); ra_pages -= fsize; ra_index += fsize; if (cifs_readpage_from_fscache(ractl->mapping->host, &folio->page) < 0) { /* * TODO: Deal with cache read failure * here, but for the moment, delegate * that to readpage. */ caching = false; } folio_unlock(folio); next_cached += fsize; cache_nr_pages -= fsize; if (cache_nr_pages == 0) check_cache = true; continue; } } if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); if (rc) { if (rc == -EAGAIN) continue; break; } } if (cifs_sb->ctx->rsize == 0) cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tlink_tcon(open_file->tlink), cifs_sb->ctx); rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, credits); if (rc) break; nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages); if (next_cached != ULONG_MAX) nr_pages = min_t(size_t, nr_pages, next_cached - ra_index); /* * Give up immediately if rsize is too small to read an entire * page. The VFS will fall back to readpage. We should never * reach this point however since we set ra_pages to 0 when the * rsize is smaller than a cache page. */ if (unlikely(!nr_pages)) { add_credits_and_wake_if(server, credits, 0); break; } rdata = cifs_readdata_alloc(cifs_readahead_complete); if (!rdata) { /* best to give up if we're out of mem */ add_credits_and_wake_if(server, credits, 0); break; } rdata->offset = ra_index * PAGE_SIZE; rdata->bytes = nr_pages * PAGE_SIZE; rdata->cfile = cifsFileInfo_get(open_file); rdata->server = server; rdata->mapping = ractl->mapping; rdata->pid = pid; rdata->credits = credits_on_stack; for (i = 0; i < nr_pages; i++) { if (!readahead_folio(ractl)) WARN_ON(1); } ra_pages -= nr_pages; ra_index += nr_pages; iov_iter_xarray(&rdata->iter, ITER_DEST, &rdata->mapping->i_pages, rdata->offset, rdata->bytes); rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (!rc) { if (rdata->cfile->invalidHandle) rc = -EAGAIN; else rc = server->ops->async_readv(rdata); } if (rc) { add_credits_and_wake_if(server, &rdata->credits, 0); cifs_unlock_folios(rdata->mapping, rdata->offset / PAGE_SIZE, (rdata->offset + rdata->bytes - 1) / PAGE_SIZE); /* Fallback to the readpage in error/reconnect cases */ kref_put(&rdata->refcount, cifs_readdata_release); break; } kref_put(&rdata->refcount, cifs_readdata_release); } free_xid(xid); } /* * cifs_readpage_worker must be called with the page pinned */ static int cifs_readpage_worker(struct file *file, struct page *page, loff_t *poffset) { char *read_data; int rc; /* Is the page cached? 
*/ rc = cifs_readpage_from_fscache(file_inode(file), page); if (rc == 0) goto read_complete; read_data = kmap(page); /* for reads over a certain size could initiate async read ahead */ rc = cifs_read(file, read_data, PAGE_SIZE, poffset); if (rc < 0) goto io_error; else cifs_dbg(FYI, "Bytes read %d\n", rc); /* we do not want atime to be less than mtime, it broke some apps */ file_inode(file)->i_atime = current_time(file_inode(file)); if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime))) file_inode(file)->i_atime = file_inode(file)->i_mtime; else file_inode(file)->i_atime = current_time(file_inode(file)); if (PAGE_SIZE > rc) memset(read_data + rc, 0, PAGE_SIZE - rc); flush_dcache_page(page); SetPageUptodate(page); rc = 0; io_error: kunmap(page); read_complete: unlock_page(page); return rc; } static int cifs_read_folio(struct file *file, struct folio *folio) { struct page *page = &folio->page; loff_t offset = page_file_offset(page); int rc = -EACCES; unsigned int xid; xid = get_xid(); if (file->private_data == NULL) { rc = -EBADF; free_xid(xid); return rc; } cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n", page, (int)offset, (int)offset); rc = cifs_readpage_worker(file, page, &offset); free_xid(xid); return rc; } static int is_inode_writable(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file; spin_lock(&cifs_inode->open_file_lock); list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { spin_unlock(&cifs_inode->open_file_lock); return 1; } } spin_unlock(&cifs_inode->open_file_lock); return 0; } /* We do not want to update the file size from server for inodes open for write - to avoid races with writepage extending the file - in the future we could consider allowing refreshing the inode only on increases in the file size but this is tricky to do without racing with writebehind page caching in the current Linux kernel design */ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) { if (!cifsInode) return true; if (is_inode_writable(cifsInode)) { /* This inode is open for write at least once */ struct cifs_sb_info *cifs_sb; cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on directio we can change size safely */ return true; } if (i_size_read(&cifsInode->netfs.inode) < end_of_file) return true; return false; } else return true; } static int cifs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int oncethru = 0; pgoff_t index = pos >> PAGE_SHIFT; loff_t offset = pos & (PAGE_SIZE - 1); loff_t page_start = pos & PAGE_MASK; loff_t i_size; struct page *page; int rc = 0; cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); start: page = grab_cache_page_write_begin(mapping, index); if (!page) { rc = -ENOMEM; goto out; } if (PageUptodate(page)) goto out; /* * If we write a full page it will be up to date, no need to read from * the server. If the write is short, we'll end up doing a sync write * instead. */ if (len == PAGE_SIZE) goto out; /* * optimize away the read when we have an oplock, and we're not * expecting to use any of the data we'd be reading in. That * is, when the page lies beyond the EOF, or straddles the EOF * and the write will cover all of the existing data. 
*/ if (CIFS_CACHE_READ(CIFS_I(mapping->host))) { i_size = i_size_read(mapping->host); if (page_start >= i_size || (offset == 0 && (pos + len) >= i_size)) { zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE); /* * PageChecked means that the parts of the page * to which we're not writing are considered up * to date. Once the data is copied to the * page, it can be set uptodate. */ SetPageChecked(page); goto out; } } if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) { /* * might as well read a page, it is fast enough. If we get * an error, we don't need to return it. cifs_write_end will * do a sync write instead since PG_uptodate isn't set. */ cifs_readpage_worker(file, page, &page_start); put_page(page); oncethru = 1; goto start; } else { /* we could try using another file handle if there is one - but how would we lock it to prevent close of that handle racing with this read? In any case this will be written out by write_end so is fine */ } out: *pagep = page; return rc; } static bool cifs_release_folio(struct folio *folio, gfp_t gfp) { if (folio_test_private(folio)) return 0; if (folio_test_fscache(folio)) { if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; folio_wait_fscache(folio); } fscache_note_page_release(cifs_inode_cookie(folio->mapping->host)); return true; } static void cifs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { folio_wait_fscache(folio); } static int cifs_launder_folio(struct folio *folio) { int rc = 0; loff_t range_start = folio_pos(folio); loff_t range_end = range_start + folio_size(folio); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, .range_start = range_start, .range_end = range_end, }; cifs_dbg(FYI, "Launder page: %lu\n", folio->index); if (folio_clear_dirty_for_io(folio)) rc = cifs_writepage_locked(&folio->page, &wbc); folio_wait_fscache(folio); return rc; } void cifs_oplock_break(struct work_struct *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct tcon_link *tlink; int rc = 0; bool purge_cache = false, oplock_break_cancelled; __u64 persistent_fid, volatile_fid; __u16 net_fid; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) goto out; tcon = tlink_tcon(tlink); server = tcon->ses->server; server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, cfile->oplock_epoch, &purge_cache); if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n", inode); cinode->oplock = 0; } if (inode && S_ISREG(inode->i_mode)) { if (CIFS_CACHE_READ(cinode)) break_lease(inode, O_RDONLY); else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); if (!CIFS_CACHE_READ(cinode) || purge_cache) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_zap_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); if (CIFS_CACHE_WRITE(cinode)) goto oplock_break_ack; } rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc); oplock_break_ack: /* * When oplock break is received and there are no active * file handles but cached, then schedule deferred close immediately. 
* So, new open will not use cached handle. */ if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes)) cifs_close_deferred_file(cinode); persistent_fid = cfile->fid.persistent_fid; volatile_fid = cfile->fid.volatile_fid; net_fid = cfile->fid.netfid; oplock_break_cancelled = cfile->oplock_break_cancelled; _cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false); /* * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require * an acknowledgment to be sent when the file has already been closed. */ spin_lock(&cinode->open_file_lock); /* check list empty since can race with kill_sb calling tree disconnect */ if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { spin_unlock(&cinode->open_file_lock); rc = server->ops->oplock_response(tcon, persistent_fid, volatile_fid, net_fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } else spin_unlock(&cinode->open_file_lock); cifs_put_tlink(tlink); out: cifs_done_oplock_break(cinode); } /* * The presence of cifs_direct_io() in the address space ops vector * allows open() with the O_DIRECT flag, which would have failed otherwise. * * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests * so this method should never be called. * * Direct IO is not yet supported in the cached mode. */ static ssize_t cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter) { /* * FIXME * Eventually need to support direct IO for non forcedirectio mounts */ return -EINVAL; } static int cifs_swap_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct cifsFileInfo *cfile = swap_file->private_data; struct inode *inode = swap_file->f_mapping->host; unsigned long blocks; long long isize; cifs_dbg(FYI, "swap activate\n"); if (!swap_file->f_mapping->a_ops->swap_rw) /* Cannot support swap */ return -EINVAL; spin_lock(&inode->i_lock); blocks = inode->i_blocks; isize = inode->i_size; spin_unlock(&inode->i_lock); if (blocks*512 < isize) { pr_warn("swap activate: swapfile has holes\n"); return -EINVAL; } *span = sis->pages; pr_warn_once("Swap support over SMB3 is experimental\n"); /* * TODO: consider adding ACL (or documenting how) to prevent other * users (on this or other systems) from reading it */ /* TODO: add sk_set_memalloc(inet) or similar */ if (cfile) cfile->swapfile = true; /* * TODO: Since file already open, we can't open with DENY_ALL here * but we could add call to grab a byte range lock to prevent others * from reading or writing the file */ sis->flags |= SWP_FS_OPS; return add_swap_extent(sis, 0, sis->max, 0); } static void cifs_swap_deactivate(struct file *file) { struct cifsFileInfo *cfile = file->private_data; cifs_dbg(FYI, "swap deactivate\n"); /* TODO: undo sk_set_memalloc(inet) will eventually be needed */ if (cfile) cfile->swapfile = false; /* do we need to unpin (or unlock) the file */ } /* * Mark a page as having been made dirty and thus needing writeback. We also * need to pin the cache object to write back to. 
*/ #ifdef CONFIG_CIFS_FSCACHE static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio) { return fscache_dirty_folio(mapping, folio, cifs_inode_cookie(mapping->host)); } #else #define cifs_dirty_folio filemap_dirty_folio #endif const struct address_space_operations cifs_addr_ops = { .read_folio = cifs_read_folio, .readahead = cifs_readahead, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .dirty_folio = cifs_dirty_folio, .release_folio = cifs_release_folio, .direct_IO = cifs_direct_io, .invalidate_folio = cifs_invalidate_folio, .launder_folio = cifs_launder_folio, .migrate_folio = filemap_migrate_folio, /* * TODO: investigate and if useful we could add an is_dirty_writeback * helper if needed */ .swap_activate = cifs_swap_activate, .swap_deactivate = cifs_swap_deactivate, }; /* * cifs_readahead requires the server to support a buffer large enough to * contain the header plus one complete page of data. Otherwise, we need * to leave cifs_readahead out of the address space operations. */ const struct address_space_operations cifs_addr_ops_smallbuf = { .read_folio = cifs_read_folio, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .dirty_folio = cifs_dirty_folio, .release_folio = cifs_release_folio, .invalidate_folio = cifs_invalidate_folio, .launder_folio = cifs_launder_folio, .migrate_folio = filemap_migrate_folio, };
linux-master
fs/smb/client/file.c
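Editor's note: the EOF test in cifs_write_begin() above decides when the short-write path may skip reading the page from the server (page entirely past EOF, or a write from the page head that covers everything up to EOF). Below is a minimal, self-contained userspace sketch of just that arithmetic; write_can_skip_read() and SKETCH_PAGE_SIZE are invented names for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096LL

static bool write_can_skip_read(long long pos, unsigned int len, long long i_size)
{
	long long page_start = pos & ~(SKETCH_PAGE_SIZE - 1);
	long long offset = pos & (SKETCH_PAGE_SIZE - 1);

	/* Page lies entirely beyond EOF, or the write starts at the page
	 * head and reaches EOF: the remainder can simply be zeroed. */
	return page_start >= i_size || (offset == 0 && pos + len >= i_size);
}

int main(void)
{
	printf("%d\n", write_can_skip_read(8192, 100, 4096)); /* 1: page past EOF */
	printf("%d\n", write_can_skip_read(4096, 512, 4300)); /* 1: covers data up to EOF */
	printf("%d\n", write_can_skip_read(4100, 10, 8192));  /* 0: must read first */
	return 0;
}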
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2007,2008 * Author(s): Steve French ([email protected]) * * Contains the routines for mapping CIFS/NTFS ACLs * */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/keyctl.h> #include <linux/key-type.h> #include <uapi/linux/posix_acl.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <keys/user-type.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "cifs_debug.h" #include "fs_context.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" /* security id for everyone/world system group */ static const struct cifs_sid sid_everyone = { 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; /* security id for Authenticated Users system group */ static const struct cifs_sid sid_authusers = { 1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} }; /* S-1-22-1 Unmapped Unix users */ static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-22-2 Unmapped Unix groups */ static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* * See https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ /* S-1-5-88 MS NFS and Apple style UID/GID/mode */ /* S-1-5-88-1 Unix uid */ static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-2 Unix gid */ static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-3 Unix mode */ static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; static const struct cred *root_cred; static int cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { char *payload; /* * If the payload is less than or equal to the size of a pointer, then * an allocation here is wasteful. Just copy the data directly to the * payload.value union member instead. * * With this however, you must check the datalen before trying to * dereference payload.data! */ if (prep->datalen <= sizeof(key->payload)) { key->payload.data[0] = NULL; memcpy(&key->payload, prep->data, prep->datalen); } else { payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); if (!payload) return -ENOMEM; key->payload.data[0] = payload; } key->datalen = prep->datalen; return 0; } static inline void cifs_idmap_key_destroy(struct key *key) { if (key->datalen > sizeof(key->payload)) kfree(key->payload.data[0]); } static struct key_type cifs_idmap_key_type = { .name = "cifs.idmap", .instantiate = cifs_idmap_key_instantiate, .destroy = cifs_idmap_key_destroy, .describe = user_describe, }; static char * sid_to_key_str(struct cifs_sid *sidptr, unsigned int type) { int i, len; unsigned int saval; char *sidstr, *strptr; unsigned long long id_auth_val; /* 3 bytes for prefix */ sidstr = kmalloc(3 + SID_STRING_BASE_SIZE + (SID_STRING_SUBAUTH_SIZE * sidptr->num_subauth), GFP_KERNEL); if (!sidstr) return sidstr; strptr = sidstr; len = sprintf(strptr, "%cs:S-%hhu", type == SIDOWNER ? 
'o' : 'g', sidptr->revision); strptr += len; /* The authority field is a single 48-bit number */ id_auth_val = (unsigned long long)sidptr->authority[5]; id_auth_val |= (unsigned long long)sidptr->authority[4] << 8; id_auth_val |= (unsigned long long)sidptr->authority[3] << 16; id_auth_val |= (unsigned long long)sidptr->authority[2] << 24; id_auth_val |= (unsigned long long)sidptr->authority[1] << 32; id_auth_val |= (unsigned long long)sidptr->authority[0] << 48; /* * MS-DTYP states that if the authority is >= 2^32, then it should be * expressed as a hex value. */ if (id_auth_val <= UINT_MAX) len = sprintf(strptr, "-%llu", id_auth_val); else len = sprintf(strptr, "-0x%llx", id_auth_val); strptr += len; for (i = 0; i < sidptr->num_subauth; ++i) { saval = le32_to_cpu(sidptr->sub_auth[i]); len = sprintf(strptr, "-%u", saval); strptr += len; } return sidstr; } /* * if the two SIDs (roughly equivalent to a UUID for a user or group) are * the same returns zero, if they do not match returns non-zero. */ static int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid) { int i; int num_subauth, num_sat, num_saw; if ((!ctsid) || (!cwsid)) return 1; /* compare the revision */ if (ctsid->revision != cwsid->revision) { if (ctsid->revision > cwsid->revision) return 1; else return -1; } /* compare all of the six auth values */ for (i = 0; i < NUM_AUTHS; ++i) { if (ctsid->authority[i] != cwsid->authority[i]) { if (ctsid->authority[i] > cwsid->authority[i]) return 1; else return -1; } } /* compare all of the subauth values if any */ num_sat = ctsid->num_subauth; num_saw = cwsid->num_subauth; num_subauth = num_sat < num_saw ? num_sat : num_saw; if (num_subauth) { for (i = 0; i < num_subauth; ++i) { if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { if (le32_to_cpu(ctsid->sub_auth[i]) > le32_to_cpu(cwsid->sub_auth[i])) return 1; else return -1; } } } return 0; /* sids compare/match */ } static bool is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group) { int i; int num_subauth; const struct cifs_sid *pwell_known_sid; if (!psid || (puid == NULL)) return false; num_subauth = psid->num_subauth; /* check if Mac (or Windows NFS) vs. 
Samba format for Unix owner SID */ if (num_subauth == 2) { if (is_group) pwell_known_sid = &sid_unix_groups; else pwell_known_sid = &sid_unix_users; } else if (num_subauth == 3) { if (is_group) pwell_known_sid = &sid_unix_NFS_groups; else pwell_known_sid = &sid_unix_NFS_users; } else return false; /* compare the revision */ if (psid->revision != pwell_known_sid->revision) return false; /* compare all of the six auth values */ for (i = 0; i < NUM_AUTHS; ++i) { if (psid->authority[i] != pwell_known_sid->authority[i]) { cifs_dbg(FYI, "auth %d did not match\n", i); return false; } } if (num_subauth == 2) { if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) return false; *puid = le32_to_cpu(psid->sub_auth[1]); } else /* 3 subauths, ie Windows/Mac style */ { *puid = le32_to_cpu(psid->sub_auth[0]); if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) || (psid->sub_auth[1] != pwell_known_sid->sub_auth[1])) return false; *puid = le32_to_cpu(psid->sub_auth[2]); } cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid); return true; /* well known sid found, uid returned */ } static __u16 cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src) { int i; __u16 size = 1 + 1 + 6; dst->revision = src->revision; dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES); for (i = 0; i < NUM_AUTHS; ++i) dst->authority[i] = src->authority[i]; for (i = 0; i < dst->num_subauth; ++i) dst->sub_auth[i] = src->sub_auth[i]; size += (dst->num_subauth * 4); return size; } static int id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid) { int rc; struct key *sidkey; struct cifs_sid *ksid; unsigned int ksid_size; char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */ const struct cred *saved_cred; rc = snprintf(desc, sizeof(desc), "%ci:%u", sidtype == SIDOWNER ? 'o' : 'g', cid); if (rc >= sizeof(desc)) return -EINVAL; rc = 0; saved_cred = override_creds(root_cred); sidkey = request_key(&cifs_idmap_key_type, desc, ""); if (IS_ERR(sidkey)) { rc = -EINVAL; cifs_dbg(FYI, "%s: Can't map %cid %u to a SID\n", __func__, sidtype == SIDOWNER ? 'u' : 'g', cid); goto out_revert_creds; } else if (sidkey->datalen < CIFS_SID_BASE_SIZE) { rc = -EIO; cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n", __func__, sidkey->datalen); goto invalidate_key; } /* * A sid is usually too large to be embedded in payload.value, but if * there are no subauthorities and the host has 8-byte pointers, then * it could be. */ ksid = sidkey->datalen <= sizeof(sidkey->payload) ? (struct cifs_sid *)&sidkey->payload : (struct cifs_sid *)sidkey->payload.data[0]; ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32)); if (ksid_size > sidkey->datalen) { rc = -EIO; cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu, ksid_size=%u)\n", __func__, sidkey->datalen, ksid_size); goto invalidate_key; } cifs_copy_sid(ssid, ksid); out_key_put: key_put(sidkey); out_revert_creds: revert_creds(saved_cred); return rc; invalidate_key: key_invalidate(sidkey); goto out_key_put; } int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid, struct cifs_fattr *fattr, uint sidtype) { int rc = 0; struct key *sidkey; char *sidstr; const struct cred *saved_cred; kuid_t fuid = cifs_sb->ctx->linux_uid; kgid_t fgid = cifs_sb->ctx->linux_gid; /* * If we have too many subauthorities, then something is really wrong. * Just return an error. 
*/ if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) { cifs_dbg(FYI, "%s: %u subauthorities is too many!\n", __func__, psid->num_subauth); return -EIO; } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) || (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) { uint32_t unix_id; bool is_group; if (sidtype != SIDOWNER) is_group = true; else is_group = false; if (is_well_known_sid(psid, &unix_id, is_group) == false) goto try_upcall_to_get_id; if (is_group) { kgid_t gid; gid_t id; id = (gid_t)unix_id; gid = make_kgid(&init_user_ns, id); if (gid_valid(gid)) { fgid = gid; goto got_valid_id; } } else { kuid_t uid; uid_t id; id = (uid_t)unix_id; uid = make_kuid(&init_user_ns, id); if (uid_valid(uid)) { fuid = uid; goto got_valid_id; } } /* If unable to find uid/gid easily from SID try via upcall */ } try_upcall_to_get_id: sidstr = sid_to_key_str(psid, sidtype); if (!sidstr) return -ENOMEM; saved_cred = override_creds(root_cred); sidkey = request_key(&cifs_idmap_key_type, sidstr, ""); if (IS_ERR(sidkey)) { cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n", __func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g'); goto out_revert_creds; } /* * FIXME: Here we assume that uid_t and gid_t are same size. It's * probably a safe assumption but might be better to check based on * sidtype. */ BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t)); if (sidkey->datalen != sizeof(uid_t)) { cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n", __func__, sidkey->datalen); key_invalidate(sidkey); goto out_key_put; } if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; memcpy(&id, &sidkey->payload.data[0], sizeof(uid_t)); uid = make_kuid(&init_user_ns, id); if (uid_valid(uid)) fuid = uid; } else { kgid_t gid; gid_t id; memcpy(&id, &sidkey->payload.data[0], sizeof(gid_t)); gid = make_kgid(&init_user_ns, id); if (gid_valid(gid)) fgid = gid; } out_key_put: key_put(sidkey); out_revert_creds: revert_creds(saved_cred); kfree(sidstr); /* * Note that we return 0 here unconditionally. If the mapping * fails then we just fall back to using the ctx->linux_uid/linux_gid. */ got_valid_id: rc = 0; if (sidtype == SIDOWNER) fattr->cf_uid = fuid; else fattr->cf_gid = fgid; return rc; } int init_cifs_idmap(void) { struct cred *cred; struct key *keyring; int ret; cifs_dbg(FYI, "Registering the %s key type\n", cifs_idmap_key_type.name); /* create an override credential set with a special thread keyring in * which requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; keyring = keyring_alloc(".cifs_idmap", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&cifs_idmap_key_type); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; root_cred = cred; cifs_dbg(FYI, "cifs idmap keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } void exit_cifs_idmap(void) { key_revoke(root_cred->thread_keyring); unregister_key_type(&cifs_idmap_key_type); put_cred(root_cred); cifs_dbg(FYI, "Unregistered %s key type\n", cifs_idmap_key_type.name); } /* copy ntsd, owner sid, and group sid from a security descriptor to another */ static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, __u32 sidsoffset, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid) { struct cifs_sid *owner_sid_ptr, *group_sid_ptr; struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr; /* copy security descriptor control portion */ pnntsd->revision = pntsd->revision; pnntsd->type = pntsd->type; pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd)); pnntsd->sacloffset = 0; pnntsd->osidoffset = cpu_to_le32(sidsoffset); pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid)); /* copy owner sid */ if (pownersid) owner_sid_ptr = pownersid; else owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset); cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr); /* copy group sid */ if (pgrpsid) group_sid_ptr = pgrpsid; else group_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset + sizeof(struct cifs_sid)); cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr); return sidsoffset + (2 * sizeof(struct cifs_sid)); } /* change posix mode to reflect permissions pmode is the existing mode (we only want to overwrite part of this bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007 */ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, umode_t *pdenied, umode_t mask) { __u32 flags = le32_to_cpu(ace_flags); /* * Do not assume "preferred" or "canonical" order. * The first DENY or ALLOW ACE which matches perfectly is * the permission to be used. Once allowed or denied, same * permission in later ACEs do not matter. 
*/ /* If not already allowed, deny these bits */ if (type == ACCESS_DENIED) { if (flags & GENERIC_ALL && !(*pmode & mask & 0777)) *pdenied |= mask & 0777; if (((flags & GENERIC_WRITE) || ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) && !(*pmode & mask & 0222)) *pdenied |= mask & 0222; if (((flags & GENERIC_READ) || ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) && !(*pmode & mask & 0444)) *pdenied |= mask & 0444; if (((flags & GENERIC_EXECUTE) || ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) && !(*pmode & mask & 0111)) *pdenied |= mask & 0111; return; } else if (type != ACCESS_ALLOWED) { cifs_dbg(VFS, "unknown access control type %d\n", type); return; } /* else ACCESS_ALLOWED type */ if ((flags & GENERIC_ALL) && !(*pdenied & mask & 0777)) { *pmode |= mask & 0777; cifs_dbg(NOISY, "all perms\n"); return; } if (((flags & GENERIC_WRITE) || ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) && !(*pdenied & mask & 0222)) *pmode |= mask & 0222; if (((flags & GENERIC_READ) || ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) && !(*pdenied & mask & 0444)) *pmode |= mask & 0444; if (((flags & GENERIC_EXECUTE) || ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) && !(*pdenied & mask & 0111)) *pmode |= mask & 0111; /* If DELETE_CHILD is set only on an owner ACE, set sticky bit */ if (flags & FILE_DELETE_CHILD) { if (mask == ACL_OWNER_MASK) { if (!(*pdenied & 01000)) *pmode |= 01000; } else if (!(*pdenied & 01000)) { *pmode &= ~01000; *pdenied |= 01000; } } cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode); return; } /* Generate access flags to reflect permissions mode is the existing mode. This function is called for every ACE in the DACL whose SID matches with either owner or group or everyone. */ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, __u32 *pace_flags) { /* reset access mask */ *pace_flags = 0x0; /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */ mode &= bits_to_use; /* check for R/W/X UGO since we do not know whose flags is this but we have cleared all the bits sans RWX for either user or group or other as per bits_to_use */ if (mode & S_IRUGO) *pace_flags |= SET_FILE_READ_RIGHTS; if (mode & S_IWUGO) *pace_flags |= SET_FILE_WRITE_RIGHTS; if (mode & S_IXUGO) *pace_flags |= SET_FILE_EXEC_RIGHTS; cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n", mode, *pace_flags); return; } static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid) { __u16 size = 1 + 1 + 2 + 4; dst->type = src->type; dst->flags = src->flags; dst->access_req = src->access_req; /* Check if there's a replacement sid specified */ if (psid) size += cifs_copy_sid(&dst->sid, psid); else size += cifs_copy_sid(&dst->sid, &src->sid); dst->size = cpu_to_le16(size); return size; } static __u16 fill_ace_for_sid(struct cifs_ace *pntace, const struct cifs_sid *psid, __u64 nmode, umode_t bits, __u8 access_type, bool allow_delete_child) { int i; __u16 size = 0; __u32 access_req = 0; pntace->type = access_type; pntace->flags = 0x0; mode_to_access_flags(nmode, bits, &access_req); if (access_type == ACCESS_ALLOWED && allow_delete_child) access_req |= FILE_DELETE_CHILD; if (access_type == ACCESS_ALLOWED && !access_req) access_req = SET_MINIMUM_RIGHTS; else if (access_type == ACCESS_DENIED) access_req &= ~SET_MINIMUM_RIGHTS; pntace->access_req = cpu_to_le32(access_req); pntace->sid.revision = psid->revision; pntace->sid.num_subauth = psid->num_subauth; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = psid->authority[i]; for (i = 0; i < 
psid->num_subauth; i++) pntace->sid.sub_auth[i] = psid->sub_auth[i]; size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4); pntace->size = cpu_to_le16(size); return size; } #ifdef CONFIG_CIFS_DEBUG2 static void dump_ace(struct cifs_ace *pace, char *end_of_acl) { int num_subauth; /* validate that we do not go past end of acl */ if (le16_to_cpu(pace->size) < 16) { cifs_dbg(VFS, "ACE too small %d\n", le16_to_cpu(pace->size)); return; } if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) { cifs_dbg(VFS, "ACL too small to parse ACE\n"); return; } num_subauth = pace->sid.num_subauth; if (num_subauth) { int i; cifs_dbg(FYI, "ACE revision %d num_auth %d type %d flags %d size %d\n", pace->sid.revision, pace->sid.num_subauth, pace->type, pace->flags, le16_to_cpu(pace->size)); for (i = 0; i < num_subauth; ++i) { cifs_dbg(FYI, "ACE sub_auth[%d]: 0x%x\n", i, le32_to_cpu(pace->sid.sub_auth[i])); } /* BB add length check to make sure that we do not have huge num auths and therefore go off the end */ } return; } #endif static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, struct cifs_fattr *fattr, bool mode_from_special_sid) { int i; int num_aces = 0; int acl_size; char *acl_base; struct cifs_ace **ppace; /* BB need to add parm so we can store the SID BB */ if (!pdacl) { /* no DACL in the security descriptor, set all the permissions for user/group/other */ fattr->cf_mode |= 0777; return; } /* validate that we do not go past end of acl */ if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { cifs_dbg(VFS, "ACL too small to parse DACL\n"); return; } cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), le32_to_cpu(pdacl->num_aces)); /* reset rwx permissions for user/group/other. Also, if num_aces is 0 i.e. 
DACL has no ACEs, user/group/other have no permissions */ fattr->cf_mode &= ~(0777); acl_base = (char *)pdacl; acl_size = sizeof(struct cifs_acl); num_aces = le32_to_cpu(pdacl->num_aces); if (num_aces > 0) { umode_t denied_mode = 0; if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *)) return; ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *), GFP_KERNEL); if (!ppace) return; for (i = 0; i < num_aces; ++i) { ppace[i] = (struct cifs_ace *) (acl_base + acl_size); #ifdef CONFIG_CIFS_DEBUG2 dump_ace(ppace[i], end_of_acl); #endif if (mode_from_special_sid && (compare_sids(&(ppace[i]->sid), &sid_unix_NFS_mode) == 0)) { /* * Full permissions are: * 07777 = S_ISUID | S_ISGID | S_ISVTX | * S_IRWXU | S_IRWXG | S_IRWXO */ fattr->cf_mode &= ~07777; fattr->cf_mode |= le32_to_cpu(ppace[i]->sid.sub_auth[2]); break; } else { if (compare_sids(&(ppace[i]->sid), pownersid) == 0) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_OWNER_MASK); } else if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_GROUP_MASK); } else if ((compare_sids(&(ppace[i]->sid), &sid_everyone) == 0) || (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_EVERYONE_MASK); } } /* memcpy((void *)(&(cifscred->aces[i])), (void *)ppace[i], sizeof(struct cifs_ace)); */ acl_base = (char *)ppace[i]; acl_size = le16_to_cpu(ppace[i]->size); } kfree(ppace); } return; } unsigned int setup_authusers_ACE(struct cifs_ace *pntace) { int i; unsigned int ace_size = 20; pntace->type = ACCESS_ALLOWED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = cpu_to_le32(GENERIC_ALL); pntace->sid.num_subauth = 1; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_authusers.authority[i]; pntace->sid.sub_auth[0] = sid_authusers.sub_auth[0]; /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } /* * Fill in the special SID based on the mode. 
See * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode) { int i; unsigned int ace_size = 28; pntace->type = ACCESS_DENIED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = 0; pntace->sid.num_subauth = 3; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_unix_NFS_mode.authority[i]; pntace->sid.sub_auth[0] = sid_unix_NFS_mode.sub_auth[0]; pntace->sid.sub_auth[1] = sid_unix_NFS_mode.sub_auth[1]; pntace->sid.sub_auth[2] = cpu_to_le32(nmode & 07777); /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace) { int i; unsigned int ace_size = 28; pntace->type = ACCESS_ALLOWED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = cpu_to_le32(GENERIC_ALL); pntace->sid.num_subauth = 3; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_unix_NFS_users.authority[i]; pntace->sid.sub_auth[0] = sid_unix_NFS_users.sub_auth[0]; pntace->sid.sub_auth[1] = sid_unix_NFS_users.sub_auth[1]; pntace->sid.sub_auth[2] = cpu_to_le32(current_fsgid().val); /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } static void populate_new_aces(char *nacl_base, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, __u64 *pnmode, u32 *pnum_aces, u16 *pnsize, bool modefromsid) { __u64 nmode; u32 num_aces = 0; u16 nsize = 0; __u64 user_mode; __u64 group_mode; __u64 other_mode; __u64 deny_user_mode = 0; __u64 deny_group_mode = 0; bool sticky_set = false; struct cifs_ace *pnntace = NULL; nmode = *pnmode; num_aces = *pnum_aces; nsize = *pnsize; if (modefromsid) { pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += setup_special_mode_ACE(pnntace, nmode); num_aces++; pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += setup_authusers_ACE(pnntace); num_aces++; goto set_size; } /* * We'll try to keep the mode as requested by the user. * But in cases where we cannot meaningfully convert that * into ACL, return back the updated mode, so that it is * updated in the inode. */ if (!memcmp(pownersid, pgrpsid, sizeof(struct cifs_sid))) { /* * Case when owner and group SIDs are the same. * Set the more restrictive of the two modes. */ user_mode = nmode & (nmode << 3) & 0700; group_mode = nmode & (nmode >> 3) & 0070; } else { user_mode = nmode & 0700; group_mode = nmode & 0070; } other_mode = nmode & 0007; /* We need DENY ACE when the perm is more restrictive than the next sets. */ deny_user_mode = ~(user_mode) & ((group_mode << 3) | (other_mode << 6)) & 0700; deny_group_mode = ~(group_mode) & (other_mode << 3) & 0070; *pnmode = user_mode | group_mode | other_mode | (nmode & ~0777); /* This tells if we should allow delete child for group and everyone. */ if (nmode & 01000) sticky_set = true; if (deny_user_mode) { pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode, 0700, ACCESS_DENIED, false); num_aces++; } /* Group DENY ACE does not conflict with owner ALLOW ACE. 
Keep in preferred order*/ if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) { pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false); num_aces++; } pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pownersid, user_mode, 0700, ACCESS_ALLOWED, true); num_aces++; /* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */ if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) { pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false); num_aces++; } pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode, 0070, ACCESS_ALLOWED, !sticky_set); num_aces++; pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode, 0007, ACCESS_ALLOWED, !sticky_set); num_aces++; set_size: *pnum_aces = num_aces; *pnsize = nsize; } static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid) { int i; u16 size = 0; struct cifs_ace *pntace = NULL; char *acl_base = NULL; u32 src_num_aces = 0; u16 nsize = 0; struct cifs_ace *pnntace = NULL; char *nacl_base = NULL; u16 ace_size = 0; acl_base = (char *)pdacl; size = sizeof(struct cifs_acl); src_num_aces = le32_to_cpu(pdacl->num_aces); nacl_base = (char *)pndacl; nsize = sizeof(struct cifs_acl); /* Go through all the ACEs */ for (i = 0; i < src_num_aces; ++i) { pntace = (struct cifs_ace *) (acl_base + size); pnntace = (struct cifs_ace *) (nacl_base + nsize); if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0) ace_size = cifs_copy_ace(pnntace, pntace, pnownersid); else if (pngrpsid && compare_sids(&pntace->sid, pgrpsid) == 0) ace_size = cifs_copy_ace(pnntace, pntace, pngrpsid); else ace_size = cifs_copy_ace(pnntace, pntace, NULL); size += le16_to_cpu(pntace->size); nsize += ace_size; } return nsize; } static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, __u64 *pnmode, bool mode_from_sid) { int i; u16 size = 0; struct cifs_ace *pntace = NULL; char *acl_base = NULL; u32 src_num_aces = 0; u16 nsize = 0; struct cifs_ace *pnntace = NULL; char *nacl_base = NULL; u32 num_aces = 0; bool new_aces_set = false; /* Assuming that pndacl and pnmode are never NULL */ nacl_base = (char *)pndacl; nsize = sizeof(struct cifs_acl); /* If pdacl is NULL, we don't have a src. Simply populate new ACL. */ if (!pdacl) { populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid); goto finalize_dacl; } acl_base = (char *)pdacl; size = sizeof(struct cifs_acl); src_num_aces = le32_to_cpu(pdacl->num_aces); /* Retain old ACEs which we can retain */ for (i = 0; i < src_num_aces; ++i) { pntace = (struct cifs_ace *) (acl_base + size); if (!new_aces_set && (pntace->flags & INHERITED_ACE)) { /* Place the new ACEs in between existing explicit and inherited */ populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid); new_aces_set = true; } /* If it's any one of the ACE we're replacing, skip! 
*/ if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || (compare_sids(&pntace->sid, pownersid) == 0) || (compare_sids(&pntace->sid, pgrpsid) == 0) || (compare_sids(&pntace->sid, &sid_everyone) == 0) || (compare_sids(&pntace->sid, &sid_authusers) == 0))) { goto next_ace; } /* update the pointer to the next ACE to populate*/ pnntace = (struct cifs_ace *) (nacl_base + nsize); nsize += cifs_copy_ace(pnntace, pntace, NULL); num_aces++; next_ace: size += le16_to_cpu(pntace->size); } /* If inherited ACEs are not present, place the new ones at the tail */ if (!new_aces_set) { populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid); new_aces_set = true; } finalize_dacl: pndacl->num_aces = cpu_to_le32(num_aces); pndacl->size = cpu_to_le16(nsize); return 0; } static int parse_sid(struct cifs_sid *psid, char *end_of_acl) { /* BB need to add parm so we can store the SID BB */ /* validate that we do not go past end of ACL - sid must be at least 8 bytes long (assuming no sub-auths - e.g. the null SID) */ if (end_of_acl < (char *)psid + 8) { cifs_dbg(VFS, "ACL too small to parse SID %p\n", psid); return -EINVAL; } #ifdef CONFIG_CIFS_DEBUG2 if (psid->num_subauth) { int i; cifs_dbg(FYI, "SID revision %d num_auth %d\n", psid->revision, psid->num_subauth); for (i = 0; i < psid->num_subauth; i++) { cifs_dbg(FYI, "SID sub_auth[%d]: 0x%x\n", i, le32_to_cpu(psid->sub_auth[i])); } /* BB add length check to make sure that we do not have huge num auths and therefore go off the end */ cifs_dbg(FYI, "RID 0x%x\n", le32_to_cpu(psid->sub_auth[psid->num_subauth-1])); } #endif return 0; } /* Convert CIFS ACL to POSIX form */ static int parse_sec_desc(struct cifs_sb_info *cifs_sb, struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr, bool get_mode_from_special_sid) { int rc = 0; struct cifs_sid *owner_sid_ptr, *group_sid_ptr; struct cifs_acl *dacl_ptr; /* no need for SACL ptr */ char *end_of_acl = ((char *)pntsd) + acl_len; __u32 dacloffset; if (pntsd == NULL) return -EIO; owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); group_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); dacloffset = le32_to_cpu(pntsd->dacloffset); dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n", pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), le32_to_cpu(pntsd->gsidoffset), le32_to_cpu(pntsd->sacloffset), dacloffset); /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ rc = parse_sid(owner_sid_ptr, end_of_acl); if (rc) { cifs_dbg(FYI, "%s: Error %d parsing Owner SID\n", __func__, rc); return rc; } rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER); if (rc) { cifs_dbg(FYI, "%s: Error %d mapping Owner SID to uid\n", __func__, rc); return rc; } rc = parse_sid(group_sid_ptr, end_of_acl); if (rc) { cifs_dbg(FYI, "%s: Error %d parsing Group SID\n", __func__, rc); return rc; } rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP); if (rc) { cifs_dbg(FYI, "%s: Error %d mapping Group SID to gid\n", __func__, rc); return rc; } if (dacloffset) parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, group_sid_ptr, fattr, get_mode_from_special_sid); else cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? 
*/ return rc; } /* Convert permission bits from mode to equivalent CIFS ACL */ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, __u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid, bool mode_from_sid, bool id_from_sid, int *aclflag) { int rc = 0; __u32 dacloffset; __u32 ndacloffset; __u32 sidsoffset; struct cifs_sid *owner_sid_ptr, *group_sid_ptr; struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL; struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */ struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */ char *end_of_acl = ((char *)pntsd) + secdesclen; u16 size = 0; dacloffset = le32_to_cpu(pntsd->dacloffset); if (dacloffset) { dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) { cifs_dbg(VFS, "Server returned illegal ACL size\n"); return -EINVAL; } } owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); group_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */ ndacloffset = sizeof(struct cifs_ntsd); ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset); ndacl_ptr->revision = dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->size = cpu_to_le16(0); ndacl_ptr->num_aces = cpu_to_le32(0); rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, pnmode, mode_from_sid); sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size); /* copy the non-dacl portion of secdesc */ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset, NULL, NULL); *aclflag |= CIFS_ACL_DACL; } else { ndacloffset = sizeof(struct cifs_ntsd); ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset); ndacl_ptr->revision = dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->num_aces = dacl_ptr ? 
dacl_ptr->num_aces : 0; if (uid_valid(uid)) { /* chown */ uid_t id; nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid), GFP_KERNEL); if (!nowner_sid_ptr) { rc = -ENOMEM; goto chown_chgrp_exit; } id = from_kuid(&init_user_ns, uid); if (id_from_sid) { struct owner_sid *osid = (struct owner_sid *)nowner_sid_ptr; /* Populate the user ownership fields S-1-5-88-1 */ osid->Revision = 1; osid->NumAuth = 3; osid->Authority[5] = 5; osid->SubAuthorities[0] = cpu_to_le32(88); osid->SubAuthorities[1] = cpu_to_le32(1); osid->SubAuthorities[2] = cpu_to_le32(id); } else { /* lookup sid with upcall */ rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr); if (rc) { cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n", __func__, rc, id); goto chown_chgrp_exit; } } *aclflag |= CIFS_ACL_OWNER; } if (gid_valid(gid)) { /* chgrp */ gid_t id; ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid), GFP_KERNEL); if (!ngroup_sid_ptr) { rc = -ENOMEM; goto chown_chgrp_exit; } id = from_kgid(&init_user_ns, gid); if (id_from_sid) { struct owner_sid *gsid = (struct owner_sid *)ngroup_sid_ptr; /* Populate the group ownership fields S-1-5-88-2 */ gsid->Revision = 1; gsid->NumAuth = 3; gsid->Authority[5] = 5; gsid->SubAuthorities[0] = cpu_to_le32(88); gsid->SubAuthorities[1] = cpu_to_le32(2); gsid->SubAuthorities[2] = cpu_to_le32(id); } else { /* lookup sid with upcall */ rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr); if (rc) { cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n", __func__, rc, id); goto chown_chgrp_exit; } } *aclflag |= CIFS_ACL_GROUP; } if (dacloffset) { /* Replace ACEs for old owner with new one */ size = replace_sids_and_copy_aces(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, nowner_sid_ptr, ngroup_sid_ptr); ndacl_ptr->size = cpu_to_le16(size); } sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size); /* copy the non-dacl portion of secdesc */ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset, nowner_sid_ptr, ngroup_sid_ptr); chown_chgrp_exit: /* errors could jump here. 
So make sure we return soon after this */ kfree(nowner_sid_ptr); kfree(ngroup_sid_ptr); } return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, u32 __maybe_unused unused) { struct cifs_ntsd *pntsd = NULL; unsigned int xid; int rc; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return ERR_CAST(tlink); xid = get_xid(); rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, pacllen); free_xid(xid); cifs_put_tlink(tlink); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, u32 *pacllen) { struct cifs_ntsd *pntsd = NULL; int oplock = 0; unsigned int xid; int rc; struct cifs_tcon *tcon; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; if (IS_ERR(tlink)) return ERR_CAST(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = READ_CONTROL, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (!rc) { rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen); CIFSSMBClose(xid, tcon, fid.netfid); } cifs_put_tlink(tlink); free_xid(xid); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } /* Retrieve an ACL from the server */ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, struct inode *inode, const char *path, u32 *pacllen, u32 info) { struct cifs_ntsd *pntsd = NULL; struct cifsFileInfo *open_file = NULL; if (inode) open_file = find_readable_file(CIFS_I(inode), true); if (!open_file) return get_cifs_acl_by_path(cifs_sb, path, pacllen); pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info); cifsFileInfo_put(open_file); return pntsd; } /* Set an ACL on the server */ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, struct inode *inode, const char *path, int aclflag) { int oplock = 0; unsigned int xid; int rc, access_flags; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP) access_flags = WRITE_OWNER; else access_flags = WRITE_DAC; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = access_flags, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc) { cifs_dbg(VFS, "Unable to open file to set ACL\n"); goto out; } rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag); cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc); CIFSSMBClose(xid, tcon, fid.netfid); out: free_xid(xid); cifs_put_tlink(tlink); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, struct inode *inode, bool mode_from_special_sid, const char *path, const struct cifs_fid *pfid) { struct cifs_ntsd *pntsd = NULL; u32 acllen = 0; int rc 
= 0; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct smb_version_operations *ops; const u32 info = 0; cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); if (IS_ERR(tlink)) return PTR_ERR(tlink); ops = tlink_tcon(tlink)->ses->server->ops; if (pfid && (ops->get_acl_by_fid)) pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen, info); else if (ops->get_acl) pntsd = ops->get_acl(cifs_sb, inode, path, &acllen, info); else { cifs_put_tlink(tlink); return -EOPNOTSUPP; } /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ if (IS_ERR(pntsd)) { rc = PTR_ERR(pntsd); cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); } else if (mode_from_special_sid) { rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true); kfree(pntsd); } else { /* get approximated mode from ACL */ rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false); kfree(pntsd); if (rc) cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); } cifs_put_tlink(tlink); return rc; } /* Convert mode bits to an ACL so we can update the ACL on the server */ int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode, kuid_t uid, kgid_t gid) { int rc = 0; int aclflag = CIFS_ACL_DACL; /* default flag to set */ __u32 secdesclen = 0; __u32 nsecdesclen = 0; __u32 dacloffset = 0; struct cifs_acl *dacl_ptr = NULL; struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct smb_version_operations *ops; bool mode_from_sid, id_from_sid; const u32 info = 0; if (IS_ERR(tlink)) return PTR_ERR(tlink); ops = tlink_tcon(tlink)->ses->server->ops; cifs_dbg(NOISY, "set ACL from mode for %s\n", path); /* Get the security descriptor */ if (ops->get_acl == NULL) { cifs_put_tlink(tlink); return -EOPNOTSUPP; } pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen, info); if (IS_ERR(pntsd)) { rc = PTR_ERR(pntsd); cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); cifs_put_tlink(tlink); return rc; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) mode_from_sid = true; else mode_from_sid = false; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) id_from_sid = true; else id_from_sid = false; /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */ nsecdesclen = secdesclen; if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */ if (mode_from_sid) nsecdesclen += 2 * sizeof(struct cifs_ace); else /* cifsacl */ nsecdesclen += 5 * sizeof(struct cifs_ace); } else { /* chown */ /* When ownership changes, changes new owner sid length could be different */ nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2); dacloffset = le32_to_cpu(pntsd->dacloffset); if (dacloffset) { dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); if (mode_from_sid) nsecdesclen += le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace); else /* cifsacl */ nsecdesclen += le16_to_cpu(dacl_ptr->size); } } /* * Add three ACEs for owner, group, everyone getting rid of other ACEs * as chmod disables ACEs and set the security descriptor. 
Allocate * memory for the smb header, set security descriptor request security * descriptor parameters, and security descriptor itself */ nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN); pnntsd = kmalloc(nsecdesclen, GFP_KERNEL); if (!pnntsd) { kfree(pntsd); cifs_put_tlink(tlink); return -ENOMEM; } rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid, mode_from_sid, id_from_sid, &aclflag); cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); if (ops->set_acl == NULL) rc = -EOPNOTSUPP; if (!rc) { /* Set the security descriptor */ rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag); cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); } cifs_put_tlink(tlink); kfree(pnntsd); kfree(pntsd); return rc; } struct posix_acl *cifs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, int type) { #if defined(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) && defined(CONFIG_CIFS_POSIX) struct posix_acl *acl = NULL; ssize_t rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return ERR_CAST(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { acl = ERR_CAST(full_path); goto out; } /* return alt name if available as pseudo attr */ switch (type) { case ACL_TYPE_ACCESS: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_get_acl(xid, pTcon, full_path, &acl, ACL_TYPE_ACCESS, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; case ACL_TYPE_DEFAULT: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_get_acl(xid, pTcon, full_path, &acl, ACL_TYPE_DEFAULT, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; } if (rc < 0) { if (rc == -EINVAL) acl = ERR_PTR(-EOPNOTSUPP); else acl = ERR_PTR(rc); } out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return acl; #else return ERR_PTR(-EOPNOTSUPP); #endif } int cifs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { #if defined(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) && defined(CONFIG_CIFS_POSIX) int rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } if (!acl) goto out; /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ if (posix_acl_xattr_size(acl->a_count) > CIFSMaxBufSize) { cifs_dbg(FYI, "size of EA value too large\n"); rc = -EOPNOTSUPP; goto out; } switch (type) { case ACL_TYPE_ACCESS: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_set_acl(xid, pTcon, full_path, acl, ACL_TYPE_ACCESS, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; case ACL_TYPE_DEFAULT: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_set_acl(xid, pTcon, full_path, acl, ACL_TYPE_DEFAULT, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; } out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; #else return -EOPNOTSUPP; #endif }
linux-master
fs/smb/client/cifsacl.c
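Editor's note: sid_to_key_str() above serializes a SID by folding the six authority bytes into one 48-bit big-endian value, printing it in decimal when it fits in 32 bits and in hex otherwise (per MS-DTYP), followed by the little-endian subauthorities. The following is a small standalone sketch of that formatting rule; struct sketch_sid is a simplified stand-in for struct cifs_sid and keeps subauthorities in host byte order.

#include <stdio.h>
#include <stdint.h>

struct sketch_sid {
	uint8_t revision;
	uint8_t num_subauth;
	uint8_t authority[6];	/* one 48-bit big-endian number */
	uint32_t sub_auth[15];	/* host byte order in this sketch */
};

static void print_sid(const struct sketch_sid *sid)
{
	unsigned long long auth = 0;
	int i;

	for (i = 0; i < 6; i++)
		auth = (auth << 8) | sid->authority[i];

	/* MS-DTYP: decimal if the authority fits in 32 bits, hex otherwise */
	if (auth <= 0xffffffffULL)
		printf("S-%u-%llu", (unsigned int)sid->revision, auth);
	else
		printf("S-%u-0x%llx", (unsigned int)sid->revision, auth);

	for (i = 0; i < sid->num_subauth; i++)
		printf("-%u", (unsigned int)sid->sub_auth[i]);
	putchar('\n');
}

int main(void)
{
	/* S-1-5-88-3-<mode>: the special NFS SID that carries a Unix mode,
	 * as built by setup_special_mode_ACE() above */
	struct sketch_sid mode_sid = {
		.revision = 1, .num_subauth = 3,
		.authority = {0, 0, 0, 0, 0, 5},
		.sub_auth = {88, 3, 0644},	/* prints S-1-5-88-3-420 */
	};

	print_sid(&mode_sid);
	return 0;
}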
// SPDX-License-Identifier: LGPL-2.1 /* * SPNEGO upcall management for CIFS * * Copyright (c) 2007 Red Hat, Inc. * Author(s): Jeff Layton ([email protected]) * */ #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <keys/user-type.h> #include <linux/key-type.h> #include <linux/keyctl.h> #include <linux/inet.h> #include "cifsglob.h" #include "cifs_spnego.h" #include "cifs_debug.h" #include "cifsproto.h" static const struct cred *spnego_cred; /* create a new cifs key */ static int cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { char *payload; int ret; ret = -ENOMEM; payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); if (!payload) goto error; /* attach the data */ key->payload.data[0] = payload; ret = 0; error: return ret; } static void cifs_spnego_key_destroy(struct key *key) { kfree(key->payload.data[0]); } /* * keytype for CIFS spnego keys */ struct key_type cifs_spnego_key_type = { .name = "cifs.spnego", .instantiate = cifs_spnego_key_instantiate, .destroy = cifs_spnego_key_destroy, .describe = user_describe, }; /* length of longest version string e.g. strlen("ver=0xFF") */ #define MAX_VER_STR_LEN 8 /* length of longest security mechanism name, eg in future could have * strlen(";sec=ntlmsspi") */ #define MAX_MECH_STR_LEN 13 /* strlen of "host=" */ #define HOST_KEY_LEN 5 /* strlen of ";ip4=" or ";ip6=" */ #define IP_KEY_LEN 5 /* strlen of ";uid=0x" */ #define UID_KEY_LEN 7 /* strlen of ";creduid=0x" */ #define CREDUID_KEY_LEN 11 /* strlen of ";user=" */ #define USER_KEY_LEN 6 /* strlen of ";pid=0x" */ #define PID_KEY_LEN 7 /* get a key struct with a SPNEGO security blob, suitable for session setup */ struct key * cifs_get_spnego_key(struct cifs_ses *sesInfo, struct TCP_Server_Info *server) { struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; char *description, *dp; size_t desc_len; struct key *spnego_key; const char *hostname = server->hostname; const struct cred *saved_cred; /* length of fields (with semicolons): ver=0xyz ip4=ipaddress host=hostname sec=mechanism uid=0xFF user=username */ desc_len = MAX_VER_STR_LEN + HOST_KEY_LEN + strlen(hostname) + IP_KEY_LEN + INET6_ADDRSTRLEN + MAX_MECH_STR_LEN + UID_KEY_LEN + (sizeof(uid_t) * 2) + CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; if (sesInfo->user_name) desc_len += USER_KEY_LEN + strlen(sesInfo->user_name); spnego_key = ERR_PTR(-ENOMEM); description = kzalloc(desc_len, GFP_KERNEL); if (description == NULL) goto out; dp = description; /* start with version and hostname portion of UNC string */ spnego_key = ERR_PTR(-EINVAL); sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, hostname); dp = description + strlen(description); /* add the server address */ if (server->dstaddr.ss_family == AF_INET) sprintf(dp, "ip4=%pI4", &sa->sin_addr); else if (server->dstaddr.ss_family == AF_INET6) sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); else goto out; dp = description + strlen(description); /* for now, only sec=krb5 and sec=mskrb5 are valid */ if (server->sec_kerberos) sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); else { cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); sprintf(dp, ";sec=krb5"); } dp = description + strlen(description); sprintf(dp, ";uid=0x%x", from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); dp = description + strlen(description); sprintf(dp, ";creduid=0x%x", 
from_kuid_munged(&init_user_ns, sesInfo->cred_uid)); if (sesInfo->user_name) { dp = description + strlen(description); sprintf(dp, ";user=%s", sesInfo->user_name); } dp = description + strlen(description); sprintf(dp, ";pid=0x%x", current->pid); cifs_dbg(FYI, "key description = %s\n", description); saved_cred = override_creds(spnego_cred); spnego_key = request_key(&cifs_spnego_key_type, description, ""); revert_creds(saved_cred); #ifdef CONFIG_CIFS_DEBUG2 if (cifsFYI && !IS_ERR(spnego_key)) { struct cifs_spnego_msg *msg = spnego_key->payload.data[0]; cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U, msg->secblob_len + msg->sesskey_len)); } #endif /* CONFIG_CIFS_DEBUG2 */ out: kfree(description); return spnego_key; } int init_cifs_spnego(void) { struct cred *cred; struct key *keyring; int ret; cifs_dbg(FYI, "Registering the %s key type\n", cifs_spnego_key_type.name); /* * Create an override credential set with special thread keyring for * spnego upcalls. */ cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; keyring = keyring_alloc(".cifs_spnego", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&cifs_spnego_key_type); if (ret < 0) goto failed_put_key; /* * instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; spnego_cred = cred; cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } void exit_cifs_spnego(void) { key_revoke(spnego_cred->thread_keyring); unregister_key_type(&cifs_spnego_key_type); put_cred(spnego_cred); cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name); }
linux-master
fs/smb/client/cifs_spnego.c
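Editor's note: cifs_get_spnego_key() above assembles a semicolon-separated "key=value" description that the userspace helper (typically cifs.upcall) parses before handing back a SPNEGO blob. A rough standalone sketch of that string assembly follows; the version number, hostname, address, ids and user name are invented sample values, not values the kernel would necessarily produce.

#include <stdio.h>

int main(void)
{
	char desc[256];
	int n = 0;

	/* Field order mirrors the kernel code: ver, host, ip, sec, uid,
	 * creduid, user, pid.  All values here are made-up samples. */
	n += snprintf(desc + n, sizeof(desc) - n, "ver=0x%x", 2);
	n += snprintf(desc + n, sizeof(desc) - n, ";host=%s", "fileserver.example.com");
	n += snprintf(desc + n, sizeof(desc) - n, ";ip4=%s", "192.0.2.7");
	n += snprintf(desc + n, sizeof(desc) - n, ";sec=%s", "krb5");
	n += snprintf(desc + n, sizeof(desc) - n, ";uid=0x%x", 1000);
	n += snprintf(desc + n, sizeof(desc) - n, ";creduid=0x%x", 1000);
	n += snprintf(desc + n, sizeof(desc) - n, ";user=%s", "alice");
	n += snprintf(desc + n, sizeof(desc) - n, ";pid=0x%x", 4242);

	/* -> ver=0x2;host=fileserver.example.com;ip4=192.0.2.7;sec=krb5;
	 *    uid=0x3e8;creduid=0x3e8;user=alice;pid=0x1092 */
	printf("%s\n", desc);
	return 0;
}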
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2022 Paulo Alcantara <[email protected]> */ #include "cifsproto.h" #include "cifs_debug.h" #include "dns_resolve.h" #include "fs_context.h" #include "dfs.h" /** * dfs_parse_target_referral - set fs context for dfs target referral * * @full_path: full path in UNC format. * @ref: dfs referral pointer. * @ctx: smb3 fs context pointer. * * Return zero if dfs referral was parsed correctly, otherwise non-zero. */ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, struct smb3_fs_context *ctx) { int rc; const char *prepath = NULL; char *path; if (!full_path || !*full_path || !ref || !ctx) return -EINVAL; if (WARN_ON_ONCE(!ref->node_name || ref->path_consumed < 0)) return -EINVAL; if (strlen(full_path) - ref->path_consumed) { prepath = full_path + ref->path_consumed; /* skip initial delimiter */ if (*prepath == '/' || *prepath == '\\') prepath++; } path = cifs_build_devname(ref->node_name, prepath); if (IS_ERR(path)) return PTR_ERR(path); rc = smb3_parse_devname(path, ctx); if (rc) goto out; rc = dns_resolve_server_name_to_ip(path, (struct sockaddr *)&ctx->dstaddr, NULL); out: kfree(path); return rc; } static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; int rc; ctx->leaf_fullpath = (char *)full_path; rc = cifs_mount_get_session(mnt_ctx); ctx->leaf_fullpath = NULL; return rc; } /* * Track individual DFS referral servers used by new DFS mount. * * On success, their lifetime will be shared by final tcon (dfs_ses_list). * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount(). */ static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct dfs_root_ses *root_ses; struct cifs_ses *ses = mnt_ctx->ses; if (ses) { root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL); if (!root_ses) return -ENOMEM; INIT_LIST_HEAD(&root_ses->list); spin_lock(&cifs_tcp_ses_lock); cifs_smb_ses_inc_refcount(ses); spin_unlock(&cifs_tcp_ses_lock); root_ses->ses = ses; list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list); } /* Select new DFS referral server so that new referrals go through it */ ctx->dfs_root_ses = ses; return 0; } static inline int parse_dfs_target(struct smb3_fs_context *ctx, struct dfs_ref_walk *rw, struct dfs_info3_param *tgt) { int rc; const char *fpath = ref_walk_fpath(rw) + 1; rc = ref_walk_get_tgt(rw, tgt); if (!rc) rc = dfs_parse_target_referral(fpath, tgt, ctx); return rc; } static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx, struct dfs_info3_param *tgt, struct dfs_ref_walk *rw) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; char *ref_path, *full_path; int rc; full_path = smb3_fs_context_fullpath(ctx, CIFS_DIR_SEP(cifs_sb)); if (IS_ERR(full_path)) return PTR_ERR(full_path); if (!tgt || (tgt->server_type == DFS_TYPE_LINK && DFS_INTERLINK(tgt->flags))) ref_path = dfs_get_path(cifs_sb, ctx->UNC); else ref_path = dfs_get_path(cifs_sb, full_path); if (IS_ERR(ref_path)) { rc = PTR_ERR(ref_path); kfree(full_path); return rc; } ref_walk_path(rw) = ref_path; ref_walk_fpath(rw) = full_path; return 0; } static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, struct dfs_ref_walk *rw) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct dfs_info3_param tgt = {}; bool is_refsrv; int rc = -ENOENT; again: do { if (ref_walk_empty(rw)) { rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1, NULL, ref_walk_tl(rw)); if 
(rc) { rc = cifs_mount_get_tcon(mnt_ctx); if (!rc) rc = cifs_is_path_remote(mnt_ctx); continue; } if (!ref_walk_num_tgts(rw)) { rc = -ENOENT; continue; } } while (ref_walk_next_tgt(rw)) { rc = parse_dfs_target(ctx, rw, &tgt); if (rc) continue; cifs_mount_put_conns(mnt_ctx); rc = get_session(mnt_ctx, ref_walk_path(rw)); if (rc) continue; is_refsrv = tgt.server_type == DFS_TYPE_ROOT || DFS_INTERLINK(tgt.flags); ref_walk_set_tgt_hint(rw); if (tgt.flags & DFSREF_STORAGE_SERVER) { rc = cifs_mount_get_tcon(mnt_ctx); if (!rc) rc = cifs_is_path_remote(mnt_ctx); if (!rc) break; if (rc != -EREMOTE) continue; } if (is_refsrv) { rc = add_root_smb_session(mnt_ctx); if (rc) goto out; } rc = ref_walk_advance(rw); if (!rc) { rc = set_ref_paths(mnt_ctx, &tgt, rw); if (!rc) { rc = -EREMOTE; goto again; } } if (rc != -ELOOP) goto out; } } while (rc && ref_walk_descend(rw)); out: free_dfs_info_param(&tgt); return rc; } static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx) { struct dfs_ref_walk *rw; int rc; rw = ref_walk_alloc(); if (IS_ERR(rw)) return PTR_ERR(rw); ref_walk_init(rw); rc = set_ref_paths(mnt_ctx, NULL, rw); if (!rc) rc = __dfs_referral_walk(mnt_ctx, rw); ref_walk_free(rw); return rc; } static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) { struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct cifs_tcon *tcon; char *origin_fullpath; int rc; origin_fullpath = dfs_get_path(cifs_sb, ctx->source); if (IS_ERR(origin_fullpath)) return PTR_ERR(origin_fullpath); rc = dfs_referral_walk(mnt_ctx); if (rc) goto out; tcon = mnt_ctx->tcon; spin_lock(&tcon->tc_lock); if (!tcon->origin_fullpath) { tcon->origin_fullpath = origin_fullpath; origin_fullpath = NULL; } spin_unlock(&tcon->tc_lock); if (list_empty(&tcon->dfs_ses_list)) { list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list); queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, dfs_cache_get_ttl() * HZ); } else { dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list); } out: kfree(origin_fullpath); return rc; } /* Resolve UNC hostname in @ctx->source and set ip addr in @ctx->dstaddr */ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx) { struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; int rc; rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL); if (!rc) cifs_set_port(addr, ctx->port); return rc; } int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; bool nodfs = ctx->nodfs; int rc; rc = update_fs_context_dstaddr(ctx); if (rc) return rc; *isdfs = false; rc = get_session(mnt_ctx, NULL); if (rc) return rc; ctx->dfs_root_ses = mnt_ctx->ses; /* * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally * try to get a DFS referral (even cached) to determine whether it is a DFS mount. * * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem * to respond with PATH_NOT_COVERED to requests that include the prefix. 
*/ if (!nodfs) { rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL); if (rc) { cifs_dbg(FYI, "%s: no dfs referral for %s: %d\n", __func__, ctx->UNC + 1, rc); cifs_dbg(FYI, "%s: assuming non-dfs mount...\n", __func__); nodfs = true; } } if (nodfs) { rc = cifs_mount_get_tcon(mnt_ctx); if (!rc) rc = cifs_is_path_remote(mnt_ctx); return rc; } *isdfs = true; add_root_smb_session(mnt_ctx); return __dfs_mount_share(mnt_ctx); } /* Update dfs referral path of superblock */ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb, const char *target) { int rc = 0; size_t len = strlen(target); char *refpath, *npath; if (unlikely(len < 2 || *target != '\\')) return -EINVAL; if (target[1] == '\\') { len += 1; refpath = kmalloc(len, GFP_KERNEL); if (!refpath) return -ENOMEM; scnprintf(refpath, len, "%s", target); } else { len += sizeof("\\"); refpath = kmalloc(len, GFP_KERNEL); if (!refpath) return -ENOMEM; scnprintf(refpath, len, "\\%s", target); } npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb)); kfree(refpath); if (IS_ERR(npath)) { rc = PTR_ERR(npath); } else { mutex_lock(&server->refpath_lock); spin_lock(&server->srv_lock); kfree(server->leaf_fullpath); server->leaf_fullpath = npath; spin_unlock(&server->srv_lock); mutex_unlock(&server->refpath_lock); } return rc; } static int target_share_matches_server(struct TCP_Server_Info *server, char *share, bool *target_match) { int rc = 0; const char *dfs_host; size_t dfs_host_len; *target_match = true; extract_unc_hostname(share, &dfs_host, &dfs_host_len); /* Check if hostnames or addresses match */ cifs_server_lock(server); if (dfs_host_len != strlen(server->hostname) || strncasecmp(dfs_host, server->hostname, dfs_host_len)) { cifs_dbg(FYI, "%s: %.*s doesn't match %s\n", __func__, (int)dfs_host_len, dfs_host, server->hostname); rc = match_target_ip(server, dfs_host, dfs_host_len, target_match); if (rc) cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc); } cifs_server_unlock(server); return rc; } static void __tree_connect_ipc(const unsigned int xid, char *tree, struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) { struct TCP_Server_Info *server = ses->server; struct cifs_tcon *tcon = ses->tcon_ipc; int rc; spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); if (cifs_chan_needs_reconnect(ses, server) || ses->ses_status != SES_GOOD) { spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n", __func__); return; } spin_unlock(&ses->chan_lock); spin_unlock(&ses->ses_lock); cifs_server_lock(server); scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); cifs_server_unlock(server); rc = server->ops->tree_connect(xid, ses, tree, tcon, cifs_sb->local_nls); cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc); spin_lock(&tcon->tc_lock); if (rc) { tcon->status = TID_NEED_TCON; } else { tcon->status = TID_GOOD; tcon->need_reconnect = false; } spin_unlock(&tcon->tc_lock); } static void tree_connect_ipc(const unsigned int xid, char *tree, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon) { struct cifs_ses *ses = tcon->ses; __tree_connect_ipc(xid, tree, cifs_sb, ses); __tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses)); } static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, char *tree, bool islink, struct dfs_cache_tgt_list *tl) { int rc; struct TCP_Server_Info *server = tcon->ses->server; 
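/* * Each referral target below is tried in turn: a target whose hostname or resolved IP does not match this TCP session is skipped, a matching share target is tree-connected directly, and a matching nested DFS link is cached and retried by the caller (tree_connect_dfs_target(), bounded by MAX_NESTED_LINKS). */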
const struct smb_version_operations *ops = server->ops; struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses); char *share = NULL, *prefix = NULL; struct dfs_cache_tgt_iterator *tit; bool target_match; tit = dfs_cache_get_tgt_iterator(tl); if (!tit) { rc = -ENOENT; goto out; } /* Try to tree connect to all dfs targets */ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) { const char *target = dfs_cache_get_tgt_name(tit); DFS_CACHE_TGT_LIST(ntl); kfree(share); kfree(prefix); share = prefix = NULL; /* Check if share matches with tcp ses */ rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix); if (rc) { cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc); break; } rc = target_share_matches_server(server, share, &target_match); if (rc) break; if (!target_match) { rc = -EHOSTUNREACH; continue; } dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit); tree_connect_ipc(xid, tree, cifs_sb, tcon); scnprintf(tree, MAX_TREE_SIZE, "\\%s", share); if (!islink) { rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls); break; } /* * If no dfs referrals were returned from link target, then just do a TREE_CONNECT * to it. Otherwise, cache the dfs referral and then mark current tcp ses for * reconnect so either the demultiplex thread or the echo worker will reconnect to * newly resolved target. */ if (dfs_cache_find(xid, root_ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target, NULL, &ntl)) { rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls); if (rc) continue; rc = cifs_update_super_prepath(cifs_sb, prefix); } else { /* Target is another dfs share */ rc = update_server_fullpath(server, cifs_sb, target); dfs_cache_free_tgts(tl); if (!rc) { rc = -EREMOTE; list_replace_init(&ntl.tl_list, &tl->tl_list); } else dfs_cache_free_tgts(&ntl); } break; } out: kfree(share); kfree(prefix); return rc; } static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, char *tree, bool islink, struct dfs_cache_tgt_list *tl) { int rc; int num_links = 0; struct TCP_Server_Info *server = tcon->ses->server; char *old_fullpath = server->leaf_fullpath; do { rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl); if (!rc || rc != -EREMOTE) break; } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS); /* * If we couldn't tree connect to any targets from last referral path, then * retry it from newly resolved dfs referral. 
*/ if (rc && server->leaf_fullpath != old_fullpath) cifs_signal_cifsd_for_reconnect(server, true); dfs_cache_free_tgts(tl); return rc; } int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc) { int rc; struct TCP_Server_Info *server = tcon->ses->server; const struct smb_version_operations *ops = server->ops; DFS_CACHE_TGT_LIST(tl); struct cifs_sb_info *cifs_sb = NULL; struct super_block *sb = NULL; struct dfs_info3_param ref = {0}; char *tree; /* only send once per connect */ spin_lock(&tcon->tc_lock); if (tcon->status == TID_GOOD) { spin_unlock(&tcon->tc_lock); return 0; } if (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON) { spin_unlock(&tcon->tc_lock); return -EHOSTDOWN; } tcon->status = TID_IN_TCON; spin_unlock(&tcon->tc_lock); tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); if (!tree) { rc = -ENOMEM; goto out; } if (tcon->ipc) { cifs_server_lock(server); scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); cifs_server_unlock(server); rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); goto out; } sb = cifs_get_dfs_tcon_super(tcon); if (!IS_ERR(sb)) cifs_sb = CIFS_SB(sb); /* * Tree connect to last share in @tcon->tree_name when no dfs super or * cached dfs referral was found. */ if (!cifs_sb || !server->leaf_fullpath || dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) { rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb ? cifs_sb->local_nls : nlsc); goto out; } rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK, &tl); free_dfs_info_param(&ref); out: kfree(tree); cifs_put_tcp_super(sb); if (rc) { spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; spin_unlock(&tcon->tc_lock); } else { spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; spin_unlock(&tcon->tc_lock); tcon->need_reconnect = false; } return rc; }
linux-master
fs/smb/client/dfs.c
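A minimal user-space sketch of the path split performed by dfs_parse_target_referral() above: the referral's path_consumed says how much of the full UNC path the DFS server matched, and whatever remains (minus a leading delimiter) becomes the prepath that is appended to the target's node_name. The referral values here are hypothetical, not from any real server.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical referral: the server consumed "\\dfsroot\share". */
	const char *full_path = "\\\\dfsroot\\share\\dir\\file";
	const char *node_name = "\\fileserver\\share2";
	size_t path_consumed = strlen("\\\\dfsroot\\share");
	const char *prepath = "";

	if (strlen(full_path) - path_consumed) {
		prepath = full_path + path_consumed;
		/* skip the initial delimiter, as the kernel code does */
		if (*prepath == '/' || *prepath == '\\')
			prepath++;
	}
	/* A device name would then be built as "<node_name>\<prepath>". */
	printf("target=%s prepath=%s\n", node_name, prepath);
	return 0;
}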
// SPDX-License-Identifier: LGPL-2.1 /* * * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP * for more detailed information * * Copyright (C) International Business Machines Corp., 2005,2013 * Author(s): Steve French ([email protected]) * */ #include <linux/fs.h> #include <linux/slab.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifs_unicode.h" #include "cifsproto.h" #include "ntlmssp.h" #include <linux/ctype.h> #include <linux/random.h> #include <linux/highmem.h> #include <linux/fips.h> #include "../common/arc4.h" #include <crypto/aead.h> /* * Hash data from a BVEC-type iterator. */ static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize, struct shash_desc *shash) { const struct bio_vec *bv = iter->bvec; unsigned long start = iter->iov_offset; unsigned int i; void *p; int ret; for (i = 0; i < iter->nr_segs; i++) { size_t off, len; len = bv[i].bv_len; if (start >= len) { start -= len; continue; } len = min_t(size_t, maxsize, len - start); off = bv[i].bv_offset + start; p = kmap_local_page(bv[i].bv_page); ret = crypto_shash_update(shash, p + off, len); kunmap_local(p); if (ret < 0) return ret; maxsize -= len; if (maxsize <= 0) break; start = 0; } return 0; } /* * Hash data from a KVEC-type iterator. */ static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize, struct shash_desc *shash) { const struct kvec *kv = iter->kvec; unsigned long start = iter->iov_offset; unsigned int i; int ret; for (i = 0; i < iter->nr_segs; i++) { size_t len; len = kv[i].iov_len; if (start >= len) { start -= len; continue; } len = min_t(size_t, maxsize, len - start); ret = crypto_shash_update(shash, kv[i].iov_base + start, len); if (ret < 0) return ret; maxsize -= len; if (maxsize <= 0) break; start = 0; } return 0; } /* * Hash data from an XARRAY-type iterator. */ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize, struct shash_desc *shash) { struct folio *folios[16], *folio; unsigned int nr, i, j, npages; loff_t start = iter->xarray_start + iter->iov_offset; pgoff_t last, index = start / PAGE_SIZE; ssize_t ret = 0; size_t len, offset, foffset; void *p; if (maxsize == 0) return 0; last = (start + maxsize - 1) / PAGE_SIZE; do { nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios), XA_PRESENT); if (nr == 0) return -EIO; for (i = 0; i < nr; i++) { folio = folios[i]; npages = folio_nr_pages(folio); foffset = start - folio_pos(folio); offset = foffset % PAGE_SIZE; for (j = foffset / PAGE_SIZE; j < npages; j++) { len = min_t(size_t, maxsize, PAGE_SIZE - offset); p = kmap_local_page(folio_page(folio, j)); ret = crypto_shash_update(shash, p, len); kunmap_local(p); if (ret < 0) return ret; maxsize -= len; if (maxsize <= 0) return 0; start += len; offset = 0; index++; } } } while (nr == ARRAY_SIZE(folios)); return 0; } /* * Pass the data from an iterator into a hash. 
*/ static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize, struct shash_desc *shash) { if (maxsize == 0) return 0; switch (iov_iter_type(iter)) { case ITER_BVEC: return cifs_shash_bvec(iter, maxsize, shash); case ITER_KVEC: return cifs_shash_kvec(iter, maxsize, shash); case ITER_XARRAY: return cifs_shash_xarray(iter, maxsize, shash); default: pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter)); WARN_ON_ONCE(1); return -EIO; } } int __cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, char *signature, struct shash_desc *shash) { int i; ssize_t rc; struct kvec *iov = rqst->rq_iov; int n_vec = rqst->rq_nvec; /* iov[0] is actual data and not the rfc1002 length for SMB2+ */ if (!is_smb1(server)) { if (iov[0].iov_len <= 4) return -EIO; i = 0; } else { if (n_vec < 2 || iov[0].iov_len != 4) return -EIO; i = 1; /* skip rfc1002 length */ } for (; i < n_vec; i++) { if (iov[i].iov_len == 0) continue; if (iov[i].iov_base == NULL) { cifs_dbg(VFS, "null iovec entry\n"); return -EIO; } rc = crypto_shash_update(shash, iov[i].iov_base, iov[i].iov_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with payload\n", __func__); return rc; } } rc = cifs_shash_iter(&rqst->rq_iter, iov_iter_count(&rqst->rq_iter), shash); if (rc < 0) return rc; rc = crypto_shash_final(shash, signature); if (rc) cifs_dbg(VFS, "%s: Could not generate hash\n", __func__); return rc; } /* * Calculate and return the CIFS signature based on the mac key and SMB PDU. * The 16 byte signature must be allocated by the caller. Note we only use the * 1st eight bytes and that the smb header signature field on input contains * the sequence number before this function is called. Also, this function * should be called with the server->srv_mutex held. */ static int cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, char *signature) { int rc; if (!rqst->rq_iov || !signature || !server) return -EINVAL; rc = cifs_alloc_hash("md5", &server->secmech.md5); if (rc) return -1; rc = crypto_shash_init(server->secmech.md5); if (rc) { cifs_dbg(VFS, "%s: Could not init md5\n", __func__); return rc; } rc = crypto_shash_update(server->secmech.md5, server->session_key.response, server->session_key.len); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); return rc; } return __cifs_calc_signature(rqst, server, signature, server->secmech.md5); } /* must be called with server->srv_mutex held */ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { int rc = 0; char smb_signature[20]; struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; if (rqst->rq_iov[0].iov_len != 4 || rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) return -EIO; if ((cifs_pdu == NULL) || (server == NULL)) return -EINVAL; spin_lock(&server->srv_lock); if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) || server->tcpStatus == CifsNeedNegotiate) { spin_unlock(&server->srv_lock); return rc; } spin_unlock(&server->srv_lock); if (!server->session_estab) { memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8); return rc; } cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(server->sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; *pexpected_response_sequence_number = ++server->sequence_number; ++server->sequence_number; rc = cifs_calc_signature(rqst, server, smb_signature); if (rc) memset(cifs_pdu->Signature.SecuritySignature, 0, 8); else memcpy(cifs_pdu->Signature.SecuritySignature, 
smb_signature, 8); return rc; } int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence) { struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = n_vec }; return cifs_sign_rqst(&rqst, server, pexpected_response_sequence); } /* must be called with server->srv_mutex held */ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { struct kvec iov[2]; iov[0].iov_base = cifs_pdu; iov[0].iov_len = 4; iov[1].iov_base = (char *)cifs_pdu + 4; iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length); return cifs_sign_smbv(iov, 2, server, pexpected_response_sequence_number); } int cifs_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, __u32 expected_sequence_number) { unsigned int rc; char server_response_sig[8]; char what_we_think_sig_should_be[20]; struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; if (rqst->rq_iov[0].iov_len != 4 || rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) return -EIO; if (cifs_pdu == NULL || server == NULL) return -EINVAL; if (!server->session_estab) return 0; if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) { struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)cifs_pdu; if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE) return 0; } /* BB what if signatures are supposed to be on for session but server does not send one? BB */ /* Do not need to verify session setups with signature "BSRSPYL " */ if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0) cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n", cifs_pdu->Command); /* save off the original signature so we can modify the smb and check its signature against what the server sent */ memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8); cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(expected_sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; cifs_server_lock(server); rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be); cifs_server_unlock(server); if (rc) return rc; /* cifs_dump_mem("what we think it should be: ", what_we_think_sig_should_be, 16); */ if (memcmp(server_response_sig, what_we_think_sig_should_be, 8)) return -EACCES; else return 0; } /* Build a proper attribute value/target info pairs blob. * Fill in netbios and dns domain name and workstation name * and client time (total five av pairs plus one end-of-fields indicator). * Allocate domain name which gets freed when session struct is deallocated. 
*/ static int build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) { unsigned int dlen; unsigned int size = 2 * sizeof(struct ntlmssp2_name); char *defdmname = "WORKGROUP"; unsigned char *blobptr; struct ntlmssp2_name *attrptr; if (!ses->domainName) { ses->domainName = kstrdup(defdmname, GFP_KERNEL); if (!ses->domainName) return -ENOMEM; } dlen = strlen(ses->domainName); /* * The length of this blob is two times the size of a * structure (av pair) which holds name/size * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) + * unicode length of a netbios domain name */ kfree_sensitive(ses->auth_key.response); ses->auth_key.len = size + 2 * dlen; ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL); if (!ses->auth_key.response) { ses->auth_key.len = 0; return -ENOMEM; } blobptr = ses->auth_key.response; attrptr = (struct ntlmssp2_name *) blobptr; /* * As defined in MS-NTLM 3.3.2, just this av pair field * is sufficient as part of the temp */ attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); attrptr->length = cpu_to_le16(2 * dlen); blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp); return 0; } /* Server has provided av pairs/target info in the type 2 challenge * packet and we have plucked it and stored within smb session. * We parse that blob here to find netbios domain name to be used * as part of ntlmv2 authentication (in Target String), if not already * specified on the command line. * If this function returns without any error but without fetching * domain name, authentication may fail against some server but * may not fail against others (those that are not very particular * about the target string, i.e. for some, just the user name might suffice). */ static int find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) { unsigned int attrsize; unsigned int type; unsigned int onesize = sizeof(struct ntlmssp2_name); unsigned char *blobptr; unsigned char *blobend; struct ntlmssp2_name *attrptr; if (!ses->auth_key.len || !ses->auth_key.response) return 0; blobptr = ses->auth_key.response; blobend = blobptr + ses->auth_key.len; while (blobptr + onesize < blobend) { attrptr = (struct ntlmssp2_name *) blobptr; type = le16_to_cpu(attrptr->type); if (type == NTLMSSP_AV_EOL) break; blobptr += 2; /* advance attr type */ attrsize = le16_to_cpu(attrptr->length); blobptr += 2; /* advance attr size */ if (blobptr + attrsize > blobend) break; if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) break; if (!ses->domainName) { ses->domainName = kmalloc(attrsize + 1, GFP_KERNEL); if (!ses->domainName) return -ENOMEM; cifs_from_utf16(ses->domainName, (__le16 *)blobptr, attrsize, attrsize, nls_cp, NO_MAP_UNI_RSVD); break; } } blobptr += attrsize; /* advance attr value */ } return 0; } /* Server has provided av pairs/target info in the type 2 challenge * packet and we have plucked it and stored within smb session. 
* We parse that blob here to find the server given timestamp * as part of ntlmv2 authentication (or local current time as * default in case of failure) */ static __le64 find_timestamp(struct cifs_ses *ses) { unsigned int attrsize; unsigned int type; unsigned int onesize = sizeof(struct ntlmssp2_name); unsigned char *blobptr; unsigned char *blobend; struct ntlmssp2_name *attrptr; struct timespec64 ts; if (!ses->auth_key.len || !ses->auth_key.response) return 0; blobptr = ses->auth_key.response; blobend = blobptr + ses->auth_key.len; while (blobptr + onesize < blobend) { attrptr = (struct ntlmssp2_name *) blobptr; type = le16_to_cpu(attrptr->type); if (type == NTLMSSP_AV_EOL) break; blobptr += 2; /* advance attr type */ attrsize = le16_to_cpu(attrptr->length); blobptr += 2; /* advance attr size */ if (blobptr + attrsize > blobend) break; if (type == NTLMSSP_AV_TIMESTAMP) { if (attrsize == sizeof(u64)) return *((__le64 *)blobptr); } blobptr += attrsize; /* advance attr value */ } ktime_get_real_ts64(&ts); return cpu_to_le64(cifs_UnixTimeToNT(ts)); } static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, const struct nls_table *nls_cp) { int rc = 0; int len; char nt_hash[CIFS_NTHASH_SIZE]; __le16 *user; wchar_t *domain; wchar_t *server; if (!ses->server->secmech.hmacmd5) { cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__); return -1; } /* calculate md4 hash of password */ E_md4hash(ses->password, nt_hash, nls_cp); rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, nt_hash, CIFS_NTHASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__); return rc; } rc = crypto_shash_init(ses->server->secmech.hmacmd5); if (rc) { cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); return rc; } /* convert ses->user_name to unicode */ len = ses->user_name ? 
strlen(ses->user_name) : 0; user = kmalloc(2 + (len * 2), GFP_KERNEL); if (user == NULL) { rc = -ENOMEM; return rc; } if (len) { len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp); UniStrupr(user); } else { memset(user, '\0', 2); } rc = crypto_shash_update(ses->server->secmech.hmacmd5, (char *)user, 2 * len); kfree(user); if (rc) { cifs_dbg(VFS, "%s: Could not update with user\n", __func__); return rc; } /* convert ses->domainName to unicode and uppercase */ if (ses->domainName) { len = strlen(ses->domainName); domain = kmalloc(2 + (len * 2), GFP_KERNEL); if (domain == NULL) { rc = -ENOMEM; return rc; } len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len, nls_cp); rc = crypto_shash_update(ses->server->secmech.hmacmd5, (char *)domain, 2 * len); kfree(domain); if (rc) { cifs_dbg(VFS, "%s: Could not update with domain\n", __func__); return rc; } } else { /* We use ses->ip_addr if no domain name available */ len = strlen(ses->ip_addr); server = kmalloc(2 + (len * 2), GFP_KERNEL); if (server == NULL) { rc = -ENOMEM; return rc; } len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len, nls_cp); rc = crypto_shash_update(ses->server->secmech.hmacmd5, (char *)server, 2 * len); kfree(server); if (rc) { cifs_dbg(VFS, "%s: Could not update with server\n", __func__); return rc; } } rc = crypto_shash_final(ses->server->secmech.hmacmd5, ntlmv2_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); return rc; } static int CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) { int rc; struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *) (ses->auth_key.response + CIFS_SESS_KEY_SIZE); unsigned int hash_len; /* The MD5 hash starts at challenge_key.key */ hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE + offsetof(struct ntlmv2_resp, challenge.key[0])); if (!ses->server->secmech.hmacmd5) { cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__); return -1; } rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n", __func__); return rc; } rc = crypto_shash_init(ses->server->secmech.hmacmd5); if (rc) { cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); return rc; } if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) memcpy(ntlmv2->challenge.key, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE); else memcpy(ntlmv2->challenge.key, ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE); rc = crypto_shash_update(ses->server->secmech.hmacmd5, ntlmv2->challenge.key, hash_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); return rc; } /* Note that the MD5 digest overwrites anon.challenge_key.key */ rc = crypto_shash_final(ses->server->secmech.hmacmd5, ntlmv2->ntlmv2_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); return rc; } int setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc; int baselen; unsigned int tilen; struct ntlmv2_resp *ntlmv2; char ntlmv2_hash[16]; unsigned char *tiblob = NULL; /* target info blob */ __le64 rsp_timestamp; if (nls_cp == NULL) { cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__); return -EINVAL; } if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { if (!ses->domainName) { if (ses->domainAuto) { rc = find_domain_name(ses, nls_cp); if (rc) { cifs_dbg(VFS, "error %d finding domain name\n", rc); goto setup_ntlmv2_rsp_ret; } } else { ses->domainName = kstrdup("", GFP_KERNEL); } } } else { rc = build_avpair_blob(ses, nls_cp); 
if (rc) { cifs_dbg(VFS, "error %d building av pair blob\n", rc); goto setup_ntlmv2_rsp_ret; } } /* Must be within 5 minutes of the server (or in range +/-2h * in case of Mac OS X), so simply carry over server timestamp * (as Windows 7 does) */ rsp_timestamp = find_timestamp(ses); baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); tilen = ses->auth_key.len; tiblob = ses->auth_key.response; ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); if (!ses->auth_key.response) { rc = -ENOMEM; ses->auth_key.len = 0; goto setup_ntlmv2_rsp_ret; } ses->auth_key.len += baselen; ntlmv2 = (struct ntlmv2_resp *) (ses->auth_key.response + CIFS_SESS_KEY_SIZE); ntlmv2->blob_signature = cpu_to_le32(0x00000101); ntlmv2->reserved = 0; ntlmv2->time = rsp_timestamp; get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal)); ntlmv2->reserved2 = 0; memcpy(ses->auth_key.response + baselen, tiblob, tilen); cifs_server_lock(ses->server); rc = cifs_alloc_hash("hmac(md5)", &ses->server->secmech.hmacmd5); if (rc) { goto unlock; } /* calculate ntlmv2_hash */ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp); if (rc) { cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc); goto unlock; } /* calculate first part of the client response (CR1) */ rc = CalcNTLMv2_response(ses, ntlmv2_hash); if (rc) { cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc); goto unlock; } /* now calculate the session key for NTLMv2 */ rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n", __func__); goto unlock; } rc = crypto_shash_init(ses->server->secmech.hmacmd5); if (rc) { cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); goto unlock; } rc = crypto_shash_update(ses->server->secmech.hmacmd5, ntlmv2->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); goto unlock; } rc = crypto_shash_final(ses->server->secmech.hmacmd5, ses->auth_key.response); if (rc) cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); unlock: cifs_server_unlock(ses->server); setup_ntlmv2_rsp_ret: kfree_sensitive(tiblob); return rc; } int calc_seckey(struct cifs_ses *ses) { unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ struct arc4_ctx *ctx_arc4; if (fips_enabled) return -ENODEV; get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL); if (!ctx_arc4) { cifs_dbg(VFS, "Could not allocate arc4 context\n"); return -ENOMEM; } cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE); cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key, CIFS_CPHTXT_SIZE); /* make secondary_key/nonce as session key */ memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); /* and make len as that of session key only */ ses->auth_key.len = CIFS_SESS_KEY_SIZE; memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE); kfree_sensitive(ctx_arc4); return 0; } void cifs_crypto_secmech_release(struct TCP_Server_Info *server) { cifs_free_hash(&server->secmech.aes_cmac); cifs_free_hash(&server->secmech.hmacsha256); cifs_free_hash(&server->secmech.md5); cifs_free_hash(&server->secmech.sha512); cifs_free_hash(&server->secmech.hmacmd5); if (server->secmech.enc) { crypto_free_aead(server->secmech.enc); server->secmech.enc = NULL; } if (server->secmech.dec) { crypto_free_aead(server->secmech.dec); server->secmech.dec = NULL; } }
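A rough user-space sketch of the SMB1 signing scheme that cifs_calc_signature() and cifs_sign_rqst() above implement: the sequence number (little endian) is written into the PDU's signature field, MD5 is computed over the session key followed by the whole PDU, and only the first 8 digest bytes are kept; cifs_verify_signature() recomputes the same value with the expected sequence number and compares. This assumes OpenSSL's legacy MD5_* API (link with -lcrypto); the key, PDU, and signature offset are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

#define SIG_OFF 14	/* hypothetical offset of the signature field */

static void smb1_sign(const uint8_t *key, size_t key_len, uint8_t *pdu,
		      size_t pdu_len, uint32_t seq, uint8_t sig[8])
{
	uint8_t digest[MD5_DIGEST_LENGTH];
	MD5_CTX ctx;

	/* The sequence number occupies the signature field while hashing. */
	memset(pdu + SIG_OFF, 0, 8);
	pdu[SIG_OFF + 0] = seq & 0xff;
	pdu[SIG_OFF + 1] = (seq >> 8) & 0xff;
	pdu[SIG_OFF + 2] = (seq >> 16) & 0xff;
	pdu[SIG_OFF + 3] = (seq >> 24) & 0xff;

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, key_len);		/* session key first */
	MD5_Update(&ctx, pdu, pdu_len);		/* then the whole PDU */
	MD5_Final(digest, &ctx);
	memcpy(sig, digest, 8);	/* only 8 of the 16 digest bytes are sent */
}

int main(void)
{
	uint8_t key[16] = { 0 }, pdu[64] = { 0 }, sig[8];
	unsigned int i;

	smb1_sign(key, sizeof(key), pdu, sizeof(pdu), 1, sig);
	for (i = 0; i < 8; i++)
		printf("%02x", sig[i]);
	printf("\n");
	return 0;
}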
linux-master
fs/smb/client/cifsencrypt.c
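find_domain_name() and find_timestamp() above both walk the NTLMSSP target-info ("AV pair") blob from the server's Type 2 challenge: each entry is a 16-bit little-endian type and a 16-bit little-endian length followed by the value, and an AV_EOL entry terminates the list. Below is a self-contained sketch of that walk; the sample blob and the type constants are hypothetical stand-ins for the kernel's ntlmssp.h definitions.

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Hypothetical blob: one NbDomainName pair ("AB" in UTF-16LE), then AV_EOL. */
	const uint8_t blob[] = {
		0x02, 0x00, 0x04, 0x00, 'A', 0x00, 'B', 0x00,	/* type 2, len 4 */
		0x00, 0x00, 0x00, 0x00				/* type 0 (EOL), len 0 */
	};
	const uint8_t *p = blob, *end = blob + sizeof(blob);

	while (p + 4 <= end) {
		uint16_t type = get_le16(p);
		uint16_t len = get_le16(p + 2);

		p += 4;				/* past type and length */
		if (type == 0 || p + len > end)	/* AV_EOL or malformed */
			break;
		printf("av pair: type=%u len=%u\n", type, len);
		p += len;			/* past the value */
	}
	return 0;
}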
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French ([email protected]) * */ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/mempool.h> #include <linux/vmalloc.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "smberr.h" #include "nterr.h" #include "cifs_unicode.h" #include "smb2pdu.h" #include "cifsfs.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dns_resolve.h" #include "dfs_cache.h" #include "dfs.h" #endif #include "fs_context.h" #include "cached_dir.h" extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; /* The xid serves as a useful identifier for each incoming vfs request, in a similar way to the mid which is useful to track each sent smb, and CurrentXid can also provide a running counter (although it will eventually wrap past zero) of the total vfs operations handled since the cifs fs was mounted */ unsigned int _get_xid(void) { unsigned int xid; spin_lock(&GlobalMid_Lock); GlobalTotalActiveXid++; /* keep high water mark for number of simultaneous ops in filesystem */ if (GlobalTotalActiveXid > GlobalMaxActiveXid) GlobalMaxActiveXid = GlobalTotalActiveXid; if (GlobalTotalActiveXid > 65000) cifs_dbg(FYI, "warning: more than 65000 requests active\n"); xid = GlobalCurrentXid++; spin_unlock(&GlobalMid_Lock); return xid; } void _free_xid(unsigned int xid) { spin_lock(&GlobalMid_Lock); /* if (GlobalTotalActiveXid == 0) BUG(); */ GlobalTotalActiveXid--; spin_unlock(&GlobalMid_Lock); } struct cifs_ses * sesInfoAlloc(void) { struct cifs_ses *ret_buf; ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); if (ret_buf) { atomic_inc(&sesInfoAllocCount); spin_lock_init(&ret_buf->ses_lock); ret_buf->ses_status = SES_NEW; ++ret_buf->ses_count; INIT_LIST_HEAD(&ret_buf->smb_ses_list); INIT_LIST_HEAD(&ret_buf->tcon_list); mutex_init(&ret_buf->session_mutex); spin_lock_init(&ret_buf->iface_lock); INIT_LIST_HEAD(&ret_buf->iface_list); spin_lock_init(&ret_buf->chan_lock); } return ret_buf; } void sesInfoFree(struct cifs_ses *buf_to_free) { struct cifs_server_iface *iface = NULL, *niface = NULL; if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n"); return; } unload_nls(buf_to_free->local_nls); atomic_dec(&sesInfoAllocCount); kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); kfree_sensitive(buf_to_free->password); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); kfree_sensitive(buf_to_free->auth_key.response); spin_lock(&buf_to_free->iface_lock); list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list, iface_head) kref_put(&iface->refcount, release_iface); spin_unlock(&buf_to_free->iface_lock); kfree_sensitive(buf_to_free); } struct cifs_tcon * tcon_info_alloc(bool dir_leases_enabled) { struct cifs_tcon *ret_buf; ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL); if (!ret_buf) return NULL; if (dir_leases_enabled == true) { ret_buf->cfids = init_cached_dirs(); if (!ret_buf->cfids) { kfree(ret_buf); return NULL; } } /* else ret_buf->cfids is already set to NULL above */ atomic_inc(&tconInfoAllocCount); ret_buf->status = TID_NEW; ++ret_buf->tc_count; spin_lock_init(&ret_buf->tc_lock); INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); spin_lock_init(&ret_buf->open_file_lock); spin_lock_init(&ret_buf->stat_lock); atomic_set(&ret_buf->num_local_opens, 0); atomic_set(&ret_buf->num_remote_opens, 0); #ifdef CONFIG_CIFS_DFS_UPCALL 
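/* dfs_ses_list tracks the DFS root sessions this tcon holds references on; they are put via dfs_put_root_smb_sessions() in tconInfoFree() below. */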
INIT_LIST_HEAD(&ret_buf->dfs_ses_list); #endif return ret_buf; } void tconInfoFree(struct cifs_tcon *tcon) { if (tcon == NULL) { cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); return; } free_cached_dirs(tcon->cfids); atomic_dec(&tconInfoAllocCount); kfree(tcon->nativeFileSystem); kfree_sensitive(tcon->password); #ifdef CONFIG_CIFS_DFS_UPCALL dfs_put_root_smb_sessions(&tcon->dfs_ses_list); #endif kfree(tcon->origin_fullpath); kfree(tcon); } struct smb_hdr * cifs_buf_get(void) { struct smb_hdr *ret_buf = NULL; /* * SMB2 header is bigger than CIFS one - no problems to clean some * more bytes for CIFS. */ size_t buf_size = sizeof(struct smb2_hdr); /* * We could use negotiated size instead of max_msgsize - * but it may be more efficient to always alloc same size * albeit slightly larger than necessary and maxbuffersize * defaults to this and can not be bigger. */ ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS); /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ memset(ret_buf, 0, buf_size + 3); atomic_inc(&buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 atomic_inc(&total_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; } void cifs_buf_release(void *buf_to_free) { if (buf_to_free == NULL) { /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/ return; } mempool_free(buf_to_free, cifs_req_poolp); atomic_dec(&buf_alloc_count); return; } struct smb_hdr * cifs_small_buf_get(void) { struct smb_hdr *ret_buf = NULL; /* We could use negotiated size instead of max_msgsize - but it may be more efficient to always alloc same size albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ atomic_inc(&small_buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 atomic_inc(&total_small_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; } void cifs_small_buf_release(void *buf_to_free) { if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n"); return; } mempool_free(buf_to_free, cifs_sm_req_poolp); atomic_dec(&small_buf_alloc_count); return; } void free_rsp_buf(int resp_buftype, void *rsp) { if (resp_buftype == CIFS_SMALL_BUFFER) cifs_small_buf_release(rsp); else if (resp_buftype == CIFS_LARGE_BUFFER) cifs_buf_release(rsp); } /* NB: MID can not be set if treeCon not passed in, in that case it is responsibility of caller to set the mid */ void header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , const struct cifs_tcon *treeCon, int word_count /* length of fixed section (word count) in two byte units */) { char *temp = (char *) buffer; memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ buffer->smb_buf_length = cpu_to_be32( (2 * word_count) + sizeof(struct smb_hdr) - 4 /* RFC 1001 length field does not count */ + 2 /* for bcc field itself */) ; buffer->Protocol[0] = 0xFF; buffer->Protocol[1] = 'S'; buffer->Protocol[2] = 'M'; buffer->Protocol[3] = 'B'; buffer->Command = smb_command; buffer->Flags = 0x00; /* case sensitive */ buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES; buffer->Pid = cpu_to_le16((__u16)current->tgid); buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16)); if (treeCon) { buffer->Tid = treeCon->tid; if (treeCon->ses) { if (treeCon->ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (treeCon->ses->capabilities & CAP_STATUS32) 
buffer->Flags2 |= SMBFLG2_ERR_STATUS; /* Uid is not converted */ buffer->Uid = treeCon->ses->Suid; if (treeCon->ses->server) buffer->Mid = get_next_mid(treeCon->ses->server); } if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) buffer->Flags2 |= SMBFLG2_DFS; if (treeCon->nocase) buffer->Flags |= SMBFLG_CASELESS; if ((treeCon->ses) && (treeCon->ses->server)) if (treeCon->ses->server->sign) buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; } /* endian conversion of flags is now done just before sending */ buffer->WordCount = (char) word_count; return; } static int check_smb_hdr(struct smb_hdr *smb) { /* does it have the right SMB "signature" ? */ if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) { cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n", *(unsigned int *)smb->Protocol); return 1; } /* if it's a response then accept */ if (smb->Flags & SMBFLG_RESPONSE) return 0; /* only one valid case where server sends us request */ if (smb->Command == SMB_COM_LOCKING_ANDX) return 0; cifs_dbg(VFS, "Server sent request, not response. mid=%u\n", get_mid(smb)); return 1; } int checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server) { struct smb_hdr *smb = (struct smb_hdr *)buf; __u32 rfclen = be32_to_cpu(smb->smb_buf_length); __u32 clc_len; /* calculated length */ cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n", total_read, rfclen); /* is this frame too small to even get to a BCC? */ if (total_read < 2 + sizeof(struct smb_hdr)) { if ((total_read >= sizeof(struct smb_hdr) - 1) && (smb->Status.CifsError != 0)) { /* it's an error return */ smb->WordCount = 0; /* some error cases do not return wct and bcc */ return 0; } else if ((total_read == sizeof(struct smb_hdr) + 1) && (smb->WordCount == 0)) { char *tmp = (char *)smb; /* Need to work around a bug in two servers here */ /* First, check if the part of bcc they sent was zero */ if (tmp[sizeof(struct smb_hdr)] == 0) { /* some servers return only half of bcc * on simple responses (wct, bcc both zero) * in particular have seen this on * ulogoffX and FindClose. This leaves * one byte of bcc potentially uninitialized */ /* zero rest of bcc */ tmp[sizeof(struct smb_hdr)+1] = 0; return 0; } cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n"); } else { cifs_dbg(VFS, "Length less than smb header size\n"); } return -EIO; } /* otherwise, there is enough to get to the BCC */ if (check_smb_hdr(smb)) return -EIO; clc_len = smbCalcSize(smb); if (4 + rfclen != total_read) { cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n", rfclen); return -EIO; } if (4 + rfclen != clc_len) { __u16 mid = get_mid(smb); /* check if bcc wrapped around for large read responses */ if ((rfclen > 64 * 1024) && (rfclen > clc_len)) { /* check if lengths match mod 64K */ if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF)) return 0; /* bcc wrapped */ } cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n", clc_len, 4 + rfclen, mid); if (4 + rfclen < clc_len) { cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n", rfclen, mid); return -EIO; } else if (rfclen > clc_len + 512) { /* * Some servers (Windows XP in particular) send more * data than the lengths in the SMB packet would * indicate on certain calls (byte range locks and * trans2 find first calls in particular). While the * client can handle such a frame by ignoring the * trailing data, we choose to limit the amount of extra * data to 512 bytes. 
*/ cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n", rfclen, mid); return -EIO; } } return 0; } bool is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; struct TCP_Server_Info *pserver; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *pCifsInode; struct cifsFileInfo *netfile; cifs_dbg(FYI, "Checking for oplock break or dnotify response\n"); if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && (pSMB->hdr.Flags & SMBFLG_RESPONSE)) { struct smb_com_transaction_change_notify_rsp *pSMBr = (struct smb_com_transaction_change_notify_rsp *)buf; struct file_notify_information *pnotify; __u32 data_offset = 0; size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length); if (get_bcc(buf) > sizeof(struct file_notify_information)) { data_offset = le32_to_cpu(pSMBr->DataOffset); if (data_offset > len - sizeof(struct file_notify_information)) { cifs_dbg(FYI, "Invalid data_offset %u\n", data_offset); return true; } pnotify = (struct file_notify_information *) ((char *)&pSMBr->hdr.Protocol + data_offset); cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n", pnotify->FileName, pnotify->Action); /* cifs_dump_mem("Rcvd notify Data: ",buf, sizeof(struct smb_hdr)+60); */ return true; } if (pSMBr->hdr.Status.CifsError) { cifs_dbg(FYI, "notify err 0x%x\n", pSMBr->hdr.Status.CifsError); return true; } return false; } if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX) return false; if (pSMB->hdr.Flags & SMBFLG_RESPONSE) { /* no sense logging error on invalid handle on oplock break - harmless race between close request and oplock break response is expected from time to time writing out large dirty files cached on the client */ if ((NT_STATUS_INVALID_HANDLE) == le32_to_cpu(pSMB->hdr.Status.CifsError)) { cifs_dbg(FYI, "Invalid handle on oplock break\n"); return true; } else if (ERRbadfid == le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { return true; } else { return false; /* on valid oplock brk we get "request" */ } } if (pSMB->hdr.WordCount != 8) return false; cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n", pSMB->LockType, pSMB->OplockLevel); if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) return false; /* If server is a channel, select the primary channel */ pserver = SERVER_IS_CHAN(srv) ? 
srv->primary_server : srv; /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid != buf->Tid) continue; cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&tcon->open_file_lock); list_for_each_entry(netfile, &tcon->openFileList, tlist) { if (pSMB->Fid != netfile->fid.netfid) continue; cifs_dbg(FYI, "file id match, oplock break\n"); pCifsInode = CIFS_I(d_inode(netfile->dentry)); set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); netfile->oplock_epoch = 0; netfile->oplock_level = pSMB->OplockLevel; netfile->oplock_break_cancelled = false; cifs_queue_oplock_break(netfile); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "No matching file for oplock break\n"); return true; } } spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); return true; } void dump_smb(void *buf, int smb_buf_length) { if (traceSMB == 0) return; print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf, smb_buf_length, true); } void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { struct cifs_tcon *tcon = NULL; if (cifs_sb->master_tlink) tcon = cifs_sb_master_tcon(cifs_sb); cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; cifs_sb->mnt_cifs_serverino_autodisabled = true; cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n", tcon ? tcon->tree_name : "new server"); cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n"); cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n"); } } void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) { oplock &= 0xF; if (oplock == OPLOCK_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", &cinode->netfs.inode); } else if (oplock == OPLOCK_READ) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", &cinode->netfs.inode); } else cinode->oplock = 0; } /* * We wait for oplock breaks to be processed before we attempt to perform * writes. 
*/ int cifs_get_writer(struct cifsInodeInfo *cinode) { int rc; start: rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK, TASK_KILLABLE); if (rc) return rc; spin_lock(&cinode->writers_lock); if (!cinode->writers) set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); cinode->writers++; /* Check to see if we have started servicing an oplock break */ if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) { cinode->writers--; if (cinode->writers == 0) { clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); } spin_unlock(&cinode->writers_lock); goto start; } spin_unlock(&cinode->writers_lock); return 0; } void cifs_put_writer(struct cifsInodeInfo *cinode) { spin_lock(&cinode->writers_lock); cinode->writers--; if (cinode->writers == 0) { clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); } spin_unlock(&cinode->writers_lock); } /** * cifs_queue_oplock_break - queue the oplock break handler for cfile * @cfile: The file to break the oplock on * * This function is called from the demultiplex thread when it * receives an oplock break for @cfile. * * Assumes the tcon->open_file_lock is held. * Assumes cfile->file_info_lock is NOT held. */ void cifs_queue_oplock_break(struct cifsFileInfo *cfile) { /* * Bump the handle refcount now while we hold the * open_file_lock to enforce the validity of it for the oplock * break handler. The matching put is done at the end of the * handler. */ cifsFileInfo_get(cfile); queue_work(cifsoplockd_wq, &cfile->oplock_break); } void cifs_done_oplock_break(struct cifsInodeInfo *cinode) { clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK); } bool backup_cred(struct cifs_sb_info *cifs_sb) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) { if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid())) return true; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) { if (in_group_p(cifs_sb->ctx->backupgid)) return true; } return false; } void cifs_del_pending_open(struct cifs_pending_open *open) { spin_lock(&tlink_tcon(open->tlink)->open_file_lock); list_del(&open->olist); spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } void cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open) { memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE); open->oplock = CIFS_OPLOCK_NO_CHANGE; open->tlink = tlink; fid->pending_open = open; list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens); } void cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open) { spin_lock(&tlink_tcon(tlink)->open_file_lock); cifs_add_pending_open_locked(fid, tlink, open); spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } /* * Critical section which runs after acquiring deferred_lock. * As there is no reference count on cifs_deferred_close, pdclose * should not be used outside deferred_lock. */ bool cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose) { struct cifs_deferred_close *dclose; list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) { if ((dclose->netfid == cfile->fid.netfid) && (dclose->persistent_fid == cfile->fid.persistent_fid) && (dclose->volatile_fid == cfile->fid.volatile_fid)) { *pdclose = dclose; return true; } } return false; } /* * Critical section which runs after acquiring deferred_lock. 
*/ void cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose) { bool is_deferred = false; struct cifs_deferred_close *pdclose; is_deferred = cifs_is_deferred_close(cfile, &pdclose); if (is_deferred) { kfree(dclose); return; } dclose->tlink = cfile->tlink; dclose->netfid = cfile->fid.netfid; dclose->persistent_fid = cfile->fid.persistent_fid; dclose->volatile_fid = cfile->fid.volatile_fid; list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes); } /* * Critical section which runs after acquiring deferred_lock. */ void cifs_del_deferred_close(struct cifsFileInfo *cfile) { bool is_deferred = false; struct cifs_deferred_close *dclose; is_deferred = cifs_is_deferred_close(cfile, &dclose); if (!is_deferred) return; list_del(&dclose->dlist); kfree(dclose); } void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *cfile = NULL; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; if (cifs_inode == NULL) return; INIT_LIST_HEAD(&file_head); spin_lock(&cifs_inode->open_file_lock); list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { spin_lock(&cifs_inode->deferred_lock); cifs_del_deferred_close(cfile); spin_unlock(&cifs_inode->deferred_lock); tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) break; tmp_list->cfile = cfile; list_add_tail(&tmp_list->list, &file_head); } } } spin_unlock(&cifs_inode->open_file_lock); list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { _cifsFileInfo_put(tmp_list->cfile, false, false); list_del(&tmp_list->list); kfree(tmp_list); } } void cifs_close_all_deferred_files(struct cifs_tcon *tcon) { struct cifsFileInfo *cfile; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; INIT_LIST_HEAD(&file_head); spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); cifs_del_deferred_close(cfile); spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) break; tmp_list->cfile = cfile; list_add_tail(&tmp_list->list, &file_head); } } } spin_unlock(&tcon->open_file_lock); list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { _cifsFileInfo_put(tmp_list->cfile, true, false); list_del(&tmp_list->list); kfree(tmp_list); } } void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) { struct cifsFileInfo *cfile; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; void *page; const char *full_path; INIT_LIST_HEAD(&file_head); page = alloc_dentry_path(); spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { full_path = build_path_from_dentry(cfile->dentry, page); if (strstr(full_path, path)) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); cifs_del_deferred_close(cfile); spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) break; tmp_list->cfile = cfile; list_add_tail(&tmp_list->list, &file_head); } } } } spin_unlock(&tcon->open_file_lock); list_for_each_entry_safe(tmp_list, 
tmp_next_list, &file_head, list) { _cifsFileInfo_put(tmp_list->cfile, true, false); list_del(&tmp_list->list); kfree(tmp_list); } free_dentry_path(page); } /* parses DFS referral V3 structure * caller is responsible for freeing target_nodes * returns: * - on success - 0 * - on failure - errno */ int parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, unsigned int *num_of_nodes, struct dfs_info3_param **target_nodes, const struct nls_table *nls_codepage, int remap, const char *searchName, bool is_unicode) { int i, rc = 0; char *data_end; struct dfs_referral_level_3 *ref; *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); if (*num_of_nodes < 1) { cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", *num_of_nodes); rc = -EINVAL; goto parse_DFS_referrals_exit; } ref = (struct dfs_referral_level_3 *) &(rsp->referrals); if (ref->VersionNumber != cpu_to_le16(3)) { cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", le16_to_cpu(ref->VersionNumber)); rc = -EINVAL; goto parse_DFS_referrals_exit; } /* get the upper boundary of the resp buffer */ data_end = (char *)rsp + rsp_size; cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", *num_of_nodes, le32_to_cpu(rsp->DFSFlags)); *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), GFP_KERNEL); if (*target_nodes == NULL) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* collect necessary data from referrals */ for (i = 0; i < *num_of_nodes; i++) { char *temp; int max_len; struct dfs_info3_param *node = (*target_nodes)+i; node->flags = le32_to_cpu(rsp->DFSFlags); if (is_unicode) { __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, GFP_KERNEL); if (tmp == NULL) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } cifsConvertToUTF16((__le16 *) tmp, searchName, PATH_MAX, nls_codepage, remap); node->path_consumed = cifs_utf16_bytes(tmp, le16_to_cpu(rsp->PathConsumed), nls_codepage); kfree(tmp); } else node->path_consumed = le16_to_cpu(rsp->PathConsumed); node->server_type = le16_to_cpu(ref->ServerType); node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); /* copy DfsPath */ temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); max_len = data_end - temp; node->path_name = cifs_strndup_from_utf16(temp, max_len, is_unicode, nls_codepage); if (!node->path_name) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* copy link target UNC */ temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); max_len = data_end - temp; node->node_name = cifs_strndup_from_utf16(temp, max_len, is_unicode, nls_codepage); if (!node->node_name) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } node->ttl = le32_to_cpu(ref->TimeToLive); ref++; } parse_DFS_referrals_exit: if (rc) { free_dfs_info_array(*target_nodes, *num_of_nodes); *target_nodes = NULL; *num_of_nodes = 0; } return rc; } struct cifs_aio_ctx * cifs_aio_ctx_alloc(void) { struct cifs_aio_ctx *ctx; /* * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io * to false so that we know when we have to unreference pages within * cifs_aio_ctx_release() */ ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_LIST_HEAD(&ctx->list); mutex_init(&ctx->aio_mutex); init_completion(&ctx->done); kref_init(&ctx->refcount); return ctx; } void cifs_aio_ctx_release(struct kref *refcount) { struct cifs_aio_ctx *ctx = container_of(refcount, struct cifs_aio_ctx, refcount); cifsFileInfo_put(ctx->cfile); /* * ctx->bv is only set if setup_aio_ctx_iter() was called successfully * which means that 
iov_iter_extract_pages() was a success and thus * that we may have references or pins on pages that we need to * release. */ if (ctx->bv) { if (ctx->should_dirty || ctx->bv_need_unpin) { unsigned int i; for (i = 0; i < ctx->nr_pinned_pages; i++) { struct page *page = ctx->bv[i].bv_page; if (ctx->should_dirty) set_page_dirty(page); if (ctx->bv_need_unpin) unpin_user_page(page); } } kvfree(ctx->bv); } kfree(ctx); } /** * cifs_alloc_hash - allocate hash and hash context together * @name: The name of the crypto hash algo * @sdesc: SHASH descriptor where to put the pointer to the hash TFM * * The caller has to make sure @sdesc is initialized to either NULL or * a valid context. It can be freed via cifs_free_hash(). */ int cifs_alloc_hash(const char *name, struct shash_desc **sdesc) { int rc = 0; struct crypto_shash *alg = NULL; if (*sdesc) return 0; alg = crypto_alloc_shash(name, 0, 0); if (IS_ERR(alg)) { cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name); rc = PTR_ERR(alg); *sdesc = NULL; return rc; } *sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL); if (*sdesc == NULL) { cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name); crypto_free_shash(alg); return -ENOMEM; } (*sdesc)->tfm = alg; return 0; } /** * cifs_free_hash - free hash and hash context together * @sdesc: Where to find the pointer to the hash TFM * * Freeing a NULL descriptor is safe. */ void cifs_free_hash(struct shash_desc **sdesc) { if (unlikely(!sdesc) || !*sdesc) return; if ((*sdesc)->tfm) { crypto_free_shash((*sdesc)->tfm); (*sdesc)->tfm = NULL; } kfree_sensitive(*sdesc); *sdesc = NULL; } void extract_unc_hostname(const char *unc, const char **h, size_t *len) { const char *end; /* skip initial slashes */ while (*unc && (*unc == '\\' || *unc == '/')) unc++; end = unc; while (*end && !(*end == '\\' || *end == '/')) end++; *h = unc; *len = end - unc; } /** * copy_path_name - copy src path to dst, possibly truncating * @dst: The destination buffer * @src: The source name * * returns number of bytes written (including trailing nul) */ int copy_path_name(char *dst, const char *src) { int name_len; /* * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it * will truncate and strlen(dst) will be PATH_MAX-1 */ name_len = strscpy(dst, src, PATH_MAX); if (WARN_ON_ONCE(name_len < 0)) name_len = PATH_MAX-1; /* we count the trailing nul */ name_len++; return name_len; } struct super_cb_data { void *data; struct super_block *sb; }; static void tcon_super_cb(struct super_block *sb, void *arg) { struct super_cb_data *sd = arg; struct cifs_sb_info *cifs_sb; struct cifs_tcon *t1 = sd->data, *t2; if (sd->sb) return; cifs_sb = CIFS_SB(sb); t2 = cifs_sb_master_tcon(cifs_sb); spin_lock(&t2->tc_lock); if (t1->ses == t2->ses && t1->ses->server == t2->ses->server && t2->origin_fullpath && dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath)) sd->sb = sb; spin_unlock(&t2->tc_lock); } static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *), void *data) { struct super_cb_data sd = { .data = data, .sb = NULL, }; struct file_system_type **fs_type = (struct file_system_type *[]) { &cifs_fs_type, &smb3_fs_type, NULL, }; for (; *fs_type; fs_type++) { iterate_supers_type(*fs_type, f, &sd); if (sd.sb) { /* * Grab an active reference in order to prevent automounts (DFS links) * from expiring and then freeing up our cifs superblock pointer while * we're doing failover. 
*/ cifs_sb_active(sd.sb); return sd.sb; } } pr_warn_once("%s: could not find dfs superblock\n", __func__); return ERR_PTR(-EINVAL); } static void __cifs_put_super(struct super_block *sb) { if (!IS_ERR_OR_NULL(sb)) cifs_sb_deactive(sb); } struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon) { spin_lock(&tcon->tc_lock); if (!tcon->origin_fullpath) { spin_unlock(&tcon->tc_lock); return ERR_PTR(-ENOENT); } spin_unlock(&tcon->tc_lock); return __cifs_get_super(tcon_super_cb, tcon); } void cifs_put_tcp_super(struct super_block *sb) { __cifs_put_super(sb); } #ifdef CONFIG_CIFS_DFS_UPCALL int match_target_ip(struct TCP_Server_Info *server, const char *share, size_t share_len, bool *result) { int rc; char *target; struct sockaddr_storage ss; *result = false; target = kzalloc(share_len + 3, GFP_KERNEL); if (!target) return -ENOMEM; scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share); cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2); rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL); kfree(target); if (rc < 0) return rc; spin_lock(&server->srv_lock); *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss); spin_unlock(&server->srv_lock); cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result); return 0; } int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix) { int rc; kfree(cifs_sb->prepath); cifs_sb->prepath = NULL; if (prefix && *prefix) { cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC); if (IS_ERR(cifs_sb->prepath)) { rc = PTR_ERR(cifs_sb->prepath); cifs_sb->prepath = NULL; return rc; } if (cifs_sb->prepath) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); } cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; return 0; } /* * Handle weird Windows SMB server behaviour. It responds with * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains * non-ASCII unicode symbols. */ int cifs_inval_name_dfs_link_error(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, bool *islink) { struct cifs_ses *ses = tcon->ses; size_t len; char *path; char *ref_path; *islink = false; /* * Fast path - skip check when @full_path doesn't have a prefix path to * look up or tcon is not DFS. */ if (strlen(full_path) < 2 || !cifs_sb || (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) || !is_tcon_dfs(tcon)) return 0; spin_lock(&tcon->tc_lock); if (!tcon->origin_fullpath) { spin_unlock(&tcon->tc_lock); return 0; } spin_unlock(&tcon->tc_lock); /* * Slow path - tcon is DFS and @full_path has prefix path, so attempt * to get a referral to figure out whether it is a DFS link. */ len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1; path = kmalloc(len, GFP_KERNEL); if (!path) return -ENOMEM; scnprintf(path, len, "%s%s", tcon->tree_name, full_path); ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls, cifs_remap(cifs_sb)); kfree(path); if (IS_ERR(ref_path)) { if (PTR_ERR(ref_path) != -EINVAL) return PTR_ERR(ref_path); } else { struct dfs_info3_param *refs = NULL; int num_refs = 0; /* * XXX: we are not using dfs_cache_find() here because we might * end up filling all the DFS cache and thus potentially * removing cached DFS targets that the client would eventually * need during failover. 
*/ ses = CIFS_DFS_ROOT_SES(ses); if (ses->server->ops->get_dfs_refer && !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs, &num_refs, cifs_sb->local_nls, cifs_remap(cifs_sb))) *islink = refs[0].server_type == DFS_TYPE_LINK; free_dfs_info_array(refs, num_refs); kfree(ref_path); } return 0; } #endif int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry) { int timeout = 10; int rc; spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { spin_unlock(&server->srv_lock); return 0; } timeout *= server->nr_targets; spin_unlock(&server->srv_lock); /* * Give demultiplex thread up to 10 seconds for each target available for * reconnect -- should be greater than cifs socket timeout which is 7 * seconds. * * On "soft" mounts we wait once. Hard mounts keep retrying until * process is killed or server comes back on-line. */ do { rc = wait_event_interruptible_timeout(server->response_q, (server->tcpStatus != CifsNeedReconnect), timeout * HZ); if (rc < 0) { cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n", __func__); return -ERESTARTSYS; } /* are we still trying to reconnect? */ spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { spin_unlock(&server->srv_lock); return 0; } spin_unlock(&server->srv_lock); } while (retry); cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__); return -EHOSTDOWN; }
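/*
 * Editor's usage sketch (not part of misc.c): how the deferred-close helpers
 * earlier in this file are meant to be driven by a caller. The caller
 * allocates the cifs_deferred_close itself and hands it over while holding
 * deferred_lock; if an entry for this handle already exists,
 * cifs_add_deferred_close() frees the duplicate, so the pointer must not be
 * reused afterwards. example_defer_close() is an illustrative name, not a
 * function in this file.
 */
static void example_defer_close(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_deferred_close *dclose;

	dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
	if (!dclose)
		return;	/* on ENOMEM the close simply is not deferred */

	spin_lock(&cinode->deferred_lock);
	cifs_add_deferred_close(cfile, dclose);	/* takes ownership of dclose */
	spin_unlock(&cinode->deferred_lock);
}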
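/*
 * Editor's usage sketch (not part of misc.c): the calling pattern that the
 * cifs_alloc_hash()/cifs_free_hash() kernel-doc above describes. The
 * descriptor pointer must start out NULL (or hold a previous allocation, in
 * which case cifs_alloc_hash() is a no-op). example_sha512() is an
 * illustrative name.
 */
static int example_sha512(const u8 *buf, unsigned int len, u8 *out)
{
	struct shash_desc *sdesc = NULL;	/* must start out NULL */
	int rc;

	rc = cifs_alloc_hash("sha512", &sdesc);	/* allocates TFM + descriptor */
	if (rc)
		return rc;

	rc = crypto_shash_digest(sdesc, buf, len, out);	/* one-shot digest */
	cifs_free_hash(&sdesc);		/* frees both; safe to call on NULL */
	return rc;
}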
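/*
 * Editor's note (not part of misc.c): worked examples for the two string
 * helpers above, with the arguments written as C source escapes.
 *
 *	extract_unc_hostname("\\\\srv1\\share\\dir", &h, &len)
 *		-> h points at "srv1\\share\\dir", len == 4 ("srv1")
 *
 *	copy_path_name(dst, "\\dir\\file")
 *		-> copies 9 characters plus the trailing nul, returns 10
 */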
linux-master
fs/smb/client/misc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) International Business Machines Corp., 2000,2005 * * Modified by Steve French ([email protected]) */ #include <linux/fs.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/kstrtox.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <uapi/linux/ethtool.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifsfs.h" #include "fs_context.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif #ifdef CONFIG_CIFS_SMB_DIRECT #include "smbdirect.h" #endif #include "cifs_swn.h" void cifs_dump_mem(char *label, void *data, int length) { pr_debug("%s: dump of %d bytes of data at 0x%p\n", label, length, data); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, data, length, true); } void cifs_dump_detail(void *buf, struct TCP_Server_Info *server) { #ifdef CONFIG_CIFS_DEBUG2 struct smb_hdr *smb = buf; cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n", smb->Command, smb->Status.CifsError, smb->Flags, smb->Flags2, smb->Mid, smb->Pid); cifs_dbg(VFS, "smb buf %p len %u\n", smb, server->ops->calc_smb_size(smb)); #endif /* CONFIG_CIFS_DEBUG2 */ } void cifs_dump_mids(struct TCP_Server_Info *server) { #ifdef CONFIG_CIFS_DEBUG2 struct mid_q_entry *mid_entry; if (server == NULL) return; cifs_dbg(VFS, "Dump pending requests:\n"); spin_lock(&server->mid_lock); list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", mid_entry->mid_state, le16_to_cpu(mid_entry->command), mid_entry->pid, mid_entry->callback_data, mid_entry->mid); #ifdef CONFIG_CIFS_STATS2 cifs_dbg(VFS, "IsLarge: %d buf: %p time rcv: %ld now: %ld\n", mid_entry->large_buf, mid_entry->resp_buf, mid_entry->when_received, jiffies); #endif /* STATS2 */ cifs_dbg(VFS, "IsMult: %d IsEnd: %d\n", mid_entry->multiRsp, mid_entry->multiEnd); if (mid_entry->resp_buf) { cifs_dump_detail(mid_entry->resp_buf, server); cifs_dump_mem("existing buf: ", mid_entry->resp_buf, 62); } } spin_unlock(&server->mid_lock); #endif /* CONFIG_CIFS_DEBUG2 */ } #ifdef CONFIG_PROC_FS static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon) { __u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); seq_printf(m, "%s Mounts: %d ", tcon->tree_name, tcon->tc_count); if (tcon->nativeFileSystem) seq_printf(m, "Type: %s ", tcon->nativeFileSystem); seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d", le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), le32_to_cpu(tcon->fsAttrInfo.Attributes), le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), tcon->status); if (dev_type == FILE_DEVICE_DISK) seq_puts(m, " type: DISK "); else if (dev_type == FILE_DEVICE_CD_ROM) seq_puts(m, " type: CDROM "); else seq_printf(m, " type: %d ", dev_type); seq_printf(m, "Serial Number: 0x%x", tcon->vol_serial_number); if ((tcon->seal) || (tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) seq_puts(m, " encrypted"); if (tcon->nocase) seq_printf(m, " nocase"); if (tcon->unix_ext) seq_printf(m, " POSIX Extensions"); if (tcon->ses->server->ops->dump_share_caps) tcon->ses->server->ops->dump_share_caps(m, tcon); if (tcon->use_witness) seq_puts(m, " Witness"); if (tcon->broken_sparse_sup) seq_puts(m, " nosparse"); if (tcon->need_reconnect) seq_puts(m, "\tDISCONNECTED "); spin_lock(&tcon->tc_lock); if (tcon->origin_fullpath) { 
seq_printf(m, "\n\tDFS origin fullpath: %s", tcon->origin_fullpath); } spin_unlock(&tcon->tc_lock); seq_putc(m, '\n'); } static void cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan) { struct TCP_Server_Info *server = chan->server; seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx" "\n\t\tNumber of credits: %d,%d,%d Dialect 0x%x" "\n\t\tTCP status: %d Instance: %d" "\n\t\tLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d" "\n\t\tIn Send: %d In MaxReq Wait: %d", i+1, server->conn_id, server->credits, server->echo_credits, server->oplock_credits, server->dialect, server->tcpStatus, server->reconnect_instance, server->srv_count, server->sec_mode, in_flight(server), atomic_read(&server->in_send), atomic_read(&server->num_waiters)); #ifdef CONFIG_NET_NS if (server->net) seq_printf(m, " Net namespace: %u ", server->net->ns.inum); #endif /* NET_NS */ } static inline const char *smb_speed_to_str(size_t bps) { size_t mbps = bps / 1000 / 1000; switch (mbps) { case SPEED_10: return "10Mbps"; case SPEED_100: return "100Mbps"; case SPEED_1000: return "1Gbps"; case SPEED_2500: return "2.5Gbps"; case SPEED_5000: return "5Gbps"; case SPEED_10000: return "10Gbps"; case SPEED_14000: return "14Gbps"; case SPEED_20000: return "20Gbps"; case SPEED_25000: return "25Gbps"; case SPEED_40000: return "40Gbps"; case SPEED_50000: return "50Gbps"; case SPEED_56000: return "56Gbps"; case SPEED_100000: return "100Gbps"; case SPEED_200000: return "200Gbps"; case SPEED_400000: return "400Gbps"; case SPEED_800000: return "800Gbps"; default: return "Unknown"; } } static void cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface) { struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr; struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr; seq_printf(m, "\tSpeed: %s\n", smb_speed_to_str(iface->speed)); seq_puts(m, "\t\tCapabilities: "); if (iface->rdma_capable) seq_puts(m, "rdma "); if (iface->rss_capable) seq_puts(m, "rss "); if (!iface->rdma_capable && !iface->rss_capable) seq_puts(m, "None"); seq_putc(m, '\n'); if (iface->sockaddr.ss_family == AF_INET) seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr); else if (iface->sockaddr.ss_family == AF_INET6) seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr); if (!iface->is_active) seq_puts(m, "\t\t[for-cleanup]\n"); } static int cifs_debug_files_proc_show(struct seq_file *m, void *v) { struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsFileInfo *cfile; seq_puts(m, "# Version:1\n"); seq_puts(m, "# Format:\n"); seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>"); #ifdef CONFIG_CIFS_DEBUG2 seq_printf(m, " <filename> <mid>\n"); #else seq_printf(m, " <filename>\n"); #endif /* CIFS_DEBUG2 */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { seq_printf(m, "0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd", tcon->tid, ses->Suid, cfile->fid.persistent_fid, cfile->f_flags, cfile->count, cfile->pid, from_kuid(&init_user_ns, cfile->uid), cfile->dentry); #ifdef CONFIG_CIFS_DEBUG2 seq_printf(m, " %llu\n", cfile->fid.mid); #else seq_printf(m, "\n"); #endif /* CIFS_DEBUG2 */ } spin_unlock(&tcon->open_file_lock); } } } spin_unlock(&cifs_tcp_ses_lock); seq_putc(m, '\n'); return 0; } static int 
cifs_debug_data_proc_show(struct seq_file *m, void *v) { struct mid_q_entry *mid_entry; struct TCP_Server_Info *server; struct TCP_Server_Info *chan_server; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifs_server_iface *iface; int c, i, j; seq_puts(m, "Display Internal CIFS Data Structures for Debugging\n" "---------------------------------------------------\n"); seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); seq_printf(m, "Features:"); #ifdef CONFIG_CIFS_DFS_UPCALL seq_printf(m, " DFS"); #endif #ifdef CONFIG_CIFS_FSCACHE seq_printf(m, ",FSCACHE"); #endif #ifdef CONFIG_CIFS_SMB_DIRECT seq_printf(m, ",SMB_DIRECT"); #endif #ifdef CONFIG_CIFS_STATS2 seq_printf(m, ",STATS2"); #else seq_printf(m, ",STATS"); #endif #ifdef CONFIG_CIFS_DEBUG2 seq_printf(m, ",DEBUG2"); #elif defined(CONFIG_CIFS_DEBUG) seq_printf(m, ",DEBUG"); #endif #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY seq_printf(m, ",ALLOW_INSECURE_LEGACY"); #endif #ifdef CONFIG_CIFS_POSIX seq_printf(m, ",CIFS_POSIX"); #endif #ifdef CONFIG_CIFS_UPCALL seq_printf(m, ",UPCALL(SPNEGO)"); #endif #ifdef CONFIG_CIFS_XATTR seq_printf(m, ",XATTR"); #endif seq_printf(m, ",ACL"); #ifdef CONFIG_CIFS_SWN_UPCALL seq_puts(m, ",WITNESS"); #endif seq_putc(m, '\n'); seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize); seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); seq_printf(m, "\nServers: "); c = 0; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { /* channel info will be printed as a part of sessions below */ if (SERVER_IS_CHAN(server)) continue; c++; seq_printf(m, "\n%d) ConnectionId: 0x%llx ", c, server->conn_id); spin_lock(&server->srv_lock); if (server->hostname) seq_printf(m, "Hostname: %s ", server->hostname); seq_printf(m, "\nClientGUID: %pUL", server->client_guid); spin_unlock(&server->srv_lock); #ifdef CONFIG_CIFS_SMB_DIRECT if (!server->rdma) goto skip_rdma; if (!server->smbd_conn) { seq_printf(m, "\nSMBDirect transport not available"); goto skip_rdma; } seq_printf(m, "\nSMBDirect (in hex) protocol version: %x " "transport status: %x", server->smbd_conn->protocol, server->smbd_conn->transport_status); seq_printf(m, "\nConn receive_credit_max: %x " "send_credit_target: %x max_send_size: %x", server->smbd_conn->receive_credit_max, server->smbd_conn->send_credit_target, server->smbd_conn->max_send_size); seq_printf(m, "\nConn max_fragmented_recv_size: %x " "max_fragmented_send_size: %x max_receive_size:%x", server->smbd_conn->max_fragmented_recv_size, server->smbd_conn->max_fragmented_send_size, server->smbd_conn->max_receive_size); seq_printf(m, "\nConn keep_alive_interval: %x " "max_readwrite_size: %x rdma_readwrite_threshold: %x", server->smbd_conn->keep_alive_interval, server->smbd_conn->max_readwrite_size, server->smbd_conn->rdma_readwrite_threshold); seq_printf(m, "\nDebug count_get_receive_buffer: %x " "count_put_receive_buffer: %x count_send_empty: %x", server->smbd_conn->count_get_receive_buffer, server->smbd_conn->count_put_receive_buffer, server->smbd_conn->count_send_empty); seq_printf(m, "\nRead Queue count_reassembly_queue: %x " "count_enqueue_reassembly_queue: %x " "count_dequeue_reassembly_queue: %x " "fragment_reassembly_remaining: %x " "reassembly_data_length: %x " "reassembly_queue_length: %x", server->smbd_conn->count_reassembly_queue, server->smbd_conn->count_enqueue_reassembly_queue, server->smbd_conn->count_dequeue_reassembly_queue, server->smbd_conn->fragment_reassembly_remaining, server->smbd_conn->reassembly_data_length, 
server->smbd_conn->reassembly_queue_length); seq_printf(m, "\nCurrent Credits send_credits: %x " "receive_credits: %x receive_credit_target: %x", atomic_read(&server->smbd_conn->send_credits), atomic_read(&server->smbd_conn->receive_credits), server->smbd_conn->receive_credit_target); seq_printf(m, "\nPending send_pending: %x ", atomic_read(&server->smbd_conn->send_pending)); seq_printf(m, "\nReceive buffers count_receive_queue: %x " "count_empty_packet_queue: %x", server->smbd_conn->count_receive_queue, server->smbd_conn->count_empty_packet_queue); seq_printf(m, "\nMR responder_resources: %x " "max_frmr_depth: %x mr_type: %x", server->smbd_conn->responder_resources, server->smbd_conn->max_frmr_depth, server->smbd_conn->mr_type); seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x", atomic_read(&server->smbd_conn->mr_ready_count), atomic_read(&server->smbd_conn->mr_used_count)); skip_rdma: #endif seq_printf(m, "\nNumber of credits: %d,%d,%d Dialect 0x%x", server->credits, server->echo_credits, server->oplock_credits, server->dialect); if (server->compress_algorithm == SMB3_COMPRESS_LZNT1) seq_printf(m, " COMPRESS_LZNT1"); else if (server->compress_algorithm == SMB3_COMPRESS_LZ77) seq_printf(m, " COMPRESS_LZ77"); else if (server->compress_algorithm == SMB3_COMPRESS_LZ77_HUFF) seq_printf(m, " COMPRESS_LZ77_HUFF"); if (server->sign) seq_printf(m, " signed"); if (server->posix_ext_supported) seq_printf(m, " posix"); if (server->nosharesock) seq_printf(m, " nosharesock"); if (server->rdma) seq_printf(m, "\nRDMA "); seq_printf(m, "\nTCP status: %d Instance: %d" "\nLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d", server->tcpStatus, server->reconnect_instance, server->srv_count, server->sec_mode, in_flight(server)); #ifdef CONFIG_NET_NS if (server->net) seq_printf(m, " Net namespace: %u ", server->net->ns.inum); #endif /* NET_NS */ seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d", atomic_read(&server->in_send), atomic_read(&server->num_waiters)); if (server->leaf_fullpath) { seq_printf(m, "\nDFS leaf full path: %s", server->leaf_fullpath); } seq_printf(m, "\n\n\tSessions: "); i = 0; list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { i++; if ((ses->serverDomain == NULL) || (ses->serverOS == NULL) || (ses->serverNOS == NULL)) { seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ", i, ses->ip_addr, ses->ses_count, ses->capabilities, ses->ses_status); if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) seq_printf(m, "Guest "); else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) seq_printf(m, "Anonymous "); } else { seq_printf(m, "\n\t%d) Name: %s Domain: %s Uses: %d OS: %s " "\n\tNOS: %s\tCapability: 0x%x" "\n\tSMB session status: %d ", i, ses->ip_addr, ses->serverDomain, ses->ses_count, ses->serverOS, ses->serverNOS, ses->capabilities, ses->ses_status); } seq_printf(m, "\n\tSecurity type: %s ", get_security_type_str(server->ops->select_sectype(server, ses->sectype))); /* dump session id helpful for use with network trace */ seq_printf(m, " SessionId: 0x%llx", ses->Suid); if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) { seq_puts(m, " encrypted"); /* can help in debugging to show encryption type */ if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM) seq_puts(m, "(gcm256)"); } if (ses->sign) seq_puts(m, " signed"); seq_printf(m, "\n\tUser: %d Cred User: %d", from_kuid(&init_user_ns, ses->linux_uid), from_kuid(&init_user_ns, ses->cred_uid)); if (ses->dfs_root_ses) { seq_printf(m, "\n\tDFS root session id: 0x%llx", 
ses->dfs_root_ses->Suid); } spin_lock(&ses->chan_lock); if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0)) seq_puts(m, "\tPrimary channel: DISCONNECTED "); if (CIFS_CHAN_IN_RECONNECT(ses, 0)) seq_puts(m, "\t[RECONNECTING] "); if (ses->chan_count > 1) { seq_printf(m, "\n\n\tExtra Channels: %zu ", ses->chan_count-1); for (j = 1; j < ses->chan_count; j++) { cifs_dump_channel(m, j, &ses->chans[j]); if (CIFS_CHAN_NEEDS_RECONNECT(ses, j)) seq_puts(m, "\tDISCONNECTED "); if (CIFS_CHAN_IN_RECONNECT(ses, j)) seq_puts(m, "\t[RECONNECTING] "); } } spin_unlock(&ses->chan_lock); seq_puts(m, "\n\n\tShares: "); j = 0; seq_printf(m, "\n\t%d) IPC: ", j); if (ses->tcon_ipc) cifs_debug_tcon(m, ses->tcon_ipc); else seq_puts(m, "none\n"); list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++j; seq_printf(m, "\n\t%d) ", j); cifs_debug_tcon(m, tcon); } spin_lock(&ses->iface_lock); if (ses->iface_count) seq_printf(m, "\n\n\tServer interfaces: %zu" "\tLast updated: %lu seconds ago", ses->iface_count, (jiffies - ses->iface_last_update) / HZ); j = 0; list_for_each_entry(iface, &ses->iface_list, iface_head) { seq_printf(m, "\n\t%d)", ++j); cifs_dump_iface(m, iface); if (is_ses_using_iface(ses, iface)) seq_puts(m, "\t\t[CONNECTED]\n"); } spin_unlock(&ses->iface_lock); seq_puts(m, "\n\n\tMIDs: "); spin_lock(&ses->chan_lock); for (j = 0; j < ses->chan_count; j++) { chan_server = ses->chans[j].server; if (!chan_server) continue; if (list_empty(&chan_server->pending_mid_q)) continue; seq_printf(m, "\n\tServer ConnectionId: 0x%llx", chan_server->conn_id); spin_lock(&chan_server->mid_lock); list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) { seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu", mid_entry->mid_state, le16_to_cpu(mid_entry->command), mid_entry->pid, mid_entry->callback_data, mid_entry->mid); } spin_unlock(&chan_server->mid_lock); } spin_unlock(&ses->chan_lock); seq_puts(m, "\n--\n"); } if (i == 0) seq_printf(m, "\n\t\t[NONE]"); } if (c == 0) seq_printf(m, "\n\t[NONE]"); spin_unlock(&cifs_tcp_ses_lock); seq_putc(m, '\n'); cifs_swn_dump(m); /* BB add code to dump additional info such as TCP session info now */ return 0; } static ssize_t cifs_stats_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { bool bv; int rc; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; rc = kstrtobool_from_user(buffer, count, &bv); if (rc == 0) { #ifdef CONFIG_CIFS_STATS2 int i; atomic_set(&total_buf_alloc_count, 0); atomic_set(&total_small_buf_alloc_count, 0); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); spin_lock(&GlobalMid_Lock); GlobalMaxActiveXid = 0; GlobalCurrentXid = 0; spin_unlock(&GlobalMid_Lock); spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { server->max_in_flight = 0; #ifdef CONFIG_CIFS_STATS2 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { atomic_set(&server->num_cmds[i], 0); atomic_set(&server->smb2slowcmd[i], 0); server->time_per_cmd[i] = 0; server->slowest_cmd[i] = 0; server->fastest_cmd[i] = 0; } #endif /* CONFIG_CIFS_STATS2 */ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { atomic_set(&tcon->num_smbs_sent, 0); spin_lock(&tcon->stat_lock); tcon->bytes_read = 0; tcon->bytes_written = 0; spin_unlock(&tcon->stat_lock); if (server->ops->clear_stats) server->ops->clear_stats(tcon); } } } spin_unlock(&cifs_tcp_ses_lock); } else { return rc; } return 
count; } static int cifs_stats_proc_show(struct seq_file *m, void *v) { int i; #ifdef CONFIG_CIFS_STATS2 int j; #endif /* STATS2 */ struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; seq_printf(m, "Resources in use\nCIFS Session: %d\n", sesInfoAllocCount.counter); seq_printf(m, "Share (unique mount targets): %d\n", tconInfoAllocCount.counter); seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n", buf_alloc_count.counter, cifs_min_rcv + tcpSesAllocCount.counter); seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n", small_buf_alloc_count.counter, cifs_min_small); #ifdef CONFIG_CIFS_STATS2 seq_printf(m, "Total Large %d Small %d Allocations\n", atomic_read(&total_buf_alloc_count), atomic_read(&total_small_buf_alloc_count)); #endif /* CONFIG_CIFS_STATS2 */ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count)); seq_printf(m, "\n%d session %d share reconnects\n", tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); seq_printf(m, "Total vfs operations: %d maximum at one time: %d\n", GlobalCurrentXid, GlobalMaxActiveXid); i = 0; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { seq_printf(m, "\nMax requests in flight: %d", server->max_in_flight); #ifdef CONFIG_CIFS_STATS2 seq_puts(m, "\nTotal time spent processing by command. Time "); seq_printf(m, "units are jiffies (%d per second)\n", HZ); seq_puts(m, " SMB3 CMD\tNumber\tTotal Time\tFastest\tSlowest\n"); seq_puts(m, " --------\t------\t----------\t-------\t-------\n"); for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++) seq_printf(m, " %d\t\t%d\t%llu\t\t%u\t%u\n", j, atomic_read(&server->num_cmds[j]), server->time_per_cmd[j], server->fastest_cmd[j], server->slowest_cmd[j]); for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++) if (atomic_read(&server->smb2slowcmd[j])) { spin_lock(&server->srv_lock); seq_printf(m, " %d slow responses from %s for command %d\n", atomic_read(&server->smb2slowcmd[j]), server->hostname, j); spin_unlock(&server->srv_lock); } #endif /* STATS2 */ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { i++; seq_printf(m, "\n%d) %s", i, tcon->tree_name); if (tcon->need_reconnect) seq_puts(m, "\tDISCONNECTED "); seq_printf(m, "\nSMBs: %d", atomic_read(&tcon->num_smbs_sent)); if (server->ops->print_stats) server->ops->print_stats(m, tcon); } } } spin_unlock(&cifs_tcp_ses_lock); seq_putc(m, '\n'); return 0; } static int cifs_stats_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifs_stats_proc_show, NULL); } static const struct proc_ops cifs_stats_proc_ops = { .proc_open = cifs_stats_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = cifs_stats_proc_write, }; #ifdef CONFIG_CIFS_SMB_DIRECT #define PROC_FILE_DEFINE(name) \ static ssize_t name##_write(struct file *file, const char __user *buffer, \ size_t count, loff_t *ppos) \ { \ int rc; \ rc = kstrtoint_from_user(buffer, count, 10, & name); \ if (rc) \ return rc; \ return count; \ } \ static int name##_proc_show(struct seq_file *m, void *v) \ { \ seq_printf(m, "%d\n", name ); \ return 0; \ } \ static int name##_open(struct inode *inode, struct file *file) \ { \ return single_open(file, name##_proc_show, NULL); \ } \ \ static const struct proc_ops cifs_##name##_proc_fops = { \ .proc_open = name##_open, \ .proc_read = seq_read, \ .proc_lseek = seq_lseek, \ .proc_release = single_release, \ .proc_write = name##_write, \ } 
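/*
 * Editor's note (not part of cifs_debug.c): for readability, each
 * PROC_FILE_DEFINE(x) instance below expands to roughly the following --
 * a write hook that parses a base-10 int from userspace straight into the
 * global variable "x", a trivial seq_file reader, an open routine, and a
 * proc_ops table named cifs_x_proc_fops:
 *
 *	static ssize_t x_write(struct file *file, const char __user *buffer,
 *			       size_t count, loff_t *ppos)
 *	{
 *		int rc = kstrtoint_from_user(buffer, count, 10, &x);
 *
 *		return rc ? rc : count;
 *	}
 *
 *	static int x_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%d\n", x);
 *		return 0;
 *	}
 */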
PROC_FILE_DEFINE(rdma_readwrite_threshold); PROC_FILE_DEFINE(smbd_max_frmr_depth); PROC_FILE_DEFINE(smbd_keep_alive_interval); PROC_FILE_DEFINE(smbd_max_receive_size); PROC_FILE_DEFINE(smbd_max_fragmented_recv_size); PROC_FILE_DEFINE(smbd_max_send_size); PROC_FILE_DEFINE(smbd_send_credit_target); PROC_FILE_DEFINE(smbd_receive_credit_max); #endif static struct proc_dir_entry *proc_fs_cifs; static const struct proc_ops cifsFYI_proc_ops; static const struct proc_ops cifs_lookup_cache_proc_ops; static const struct proc_ops traceSMB_proc_ops; static const struct proc_ops cifs_security_flags_proc_ops; static const struct proc_ops cifs_linux_ext_proc_ops; static const struct proc_ops cifs_mount_params_proc_ops; void cifs_proc_init(void) { proc_fs_cifs = proc_mkdir("fs/cifs", NULL); if (proc_fs_cifs == NULL) return; proc_create_single("DebugData", 0, proc_fs_cifs, cifs_debug_data_proc_show); proc_create_single("open_files", 0400, proc_fs_cifs, cifs_debug_files_proc_show); proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops); proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops); proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops); proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs, &cifs_linux_ext_proc_ops); proc_create("SecurityFlags", 0644, proc_fs_cifs, &cifs_security_flags_proc_ops); proc_create("LookupCacheEnabled", 0644, proc_fs_cifs, &cifs_lookup_cache_proc_ops); proc_create("mount_params", 0444, proc_fs_cifs, &cifs_mount_params_proc_ops); #ifdef CONFIG_CIFS_DFS_UPCALL proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops); #endif #ifdef CONFIG_CIFS_SMB_DIRECT proc_create("rdma_readwrite_threshold", 0644, proc_fs_cifs, &cifs_rdma_readwrite_threshold_proc_fops); proc_create("smbd_max_frmr_depth", 0644, proc_fs_cifs, &cifs_smbd_max_frmr_depth_proc_fops); proc_create("smbd_keep_alive_interval", 0644, proc_fs_cifs, &cifs_smbd_keep_alive_interval_proc_fops); proc_create("smbd_max_receive_size", 0644, proc_fs_cifs, &cifs_smbd_max_receive_size_proc_fops); proc_create("smbd_max_fragmented_recv_size", 0644, proc_fs_cifs, &cifs_smbd_max_fragmented_recv_size_proc_fops); proc_create("smbd_max_send_size", 0644, proc_fs_cifs, &cifs_smbd_max_send_size_proc_fops); proc_create("smbd_send_credit_target", 0644, proc_fs_cifs, &cifs_smbd_send_credit_target_proc_fops); proc_create("smbd_receive_credit_max", 0644, proc_fs_cifs, &cifs_smbd_receive_credit_max_proc_fops); #endif } void cifs_proc_clean(void) { if (proc_fs_cifs == NULL) return; remove_proc_entry("DebugData", proc_fs_cifs); remove_proc_entry("open_files", proc_fs_cifs); remove_proc_entry("cifsFYI", proc_fs_cifs); remove_proc_entry("traceSMB", proc_fs_cifs); remove_proc_entry("Stats", proc_fs_cifs); remove_proc_entry("SecurityFlags", proc_fs_cifs); remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); remove_proc_entry("mount_params", proc_fs_cifs); #ifdef CONFIG_CIFS_DFS_UPCALL remove_proc_entry("dfscache", proc_fs_cifs); #endif #ifdef CONFIG_CIFS_SMB_DIRECT remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs); remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs); remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs); remove_proc_entry("smbd_max_receive_size", proc_fs_cifs); remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs); remove_proc_entry("smbd_max_send_size", proc_fs_cifs); remove_proc_entry("smbd_send_credit_target", proc_fs_cifs); remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs); #endif 
remove_proc_entry("fs/cifs", NULL); } static int cifsFYI_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", cifsFYI); return 0; } static int cifsFYI_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifsFYI_proc_show, NULL); } static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char c[2] = { '\0' }; bool bv; int rc; rc = get_user(c[0], buffer); if (rc) return rc; if (kstrtobool(c, &bv) == 0) cifsFYI = bv; else if ((c[0] > '1') && (c[0] <= '9')) cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */ else return -EINVAL; return count; } static const struct proc_ops cifsFYI_proc_ops = { .proc_open = cifsFYI_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = cifsFYI_proc_write, }; static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", linuxExtEnabled); return 0; } static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifs_linux_ext_proc_show, NULL); } static ssize_t cifs_linux_ext_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { int rc; rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled); if (rc) return rc; return count; } static const struct proc_ops cifs_linux_ext_proc_ops = { .proc_open = cifs_linux_ext_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = cifs_linux_ext_proc_write, }; static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", lookupCacheEnabled); return 0; } static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifs_lookup_cache_proc_show, NULL); } static ssize_t cifs_lookup_cache_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { int rc; rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled); if (rc) return rc; return count; } static const struct proc_ops cifs_lookup_cache_proc_ops = { .proc_open = cifs_lookup_cache_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = cifs_lookup_cache_proc_write, }; static int traceSMB_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", traceSMB); return 0; } static int traceSMB_proc_open(struct inode *inode, struct file *file) { return single_open(file, traceSMB_proc_show, NULL); } static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { int rc; rc = kstrtobool_from_user(buffer, count, &traceSMB); if (rc) return rc; return count; } static const struct proc_ops traceSMB_proc_ops = { .proc_open = traceSMB_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = traceSMB_proc_write, }; static int cifs_security_flags_proc_show(struct seq_file *m, void *v) { seq_printf(m, "0x%x\n", global_secflags); return 0; } static int cifs_security_flags_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifs_security_flags_proc_show, NULL); } /* * Ensure that if someone sets a MUST flag, that we disable all other MAY * flags except for the ones corresponding to the given MUST flag. If there are * multiple MUST flags, then try to prefer more secure ones. 
*/ static void cifs_security_flags_handle_must_flags(unsigned int *flags) { unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) *flags = CIFSSEC_MUST_KRB5; else if ((*flags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) *flags = CIFSSEC_MUST_NTLMSSP; else if ((*flags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) *flags = CIFSSEC_MUST_NTLMV2; *flags |= signflags; } static ssize_t cifs_security_flags_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { int rc; unsigned int flags; char flags_string[12]; bool bv; if ((count < 1) || (count > 11)) return -EINVAL; memset(flags_string, 0, 12); if (copy_from_user(flags_string, buffer, count)) return -EFAULT; if (count < 3) { /* single char or single char followed by null */ if (kstrtobool(flags_string, &bv) == 0) { global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF; return count; } else if (!isdigit(flags_string[0])) { cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string); return -EINVAL; } } /* else we have a number */ rc = kstrtouint(flags_string, 0, &flags); if (rc) { cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string); return rc; } cifs_dbg(FYI, "sec flags 0x%x\n", flags); if (flags == 0) { cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string); return -EINVAL; } if (flags & ~CIFSSEC_MASK) { cifs_dbg(VFS, "Unsupported security flags: 0x%x\n", flags & ~CIFSSEC_MASK); return -EINVAL; } cifs_security_flags_handle_must_flags(&flags); /* flags look ok - update the global security flags for cifs module */ global_secflags = flags; if (global_secflags & CIFSSEC_MUST_SIGN) { /* requiring signing implies signing is allowed */ global_secflags |= CIFSSEC_MAY_SIGN; cifs_dbg(FYI, "packet signing now required\n"); } else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) { cifs_dbg(FYI, "packet signing disabled\n"); } /* BB should we turn on MAY flags for other MUST options? */ return count; } static const struct proc_ops cifs_security_flags_proc_ops = { .proc_open = cifs_security_flags_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = cifs_security_flags_proc_write, }; /* To make it easier to debug, can help to show mount params */ static int cifs_mount_params_proc_show(struct seq_file *m, void *v) { const struct fs_parameter_spec *p; const char *type; for (p = smb3_fs_parameters; p->name; p++) { /* cannot use switch with pointers... */ if (!p->type) { if (p->flags == fs_param_neg_with_no) type = "noflag"; else type = "flag"; } else if (p->type == fs_param_is_bool) type = "bool"; else if (p->type == fs_param_is_u32) type = "u32"; else if (p->type == fs_param_is_u64) type = "u64"; else if (p->type == fs_param_is_string) type = "string"; else type = "unknown"; seq_printf(m, "%s:%s\n", p->name, type); } return 0; } static int cifs_mount_params_proc_open(struct inode *inode, struct file *file) { return single_open(file, cifs_mount_params_proc_show, NULL); } static const struct proc_ops cifs_mount_params_proc_ops = { .proc_open = cifs_mount_params_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, /* No need for write for now */ /* .proc_write = cifs_mount_params_proc_write, */ }; #else inline void cifs_proc_init(void) { } inline void cifs_proc_clean(void) { } #endif /* PROC_FS */
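/*
 * Editor's usage sketch (not part of cifs_debug.c): typical root-shell
 * interaction with the tunables wired up above. The SecurityFlags handler
 * accepts either a boolean (0 restores CIFSSEC_DEF, 1 forces CIFSSEC_MAX)
 * or a hex mask that must stay within CIFSSEC_MASK, which is then
 * normalized by cifs_security_flags_handle_must_flags():
 *
 *	echo 1 > /proc/fs/cifs/cifsFYI		# enable FYI debug logging
 *	echo 7 > /proc/fs/cifs/cifsFYI		# levels 2-9 also accepted
 *	cat /proc/fs/cifs/SecurityFlags		# show the current hex mask
 *	echo 0 > /proc/fs/cifs/SecurityFlags	# reset to CIFSSEC_DEF
 */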
linux-master
fs/smb/client/cifs_debug.c
// SPDX-License-Identifier: GPL-2.0 /* * Netlink routines for CIFS * * Copyright (c) 2020 Samuel Cabrero <[email protected]> */ #include <net/genetlink.h> #include <uapi/linux/cifs/cifs_netlink.h> #include "netlink.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifs_swn.h" static const struct nla_policy cifs_genl_policy[CIFS_GENL_ATTR_MAX + 1] = { [CIFS_GENL_ATTR_SWN_REGISTRATION_ID] = { .type = NLA_U32 }, [CIFS_GENL_ATTR_SWN_NET_NAME] = { .type = NLA_STRING }, [CIFS_GENL_ATTR_SWN_SHARE_NAME] = { .type = NLA_STRING }, [CIFS_GENL_ATTR_SWN_IP] = { .len = sizeof(struct sockaddr_storage) }, [CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY] = { .type = NLA_FLAG }, [CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY] = { .type = NLA_FLAG }, [CIFS_GENL_ATTR_SWN_IP_NOTIFY] = { .type = NLA_FLAG }, [CIFS_GENL_ATTR_SWN_KRB_AUTH] = { .type = NLA_FLAG }, [CIFS_GENL_ATTR_SWN_USER_NAME] = { .type = NLA_STRING }, [CIFS_GENL_ATTR_SWN_PASSWORD] = { .type = NLA_STRING }, [CIFS_GENL_ATTR_SWN_DOMAIN_NAME] = { .type = NLA_STRING }, [CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE] = { .type = NLA_U32 }, [CIFS_GENL_ATTR_SWN_RESOURCE_STATE] = { .type = NLA_U32 }, [CIFS_GENL_ATTR_SWN_RESOURCE_NAME] = { .type = NLA_STRING}, }; static const struct genl_ops cifs_genl_ops[] = { { .cmd = CIFS_GENL_CMD_SWN_NOTIFY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = cifs_swn_notify, }, }; static const struct genl_multicast_group cifs_genl_mcgrps[] = { [CIFS_GENL_MCGRP_SWN] = { .name = CIFS_GENL_MCGRP_SWN_NAME }, }; struct genl_family cifs_genl_family = { .name = CIFS_GENL_NAME, .version = CIFS_GENL_VERSION, .hdrsize = 0, .maxattr = CIFS_GENL_ATTR_MAX, .module = THIS_MODULE, .policy = cifs_genl_policy, .ops = cifs_genl_ops, .n_ops = ARRAY_SIZE(cifs_genl_ops), .resv_start_op = CIFS_GENL_CMD_SWN_NOTIFY + 1, .mcgrps = cifs_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(cifs_genl_mcgrps), }; /** * cifs_genl_init - Register generic netlink family * * Return zero if initialized successfully, otherwise non-zero. */ int cifs_genl_init(void) { int ret; ret = genl_register_family(&cifs_genl_family); if (ret < 0) { cifs_dbg(VFS, "%s: failed to register netlink family\n", __func__); return ret; } return 0; } /** * cifs_genl_exit - Unregister generic netlink family */ void cifs_genl_exit(void) { int ret; ret = genl_unregister_family(&cifs_genl_family); if (ret < 0) { cifs_dbg(VFS, "%s: failed to unregister netlink family\n", __func__); } }
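/*
 * Editor's sketch (not part of netlink.c): generic genetlink usage built on
 * this family's constants, for illustration only. In practice the
 * CIFS_GENL_CMD_SWN_NOTIFY command travels the other way -- a userspace
 * witness daemon sends it and cifs_swn_notify() handles it via the .doit
 * hook above. example_swn_emit() is an illustrative name.
 */
static int example_swn_emit(u32 registration_id)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0,
			  CIFS_GENL_CMD_SWN_NOTIFY);
	if (!hdr)
		goto fail;
	if (nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID,
			registration_id))
		goto fail;
	genlmsg_end(skb, hdr);

	/* deliver to everyone subscribed to the SWN multicast group */
	return genlmsg_multicast(&cifs_genl_family, skb, 0,
				 CIFS_GENL_MCGRP_SWN, GFP_KERNEL);
fail:
	if (hdr)
		genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return -EMSGSIZE;
}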
linux-master
fs/smb/client/netlink.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (c) Jeffrey Layton <[email protected]>, 2013 * * The const tables in this file were converted from the following info * provided by Microsoft: * * 3.1.5.3 Mapping UTF-16 Strings to Upper Case: * * https://msdn.microsoft.com/en-us/library/hh877830.aspx * http://www.microsoft.com/en-us/download/details.aspx?displaylang=en&id=10921 * * In particular, the table in "Windows 8 Upper Case Mapping Table.txt" was * post-processed using the winucase_convert.pl script. */ #include <linux/nls.h> wchar_t cifs_toupper(wchar_t in); /* quiet sparse */ static const wchar_t t2_00[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7, 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf, 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x0000, 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x0178, }; static const wchar_t t2_01[256] = { 0x0000, 0x0100, 0x0000, 0x0102, 0x0000, 0x0104, 0x0000, 0x0106, 0x0000, 0x0108, 0x0000, 0x010a, 0x0000, 0x010c, 0x0000, 0x010e, 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0000, 0x0116, 0x0000, 0x0118, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e, 0x0000, 0x0120, 0x0000, 0x0122, 0x0000, 0x0124, 0x0000, 0x0126, 0x0000, 0x0128, 0x0000, 0x012a, 0x0000, 0x012c, 0x0000, 0x012e, 0x0000, 0x0000, 0x0000, 0x0132, 0x0000, 0x0134, 0x0000, 0x0136, 0x0000, 0x0000, 0x0139, 0x0000, 0x013b, 0x0000, 0x013d, 0x0000, 0x013f, 0x0000, 0x0141, 0x0000, 0x0143, 0x0000, 0x0145, 0x0000, 0x0147, 0x0000, 0x0000, 0x014a, 0x0000, 0x014c, 0x0000, 0x014e, 0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x0000, 0x015e, 0x0000, 0x0160, 0x0000, 0x0162, 0x0000, 0x0164, 0x0000, 
0x0166, 0x0000, 0x0168, 0x0000, 0x016a, 0x0000, 0x016c, 0x0000, 0x016e, 0x0000, 0x0170, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176, 0x0000, 0x0000, 0x0179, 0x0000, 0x017b, 0x0000, 0x017d, 0x0000, 0x0243, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0000, 0x0000, 0x0187, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x01f6, 0x0000, 0x0000, 0x0000, 0x0198, 0x023d, 0x0000, 0x0000, 0x0000, 0x0220, 0x0000, 0x0000, 0x01a0, 0x0000, 0x01a2, 0x0000, 0x01a4, 0x0000, 0x0000, 0x01a7, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ac, 0x0000, 0x0000, 0x01af, 0x0000, 0x0000, 0x0000, 0x01b3, 0x0000, 0x01b5, 0x0000, 0x0000, 0x01b8, 0x0000, 0x0000, 0x0000, 0x01bc, 0x0000, 0x01f7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01c4, 0x0000, 0x0000, 0x01c7, 0x0000, 0x0000, 0x01ca, 0x0000, 0x01cd, 0x0000, 0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000, 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x018e, 0x0000, 0x01de, 0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee, 0x0000, 0x0000, 0x0000, 0x01f1, 0x0000, 0x01f4, 0x0000, 0x0000, 0x0000, 0x01f8, 0x0000, 0x01fa, 0x0000, 0x01fc, 0x0000, 0x01fe, }; static const wchar_t t2_02[256] = { 0x0000, 0x0200, 0x0000, 0x0202, 0x0000, 0x0204, 0x0000, 0x0206, 0x0000, 0x0208, 0x0000, 0x020a, 0x0000, 0x020c, 0x0000, 0x020e, 0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0214, 0x0000, 0x0216, 0x0000, 0x0218, 0x0000, 0x021a, 0x0000, 0x021c, 0x0000, 0x021e, 0x0000, 0x0000, 0x0000, 0x0222, 0x0000, 0x0224, 0x0000, 0x0226, 0x0000, 0x0228, 0x0000, 0x022a, 0x0000, 0x022c, 0x0000, 0x022e, 0x0000, 0x0230, 0x0000, 0x0232, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x023b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0241, 0x0000, 0x0000, 0x0000, 0x0000, 0x0246, 0x0000, 0x0248, 0x0000, 0x024a, 0x0000, 0x024c, 0x0000, 0x024e, 0x2c6f, 0x2c6d, 0x0000, 0x0181, 0x0186, 0x0000, 0x0189, 0x018a, 0x0000, 0x018f, 0x0000, 0x0190, 0x0000, 0x0000, 0x0000, 0x0000, 0x0193, 0x0000, 0x0000, 0x0194, 0x0000, 0x0000, 0x0000, 0x0000, 0x0197, 0x0196, 0x0000, 0x2c62, 0x0000, 0x0000, 0x0000, 0x019c, 0x0000, 0x2c6e, 0x019d, 0x0000, 0x0000, 0x019f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c64, 0x0000, 0x0000, 0x01a6, 0x0000, 0x0000, 0x01a9, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ae, 0x0244, 0x01b1, 0x01b2, 0x0245, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01b7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_03[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0370, 0x0000, 0x0372, 0x0000, 0x0000, 0x0000, 0x0376, 0x0000, 0x0000, 0x0000, 0x03fd, 0x03fe, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0386, 0x0388, 0x0389, 0x038a, 0x0000, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f, 0x03a0, 0x03a1, 0x0000, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7, 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x038c, 0x038e, 0x038f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03cf, 0x0000, 0x03d8, 0x0000, 0x03da, 0x0000, 0x03dc, 0x0000, 0x03de, 0x0000, 0x03e0, 0x0000, 0x03e2, 0x0000, 0x03e4, 0x0000, 0x03e6, 0x0000, 0x03e8, 0x0000, 0x03ea, 0x0000, 0x03ec, 0x0000, 0x03ee, 0x0000, 0x0000, 0x03f9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03f7, 0x0000, 0x0000, 0x03fa, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_04[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x040d, 0x040e, 0x040f, 0x0000, 0x0460, 0x0000, 0x0462, 0x0000, 0x0464, 0x0000, 0x0466, 0x0000, 0x0468, 0x0000, 0x046a, 0x0000, 0x046c, 0x0000, 0x046e, 0x0000, 0x0470, 0x0000, 0x0472, 0x0000, 0x0474, 0x0000, 0x0476, 0x0000, 0x0478, 0x0000, 0x047a, 0x0000, 0x047c, 0x0000, 0x047e, 0x0000, 0x0480, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x048a, 0x0000, 0x048c, 0x0000, 0x048e, 0x0000, 0x0490, 0x0000, 0x0492, 0x0000, 0x0494, 0x0000, 0x0496, 0x0000, 0x0498, 0x0000, 0x049a, 0x0000, 0x049c, 0x0000, 0x049e, 0x0000, 0x04a0, 0x0000, 0x04a2, 0x0000, 0x04a4, 0x0000, 0x04a6, 0x0000, 0x04a8, 0x0000, 0x04aa, 0x0000, 0x04ac, 0x0000, 0x04ae, 0x0000, 0x04b0, 0x0000, 0x04b2, 0x0000, 0x04b4, 0x0000, 0x04b6, 0x0000, 0x04b8, 0x0000, 0x04ba, 0x0000, 0x04bc, 0x0000, 0x04be, 0x0000, 0x0000, 0x04c1, 0x0000, 0x04c3, 0x0000, 0x04c5, 0x0000, 0x04c7, 0x0000, 0x04c9, 0x0000, 0x04cb, 0x0000, 0x04cd, 0x04c0, 
0x0000, 0x04d0, 0x0000, 0x04d2, 0x0000, 0x04d4, 0x0000, 0x04d6, 0x0000, 0x04d8, 0x0000, 0x04da, 0x0000, 0x04dc, 0x0000, 0x04de, 0x0000, 0x04e0, 0x0000, 0x04e2, 0x0000, 0x04e4, 0x0000, 0x04e6, 0x0000, 0x04e8, 0x0000, 0x04ea, 0x0000, 0x04ec, 0x0000, 0x04ee, 0x0000, 0x04f0, 0x0000, 0x04f2, 0x0000, 0x04f4, 0x0000, 0x04f6, 0x0000, 0x04f8, 0x0000, 0x04fa, 0x0000, 0x04fc, 0x0000, 0x04fe, }; static const wchar_t t2_05[256] = { 0x0000, 0x0500, 0x0000, 0x0502, 0x0000, 0x0504, 0x0000, 0x0506, 0x0000, 0x0508, 0x0000, 0x050a, 0x0000, 0x050c, 0x0000, 0x050e, 0x0000, 0x0510, 0x0000, 0x0512, 0x0000, 0x0514, 0x0000, 0x0516, 0x0000, 0x0518, 0x0000, 0x051a, 0x0000, 0x051c, 0x0000, 0x051e, 0x0000, 0x0520, 0x0000, 0x0522, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537, 0x0538, 0x0539, 0x053a, 0x053b, 0x053c, 0x053d, 0x053e, 0x053f, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547, 0x0548, 0x0549, 0x054a, 0x054b, 0x054c, 0x054d, 0x054e, 0x054f, 0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_1d[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xa77d, 0x0000, 0x0000, 0x0000, 0x2c63, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_1e[256] = { 0x0000, 0x1e00, 0x0000, 0x1e02, 0x0000, 0x1e04, 0x0000, 0x1e06, 0x0000, 0x1e08, 0x0000, 0x1e0a, 0x0000, 0x1e0c, 0x0000, 0x1e0e, 0x0000, 0x1e10, 0x0000, 0x1e12, 0x0000, 0x1e14, 0x0000, 0x1e16, 0x0000, 0x1e18, 0x0000, 0x1e1a, 0x0000, 0x1e1c, 0x0000, 0x1e1e, 0x0000, 0x1e20, 0x0000, 0x1e22, 0x0000, 0x1e24, 0x0000, 0x1e26, 0x0000, 0x1e28, 0x0000, 0x1e2a, 0x0000, 0x1e2c, 0x0000, 0x1e2e, 0x0000, 0x1e30, 0x0000, 0x1e32, 0x0000, 0x1e34, 0x0000, 0x1e36, 0x0000, 0x1e38, 0x0000, 0x1e3a, 0x0000, 0x1e3c, 0x0000, 0x1e3e, 0x0000, 0x1e40, 0x0000, 0x1e42, 0x0000, 0x1e44, 0x0000, 0x1e46, 0x0000, 0x1e48, 0x0000, 0x1e4a, 0x0000, 0x1e4c, 0x0000, 0x1e4e, 0x0000, 0x1e50, 0x0000, 0x1e52, 0x0000, 0x1e54, 0x0000, 0x1e56, 0x0000, 0x1e58, 0x0000, 0x1e5a, 0x0000, 0x1e5c, 0x0000, 0x1e5e, 0x0000, 0x1e60, 0x0000, 0x1e62, 0x0000, 0x1e64, 0x0000, 0x1e66, 0x0000, 0x1e68, 0x0000, 0x1e6a, 0x0000, 0x1e6c, 0x0000, 0x1e6e, 0x0000, 0x1e70, 0x0000, 0x1e72, 0x0000, 0x1e74, 0x0000, 0x1e76, 0x0000, 0x1e78, 0x0000, 0x1e7a, 0x0000, 0x1e7c, 0x0000, 0x1e7e, 0x0000, 0x1e80, 0x0000, 0x1e82, 0x0000, 0x1e84, 0x0000, 0x1e86, 0x0000, 0x1e88, 0x0000, 0x1e8a, 0x0000, 0x1e8c, 0x0000, 0x1e8e, 0x0000, 0x1e90, 0x0000, 0x1e92, 0x0000, 0x1e94, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1ea0, 0x0000, 0x1ea2, 0x0000, 0x1ea4, 0x0000, 0x1ea6, 0x0000, 0x1ea8, 0x0000, 0x1eaa, 0x0000, 0x1eac, 0x0000, 0x1eae, 0x0000, 0x1eb0, 0x0000, 0x1eb2, 0x0000, 0x1eb4, 0x0000, 0x1eb6, 0x0000, 0x1eb8, 0x0000, 0x1eba, 0x0000, 0x1ebc, 0x0000, 0x1ebe, 0x0000, 0x1ec0, 0x0000, 0x1ec2, 0x0000, 0x1ec4, 0x0000, 0x1ec6, 0x0000, 0x1ec8, 0x0000, 0x1eca, 0x0000, 0x1ecc, 0x0000, 0x1ece, 0x0000, 0x1ed0, 0x0000, 0x1ed2, 0x0000, 0x1ed4, 0x0000, 0x1ed6, 0x0000, 0x1ed8, 0x0000, 0x1eda, 0x0000, 0x1edc, 0x0000, 0x1ede, 0x0000, 0x1ee0, 0x0000, 0x1ee2, 0x0000, 0x1ee4, 0x0000, 0x1ee6, 0x0000, 0x1ee8, 0x0000, 0x1eea, 0x0000, 0x1eec, 0x0000, 0x1eee, 0x0000, 0x1ef0, 0x0000, 0x1ef2, 0x0000, 0x1ef4, 0x0000, 0x1ef6, 0x0000, 0x1ef8, 0x0000, 0x1efa, 0x0000, 0x1efc, 0x0000, 0x1efe, }; static const wchar_t t2_1f[256] = { 0x1f08, 0x1f09, 0x1f0a, 0x1f0b, 0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f18, 0x1f19, 0x1f1a, 0x1f1b, 0x1f1c, 0x1f1d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f28, 0x1f29, 0x1f2a, 0x1f2b, 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f38, 0x1f39, 0x1f3a, 0x1f3b, 
0x1f3c, 0x1f3d, 0x1f3e, 0x1f3f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f48, 0x1f49, 0x1f4a, 0x1f4b, 0x1f4c, 0x1f4d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f59, 0x0000, 0x1f5b, 0x0000, 0x1f5d, 0x0000, 0x1f5f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f68, 0x1f69, 0x1f6a, 0x1f6b, 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fba, 0x1fbb, 0x1fc8, 0x1fc9, 0x1fca, 0x1fcb, 0x1fda, 0x1fdb, 0x1ff8, 0x1ff9, 0x1fea, 0x1feb, 0x1ffa, 0x1ffb, 0x0000, 0x0000, 0x1f88, 0x1f89, 0x1f8a, 0x1f8b, 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1f98, 0x1f99, 0x1f9a, 0x1f9b, 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fa8, 0x1fa9, 0x1faa, 0x1fab, 0x1fac, 0x1fad, 0x1fae, 0x1faf, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fb8, 0x1fb9, 0x0000, 0x1fbc, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fcc, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fd8, 0x1fd9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1fe8, 0x1fe9, 0x0000, 0x0000, 0x0000, 0x1fec, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1ffc, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_21[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2132, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216a, 0x216b, 0x216c, 0x216d, 0x216e, 0x216f, 0x0000, 0x0000, 0x0000, 0x0000, 0x2183, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_24[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x24b6, 0x24b7, 0x24b8, 0x24b9, 0x24ba, 0x24bb, 0x24bc, 0x24bd, 0x24be, 0x24bf, 0x24c0, 0x24c1, 0x24c2, 0x24c3, 0x24c4, 0x24c5, 0x24c6, 0x24c7, 0x24c8, 0x24c9, 0x24ca, 0x24cb, 0x24cc, 0x24cd, 0x24ce, 0x24cf, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_2c[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c00, 0x2c01, 0x2c02, 0x2c03, 0x2c04, 0x2c05, 0x2c06, 0x2c07, 0x2c08, 0x2c09, 0x2c0a, 0x2c0b, 0x2c0c, 0x2c0d, 0x2c0e, 0x2c0f, 0x2c10, 0x2c11, 0x2c12, 0x2c13, 0x2c14, 0x2c15, 0x2c16, 0x2c17, 0x2c18, 0x2c19, 0x2c1a, 0x2c1b, 0x2c1c, 0x2c1d, 0x2c1e, 0x2c1f, 0x2c20, 0x2c21, 0x2c22, 0x2c23, 0x2c24, 0x2c25, 0x2c26, 0x2c27, 0x2c28, 0x2c29, 0x2c2a, 0x2c2b, 0x2c2c, 0x2c2d, 0x2c2e, 0x0000, 0x0000, 0x2c60, 0x0000, 0x0000, 0x0000, 0x023a, 0x023e, 0x0000, 0x2c67, 0x0000, 0x2c69, 0x0000, 0x2c6b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c72, 0x0000, 0x0000, 0x2c75, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c80, 0x0000, 0x2c82, 0x0000, 0x2c84, 0x0000, 0x2c86, 0x0000, 0x2c88, 0x0000, 0x2c8a, 0x0000, 0x2c8c, 0x0000, 0x2c8e, 0x0000, 0x2c90, 0x0000, 0x2c92, 0x0000, 0x2c94, 0x0000, 0x2c96, 0x0000, 0x2c98, 0x0000, 0x2c9a, 0x0000, 
0x2c9c, 0x0000, 0x2c9e, 0x0000, 0x2ca0, 0x0000, 0x2ca2, 0x0000, 0x2ca4, 0x0000, 0x2ca6, 0x0000, 0x2ca8, 0x0000, 0x2caa, 0x0000, 0x2cac, 0x0000, 0x2cae, 0x0000, 0x2cb0, 0x0000, 0x2cb2, 0x0000, 0x2cb4, 0x0000, 0x2cb6, 0x0000, 0x2cb8, 0x0000, 0x2cba, 0x0000, 0x2cbc, 0x0000, 0x2cbe, 0x0000, 0x2cc0, 0x0000, 0x2cc2, 0x0000, 0x2cc4, 0x0000, 0x2cc6, 0x0000, 0x2cc8, 0x0000, 0x2cca, 0x0000, 0x2ccc, 0x0000, 0x2cce, 0x0000, 0x2cd0, 0x0000, 0x2cd2, 0x0000, 0x2cd4, 0x0000, 0x2cd6, 0x0000, 0x2cd8, 0x0000, 0x2cda, 0x0000, 0x2cdc, 0x0000, 0x2cde, 0x0000, 0x2ce0, 0x0000, 0x2ce2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_2d[256] = { 0x10a0, 0x10a1, 0x10a2, 0x10a3, 0x10a4, 0x10a5, 0x10a6, 0x10a7, 0x10a8, 0x10a9, 0x10aa, 0x10ab, 0x10ac, 0x10ad, 0x10ae, 0x10af, 0x10b0, 0x10b1, 0x10b2, 0x10b3, 0x10b4, 0x10b5, 0x10b6, 0x10b7, 0x10b8, 0x10b9, 0x10ba, 0x10bb, 0x10bc, 0x10bd, 0x10be, 0x10bf, 0x10c0, 0x10c1, 0x10c2, 0x10c3, 0x10c4, 0x10c5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_a6[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xa640, 0x0000, 0xa642, 0x0000, 0xa644, 0x0000, 0xa646, 0x0000, 0xa648, 0x0000, 0xa64a, 0x0000, 0xa64c, 0x0000, 
0xa64e, 0x0000, 0xa650, 0x0000, 0xa652, 0x0000, 0xa654, 0x0000, 0xa656, 0x0000, 0xa658, 0x0000, 0xa65a, 0x0000, 0xa65c, 0x0000, 0xa65e, 0x0000, 0x0000, 0x0000, 0xa662, 0x0000, 0xa664, 0x0000, 0xa666, 0x0000, 0xa668, 0x0000, 0xa66a, 0x0000, 0xa66c, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xa680, 0x0000, 0xa682, 0x0000, 0xa684, 0x0000, 0xa686, 0x0000, 0xa688, 0x0000, 0xa68a, 0x0000, 0xa68c, 0x0000, 0xa68e, 0x0000, 0xa690, 0x0000, 0xa692, 0x0000, 0xa694, 0x0000, 0xa696, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_a7[256] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xa722, 0x0000, 0xa724, 0x0000, 0xa726, 0x0000, 0xa728, 0x0000, 0xa72a, 0x0000, 0xa72c, 0x0000, 0xa72e, 0x0000, 0x0000, 0x0000, 0xa732, 0x0000, 0xa734, 0x0000, 0xa736, 0x0000, 0xa738, 0x0000, 0xa73a, 0x0000, 0xa73c, 0x0000, 0xa73e, 0x0000, 0xa740, 0x0000, 0xa742, 0x0000, 0xa744, 0x0000, 0xa746, 0x0000, 0xa748, 0x0000, 0xa74a, 0x0000, 0xa74c, 0x0000, 0xa74e, 0x0000, 0xa750, 0x0000, 0xa752, 0x0000, 0xa754, 0x0000, 0xa756, 0x0000, 0xa758, 0x0000, 0xa75a, 0x0000, 0xa75c, 0x0000, 0xa75e, 0x0000, 0xa760, 0x0000, 0xa762, 0x0000, 0xa764, 0x0000, 0xa766, 0x0000, 0xa768, 0x0000, 0xa76a, 0x0000, 0xa76c, 0x0000, 0xa76e, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xa779, 0x0000, 0xa77b, 0x0000, 0x0000, 0xa77e, 0x0000, 0xa780, 0x0000, 0xa782, 0x0000, 0xa784, 0x0000, 0xa786, 0x0000, 0x0000, 0x0000, 0x0000, 0xa78b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t t2_ff[256] = { 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff21, 0xff22, 0xff23, 0xff24, 0xff25, 0xff26, 0xff27, 0xff28, 0xff29, 0xff2a, 0xff2b, 0xff2c, 0xff2d, 0xff2e, 0xff2f, 0xff30, 0xff31, 0xff32, 0xff33, 0xff34, 0xff35, 0xff36, 0xff37, 0xff38, 0xff39, 0xff3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const wchar_t *const toplevel[256] = { t2_00, t2_01, t2_02, t2_03, t2_04, t2_05, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, t2_1d, t2_1e, t2_1f, NULL, t2_21, NULL, NULL, t2_24, NULL, NULL, NULL, NULL, NULL, NULL, NULL, t2_2c, t2_2d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, t2_a6, t2_a7, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, t2_ff, }; /** * cifs_toupper - convert a wchar_t from lower to uppercase * @in: character to convert from lower to uppercase * * This function consults the static tables above to convert a wchar_t from * lower to uppercase. In the event that there is no mapping, the original * "in" character is returned. */ wchar_t cifs_toupper(wchar_t in) { unsigned char idx; const wchar_t *tbl; wchar_t out; /* grab upper byte */ idx = (in & 0xff00) >> 8; /* find pointer to 2nd layer table */ tbl = toplevel[idx]; if (!tbl) return in; /* grab lower byte */ idx = in & 0xff; /* look up character in table */ out = tbl[idx]; if (out) return out; return in; }
linux-master
fs/smb/client/winucase.c
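The cifs_toupper() helper above implements a sparse two-level lookup: the high byte of the wide character selects one of 256 second-level tables (most of which are NULL), the low byte indexes into the selected table, and a zero entry means "no uppercase mapping, return the input unchanged". Below is a minimal, self-contained userspace sketch of the same technique; the demo table contents (plain ASCII 'a'-'z') are illustrative stand-ins, not the kernel's Unicode data.

/* demo_toupper.c - hedged userspace sketch of the two-level table lookup
 * used by cifs_toupper(); table contents here are illustrative only. */
#include <stdio.h>
#include <wchar.h>

static wchar_t demo_t2_00[256];            /* second-level table for high byte 0x00 */
static const wchar_t *demo_toplevel[256];  /* top-level table, indexed by high byte */

static wchar_t demo_toupper(wchar_t in)
{
	const wchar_t *tbl = demo_toplevel[(in >> 8) & 0xff]; /* grab upper byte */

	if (!tbl)                   /* no second-level table for this 256-char block */
		return in;
	if (tbl[in & 0xff])         /* nonzero entry is the uppercase mapping */
		return tbl[in & 0xff];
	return in;                  /* zero entry: no mapping */
}

int main(void)
{
	wchar_t c;

	for (c = L'a'; c <= L'z'; c++)          /* populate the demo mapping */
		demo_t2_00[c] = c - L'a' + L'A';
	demo_toplevel[0x00] = demo_t2_00;

	printf("%c -> %c\n", 'q', (char)demo_toupper(L'q')); /* q -> Q */
	printf("%c -> %c\n", '!', (char)demo_toupper(L'!')); /* ! -> ! (no mapping) */
	return 0;
}

The sparse layout trades one extra indirection for memory: only the 256-entry blocks that actually contain mappings are materialized, which is why the real toplevel[] array above is mostly NULL.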
// SPDX-License-Identifier: GPL-2.0-or-later /* * Contains mounting routines used for handling traversal via SMB junctions. * * Copyright (c) 2007 Igor Mammedov * Copyright (C) International Business Machines Corp., 2008 * Author(s): Igor Mammedov ([email protected]) * Steve French ([email protected]) * Copyright (c) 2023 Paulo Alcantara <[email protected]> */ #include <linux/dcache.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/vfs.h> #include <linux/fs.h> #include <linux/inet.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifsfs.h" #include "cifs_debug.h" #include "fs_context.h" static LIST_HEAD(cifs_automount_list); static void cifs_expire_automounts(struct work_struct *work); static DECLARE_DELAYED_WORK(cifs_automount_task, cifs_expire_automounts); static int cifs_mountpoint_expiry_timeout = 500 * HZ; static void cifs_expire_automounts(struct work_struct *work) { struct list_head *list = &cifs_automount_list; mark_mounts_for_expiry(list); if (!list_empty(list)) schedule_delayed_work(&cifs_automount_task, cifs_mountpoint_expiry_timeout); } void cifs_release_automount_timer(void) { if (WARN_ON(!list_empty(&cifs_automount_list))) return; cancel_delayed_work_sync(&cifs_automount_task); } /** * cifs_build_devname - build a devicename from a UNC and optional prepath * @nodename: pointer to UNC string * @prepath: pointer to prefixpath (or NULL if there isn't one) * * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer * big enough to hold the result. Copy the UNC from the nodename, and * concatenate the prepath onto the end of it if there is one. * * Returns a pointer to the built string, or an ERR_PTR. Caller is responsible * for freeing the returned string. */ char * cifs_build_devname(char *nodename, const char *prepath) { size_t pplen; size_t unclen; char *dev; char *pos; /* skip over any preceding delimiters */ nodename += strspn(nodename, "\\"); if (!*nodename) return ERR_PTR(-EINVAL); /* get length of UNC and set pos to last char */ unclen = strlen(nodename); pos = nodename + unclen - 1; /* trim off any trailing delimiters */ while (*pos == '\\') { --pos; --unclen; } /* allocate a buffer: * +2 for preceding "//" * +1 for delimiter between UNC and prepath * +1 for trailing NULL */ pplen = prepath ?
strlen(prepath) : 0; dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); pos = dev; /* add the initial "//" */ *pos = '/'; ++pos; *pos = '/'; ++pos; /* copy in the UNC portion from referral */ memcpy(pos, nodename, unclen); pos += unclen; /* copy the prefixpath remainder (if there is one) */ if (pplen) { *pos = '/'; ++pos; memcpy(pos, prepath, pplen); pos += pplen; } /* NULL terminator */ *pos = '\0'; convert_delimiter(dev, '/'); return dev; } /* Return full path out of a dentry set for automount */ static char *automount_fullpath(struct dentry *dentry, void *page) { struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); size_t len; char *s; spin_lock(&tcon->tc_lock); if (!tcon->origin_fullpath) { spin_unlock(&tcon->tc_lock); return build_path_from_dentry_optional_prefix(dentry, page, true); } spin_unlock(&tcon->tc_lock); s = dentry_path_raw(dentry, page, PATH_MAX); if (IS_ERR(s)) return s; /* for root, we want "" */ if (!s[1]) s++; spin_lock(&tcon->tc_lock); len = strlen(tcon->origin_fullpath); if (s < (char *)page + len) { spin_unlock(&tcon->tc_lock); return ERR_PTR(-ENAMETOOLONG); } s -= len; memcpy(s, tcon->origin_fullpath, len); spin_unlock(&tcon->tc_lock); convert_delimiter(s, '/'); return s; } /* * Create a vfsmount that we can automount */ static struct vfsmount *cifs_do_automount(struct path *path) { int rc; struct dentry *mntpt = path->dentry; struct fs_context *fc; void *page = NULL; struct smb3_fs_context *ctx, *cur_ctx; struct smb3_fs_context tmp; char *full_path; struct vfsmount *mnt; if (IS_ROOT(mntpt)) return ERR_PTR(-ESTALE); cur_ctx = CIFS_SB(mntpt->d_sb)->ctx; fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt); if (IS_ERR(fc)) return ERR_CAST(fc); ctx = smb3_fc2context(fc); page = alloc_dentry_path(); full_path = automount_fullpath(mntpt, page); if (IS_ERR(full_path)) { mnt = ERR_CAST(full_path); goto out; } tmp = *cur_ctx; tmp.source = NULL; tmp.leaf_fullpath = NULL; tmp.UNC = tmp.prepath = NULL; tmp.dfs_root_ses = NULL; rc = smb3_fs_context_dup(ctx, &tmp); if (rc) { mnt = ERR_PTR(rc); goto out; } rc = smb3_parse_devname(full_path, ctx); if (rc) { mnt = ERR_PTR(rc); goto out; } ctx->source = smb3_fs_context_fullpath(ctx, '/'); if (IS_ERR(ctx->source)) { mnt = ERR_CAST(ctx->source); ctx->source = NULL; goto out; } cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s\n", __func__, ctx->source, ctx->UNC, ctx->prepath); mnt = fc_mount(fc); out: put_fs_context(fc); free_dentry_path(page); return mnt; } /* * Attempt to automount the referral */ struct vfsmount *cifs_d_automount(struct path *path) { struct vfsmount *newmnt; cifs_dbg(FYI, "%s: %pd\n", __func__, path->dentry); newmnt = cifs_do_automount(path); if (IS_ERR(newmnt)) { cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__); return newmnt; } mntget(newmnt); /* prevent immediate expiration */ mnt_set_expiry(newmnt, &cifs_automount_list); schedule_delayed_work(&cifs_automount_task, cifs_mountpoint_expiry_timeout); cifs_dbg(FYI, "leaving %s [ok]\n" , __func__); return newmnt; } const struct inode_operations cifs_namespace_inode_operations = { };
linux-master
fs/smb/client/namespace.c
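cifs_build_devname() above normalizes a DFS referral target into a mountable device name in four steps: skip leading '\' delimiters in the UNC, trim trailing ones, assemble "//" + UNC + optional "/" + prepath into a single freshly allocated buffer, and convert every remaining '\' to '/'. The following is a hedged userspace sketch of that string assembly, with the kernel helpers (kmalloc, convert_delimiter, ERR_PTR) replaced by plain libc equivalents and NULL-on-error handling.

/* demo_devname.c - hedged userspace sketch of the devname assembly done by
 * cifs_build_devname(); error handling is simplified to returning NULL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_devname(const char *nodename, const char *prepath)
{
	size_t pplen, unclen;
	char *dev, *pos;

	nodename += strspn(nodename, "\\");          /* skip leading delimiters */
	if (!*nodename)
		return NULL;

	unclen = strlen(nodename);
	while (unclen && nodename[unclen - 1] == '\\')
		unclen--;                            /* trim trailing delimiters */

	pplen = prepath ? strlen(prepath) : 0;
	/* "//" + UNC + optional "/" + prepath + NUL, sized in one allocation */
	dev = malloc(2 + unclen + 1 + pplen + 1);
	if (!dev)
		return NULL;

	pos = dev;
	*pos++ = '/';
	*pos++ = '/';
	memcpy(pos, nodename, unclen);
	pos += unclen;
	if (pplen) {
		*pos++ = '/';
		memcpy(pos, prepath, pplen);
		pos += pplen;
	}
	*pos = '\0';

	for (pos = dev; *pos; pos++)                 /* convert_delimiter(dev, '/') */
		if (*pos == '\\')
			*pos = '/';
	return dev;
}

int main(void)
{
	char *dev = build_devname("\\\\server\\share\\", "sub/dir");

	if (dev) {
		printf("%s\n", dev); /* prints //server/share/sub/dir */
		free(dev);
	}
	return 0;
}

Sizing the buffer up front (2 + unclen + 1 + pplen + 1) mirrors the kernel code's single-allocation approach, which avoids any reallocation while copying.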
// SPDX-License-Identifier: LGPL-2.1 /* * * SMB/CIFS session setup handling routines * * Copyright (c) International Business Machines Corp., 2006, 2009 * Author(s): Steve French ([email protected]) * */ #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "nterr.h" #include <linux/utsname.h> #include <linux/slab.h> #include <linux/version.h> #include "cifsfs.h" #include "cifs_spnego.h" #include "smb2proto.h" #include "fs_context.h" static int cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, struct cifs_server_iface *iface); bool is_server_using_iface(struct TCP_Server_Info *server, struct cifs_server_iface *iface) { struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr; struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr; struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr; if (server->dstaddr.ss_family != iface->sockaddr.ss_family) return false; if (server->dstaddr.ss_family == AF_INET) { if (s4->sin_addr.s_addr != i4->sin_addr.s_addr) return false; } else if (server->dstaddr.ss_family == AF_INET6) { if (memcmp(&s6->sin6_addr, &i6->sin6_addr, sizeof(i6->sin6_addr)) != 0) return false; } else { /* unknown family.. */ return false; } return true; } bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface) { int i; spin_lock(&ses->chan_lock); for (i = 0; i < ses->chan_count; i++) { if (ses->chans[i].iface == iface) { spin_unlock(&ses->chan_lock); return true; } } spin_unlock(&ses->chan_lock); return false; } /* channel helper functions. assumed that chan_lock is held by caller. */ unsigned int cifs_ses_get_chan_index(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int i; for (i = 0; i < ses->chan_count; i++) { if (ses->chans[i].server == server) return i; } /* If we didn't find the channel, it is likely a bug */ if (server) cifs_dbg(VFS, "unable to get chan index for server: 0x%llx", server->conn_id); WARN_ON(1); return 0; } void cifs_chan_set_in_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); ses->chans[chan_index].in_reconnect = true; } void cifs_chan_clear_in_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); ses->chans[chan_index].in_reconnect = false; } bool cifs_chan_in_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); return CIFS_CHAN_IN_RECONNECT(ses, chan_index); } void cifs_chan_set_need_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); set_bit(chan_index, &ses->chans_need_reconnect); cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n", chan_index, ses->chans_need_reconnect); } void cifs_chan_clear_need_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); clear_bit(chan_index, &ses->chans_need_reconnect); cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n", chan_index, ses->chans_need_reconnect); } bool cifs_chan_needs_reconnect(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index); } bool 
cifs_chan_is_iface_active(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index = cifs_ses_get_chan_index(ses, server); return ses->chans[chan_index].iface && ses->chans[chan_index].iface->is_active; } /* returns number of channels added */ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) { struct TCP_Server_Info *server = ses->server; int old_chan_count, new_chan_count; int left; int rc = 0; int tries = 0; struct cifs_server_iface *iface = NULL, *niface = NULL; spin_lock(&ses->chan_lock); new_chan_count = old_chan_count = ses->chan_count; left = ses->chan_max - ses->chan_count; if (left <= 0) { spin_unlock(&ses->chan_lock); cifs_dbg(FYI, "ses already at max_channels (%zu), nothing to open\n", ses->chan_max); return 0; } if (server->dialect < SMB30_PROT_ID) { spin_unlock(&ses->chan_lock); cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n"); return 0; } if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { ses->chan_max = 1; spin_unlock(&ses->chan_lock); cifs_server_dbg(VFS, "no multichannel support\n"); return 0; } spin_unlock(&ses->chan_lock); /* * Keep connecting to same, fastest, iface for all channels as * long as its RSS. Try next fastest one if not RSS or channel * creation fails. */ spin_lock(&ses->iface_lock); iface = list_first_entry(&ses->iface_list, struct cifs_server_iface, iface_head); spin_unlock(&ses->iface_lock); while (left > 0) { tries++; if (tries > 3*ses->chan_max) { cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n", left); break; } spin_lock(&ses->iface_lock); if (!ses->iface_count) { spin_unlock(&ses->iface_lock); break; } list_for_each_entry_safe_from(iface, niface, &ses->iface_list, iface_head) { /* skip ifaces that are unusable */ if (!iface->is_active || (is_ses_using_iface(ses, iface) && !iface->rss_capable)) { continue; } /* take ref before unlock */ kref_get(&iface->refcount); spin_unlock(&ses->iface_lock); rc = cifs_ses_add_channel(cifs_sb, ses, iface); spin_lock(&ses->iface_lock); if (rc) { cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n", &iface->sockaddr, rc); kref_put(&iface->refcount, release_iface); continue; } cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n", &iface->sockaddr); break; } spin_unlock(&ses->iface_lock); left--; new_chan_count++; } return new_chan_count - old_chan_count; } /* * update the iface for the channel if necessary. * will return 0 when iface is updated, 1 if removed, 2 otherwise * Must be called with chan_lock held. 
*/ int cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) { unsigned int chan_index; struct cifs_server_iface *iface = NULL; struct cifs_server_iface *old_iface = NULL; int rc = 0; spin_lock(&ses->chan_lock); chan_index = cifs_ses_get_chan_index(ses, server); if (!chan_index) { spin_unlock(&ses->chan_lock); return 0; } if (ses->chans[chan_index].iface) { old_iface = ses->chans[chan_index].iface; if (old_iface->is_active) { spin_unlock(&ses->chan_lock); return 1; } } spin_unlock(&ses->chan_lock); spin_lock(&ses->iface_lock); /* then look for a new one */ list_for_each_entry(iface, &ses->iface_list, iface_head) { if (!iface->is_active || (is_ses_using_iface(ses, iface) && !iface->rss_capable)) { continue; } kref_get(&iface->refcount); break; } if (list_entry_is_head(iface, &ses->iface_list, iface_head)) { rc = 1; iface = NULL; cifs_dbg(FYI, "unable to find a suitable iface\n"); } /* now drop the ref to the current iface */ if (old_iface && iface) { cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n", &old_iface->sockaddr, &iface->sockaddr); kref_put(&old_iface->refcount, release_iface); } else if (old_iface) { cifs_dbg(FYI, "releasing ref to iface: %pIS\n", &old_iface->sockaddr); kref_put(&old_iface->refcount, release_iface); } else { WARN_ON(!iface); cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr); } spin_unlock(&ses->iface_lock); spin_lock(&ses->chan_lock); chan_index = cifs_ses_get_chan_index(ses, server); ses->chans[chan_index].iface = iface; /* No iface is found. if secondary chan, drop connection */ if (!iface && SERVER_IS_CHAN(server)) ses->chans[chan_index].server = NULL; spin_unlock(&ses->chan_lock); if (!iface && SERVER_IS_CHAN(server)) cifs_put_tcp_session(server, false); return rc; } /* * If server is a channel of ses, return the corresponding enclosing * cifs_chan otherwise return NULL. */ struct cifs_chan * cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server) { int i; spin_lock(&ses->chan_lock); for (i = 0; i < ses->chan_count; i++) { if (ses->chans[i].server == server) { spin_unlock(&ses->chan_lock); return &ses->chans[i]; } } spin_unlock(&ses->chan_lock); return NULL; } static int cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, struct cifs_server_iface *iface) { struct TCP_Server_Info *chan_server; struct cifs_chan *chan; struct smb3_fs_context *ctx; static const char unc_fmt[] = "\\%s\\foo"; struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr; struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr; size_t len; int rc; unsigned int xid = get_xid(); if (iface->sockaddr.ss_family == AF_INET) cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n", ses, iface->speed, iface->rdma_capable ? "yes" : "no", &ipv4->sin_addr); else cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI6)\n", ses, iface->speed, iface->rdma_capable ? "yes" : "no", &ipv6->sin6_addr); /* * Setup a ctx with mostly the same info as the existing * session and overwrite it with the requested iface data. * * We need to setup at least the fields used for negprot and * sesssetup. * * We only need the ctx here, so we can reuse memory from * the session and server without caring about memory * management. */ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto out_free_xid; } /* Always make new connection for now (TODO?) 
*/ ctx->nosharesock = true; /* Auth */ ctx->domainauto = ses->domainAuto; ctx->domainname = ses->domainName; /* no hostname for extra channels */ ctx->server_hostname = ""; ctx->username = ses->user_name; ctx->password = ses->password; ctx->sectype = ses->sectype; ctx->sign = ses->sign; /* UNC and paths */ /* XXX: Use ses->server->hostname? */ len = sizeof(unc_fmt) + SERVER_NAME_LEN_WITH_NULL; ctx->UNC = kzalloc(len, GFP_KERNEL); if (!ctx->UNC) { rc = -ENOMEM; goto out_free_ctx; } scnprintf(ctx->UNC, len, unc_fmt, ses->ip_addr); ctx->prepath = ""; /* Reuse same version as master connection */ ctx->vals = ses->server->vals; ctx->ops = ses->server->ops; ctx->noblocksnd = ses->server->noblocksnd; ctx->noautotune = ses->server->noautotune; ctx->sockopt_tcp_nodelay = ses->server->tcp_nodelay; ctx->echo_interval = ses->server->echo_interval / HZ; ctx->max_credits = ses->server->max_credits; /* * This will be used for encoding/decoding user/domain/pw * during sess setup auth. */ ctx->local_nls = cifs_sb->local_nls; /* Use RDMA if possible */ ctx->rdma = iface->rdma_capable; memcpy(&ctx->dstaddr, &iface->sockaddr, sizeof(ctx->dstaddr)); /* reuse master con client guid */ memcpy(&ctx->client_guid, ses->server->client_guid, sizeof(ctx->client_guid)); ctx->use_client_guid = true; chan_server = cifs_get_tcp_session(ctx, ses->server); spin_lock(&ses->chan_lock); chan = &ses->chans[ses->chan_count]; chan->server = chan_server; if (IS_ERR(chan->server)) { rc = PTR_ERR(chan->server); chan->server = NULL; spin_unlock(&ses->chan_lock); goto out; } chan->iface = iface; ses->chan_count++; atomic_set(&ses->chan_seq, 0); /* Mark this channel as needing connect/setup */ cifs_chan_set_need_reconnect(ses, chan->server); spin_unlock(&ses->chan_lock); mutex_lock(&ses->session_mutex); /* * We need to allocate the server crypto now as we will need * to sign packets before we generate the channel signing key * (we sign with the session key) */ rc = smb311_crypto_shash_allocate(chan->server); if (rc) { cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__); mutex_unlock(&ses->session_mutex); goto out; } rc = cifs_negotiate_protocol(xid, ses, chan->server); if (!rc) rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls); mutex_unlock(&ses->session_mutex); out: if (rc && chan->server) { /* * we should avoid race with these delayed works before we * remove this channel */ cancel_delayed_work_sync(&chan->server->echo); cancel_delayed_work_sync(&chan->server->reconnect); spin_lock(&ses->chan_lock); /* we rely on all bits beyond chan_count to be clear */ cifs_chan_clear_need_reconnect(ses, chan->server); ses->chan_count--; /* * chan_count should never reach 0 as at least the primary * channel is always allocated */ WARN_ON(ses->chan_count < 1); spin_unlock(&ses->chan_lock); cifs_put_tcp_session(chan->server, 0); } kfree(ctx->UNC); out_free_ctx: kfree(ctx); out_free_xid: free_xid(xid); return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, struct TCP_Server_Info *server, SESSION_SETUP_ANDX *pSMB) { __u32 capabilities = 0; /* init fields common to all four types of SessSetup */ /* Note that offsets for first seven fields in req struct are same */ /* in CIFS Specs so does not matter which of 3 forms of struct */ /* that we use in next few lines */ /* Note that header is initialized to zero in header_assemble */ pSMB->req.AndXCommand = 0xFF; pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32, CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, USHRT_MAX)); pSMB->req.MaxMpxCount = 
cpu_to_le16(server->maxReq); pSMB->req.VcNumber = cpu_to_le16(1); /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ /* BB verify whether signing required on neg or just on auth frame (and NTLM case) */ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; if (server->sign) pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_UNICODE) { pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE; capabilities |= CAP_UNICODE; } if (ses->capabilities & CAP_STATUS32) { pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS; capabilities |= CAP_STATUS32; } if (ses->capabilities & CAP_DFS) { pSMB->req.hdr.Flags2 |= SMBFLG2_DFS; capabilities |= CAP_DFS; } if (ses->capabilities & CAP_UNIX) capabilities |= CAP_UNIX; return capabilities; } static void unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* Copy OS version */ bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32, nls_cp); bcc_ptr += 2 * bytes_ret; bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release, 32, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* trailing null */ bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, 32, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* trailing null */ *pbcc_area = bcc_ptr; } static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* copy domain */ if (ses->domainName == NULL) { /* Sending null domain better than using a bogus domain name (as we did briefly in 2.6.18) since server will use its default */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; bytes_ret = 0; } else bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null terminator */ *pbcc_area = bcc_ptr; } static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* BB FIXME add check that strings total less than 335 or will need to send them as arrays */ /* copy user */ if (ses->user_name == NULL) { /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; } else { bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ unicode_domain_string(&bcc_ptr, ses, nls_cp); unicode_oslm_strings(&bcc_ptr, nls_cp); *pbcc_area = bcc_ptr; } static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int len; /* copy user */ /* BB what about null user mounts - check that we do this BB */ /* copy user */ if (ses->user_name != NULL) { len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); if (WARN_ON_ONCE(len < 0)) len = CIFS_MAX_USERNAME_LEN - 1; bcc_ptr += len; } /* else null user mount */ *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ /* copy domain */ if (ses->domainName != NULL) { len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); if (WARN_ON_ONCE(len < 0)) len = CIFS_MAX_DOMAINNAME_LEN - 1; bcc_ptr += len; } /* else we will send a null domain name so the server will default to its own domain */ *bcc_ptr = 0; bcc_ptr++; /* BB check for overflow here */ strcpy(bcc_ptr, "Linux version "); bcc_ptr += strlen("Linux version "); strcpy(bcc_ptr, init_utsname()->release); 
bcc_ptr += strlen(init_utsname()->release) + 1; strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; *pbcc_area = bcc_ptr; } static void decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, const struct nls_table *nls_cp) { int len; char *data = *pbcc_area; cifs_dbg(FYI, "bleft %d\n", bleft); kfree(ses->serverOS); ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp); cifs_dbg(FYI, "serverOS=%s\n", ses->serverOS); len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; data += len; bleft -= len; if (bleft <= 0) return; kfree(ses->serverNOS); ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp); cifs_dbg(FYI, "serverNOS=%s\n", ses->serverNOS); len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; data += len; bleft -= len; if (bleft <= 0) return; kfree(ses->serverDomain); ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp); cifs_dbg(FYI, "serverDomain=%s\n", ses->serverDomain); return; } static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, struct cifs_ses *ses, const struct nls_table *nls_cp) { int len; char *bcc_ptr = *pbcc_area; cifs_dbg(FYI, "decode sessetup ascii. bleft %d\n", bleft); len = strnlen(bcc_ptr, bleft); if (len >= bleft) return; kfree(ses->serverOS); ses->serverOS = kmalloc(len + 1, GFP_KERNEL); if (ses->serverOS) { memcpy(ses->serverOS, bcc_ptr, len); ses->serverOS[len] = 0; if (strncmp(ses->serverOS, "OS/2", 4) == 0) cifs_dbg(FYI, "OS/2 server\n"); } bcc_ptr += len + 1; bleft -= len + 1; len = strnlen(bcc_ptr, bleft); if (len >= bleft) return; kfree(ses->serverNOS); ses->serverNOS = kmalloc(len + 1, GFP_KERNEL); if (ses->serverNOS) { memcpy(ses->serverNOS, bcc_ptr, len); ses->serverNOS[len] = 0; } bcc_ptr += len + 1; bleft -= len + 1; len = strnlen(bcc_ptr, bleft); if (len > bleft) return; /* No domain field in LANMAN case. 
Domain is returned by old servers in the SMB negprot response */ /* BB For newer servers which do not support Unicode, but thus do return domain here we could add parsing for it later, but it is not very important */ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses) { unsigned int tioffset; /* challenge message target info area */ unsigned int tilen; /* challenge message target info area length */ CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; __u32 server_flags; if (blob_len < sizeof(CHALLENGE_MESSAGE)) { cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len); return -EINVAL; } if (memcmp(pblob->Signature, "NTLMSSP", 8)) { cifs_dbg(VFS, "blob signature incorrect %s\n", pblob->Signature); return -EINVAL; } if (pblob->MessageType != NtLmChallenge) { cifs_dbg(VFS, "Incorrect message type %d\n", pblob->MessageType); return -EINVAL; } server_flags = le32_to_cpu(pblob->NegotiateFlags); cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__, ses->ntlmssp->client_flags, server_flags); if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) && (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) { cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n", __func__); return -EINVAL; } if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) { cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__); return -EINVAL; } if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) { cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n", __func__); return -EOPNOTSUPP; } if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH)) pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n", __func__); ses->ntlmssp->server_flags = server_flags; memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE); /* In particular we can examine sign flags */ /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); tilen = le16_to_cpu(pblob->TargetInfoArray.Length); if (tioffset > blob_len || tioffset + tilen > blob_len) { cifs_dbg(VFS, "tioffset + tilen too high %u + %u\n", tioffset, tilen); return -EINVAL; } if (tilen) { kfree_sensitive(ses->auth_key.response); ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Challenge target info alloc failure\n"); return -ENOMEM; } ses->auth_key.len = tilen; } return 0; } static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size) { int sz = base_size + ses->auth_key.len - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2; if (ses->domainName) sz += sizeof(__le16) * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); else sz += sizeof(__le16); if (ses->user_name) sz += sizeof(__le16) * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); else sz += sizeof(__le16); if (ses->workstation_name[0]) sz += sizeof(__le16) * strnlen(ses->workstation_name, ntlmssp_workstation_name_size(ses)); else sz += sizeof(__le16); return sz; } static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf, char *str_value, int 
str_length, unsigned char *pstart, unsigned char **pcur, const struct nls_table *nls_cp) { unsigned char *tmp = pstart; int len; if (!pbuf) return; if (!pcur) pcur = &tmp; if (!str_value) { pbuf->BufferOffset = cpu_to_le32(*pcur - pstart); pbuf->Length = 0; pbuf->MaximumLength = 0; *pcur += sizeof(__le16); } else { len = cifs_strtoUTF16((__le16 *)*pcur, str_value, str_length, nls_cp); len *= sizeof(__le16); pbuf->BufferOffset = cpu_to_le32(*pcur - pstart); pbuf->Length = cpu_to_le16(len); pbuf->MaximumLength = cpu_to_le16(len); *pcur += len; } } /* BB Move to ntlmssp.c eventually */ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen, struct cifs_ses *ses, struct TCP_Server_Info *server, const struct nls_table *nls_cp) { int rc = 0; NEGOTIATE_MESSAGE *sec_blob; __u32 flags; unsigned char *tmp; int len; len = size_of_ntlmssp_blob(ses, sizeof(NEGOTIATE_MESSAGE)); *pbuffer = kmalloc(len, GFP_KERNEL); if (!*pbuffer) { rc = -ENOMEM; cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); *buflen = 0; goto setup_ntlm_neg_ret; } sec_blob = (NEGOTIATE_MESSAGE *)*pbuffer; memset(*pbuffer, 0, sizeof(NEGOTIATE_MESSAGE)); memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmNegotiate; /* BB is NTLMV2 session security format easier to use here? */ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN; if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE); ses->ntlmssp->client_flags = flags; sec_blob->NegotiateFlags = cpu_to_le32(flags); /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */ cifs_security_buffer_from_str(&sec_blob->DomainName, NULL, CIFS_MAX_DOMAINNAME_LEN, *pbuffer, &tmp, nls_cp); cifs_security_buffer_from_str(&sec_blob->WorkstationName, NULL, CIFS_MAX_WORKSTATION_LEN, *pbuffer, &tmp, nls_cp); *buflen = tmp - *pbuffer; setup_ntlm_neg_ret: return rc; } /* * Build ntlmssp blob with additional fields, such as version, * supported by modern servers. For safety limit to SMB3 or later * See notes in MS-NLMP Section 2.2.2.1 e.g. */ int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer, u16 *buflen, struct cifs_ses *ses, struct TCP_Server_Info *server, const struct nls_table *nls_cp) { int rc = 0; struct negotiate_message *sec_blob; __u32 flags; unsigned char *tmp; int len; len = size_of_ntlmssp_blob(ses, sizeof(struct negotiate_message)); *pbuffer = kmalloc(len, GFP_KERNEL); if (!*pbuffer) { rc = -ENOMEM; cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); *buflen = 0; goto setup_ntlm_smb3_neg_ret; } sec_blob = (struct negotiate_message *)*pbuffer; memset(*pbuffer, 0, sizeof(struct negotiate_message)); memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmNegotiate; /* BB is NTLMV2 session security format easier to use here? 
*/ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_VERSION; if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR; sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL; sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD); sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3; tmp = *pbuffer + sizeof(struct negotiate_message); ses->ntlmssp->client_flags = flags; sec_blob->NegotiateFlags = cpu_to_le32(flags); /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */ cifs_security_buffer_from_str(&sec_blob->DomainName, NULL, CIFS_MAX_DOMAINNAME_LEN, *pbuffer, &tmp, nls_cp); cifs_security_buffer_from_str(&sec_blob->WorkstationName, NULL, CIFS_MAX_WORKSTATION_LEN, *pbuffer, &tmp, nls_cp); *buflen = tmp - *pbuffer; setup_ntlm_smb3_neg_ret: return rc; } /* See MS-NLMP 2.2.1.3 */ int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, struct cifs_ses *ses, struct TCP_Server_Info *server, const struct nls_table *nls_cp) { int rc; AUTHENTICATE_MESSAGE *sec_blob; __u32 flags; unsigned char *tmp; int len; rc = setup_ntlmv2_rsp(ses, nls_cp); if (rc) { cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); *buflen = 0; goto setup_ntlmv2_ret; } len = size_of_ntlmssp_blob(ses, sizeof(AUTHENTICATE_MESSAGE)); *pbuffer = kmalloc(len, GFP_KERNEL); if (!*pbuffer) { rc = -ENOMEM; cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); *buflen = 0; goto setup_ntlmv2_ret; } sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; /* we only send version information in ntlmssp negotiate, so do not set this flag */ flags = flags & ~NTLMSSP_NEGOTIATE_VERSION; tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->LmChallengeResponse.BufferOffset = cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE)); sec_blob->LmChallengeResponse.Length = 0; sec_blob->LmChallengeResponse.MaximumLength = 0; sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - *pbuffer); if (ses->user_name != NULL) { memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, ses->auth_key.len - CIFS_SESS_KEY_SIZE); tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; sec_blob->NtChallengeResponse.Length = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); sec_blob->NtChallengeResponse.MaximumLength = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); } else { /* * don't send an NT Response for anonymous access */ sec_blob->NtChallengeResponse.Length = 0; sec_blob->NtChallengeResponse.MaximumLength = 0; } cifs_security_buffer_from_str(&sec_blob->DomainName, ses->domainName, CIFS_MAX_DOMAINNAME_LEN, *pbuffer, &tmp, nls_cp); cifs_security_buffer_from_str(&sec_blob->UserName, ses->user_name, CIFS_MAX_USERNAME_LEN, *pbuffer, &tmp, nls_cp); cifs_security_buffer_from_str(&sec_blob->WorkstationName, ses->workstation_name, ntlmssp_workstation_name_size(ses), *pbuffer, &tmp, nls_cp); if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) && 
!calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); sec_blob->SessionKey.MaximumLength = cpu_to_le16(CIFS_CPHTXT_SIZE); tmp += CIFS_CPHTXT_SIZE; } else { sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = 0; sec_blob->SessionKey.MaximumLength = 0; } *buflen = tmp - *pbuffer; setup_ntlmv2_ret: return rc; } enum securityEnum cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (server->negflavor) { case CIFS_NEGFLAVOR_EXTENDED: switch (requested) { case Kerberos: case RawNTLMSSP: return requested; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; default: return Unspecified; } case CIFS_NEGFLAVOR_UNENCAP: switch (requested) { case NTLMv2: return requested; case Unspecified: if (global_secflags & CIFSSEC_MAY_NTLMV2) return NTLMv2; break; default: break; } fallthrough; default: return Unspecified; } } struct sess_data { unsigned int xid; struct cifs_ses *ses; struct TCP_Server_Info *server; struct nls_table *nls_cp; void (*func)(struct sess_data *); int result; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[3]; }; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int sess_alloc_buffer(struct sess_data *sess_data, int wct) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb_hdr *smb_buf; rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, (void **)&smb_buf); if (rc) return rc; sess_data->iov[0].iov_base = (char *)smb_buf; sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. */ sess_data->buf0_type = CIFS_SMALL_BUFFER; /* 2000 big enough to fit max user, domain, NOS name etc. */ sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL); if (!sess_data->iov[2].iov_base) { rc = -ENOMEM; goto out_free_smb_buf; } return 0; out_free_smb_buf: cifs_small_buf_release(smb_buf); sess_data->iov[0].iov_base = NULL; sess_data->iov[0].iov_len = 0; sess_data->buf0_type = CIFS_NO_BUFFER; return rc; } static void sess_free_buffer(struct sess_data *sess_data) { struct kvec *iov = sess_data->iov; /* * Zero the session data before freeing, as it might contain sensitive info (keys, etc). * Note that iov[1] is already freed by caller. 
*/ if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) memzero_explicit(iov[0].iov_base, iov[0].iov_len); free_rsp_buf(sess_data->buf0_type, iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; kfree_sensitive(iov[2].iov_base); } static int sess_establish_session(struct sess_data *sess_data) { struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; cifs_server_lock(server); if (!server->session_estab) { if (server->sign) { server->session_key.response = kmemdup(ses->auth_key.response, ses->auth_key.len, GFP_KERNEL); if (!server->session_key.response) { cifs_server_unlock(server); return -ENOMEM; } server->session_key.len = ses->auth_key.len; } server->sequence_number = 0x2; server->session_estab = true; } cifs_server_unlock(server); cifs_dbg(FYI, "CIFS session established successfully\n"); return 0; } static int sess_sendreceive(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base; __u16 count; struct kvec rsp_iov = { NULL, 0 }; count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; be32_add_cpu(&smb_buf->smb_buf_length, count); put_bcc(count, smb_buf); rc = SendReceive2(sess_data->xid, sess_data->ses, sess_data->iov, 3 /* num_iovecs */, &sess_data->buf0_type, CIFS_LOG_ERROR, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; } static void sess_auth_ntlmv2(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; __u32 capabilities; __u16 bytes_remaining; /* old style NTLM sessionsetup */ /* wct = 13 */ rc = sess_alloc_buffer(sess_data, 13); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, server, pSMB); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); /* LM2 password would be here if we supported it */ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; if (ses->user_name != NULL) { /* calculate nlmv2 response and session key */ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); if (rc) { cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); goto out; } memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, ses->auth_key.len - CIFS_SESS_KEY_SIZE); bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; /* set case sensitive password length after tilen may get * assigned, tilen is 0 otherwise. */ pSMB->req_no_secext.CaseSensitivePasswordLength = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); } else { pSMB->req_no_secext.CaseSensitivePasswordLength = 0; } if (ses->capabilities & CAP_UNICODE) { if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) { *bcc_ptr = 0; bcc_ptr++; } unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } else { ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 3) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? 
*/ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; } #ifdef CONFIG_CIFS_UPCALL static void sess_auth_kerberos(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; __u32 capabilities; __u16 bytes_remaining; struct key *spnego_key = NULL; struct cifs_spnego_msg *msg; u16 blob_len; /* extended security */ /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, server, pSMB); spnego_key = cifs_get_spnego_key(ses, server); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data[0]; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)\n", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } kfree_sensitive(ses->auth_key.response); ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities = cpu_to_le32(capabilities); sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { *bcc_ptr = 0; bcc_ptr++; } unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp); } else { /* BB: is this right? */ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_put_spnego_key; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? 
*/ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_put_spnego_key; } bcc_ptr += blob_len; bytes_remaining -= blob_len; /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; } #endif /* ! CONFIG_CIFS_UPCALL */ /* * The required kvec buffers have to be allocated before calling this * function. */ static int _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data) { SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; __u32 capabilities; char *bcc_ptr; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; capabilities = cifs_ssetup_hdr(ses, server, pSMB); if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); return -ENOSYS; } pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities |= cpu_to_le32(capabilities); bcc_ptr = sess_data->iov[2].iov_base; /* unicode strings must be word aligned */ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { *bcc_ptr = 0; bcc_ptr++; } unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; return 0; } static void sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data); static void sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; __u16 bytes_remaining; char *bcc_ptr; unsigned char *ntlmsspblob = NULL; u16 blob_len; cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n"); /* * if memory allocation is successful, caller of this function * frees it. 
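* (ses->ntlmssp is released with kfree_sensitive() on the cleanup paths of the negotiate and authenticate phases below.)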
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out; } ses->ntlmssp->sesskey_per_smbsess = false; /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; /* Build security blob before we assemble the request */ rc = build_ntlmssp_negotiate_blob(&ntlmsspblob, &blob_len, ses, server, sess_data->nls_cp); if (rc) goto out_free_ntlmsspblob; sess_data->iov[1].iov_len = blob_len; sess_data->iov[1].iov_base = ntlmsspblob; pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); rc = _sess_auth_rawntlmssp_assemble_req(sess_data); if (rc) goto out_free_ntlmsspblob; rc = sess_sendreceive(sess_data); pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && smb_buf->Status.CifsError == cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED)) rc = 0; if (rc) goto out_free_ntlmsspblob; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_free_ntlmsspblob; } ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_free_ntlmsspblob; } rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses); out_free_ntlmsspblob: kfree_sensitive(ntlmsspblob); out: sess_free_buffer(sess_data); if (!rc) { sess_data->func = sess_auth_rawntlmssp_authenticate; return; } /* Else error. Cleanup */ kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->func = NULL; sess_data->result = rc; } static void sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; __u16 bytes_remaining; char *bcc_ptr; unsigned char *ntlmsspblob = NULL; u16 blob_len; cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n"); /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; /* Build security blob before we assemble the request */ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)pSMB; rc = build_ntlmssp_auth_blob(&ntlmsspblob, &blob_len, ses, server, sess_data->nls_cp); if (rc) goto out_free_ntlmsspblob; sess_data->iov[1].iov_len = blob_len; sess_data->iov[1].iov_base = ntlmsspblob; pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); /* * Make sure that we tell the server that we are using * the uid that it just gave us back on the response * (challenge) */ smb_buf->Uid = ses->Suid; rc = _sess_auth_rawntlmssp_assemble_req(sess_data); if (rc) goto out_free_ntlmsspblob; rc = sess_sendreceive(sess_data); if (rc) goto out_free_ntlmsspblob; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_free_ntlmsspblob; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? 
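* The server may hand back a different UID on this final leg; the comparison below resyncs ses->Suid when that happens.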
*/ if (ses->Suid != smb_buf->Uid) { ses->Suid = smb_buf->Uid; cifs_dbg(FYI, "UID changed! new UID = %llu\n", ses->Suid); } bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_free_ntlmsspblob; } bcc_ptr += blob_len; bytes_remaining -= blob_len; /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } out_free_ntlmsspblob: kfree_sensitive(ntlmsspblob); out: sess_free_buffer(sess_data); if (!rc) rc = sess_establish_session(sess_data); /* Cleanup */ kfree_sensitive(ses->auth_key.response); ses->auth_key.response = NULL; kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->func = NULL; sess_data->result = rc; } static int select_sec(struct sess_data *sess_data) { int type; struct cifs_ses *ses = sess_data->ses; struct TCP_Server_Info *server = sess_data->server; type = cifs_select_sectype(server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); return -EINVAL; } switch (type) { case NTLMv2: sess_data->func = sess_auth_ntlmv2; break; case Kerberos: #ifdef CONFIG_CIFS_UPCALL sess_data->func = sess_auth_kerberos; break; #else cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); return -ENOSYS; #endif /* CONFIG_CIFS_UPCALL */ case RawNTLMSSP: sess_data->func = sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", type); return -ENOSYS; } return 0; } int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server, const struct nls_table *nls_cp) { int rc = 0; struct sess_data *sess_data; if (ses == NULL) { WARN(1, "%s: ses == NULL!", __func__); return -EINVAL; } sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; sess_data->xid = xid; sess_data->ses = ses; sess_data->server = server; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; rc = select_sec(sess_data); if (rc) goto out; while (sess_data->func) sess_data->func(sess_data); /* Store result before we free sess_data */ rc = sess_data->result; out: kfree_sensitive(sess_data); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
linux-master
fs/smb/client/sess.c
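CIFS_SessSetup() above drives the multi-leg exchange as a small state machine: each handler performs one round trip and then either installs the next handler in sess_data->func or clears it and records a result. The following is a minimal user-space sketch of that driver pattern only; the step names and payload are illustrative, not CIFS symbols.

#include <stdio.h>

struct step_data {
	void (*func)(struct step_data *);	/* next handler; NULL terminates */
	int round;				/* demo payload, not a CIFS field */
	int result;
};

static void step_finish(struct step_data *d)
{
	printf("round %d: finish\n", d->round++);
	d->result = 0;			/* record the outcome */
	d->func = NULL;			/* stop the driver loop */
}

static void step_negotiate(struct step_data *d)
{
	printf("round %d: negotiate\n", d->round++);
	d->func = step_finish;		/* chain to the next leg */
}

int main(void)
{
	struct step_data d = { .func = step_negotiate, .round = 1 };

	while (d.func)			/* same driver shape as CIFS_SessSetup() */
		d.func(&d);
	return d.result;
}

The virtue of the shape is that each leg stays a straight-line function with its own cleanup, while the caller needs only the one loop regardless of how many round trips the chosen authentication flavor takes.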
/* Block- or MTD-based romfs * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) * * Derived from: ROMFS file system, Linux implementation * * Copyright © 1997-1999 Janos Farkas <[email protected]> * * Using parts of the minix filesystem * Copyright © 1991, 1992 Linus Torvalds * * and parts of the affs filesystem additionally * Copyright © 1993 Ray Burr * Copyright © 1996 Hans-Joachim Widmaier * * Changes * Changed for 2.1.19 modules * Jan 1997 Initial release * Jun 1997 2.1.43+ changes * Proper page locking in read_folio * Changed to work with 2.1.45+ fs * Jul 1997 Fixed follow_link * 2.1.47 * lookup shouldn't return -ENOENT * from Horst von Brand: * fail on wrong checksum * double unlock_super was possible * correct namelen for statfs * spotted by Bill Hawes: * readlink shouldn't iput() * Jun 1998 2.1.106 from Avery Pennarun: glibc scandir() * exposed a problem in readdir * 2.1.107 code-freeze spellchecker run * Aug 1998 2.1.118+ VFS changes * Sep 1998 2.1.122 another VFS change (follow_link) * Apr 1999 2.2.7 no more EBADF checking in * lookup/readdir, use ERR_PTR * Jun 1999 2.3.6 d_alloc_root use changed * 2.3.9 clean up usage of ENOENT/negative * dentries in lookup * clean up page flags setting * (error, uptodate, locking) in * in read_folio * use init_special_inode for * fifos/sockets (and streamline) in * read_inode, fix _ops table order * Aug 1999 2.3.16 __initfunc() => __init change * Oct 1999 2.3.24 page->owner hack obsoleted * Nov 1999 2.3.27 2.3.25+ page->offset => index change * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/fs_context.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/statfs.h> #include <linux/mtd/super.h> #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/uaccess.h> #include <linux/major.h> #include "internal.h" static struct kmem_cache *romfs_inode_cachep; static const umode_t romfs_modemap[8] = { 0, /* hard link */ S_IFDIR | 0644, /* directory */ S_IFREG | 0644, /* regular file */ S_IFLNK | 0777, /* symlink */ S_IFBLK | 0600, /* blockdev */ S_IFCHR | 0600, /* chardev */ S_IFSOCK | 0644, /* socket */ S_IFIFO | 0644 /* FIFO */ }; static const unsigned char romfs_dtype_table[] = { DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_SOCK, DT_FIFO }; static struct inode *romfs_iget(struct super_block *sb, unsigned long pos); /* * read a page worth of data from the image */ static int romfs_read_folio(struct file *file, struct folio *folio) { struct page *page = &folio->page; struct inode *inode = page->mapping->host; loff_t offset, size; unsigned long fillsize, pos; void *buf; int ret; buf = kmap(page); if (!buf) return -ENOMEM; /* 32 bit warning -- but not for us :) */ offset = page_offset(page); size = i_size_read(inode); fillsize = 0; ret = 0; if (offset < size) { size -= offset; fillsize = size > PAGE_SIZE ? 
PAGE_SIZE : size; pos = ROMFS_I(inode)->i_dataoffset + offset; ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize); if (ret < 0) { SetPageError(page); fillsize = 0; ret = -EIO; } } if (fillsize < PAGE_SIZE) memset(buf + fillsize, 0, PAGE_SIZE - fillsize); if (ret == 0) SetPageUptodate(page); flush_dcache_page(page); kunmap(page); unlock_page(page); return ret; } static const struct address_space_operations romfs_aops = { .read_folio = romfs_read_folio }; /* * read the entries from a directory */ static int romfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *i = file_inode(file); struct romfs_inode ri; unsigned long offset, maxoff; int j, ino, nextfh; char fsname[ROMFS_MAXFN]; /* XXX dynamic? */ int ret; maxoff = romfs_maxsize(i->i_sb); offset = ctx->pos; if (!offset) { offset = i->i_ino & ROMFH_MASK; ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto out; offset = be32_to_cpu(ri.spec) & ROMFH_MASK; } /* Not really failsafe, but we are read-only... */ for (;;) { if (!offset || offset >= maxoff) { offset = maxoff; ctx->pos = offset; goto out; } ctx->pos = offset; /* Fetch inode info */ ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto out; j = romfs_dev_strnlen(i->i_sb, offset + ROMFH_SIZE, sizeof(fsname) - 1); if (j < 0) goto out; ret = romfs_dev_read(i->i_sb, offset + ROMFH_SIZE, fsname, j); if (ret < 0) goto out; fsname[j] = '\0'; ino = offset; nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) == ROMFH_HRD) ino = be32_to_cpu(ri.spec); if (!dir_emit(ctx, fsname, j, ino, romfs_dtype_table[nextfh & ROMFH_TYPE])) goto out; offset = nextfh & ROMFH_MASK; } out: return 0; } /* * look up an entry in a directory */ static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { unsigned long offset, maxoff; struct inode *inode = NULL; struct romfs_inode ri; const char *name; /* got from dentry */ int len, ret; offset = dir->i_ino & ROMFH_MASK; ret = romfs_dev_read(dir->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto error; /* search all the file entries in the list starting from the one * pointed to by the directory's special data */ maxoff = romfs_maxsize(dir->i_sb); offset = be32_to_cpu(ri.spec) & ROMFH_MASK; name = dentry->d_name.name; len = dentry->d_name.len; for (;;) { if (!offset || offset >= maxoff) break; ret = romfs_dev_read(dir->i_sb, offset, &ri, sizeof(ri)); if (ret < 0) goto error; /* try to match the first 16 bytes of name */ ret = romfs_dev_strcmp(dir->i_sb, offset + ROMFH_SIZE, name, len); if (ret < 0) goto error; if (ret == 1) { /* Hard link handling */ if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD) offset = be32_to_cpu(ri.spec) & ROMFH_MASK; inode = romfs_iget(dir->i_sb, offset); break; } /* next entry */ offset = be32_to_cpu(ri.next) & ROMFH_MASK; } return d_splice_alias(inode, dentry); error: return ERR_PTR(ret); } static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, .iterate_shared = romfs_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations romfs_dir_inode_operations = { .lookup = romfs_lookup, }; /* * get a romfs inode based on its position in the image (which doubles as the * inode number) */ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos) { struct romfs_inode_info *inode; struct romfs_inode ri; struct inode *i; unsigned long nlen; unsigned nextfh; int ret; umode_t mode; /* we might have to traverse a chain of "hard link" file entries to get * to the actual file */ 
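/* A hard link entry has type ROMFH_HRD; its spec field holds the image offset of the real file header, which the loop below follows. */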
for (;;) { ret = romfs_dev_read(sb, pos, &ri, sizeof(ri)); if (ret < 0) goto error; /* XXX: do romfs_checksum here too (with name) */ nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) != ROMFH_HRD) break; pos = be32_to_cpu(ri.spec) & ROMFH_MASK; } /* determine the length of the filename */ nlen = romfs_dev_strnlen(sb, pos + ROMFH_SIZE, ROMFS_MAXFN); if (IS_ERR_VALUE(nlen)) goto eio; /* get an inode for this image position */ i = iget_locked(sb, pos); if (!i) return ERR_PTR(-ENOMEM); if (!(i->i_state & I_NEW)) return i; /* precalculate the data offset */ inode = ROMFS_I(i); inode->i_metasize = (ROMFH_SIZE + nlen + 1 + ROMFH_PAD) & ROMFH_MASK; inode->i_dataoffset = pos + inode->i_metasize; set_nlink(i, 1); /* Hard to decide.. */ i->i_size = be32_to_cpu(ri.size); i->i_mtime = i->i_atime = inode_set_ctime(i, 0, 0); /* set up mode and ops */ mode = romfs_modemap[nextfh & ROMFH_TYPE]; switch (nextfh & ROMFH_TYPE) { case ROMFH_DIR: i->i_size = ROMFS_I(i)->i_metasize; i->i_op = &romfs_dir_inode_operations; i->i_fop = &romfs_dir_operations; if (nextfh & ROMFH_EXEC) mode |= S_IXUGO; break; case ROMFH_REG: i->i_fop = &romfs_ro_fops; i->i_data.a_ops = &romfs_aops; if (nextfh & ROMFH_EXEC) mode |= S_IXUGO; break; case ROMFH_SYM: i->i_op = &page_symlink_inode_operations; inode_nohighmem(i); i->i_data.a_ops = &romfs_aops; mode |= S_IRWXUGO; break; default: /* depending on MBZ for sock/fifos */ nextfh = be32_to_cpu(ri.spec); init_special_inode(i, mode, MKDEV(nextfh >> 16, nextfh & 0xffff)); break; } i->i_mode = mode; i->i_blocks = (i->i_size + 511) >> 9; unlock_new_inode(i); return i; eio: ret = -EIO; error: pr_err("read error for inode 0x%lx\n", pos); return ERR_PTR(ret); } /* * allocate a new inode */ static struct inode *romfs_alloc_inode(struct super_block *sb) { struct romfs_inode_info *inode; inode = alloc_inode_sb(sb, romfs_inode_cachep, GFP_KERNEL); return inode ? 
&inode->vfs_inode : NULL; } /* * return a spent inode to the slab cache */ static void romfs_free_inode(struct inode *inode) { kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); } /* * get filesystem statistics */ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = 0; /* When calling huge_encode_dev(), * use sb->s_bdev->bd_dev when, * - CONFIG_ROMFS_ON_BLOCK defined * use sb->s_dev when, * - CONFIG_ROMFS_ON_BLOCK undefined and * - CONFIG_ROMFS_ON_MTD defined * leave id as 0 when, * - CONFIG_ROMFS_ON_BLOCK undefined and * - CONFIG_ROMFS_ON_MTD undefined */ if (sb->s_bdev) id = huge_encode_dev(sb->s_bdev->bd_dev); else if (sb->s_dev) id = huge_encode_dev(sb->s_dev); buf->f_type = ROMFS_MAGIC; buf->f_namelen = ROMFS_MAXFN; buf->f_bsize = ROMBSIZE; buf->f_bfree = buf->f_bavail = buf->f_ffree; buf->f_blocks = (romfs_maxsize(dentry->d_sb) + ROMBSIZE - 1) >> ROMBSBITS; buf->f_fsid = u64_to_fsid(id); return 0; } /* * remounting must involve read-only */ static int romfs_reconfigure(struct fs_context *fc) { sync_filesystem(fc->root->d_sb); fc->sb_flags |= SB_RDONLY; return 0; } static const struct super_operations romfs_super_ops = { .alloc_inode = romfs_alloc_inode, .free_inode = romfs_free_inode, .statfs = romfs_statfs, }; /* * checksum check on part of a romfs filesystem */ static __u32 romfs_checksum(const void *data, int size) { const __be32 *ptr = data; __u32 sum; sum = 0; size >>= 2; while (size > 0) { sum += be32_to_cpu(*ptr++); size--; } return sum; } /* * fill in the superblock */ static int romfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct romfs_super_block *rsb; struct inode *root; unsigned long pos, img_size; const char *storage; size_t len; int ret; #ifdef CONFIG_BLOCK if (!sb->s_mtd) { sb_set_blocksize(sb, ROMBSIZE); } else { sb->s_blocksize = ROMBSIZE; sb->s_blocksize_bits = blksize_bits(ROMBSIZE); } #endif sb->s_maxbytes = 0xFFFFFFFF; sb->s_magic = ROMFS_MAGIC; sb->s_flags |= SB_RDONLY | SB_NOATIME; sb->s_time_min = 0; sb->s_time_max = 0; sb->s_op = &romfs_super_ops; #ifdef CONFIG_ROMFS_ON_MTD /* Use same dev ID from the underlying mtdblock device */ if (sb->s_mtd) sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); #endif /* read the image superblock and check it */ rsb = kmalloc(512, GFP_KERNEL); if (!rsb) return -ENOMEM; sb->s_fs_info = (void *) 512; ret = romfs_dev_read(sb, 0, rsb, 512); if (ret < 0) goto error_rsb; img_size = be32_to_cpu(rsb->size); if (sb->s_mtd && img_size > sb->s_mtd->size) goto error_rsb_inval; sb->s_fs_info = (void *) img_size; if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1 || img_size < ROMFH_SIZE) { if (!(fc->sb_flags & SB_SILENT)) errorf(fc, "VFS: Can't find a romfs filesystem on dev %s.\n", sb->s_id); goto error_rsb_inval; } if (romfs_checksum(rsb, min_t(size_t, img_size, 512))) { pr_err("bad initial checksum on dev %s.\n", sb->s_id); goto error_rsb_inval; } storage = sb->s_mtd ? 
"MTD" : "the block layer"; len = strnlen(rsb->name, ROMFS_MAXFN); if (!(fc->sb_flags & SB_SILENT)) pr_notice("Mounting image '%*.*s' through %s\n", (unsigned) len, (unsigned) len, rsb->name, storage); kfree(rsb); rsb = NULL; /* find the root directory */ pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK; root = romfs_iget(sb, pos); if (IS_ERR(root)) return PTR_ERR(root); sb->s_root = d_make_root(root); if (!sb->s_root) return -ENOMEM; return 0; error_rsb_inval: ret = -EINVAL; error_rsb: kfree(rsb); return ret; } /* * get a superblock for mounting */ static int romfs_get_tree(struct fs_context *fc) { int ret = -EINVAL; #ifdef CONFIG_ROMFS_ON_MTD ret = get_tree_mtd(fc, romfs_fill_super); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (ret == -EINVAL) ret = get_tree_bdev(fc, romfs_fill_super); #endif return ret; } static const struct fs_context_operations romfs_context_ops = { .get_tree = romfs_get_tree, .reconfigure = romfs_reconfigure, }; /* * Set up the filesystem mount context. */ static int romfs_init_fs_context(struct fs_context *fc) { fc->ops = &romfs_context_ops; return 0; } /* * destroy a romfs superblock in the appropriate manner */ static void romfs_kill_sb(struct super_block *sb) { generic_shutdown_super(sb); #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) { put_mtd_device(sb->s_mtd); sb->s_mtd = NULL; } #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) { sync_blockdev(sb->s_bdev); blkdev_put(sb->s_bdev, sb); } #endif } static struct file_system_type romfs_fs_type = { .owner = THIS_MODULE, .name = "romfs", .init_fs_context = romfs_init_fs_context, .kill_sb = romfs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("romfs"); /* * inode storage initialiser */ static void romfs_i_init_once(void *_inode) { struct romfs_inode_info *inode = _inode; inode_init_once(&inode->vfs_inode); } /* * romfs module initialisation */ static int __init init_romfs_fs(void) { int ret; pr_info("ROMFS MTD (C) 2007 Red Hat, Inc.\n"); romfs_inode_cachep = kmem_cache_create("romfs_i", sizeof(struct romfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, romfs_i_init_once); if (!romfs_inode_cachep) { pr_err("Failed to initialise inode cache\n"); return -ENOMEM; } ret = register_filesystem(&romfs_fs_type); if (ret) { pr_err("Failed to register filesystem\n"); goto error_register; } return 0; error_register: kmem_cache_destroy(romfs_inode_cachep); return ret; } /* * romfs module removal */ static void __exit exit_romfs_fs(void) { unregister_filesystem(&romfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(romfs_inode_cachep); } module_init(init_romfs_fs); module_exit(exit_romfs_fs); MODULE_DESCRIPTION("Direct-MTD Capable RomFS"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); /* Actually dual-licensed, but it doesn't matter for */
linux-master
fs/romfs/super.c
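romfs_checksum() above sums the buffer as big-endian 32-bit words; a well-formed romfs image is laid out so that the sum over its first 512 bytes (or the whole image, if smaller) is 0 modulo 2^32, which is why romfs_fill_super() treats any nonzero sum as corruption. A user-space sketch of the same check follows, assuming a romfs image file is passed as the argument; the file handling is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Sum size bytes as big-endian 32-bit words, mirroring romfs_checksum(). */
static uint32_t romfs_sum(const unsigned char *data, size_t size)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 4 <= size; i += 4)
		sum += (uint32_t)data[i] << 24 | (uint32_t)data[i + 1] << 16 |
		       (uint32_t)data[i + 2] << 8 | data[i + 3];
	return sum;
}

int main(int argc, char *argv[])
{
	unsigned char buf[512];
	size_t n;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	n = fread(buf, 1, sizeof(buf), f);	/* at most the first 512 bytes */
	fclose(f);
	/* The mount-time check fails when this sum is nonzero. */
	printf("checksum %s\n", romfs_sum(buf, n) ? "BAD" : "ok");
	return 0;
}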
// SPDX-License-Identifier: GPL-2.0-or-later /* RomFS storage access routines * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/fs.h> #include <linux/mtd/super.h> #include <linux/buffer_head.h> #include "internal.h" #if !defined(CONFIG_ROMFS_ON_MTD) && !defined(CONFIG_ROMFS_ON_BLOCK) #error no ROMFS backing store interface configured #endif #ifdef CONFIG_ROMFS_ON_MTD #define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__) /* * read data from an romfs image on an MTD device */ static int romfs_mtd_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { size_t rlen; int ret; ret = ROMFS_MTD_READ(sb, pos, buflen, &rlen, buf); return (ret < 0 || rlen != buflen) ? -EIO : 0; } /* * determine the length of a string in a romfs image on an MTD device */ static ssize_t romfs_mtd_strnlen(struct super_block *sb, unsigned long pos, size_t maxlen) { ssize_t n = 0; size_t segment; u_char buf[16], *p; size_t len; int ret; /* scan the string up to 16 bytes at a time */ while (maxlen > 0) { segment = min_t(size_t, maxlen, 16); ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); if (ret < 0) return ret; p = memchr(buf, 0, len); if (p) return n + (p - buf); maxlen -= len; pos += len; n += len; } return n; } /* * compare a string to one in a romfs image on MTD * - return 1 if matched, 0 if differ, -ve if error */ static int romfs_mtd_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { u_char buf[17]; size_t len, segment; int ret; /* scan the string up to 16 bytes at a time, and attempt to grab the * trailing NUL whilst we're at it */ buf[0] = 0xff; while (size > 0) { segment = min_t(size_t, size + 1, 17); ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); if (ret < 0) return ret; len--; if (memcmp(buf, str, len) != 0) return 0; buf[0] = buf[len]; size -= len; pos += len; str += len; } /* check the trailing NUL was */ if (buf[0]) return 0; return 1; } #endif /* CONFIG_ROMFS_ON_MTD */ #ifdef CONFIG_ROMFS_ON_BLOCK /* * read data from an romfs image on a block device */ static int romfs_blk_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { struct buffer_head *bh; unsigned long offset; size_t segment; /* copy the string up to blocksize bytes at a time */ while (buflen > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, buflen, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; memcpy(buf, bh->b_data + offset, segment); brelse(bh); buf += segment; buflen -= segment; pos += segment; } return 0; } /* * determine the length of a string in romfs on a block device */ static ssize_t romfs_blk_strnlen(struct super_block *sb, unsigned long pos, size_t limit) { struct buffer_head *bh; unsigned long offset; ssize_t n = 0; size_t segment; u_char *buf, *p; /* scan the string up to blocksize bytes at a time */ while (limit > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, limit, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; buf = bh->b_data + offset; p = memchr(buf, 0, segment); brelse(bh); if (p) return n + (p - buf); limit -= segment; pos += segment; n += segment; } return n; } /* * compare a string to one in a romfs image on a block device * - return 1 if matched, 0 if differ, -ve if error */ static int romfs_blk_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { struct buffer_head *bh; unsigned long offset; size_t segment; bool matched, terminated = false; 
/* compare string up to a block at a time */ while (size > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, size, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; matched = (memcmp(bh->b_data + offset, str, segment) == 0); size -= segment; pos += segment; str += segment; if (matched && size == 0 && offset + segment < ROMBSIZE) { if (!bh->b_data[offset + segment]) terminated = true; else matched = false; } brelse(bh); if (!matched) return 0; } if (!terminated) { /* the terminating NUL must be on the first byte of the next * block */ BUG_ON((pos & (ROMBSIZE - 1)) != 0); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; matched = !bh->b_data[0]; brelse(bh); if (!matched) return 0; } return 1; } #endif /* CONFIG_ROMFS_ON_BLOCK */ /* * read data from the romfs image */ int romfs_dev_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit || buflen > limit - pos) return -EIO; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_read(sb, pos, buf, buflen); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_read(sb, pos, buf, buflen); #endif return -EIO; } /* * determine the length of a string in romfs */ ssize_t romfs_dev_strnlen(struct super_block *sb, unsigned long pos, size_t maxlen) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit) return -EIO; if (maxlen > limit - pos) maxlen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_strnlen(sb, pos, maxlen); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_strnlen(sb, pos, maxlen); #endif return -EIO; } /* * compare a string to one in romfs * - the string to be compared to, str, may not be NUL-terminated; instead the * string is of the specified size * - return 1 if matched, 0 if differ, -ve if error */ int romfs_dev_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit) return -EIO; if (size > ROMFS_MAXFN) return -ENAMETOOLONG; if (size + 1 > limit - pos) return -EIO; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_strcmp(sb, pos, str, size); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_strcmp(sb, pos, str, size); #endif return -EIO; }
linux-master
fs/romfs/storage.c
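Both backends above locate a terminating NUL in bounded chunks - 16 bytes per MTD read, one block per sb_bread() - and memchr() each chunk, so an unterminated name can never walk past the caller's limit. Here is a hedged user-space sketch of the same chunked scan over an ordinary file descriptor; the helper name is ours.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>

/* Bounded NUL scan starting at pos, reading at most 16 bytes per round
 * trip, mirroring the shape of romfs_mtd_strnlen(). Returns the string
 * length, maxlen if no NUL was found within the limit, or -1 on error. */
static ssize_t chunked_strnlen(int fd, off_t pos, size_t maxlen)
{
	unsigned char buf[16];
	ssize_t n = 0;

	while (maxlen > 0) {
		size_t seg = maxlen < sizeof(buf) ? maxlen : sizeof(buf);
		ssize_t len = pread(fd, buf, seg, pos);
		unsigned char *p;

		if (len <= 0)
			return -1;
		p = memchr(buf, 0, len);
		if (p)
			return n + (p - buf);
		maxlen -= len;
		pos += len;
		n += len;
	}
	return n;
}

int main(int argc, char *argv[])
{
	int fd;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	printf("%zd\n", chunked_strnlen(fd, 0, 4096));
	close(fd);
	return 0;
}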
// SPDX-License-Identifier: GPL-2.0-or-later /* NOMMU mmap support for RomFS on MTD devices * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/mm.h> #include <linux/mtd/super.h> #include "internal.h" /* * try to determine where a shared mapping can be made * - only supported for NOMMU at the moment (MMU can't doesn't copy private * mappings) * - attempts to map through to the underlying MTD device */ static unsigned long romfs_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct inode *inode = file->f_mapping->host; struct mtd_info *mtd = inode->i_sb->s_mtd; unsigned long isize, offset, maxpages, lpages; int ret; if (!mtd) return (unsigned long) -ENOSYS; /* the mapping mustn't extend beyond the EOF */ lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; isize = i_size_read(inode); offset = pgoff << PAGE_SHIFT; maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; if ((pgoff >= maxpages) || (maxpages - pgoff < lpages)) return (unsigned long) -EINVAL; if (addr != 0) return (unsigned long) -EINVAL; if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) return (unsigned long) -EINVAL; offset += ROMFS_I(inode)->i_dataoffset; if (offset >= mtd->size) return (unsigned long) -EINVAL; /* the mapping mustn't extend beyond the EOF */ if ((offset + len) > mtd->size) len = mtd->size - offset; ret = mtd_get_unmapped_area(mtd, len, offset, flags); if (ret == -EOPNOTSUPP) ret = -ENOSYS; return (unsigned long) ret; } /* * permit a R/O mapping to be made directly through onto an MTD device if * possible */ static int romfs_mmap(struct file *file, struct vm_area_struct *vma) { return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS; } static unsigned romfs_mmap_capabilities(struct file *file) { struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd; if (!mtd) return NOMMU_MAP_COPY; return mtd_mmap_capabilities(mtd); } const struct file_operations romfs_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .splice_read = filemap_splice_read, .mmap = romfs_mmap, .get_unmapped_area = romfs_get_unmapped_area, .mmap_capabilities = romfs_mmap_capabilities, };
linux-master
fs/romfs/mmap-nommu.c
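The EOF guard in romfs_get_unmapped_area() works in whole pages: it rounds both the request length and the file size up to page counts, then compares the counts so that pgoff plus the mapping length can never wrap an unsigned long. A small sketch of just that check, with an assumed 4KB page size:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Overflow-safe EOF check in the style of romfs_get_unmapped_area():
 * round both sizes up to pages, then compare page counts. */
static bool mapping_fits(unsigned long isize, unsigned long pgoff,
			 unsigned long len)
{
	unsigned long lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return pgoff < maxpages && maxpages - pgoff >= lpages;
}

int main(void)
{
	/* A 10000-byte file spans pages 0..2, so a two-page mapping fits
	 * at pgoff 1 but must be rejected at pgoff 2. Prints "1 0". */
	printf("%d %d\n", mapping_fits(10000, 1, 2 * PAGE_SIZE),
	       mapping_fits(10000, 2, 2 * PAGE_SIZE));
	return 0;
}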
// SPDX-License-Identifier: GPL-2.0-only /* * super.c * * PURPOSE * Super block routines for the OSTA-UDF(tm) filesystem. * * DESCRIPTION * OSTA-UDF(tm) = Optical Storage Technology Association * Universal Disk Format. * * This code is based on version 2.00 of the UDF specification, * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346]. * http://www.osta.org/ * https://www.ecma.ch/ * https://www.iso.org/ * * COPYRIGHT * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 2000 Stelias Computing Inc * * HISTORY * * 09/24/98 dgb changed to allow compiling outside of kernel, and * added some debugging. * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34 * 10/16/98 attempting some multi-session support * 10/17/98 added freespace count for "df" * 11/11/98 gr added novrs option * 11/26/98 dgb added fileset,anchor mount options * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced * vol descs. rewrote option handling based on isofs * 12/20/98 find the free space bitmap (if it exists) */ #include "udfdecl.h" #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/parser.h> #include <linux/stat.h> #include <linux/cdrom.h> #include <linux/nls.h> #include <linux/vfs.h> #include <linux/vmalloc.h> #include <linux/errno.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/bitmap.h> #include <linux/crc-itu-t.h> #include <linux/log2.h> #include <asm/byteorder.h> #include <linux/iversion.h> #include "udf_sb.h" #include "udf_i.h" #include <linux/init.h> #include <linux/uaccess.h> enum { VDS_POS_PRIMARY_VOL_DESC, VDS_POS_UNALLOC_SPACE_DESC, VDS_POS_LOGICAL_VOL_DESC, VDS_POS_IMP_USE_VOL_DESC, VDS_POS_LENGTH }; #define VSD_FIRST_SECTOR_OFFSET 32768 #define VSD_MAX_SECTOR_OFFSET 0x800000 /* * Maximum number of Terminating Descriptor / Logical Volume Integrity * Descriptor redirections. The chosen numbers are arbitrary - just that we * hopefully don't limit any real use of rewritten inode on write-once media * but avoid looping for too long on corrupted media. */ #define UDF_MAX_TD_NESTING 64 #define UDF_MAX_LVID_NESTING 1000 enum { UDF_MAX_LINKS = 0xffff }; /* * We limit filesize to 4TB. This is arbitrary as the on-disk format supports * more but because the file space is described by a linked list of extents, * each of which can have at most 1GB, the creation and handling of extents * gets unusably slow beyond certain point... 
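* At the 4TB cap a file built from maximal 1GB extents already needs 4096 entries in that list (2^42 / 2^30), and fragmented files need more.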
*/ #define UDF_MAX_FILESIZE (1ULL << 42) /* These are the "meat" - everything else is stuffing */ static int udf_fill_super(struct super_block *, void *, int); static void udf_put_super(struct super_block *); static int udf_sync_fs(struct super_block *, int); static int udf_remount_fs(struct super_block *, int *, char *); static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad); static void udf_open_lvid(struct super_block *); static void udf_close_lvid(struct super_block *); static unsigned int udf_count_free(struct super_block *); static int udf_statfs(struct dentry *, struct kstatfs *); static int udf_show_options(struct seq_file *, struct dentry *); struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) { struct logicalVolIntegrityDesc *lvid; unsigned int partnum; unsigned int offset; if (!UDF_SB(sb)->s_lvid_bh) return NULL; lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data; partnum = le32_to_cpu(lvid->numOfPartitions); /* The offset is to skip freeSpaceTable and sizeTable arrays */ offset = partnum * 2 * sizeof(uint32_t); return (struct logicalVolIntegrityDescImpUse *) (((uint8_t *)(lvid + 1)) + offset); } /* UDF filesystem type */ static struct dentry *udf_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super); } static struct file_system_type udf_fstype = { .owner = THIS_MODULE, .name = "udf", .mount = udf_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("udf"); static struct kmem_cache *udf_inode_cachep; static struct inode *udf_alloc_inode(struct super_block *sb) { struct udf_inode_info *ei; ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->i_unique = 0; ei->i_lenExtents = 0; ei->i_lenStreams = 0; ei->i_next_alloc_block = 0; ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; ei->i_streamdir = 0; ei->i_hidden = 0; init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); inode_set_iversion(&ei->vfs_inode, 1); return &ei->vfs_inode; } static void udf_free_in_core_inode(struct inode *inode) { kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } static void init_once(void *foo) { struct udf_inode_info *ei = foo; ei->i_data = NULL; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { udf_inode_cachep = kmem_cache_create("udf_inode_cache", sizeof(struct udf_inode_info), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT), init_once); if (!udf_inode_cachep) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(udf_inode_cachep); } /* Superblock operations */ static const struct super_operations udf_sb_ops = { .alloc_inode = udf_alloc_inode, .free_inode = udf_free_in_core_inode, .write_inode = udf_write_inode, .evict_inode = udf_evict_inode, .put_super = udf_put_super, .sync_fs = udf_sync_fs, .statfs = udf_statfs, .remount_fs = udf_remount_fs, .show_options = udf_show_options, }; struct udf_options { unsigned char novrs; unsigned int blocksize; unsigned int session; unsigned int lastblock; unsigned int anchor; unsigned int flags; umode_t umask; kgid_t gid; kuid_t uid; umode_t fmode; umode_t dmode; struct nls_table *nls_map; }; static int __init init_udf_fs(void) { int err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&udf_fstype); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_udf_fs(void) { unregister_filesystem(&udf_fstype); destroy_inodecache(); } static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) { struct udf_sb_info *sbi = UDF_SB(sb); sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL); if (!sbi->s_partmaps) { sbi->s_partitions = 0; return -ENOMEM; } sbi->s_partitions = count; return 0; } static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) { int i; int nr_groups = bitmap->s_nr_groups; for (i = 0; i < nr_groups; i++) brelse(bitmap->s_block_bitmap[i]); kvfree(bitmap); } static void udf_free_partition(struct udf_part_map *map) { int i; struct udf_meta_data *mdata; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) iput(map->s_uspace.s_table); if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) udf_sb_free_bitmap(map->s_uspace.s_bitmap); if (map->s_partition_type == UDF_SPARABLE_MAP15) for (i = 0; i < 4; i++) brelse(map->s_type_specific.s_sparing.s_spar_map[i]); else if (map->s_partition_type == UDF_METADATA_MAP25) { mdata = &map->s_type_specific.s_metadata; iput(mdata->s_metadata_fe); mdata->s_metadata_fe = NULL; iput(mdata->s_mirror_fe); mdata->s_mirror_fe = NULL; iput(mdata->s_bitmap_fe); mdata->s_bitmap_fe = NULL; } } static void udf_sb_free_partitions(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); int i; if (!sbi->s_partmaps) return; for (i = 0; i < sbi->s_partitions; i++) udf_free_partition(&sbi->s_partmaps[i]); kfree(sbi->s_partmaps); sbi->s_partmaps = NULL; } static int udf_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct udf_sb_info *sbi = UDF_SB(sb); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) seq_puts(seq, ",nostrict"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET)) seq_printf(seq, ",bs=%lu", sb->s_blocksize); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) seq_puts(seq, ",unhide"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) seq_puts(seq, ",undelete"); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB)) seq_puts(seq, ",noadinicb"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD)) seq_puts(seq, ",shortad"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET)) seq_puts(seq, ",uid=forget"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET)) seq_puts(seq, ",gid=forget"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid)); if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid)); if (sbi->s_umask != 0) seq_printf(seq, ",umask=%ho", sbi->s_umask); if (sbi->s_fmode != UDF_INVALID_MODE) seq_printf(seq, ",mode=%ho", sbi->s_fmode); if (sbi->s_dmode != UDF_INVALID_MODE) 
seq_printf(seq, ",dmode=%ho", sbi->s_dmode); if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET)) seq_printf(seq, ",session=%d", sbi->s_session); if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET)) seq_printf(seq, ",lastblock=%u", sbi->s_last_block); if (sbi->s_anchor != 0) seq_printf(seq, ",anchor=%u", sbi->s_anchor); if (sbi->s_nls_map) seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset); else seq_puts(seq, ",iocharset=utf8"); return 0; } /* * udf_parse_options * * PURPOSE * Parse mount options. * * DESCRIPTION * The following mount options are supported: * * gid= Set the default group. * umask= Set the default umask. * mode= Set the default file permissions. * dmode= Set the default directory permissions. * uid= Set the default user. * bs= Set the block size. * unhide Show otherwise hidden files. * undelete Show deleted files in lists. * adinicb Embed data in the inode (default) * noadinicb Don't embed data in the inode * shortad Use short ad's * longad Use long ad's (default) * nostrict Unset strict conformance * iocharset= Set the NLS character set * * The remaining are for debugging and disaster recovery: * * novrs Skip volume sequence recognition * * The following expect a offset from 0. * * session= Set the CDROM session (default= last session) * anchor= Override standard anchor location. (default= 256) * volume= Override the VolumeDesc location. (unused) * partition= Override the PartitionDesc location. (unused) * lastblock= Set the last block of the filesystem/ * * The following expect a offset from the partition root. * * fileset= Override the fileset block location. (unused) * rootdir= Override the root directory location. (unused) * WARNING: overriding the rootdir to a non-directory may * yield highly unpredictable results. * * PRE-CONDITIONS * options Pointer to mount options string. * uopts Pointer to mount options variable. * * POST-CONDITIONS * <return> 1 Mount options parsed okay. * <return> 0 Error parsing mount options. * * HISTORY * July 1, 1997 - Andrew E. Mileski * Written, tested, and released. 
*/ enum { Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete, Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock, Opt_anchor, Opt_volume, Opt_partition, Opt_fileset, Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore, Opt_fmode, Opt_dmode }; static const match_table_t tokens = { {Opt_novrs, "novrs"}, {Opt_nostrict, "nostrict"}, {Opt_bs, "bs=%u"}, {Opt_unhide, "unhide"}, {Opt_undelete, "undelete"}, {Opt_noadinicb, "noadinicb"}, {Opt_adinicb, "adinicb"}, {Opt_shortad, "shortad"}, {Opt_longad, "longad"}, {Opt_uforget, "uid=forget"}, {Opt_uignore, "uid=ignore"}, {Opt_gforget, "gid=forget"}, {Opt_gignore, "gid=ignore"}, {Opt_gid, "gid=%u"}, {Opt_uid, "uid=%u"}, {Opt_umask, "umask=%o"}, {Opt_session, "session=%u"}, {Opt_lastblock, "lastblock=%u"}, {Opt_anchor, "anchor=%u"}, {Opt_volume, "volume=%u"}, {Opt_partition, "partition=%u"}, {Opt_fileset, "fileset=%u"}, {Opt_rootdir, "rootdir=%u"}, {Opt_utf8, "utf8"}, {Opt_iocharset, "iocharset=%s"}, {Opt_fmode, "mode=%o"}, {Opt_dmode, "dmode=%o"}, {Opt_err, NULL} }; static int udf_parse_options(char *options, struct udf_options *uopt, bool remount) { char *p; int option; unsigned int uv; uopt->novrs = 0; uopt->session = 0xFFFFFFFF; uopt->lastblock = 0; uopt->anchor = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; unsigned n; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_novrs: uopt->novrs = 1; break; case Opt_bs: if (match_int(&args[0], &option)) return 0; n = option; if (n != 512 && n != 1024 && n != 2048 && n != 4096) return 0; uopt->blocksize = n; uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET); break; case Opt_unhide: uopt->flags |= (1 << UDF_FLAG_UNHIDE); break; case Opt_undelete: uopt->flags |= (1 << UDF_FLAG_UNDELETE); break; case Opt_noadinicb: uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB); break; case Opt_adinicb: uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB); break; case Opt_shortad: uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); break; case Opt_longad: uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); break; case Opt_gid: if (match_uint(args, &uv)) return 0; uopt->gid = make_kgid(current_user_ns(), uv); if (!gid_valid(uopt->gid)) return 0; uopt->flags |= (1 << UDF_FLAG_GID_SET); break; case Opt_uid: if (match_uint(args, &uv)) return 0; uopt->uid = make_kuid(current_user_ns(), uv); if (!uid_valid(uopt->uid)) return 0; uopt->flags |= (1 << UDF_FLAG_UID_SET); break; case Opt_umask: if (match_octal(args, &option)) return 0; uopt->umask = option; break; case Opt_nostrict: uopt->flags &= ~(1 << UDF_FLAG_STRICT); break; case Opt_session: if (match_int(args, &option)) return 0; uopt->session = option; if (!remount) uopt->flags |= (1 << UDF_FLAG_SESSION_SET); break; case Opt_lastblock: if (match_int(args, &option)) return 0; uopt->lastblock = option; if (!remount) uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); break; case Opt_anchor: if (match_int(args, &option)) return 0; uopt->anchor = option; break; case Opt_volume: case Opt_partition: case Opt_fileset: case Opt_rootdir: /* Ignored (never implemented properly) */ break; case Opt_utf8: if (!remount) { unload_nls(uopt->nls_map); uopt->nls_map = NULL; } break; case Opt_iocharset: if (!remount) { unload_nls(uopt->nls_map); uopt->nls_map = NULL; } /* When nls_map is not loaded then UTF-8 is used */ if (!remount && strcmp(args[0].from, "utf8") != 0) { uopt->nls_map = load_nls(args[0].from); if 
(!uopt->nls_map) { pr_err("iocharset %s not found\n", args[0].from); return 0; } } break; case Opt_uforget: uopt->flags |= (1 << UDF_FLAG_UID_FORGET); break; case Opt_uignore: case Opt_gignore: /* These options are superseeded by uid=<number> */ break; case Opt_gforget: uopt->flags |= (1 << UDF_FLAG_GID_FORGET); break; case Opt_fmode: if (match_octal(args, &option)) return 0; uopt->fmode = option & 0777; break; case Opt_dmode: if (match_octal(args, &option)) return 0; uopt->dmode = option & 0777; break; default: pr_err("bad mount option \"%s\" or missing value\n", p); return 0; } } return 1; } static int udf_remount_fs(struct super_block *sb, int *flags, char *options) { struct udf_options uopt; struct udf_sb_info *sbi = UDF_SB(sb); int error = 0; if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) return -EACCES; sync_filesystem(sb); uopt.flags = sbi->s_flags; uopt.uid = sbi->s_uid; uopt.gid = sbi->s_gid; uopt.umask = sbi->s_umask; uopt.fmode = sbi->s_fmode; uopt.dmode = sbi->s_dmode; uopt.nls_map = NULL; if (!udf_parse_options(options, &uopt, true)) return -EINVAL; write_lock(&sbi->s_cred_lock); sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; sbi->s_gid = uopt.gid; sbi->s_umask = uopt.umask; sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; write_unlock(&sbi->s_cred_lock); if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) goto out_unlock; if (*flags & SB_RDONLY) udf_close_lvid(sb); else udf_open_lvid(sb); out_unlock: return error; } /* * Check VSD descriptor. Returns -1 in case we are at the end of volume * recognition area, 0 if the descriptor is valid but non-interesting, 1 if * we found one of NSR descriptors we are looking for. */ static int identify_vsd(const struct volStructDesc *vsd) { int ret = 0; if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) { switch (vsd->structType) { case 0: udf_debug("ISO9660 Boot Record found\n"); break; case 1: udf_debug("ISO9660 Primary Volume Descriptor found\n"); break; case 2: udf_debug("ISO9660 Supplementary Volume Descriptor found\n"); break; case 3: udf_debug("ISO9660 Volume Partition Descriptor found\n"); break; case 255: udf_debug("ISO9660 Volume Descriptor Set Terminator found\n"); break; default: udf_debug("ISO9660 VRS (%u) found\n", vsd->structType); break; } } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) ; /* ret = 0 */ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) ret = 1; else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) ret = 1; else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN)) ; /* ret = 0 */ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN)) ; /* ret = 0 */ else { /* TEA01 or invalid id : end of volume recognition area */ ret = -1; } return ret; } /* * Check Volume Structure Descriptors (ECMA 167 2/9.1) * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) * @return 1 if NSR02 or NSR03 found, * -1 if first sector read error, 0 otherwise */ static int udf_check_vsd(struct super_block *sb) { struct volStructDesc *vsd = NULL; loff_t sector = VSD_FIRST_SECTOR_OFFSET; int sectorsize; struct buffer_head *bh = NULL; int nsr = 0; struct udf_sb_info *sbi; loff_t session_offset; sbi = UDF_SB(sb); if (sb->s_blocksize < sizeof(struct volStructDesc)) sectorsize = sizeof(struct volStructDesc); else sectorsize = sb->s_blocksize; session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits; sector += session_offset; udf_debug("Starting at sector %u (%lu byte sectors)\n", (unsigned int)(sector >> 
sb->s_blocksize_bits), sb->s_blocksize); /* Process the sequence (if applicable). The hard limit on the sector * offset is arbitrary, hopefully large enough so that all valid UDF * filesystems will be recognised. There is no mention of an upper * bound to the size of the volume recognition area in the standard. * The limit will prevent the code to read all the sectors of a * specially crafted image (like a bluray disc full of CD001 sectors), * potentially causing minutes or even hours of uninterruptible I/O * activity. This actually happened with uninitialised SSD partitions * (all 0xFF) before the check for the limit and all valid IDs were * added */ for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) { /* Read a block */ bh = sb_bread(sb, sector >> sb->s_blocksize_bits); if (!bh) break; vsd = (struct volStructDesc *)(bh->b_data + (sector & (sb->s_blocksize - 1))); nsr = identify_vsd(vsd); /* Found NSR or end? */ if (nsr) { brelse(bh); break; } /* * Special handling for improperly formatted VRS (e.g., Win10) * where components are separated by 2048 bytes even though * sectors are 4K */ if (sb->s_blocksize == 4096) { nsr = identify_vsd(vsd + 1); /* Ignore unknown IDs... */ if (nsr < 0) nsr = 0; } brelse(bh); } if (nsr > 0) return 1; else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET) return -1; else return 0; } static int udf_verify_domain_identifier(struct super_block *sb, struct regid *ident, char *dname) { struct domainIdentSuffix *suffix; if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) { udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname); goto force_ro; } if (ident->flags & ENTITYID_FLAGS_DIRTY) { udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n", dname); goto force_ro; } suffix = (struct domainIdentSuffix *)ident->identSuffix; if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) || (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) { if (!sb_rdonly(sb)) { udf_warn(sb, "Descriptor for %s marked write protected." " Forcing read only mount.\n", dname); } goto force_ro; } return 0; force_ro: if (!sb_rdonly(sb)) return -EACCES; UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); return 0; } static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset, struct kernel_lb_addr *root) { int ret; ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set"); if (ret < 0) return ret; *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum); udf_debug("Rootdir at block=%u, partition=%u\n", root->logicalBlockNum, root->partitionReferenceNum); return 0; } static int udf_find_fileset(struct super_block *sb, struct kernel_lb_addr *fileset, struct kernel_lb_addr *root) { struct buffer_head *bh; uint16_t ident; int ret; if (fileset->logicalBlockNum == 0xFFFFFFFF && fileset->partitionReferenceNum == 0xFFFF) return -EINVAL; bh = udf_read_ptagged(sb, fileset, 0, &ident); if (!bh) return -EIO; if (ident != TAG_IDENT_FSD) { brelse(bh); return -EINVAL; } udf_debug("Fileset at block=%u, partition=%u\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root); brelse(bh); return ret; } /* * Load primary Volume Descriptor Sequence * * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence * should be tried. 
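* (ECMA-167 records both a main and a reserve volume descriptor sequence, so the caller can retry with the reserve copy when the main one is damaged.)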
*/ static int udf_load_pvoldesc(struct super_block *sb, sector_t block) { struct primaryVolDesc *pvoldesc; uint8_t *outstr; struct buffer_head *bh; uint16_t ident; int ret; struct timestamp *ts; outstr = kmalloc(128, GFP_NOFS); if (!outstr) return -ENOMEM; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) { ret = -EAGAIN; goto out2; } if (ident != TAG_IDENT_PVD) { ret = -EIO; goto out_bh; } pvoldesc = (struct primaryVolDesc *)bh->b_data; udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time, pvoldesc->recordingDateAndTime); ts = &pvoldesc->recordingDateAndTime; udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n", le16_to_cpu(ts->year), ts->month, ts->day, ts->hour, ts->minute, le16_to_cpu(ts->typeAndTimezone)); ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32); if (ret < 0) { strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName"); pr_warn("incorrect volume identification, setting to " "'InvalidName'\n"); } else { strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); } udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128); if (ret < 0) { ret = 0; goto out_bh; } outstr[ret] = 0; udf_debug("volSetIdent[] = '%s'\n", outstr); ret = 0; out_bh: brelse(bh); out2: kfree(outstr); return ret; } struct inode *udf_find_metadata_inode_efe(struct super_block *sb, u32 meta_file_loc, u32 partition_ref) { struct kernel_lb_addr addr; struct inode *metadata_fe; addr.logicalBlockNum = meta_file_loc; addr.partitionReferenceNum = partition_ref; metadata_fe = udf_iget_special(sb, &addr); if (IS_ERR(metadata_fe)) { udf_warn(sb, "metadata inode efe not found\n"); return metadata_fe; } if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) { udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n"); iput(metadata_fe); return ERR_PTR(-EIO); } return metadata_fe; } static int udf_load_metadata_files(struct super_block *sb, int partition, int type1_index) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_meta_data *mdata; struct kernel_lb_addr addr; struct inode *fe; map = &sbi->s_partmaps[partition]; mdata = &map->s_type_specific.s_metadata; mdata->s_phys_partition_ref = type1_index; /* metadata address */ udf_debug("Metadata file location: block = %u part = %u\n", mdata->s_meta_file_loc, mdata->s_phys_partition_ref); fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, mdata->s_phys_partition_ref); if (IS_ERR(fe)) { /* mirror file entry */ udf_debug("Mirror metadata file location: block = %u part = %u\n", mdata->s_mirror_file_loc, mdata->s_phys_partition_ref); fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, mdata->s_phys_partition_ref); if (IS_ERR(fe)) { udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); return PTR_ERR(fe); } mdata->s_mirror_fe = fe; } else mdata->s_metadata_fe = fe; /* * bitmap file entry * Note: * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102) */ if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { addr.logicalBlockNum = mdata->s_bitmap_file_loc; addr.partitionReferenceNum = mdata->s_phys_partition_ref; udf_debug("Bitmap file location: block = %u part = %u\n", addr.logicalBlockNum, addr.partitionReferenceNum); fe = udf_iget_special(sb, &addr); if (IS_ERR(fe)) { if (sb_rdonly(sb)) udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); else { udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); return PTR_ERR(fe); } } else 
mdata->s_bitmap_fe = fe; } udf_debug("udf_load_metadata_files Ok\n"); return 0; } int udf_compute_nr_groups(struct super_block *sb, u32 partition) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; return DIV_ROUND_UP(map->s_partition_len + (sizeof(struct spaceBitmapDesc) << 3), sb->s_blocksize * 8); } static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) { struct udf_bitmap *bitmap; int nr_groups = udf_compute_nr_groups(sb, index); bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups), GFP_KERNEL); if (!bitmap) return NULL; bitmap->s_nr_groups = nr_groups; return bitmap; } static int check_partition_desc(struct super_block *sb, struct partitionDesc *p, struct udf_part_map *map) { bool umap, utable, fmap, ftable; struct partitionHeaderDesc *phd; switch (le32_to_cpu(p->accessType)) { case PD_ACCESS_TYPE_READ_ONLY: case PD_ACCESS_TYPE_WRITE_ONCE: case PD_ACCESS_TYPE_NONE: goto force_ro; } /* No Partition Header Descriptor? */ if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) && strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) goto force_ro; phd = (struct partitionHeaderDesc *)p->partitionContentsUse; utable = phd->unallocSpaceTable.extLength; umap = phd->unallocSpaceBitmap.extLength; ftable = phd->freedSpaceTable.extLength; fmap = phd->freedSpaceBitmap.extLength; /* No allocation info? */ if (!utable && !umap && !ftable && !fmap) goto force_ro; /* We don't support blocks that require erasing before overwrite */ if (ftable || fmap) goto force_ro; /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */ if (utable && umap) goto force_ro; if (map->s_partition_type == UDF_VIRTUAL_MAP15 || map->s_partition_type == UDF_VIRTUAL_MAP20 || map->s_partition_type == UDF_METADATA_MAP25) goto force_ro; return 0; force_ro: if (!sb_rdonly(sb)) return -EACCES; UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); return 0; } static int udf_fill_partdesc_info(struct super_block *sb, struct partitionDesc *p, int p_index) { struct udf_part_map *map; struct udf_sb_info *sbi = UDF_SB(sb); struct partitionHeaderDesc *phd; int err; map = &sbi->s_partmaps[p_index]; map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE)) map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE)) map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE)) map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE; udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n", p_index, map->s_partition_type, map->s_partition_root, map->s_partition_len); err = check_partition_desc(sb, p, map); if (err) return err; /* * Skip loading allocation info if we cannot ever write to the fs. * This is a correctness thing as we may have decided to force ro mount * to avoid allocation info we don't support. 
*/ if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) return 0; phd = (struct partitionHeaderDesc *)p->partitionContentsUse; if (phd->unallocSpaceTable.extLength) { struct kernel_lb_addr loc = { .logicalBlockNum = le32_to_cpu( phd->unallocSpaceTable.extPosition), .partitionReferenceNum = p_index, }; struct inode *inode; inode = udf_iget_special(sb, &loc); if (IS_ERR(inode)) { udf_debug("cannot load unallocSpaceTable (part %d)\n", p_index); return PTR_ERR(inode); } map->s_uspace.s_table = inode; map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; udf_debug("unallocSpaceTable (part %d) @ %lu\n", p_index, map->s_uspace.s_table->i_ino); } if (phd->unallocSpaceBitmap.extLength) { struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); if (!bitmap) return -ENOMEM; map->s_uspace.s_bitmap = bitmap; bitmap->s_extPosition = le32_to_cpu( phd->unallocSpaceBitmap.extPosition); map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; udf_debug("unallocSpaceBitmap (part %d) @ %u\n", p_index, bitmap->s_extPosition); } return 0; } static void udf_find_vat_block(struct super_block *sb, int p_index, int type1_index, sector_t start_block) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map = &sbi->s_partmaps[p_index]; sector_t vat_block; struct kernel_lb_addr ino; struct inode *inode; /* * VAT file entry is in the last recorded block. Some broken disks have * it a few blocks before so try a bit harder... */ ino.partitionReferenceNum = type1_index; for (vat_block = start_block; vat_block >= map->s_partition_root && vat_block >= start_block - 3; vat_block--) { ino.logicalBlockNum = vat_block - map->s_partition_root; inode = udf_iget_special(sb, &ino); if (!IS_ERR(inode)) { sbi->s_vat_inode = inode; break; } } } static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map = &sbi->s_partmaps[p_index]; struct buffer_head *bh = NULL; struct udf_inode_info *vati; struct virtualAllocationTable20 *vat20; sector_t blocks = sb_bdev_nr_blocks(sb); udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); if (!sbi->s_vat_inode && sbi->s_last_block != blocks - 1) { pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n", (unsigned long)sbi->s_last_block, (unsigned long)blocks - 1); udf_find_vat_block(sb, p_index, type1_index, blocks - 1); } if (!sbi->s_vat_inode) return -EIO; if (map->s_partition_type == UDF_VIRTUAL_MAP15) { map->s_type_specific.s_virtual.s_start_offset = 0; map->s_type_specific.s_virtual.s_num_entries = (sbi->s_vat_inode->i_size - 36) >> 2; } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { vati = UDF_I(sbi->s_vat_inode); if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { int err = 0; bh = udf_bread(sbi->s_vat_inode, 0, 0, &err); if (!bh) { if (!err) err = -EFSCORRUPTED; return err; } vat20 = (struct virtualAllocationTable20 *)bh->b_data; } else { vat20 = (struct virtualAllocationTable20 *) vati->i_data; } map->s_type_specific.s_virtual.s_start_offset = le16_to_cpu(vat20->lengthHeader); map->s_type_specific.s_virtual.s_num_entries = (sbi->s_vat_inode->i_size - map->s_type_specific.s_virtual. s_start_offset) >> 2; brelse(bh); } return 0; } /* * Load partition descriptor block * * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor * sequence. 
*/ static int udf_load_partdesc(struct super_block *sb, sector_t block) { struct buffer_head *bh; struct partitionDesc *p; struct udf_part_map *map; struct udf_sb_info *sbi = UDF_SB(sb); int i, type1_idx; uint16_t partitionNumber; uint16_t ident; int ret; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return -EAGAIN; if (ident != TAG_IDENT_PD) { ret = 0; goto out_bh; } p = (struct partitionDesc *)bh->b_data; partitionNumber = le16_to_cpu(p->partitionNumber); /* First scan for TYPE1 and SPARABLE partitions */ for (i = 0; i < sbi->s_partitions; i++) { map = &sbi->s_partmaps[i]; udf_debug("Searching map: (%u == %u)\n", map->s_partition_num, partitionNumber); if (map->s_partition_num == partitionNumber && (map->s_partition_type == UDF_TYPE1_MAP15 || map->s_partition_type == UDF_SPARABLE_MAP15)) break; } if (i >= sbi->s_partitions) { udf_debug("Partition (%u) not found in partition map\n", partitionNumber); ret = 0; goto out_bh; } ret = udf_fill_partdesc_info(sb, p, i); if (ret < 0) goto out_bh; /* * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and * PHYSICAL partitions are already set up */ type1_idx = i; map = NULL; /* suppress 'maybe used uninitialized' warning */ for (i = 0; i < sbi->s_partitions; i++) { map = &sbi->s_partmaps[i]; if (map->s_partition_num == partitionNumber && (map->s_partition_type == UDF_VIRTUAL_MAP15 || map->s_partition_type == UDF_VIRTUAL_MAP20 || map->s_partition_type == UDF_METADATA_MAP25)) break; } if (i >= sbi->s_partitions) { ret = 0; goto out_bh; } ret = udf_fill_partdesc_info(sb, p, i); if (ret < 0) goto out_bh; if (map->s_partition_type == UDF_METADATA_MAP25) { ret = udf_load_metadata_files(sb, i, type1_idx); if (ret < 0) { udf_err(sb, "error loading MetaData partition map %d\n", i); goto out_bh; } } else { /* * If we have a partition with virtual map, we don't handle * writing to it (we overwrite blocks instead of relocating * them). 
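 * Such maps are therefore only mounted read-only: the code below * refuses a read-write mount, marks the superblock RW-incompatible, * and only then loads the VAT.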
*/ if (!sb_rdonly(sb)) { ret = -EACCES; goto out_bh; } UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); ret = udf_load_vat(sb, i, type1_idx); if (ret < 0) goto out_bh; } ret = 0; out_bh: /* In case loading failed, we handle cleanup in udf_fill_super */ brelse(bh); return ret; } static int udf_load_sparable_map(struct super_block *sb, struct udf_part_map *map, struct sparablePartitionMap *spm) { uint32_t loc; uint16_t ident; struct sparingTable *st; struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; int i; struct buffer_head *bh; map->s_partition_type = UDF_SPARABLE_MAP15; sdata->s_packet_len = le16_to_cpu(spm->packetLength); if (!is_power_of_2(sdata->s_packet_len)) { udf_err(sb, "error loading logical volume descriptor: " "Invalid packet length %u\n", (unsigned)sdata->s_packet_len); return -EIO; } if (spm->numSparingTables > 4) { udf_err(sb, "error loading logical volume descriptor: " "Too many sparing tables (%d)\n", (int)spm->numSparingTables); return -EIO; } if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) { udf_err(sb, "error loading logical volume descriptor: " "Too big sparing table size (%u)\n", le32_to_cpu(spm->sizeSparingTable)); return -EIO; } for (i = 0; i < spm->numSparingTables; i++) { loc = le32_to_cpu(spm->locSparingTable[i]); bh = udf_read_tagged(sb, loc, loc, &ident); if (!bh) continue; st = (struct sparingTable *)bh->b_data; if (ident != 0 || strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING)) || sizeof(*st) + le16_to_cpu(st->reallocationTableLen) > sb->s_blocksize) { brelse(bh); continue; } sdata->s_spar_map[i] = bh; } map->s_partition_func = udf_get_pblock_spar15; return 0; } static int udf_load_logicalvol(struct super_block *sb, sector_t block, struct kernel_lb_addr *fileset) { struct logicalVolDesc *lvd; int i, offset; uint8_t type; struct udf_sb_info *sbi = UDF_SB(sb); struct genericPartitionMap *gpm; uint16_t ident; struct buffer_head *bh; unsigned int table_len; int ret; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return -EAGAIN; BUG_ON(ident != TAG_IDENT_LVD); lvd = (struct logicalVolDesc *)bh->b_data; table_len = le32_to_cpu(lvd->mapTableLength); if (table_len > sb->s_blocksize - sizeof(*lvd)) { udf_err(sb, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); ret = -EIO; goto out_bh; } ret = udf_verify_domain_identifier(sb, &lvd->domainIdent, "logical volume"); if (ret) goto out_bh; ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); if (ret) goto out_bh; for (i = 0, offset = 0; i < sbi->s_partitions && offset < table_len; i++, offset += gpm->partitionMapLength) { struct udf_part_map *map = &sbi->s_partmaps[i]; gpm = (struct genericPartitionMap *) &(lvd->partitionMaps[offset]); type = gpm->partitionMapType; if (type == 1) { struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)gpm; map->s_partition_type = UDF_TYPE1_MAP15; map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); map->s_partition_num = le16_to_cpu(gpm1->partitionNum); map->s_partition_func = NULL; } else if (type == 2) { struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)gpm; if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) { u16 suf = le16_to_cpu(((__le16 *)upm2->partIdent. 
identSuffix)[0]); if (suf < 0x0200) { map->s_partition_type = UDF_VIRTUAL_MAP15; map->s_partition_func = udf_get_pblock_virt15; } else { map->s_partition_type = UDF_VIRTUAL_MAP20; map->s_partition_func = udf_get_pblock_virt20; } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { ret = udf_load_sparable_map(sb, map, (struct sparablePartitionMap *)gpm); if (ret < 0) goto out_bh; } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { struct udf_meta_data *mdata = &map->s_type_specific.s_metadata; struct metadataPartitionMap *mdm = (struct metadataPartitionMap *) &(lvd->partitionMaps[offset]); udf_debug("Parsing Logical vol part %d type %u id=%s\n", i, type, UDF_ID_METADATA); map->s_partition_type = UDF_METADATA_MAP25; map->s_partition_func = udf_get_pblock_meta25; mdata->s_meta_file_loc = le32_to_cpu(mdm->metadataFileLoc); mdata->s_mirror_file_loc = le32_to_cpu(mdm->metadataMirrorFileLoc); mdata->s_bitmap_file_loc = le32_to_cpu(mdm->metadataBitmapFileLoc); mdata->s_alloc_unit_size = le32_to_cpu(mdm->allocUnitSize); mdata->s_align_unit_size = le16_to_cpu(mdm->alignUnitSize); if (mdm->flags & 0x01) mdata->s_flags |= MF_DUPLICATE_MD; udf_debug("Metadata Ident suffix=0x%x\n", le16_to_cpu(*(__le16 *) mdm->partIdent.identSuffix)); udf_debug("Metadata part num=%u\n", le16_to_cpu(mdm->partitionNum)); udf_debug("Metadata part alloc unit size=%u\n", le32_to_cpu(mdm->allocUnitSize)); udf_debug("Metadata file loc=%u\n", le32_to_cpu(mdm->metadataFileLoc)); udf_debug("Mirror file loc=%u\n", le32_to_cpu(mdm->metadataMirrorFileLoc)); udf_debug("Bitmap file loc=%u\n", le32_to_cpu(mdm->metadataBitmapFileLoc)); udf_debug("Flags: %d %u\n", mdata->s_flags, mdm->flags); } else { udf_debug("Unknown ident: %s\n", upm2->partIdent.ident); continue; } map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); map->s_partition_num = le16_to_cpu(upm2->partitionNum); } udf_debug("Partition (%d:%u) type %u on volume %u\n", i, map->s_partition_num, type, map->s_volumeseqnum); } if (fileset) { struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]); *fileset = lelb_to_cpu(la->extLocation); udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); } if (lvd->integritySeqExt.extLength) udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); ret = 0; if (!sbi->s_lvid_bh) { /* We can't generate unique IDs without a valid LVID */ if (sb_rdonly(sb)) { UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); } else { udf_warn(sb, "Damaged or missing LVID, forcing " "readonly mount\n"); ret = -EACCES; } } out_bh: brelse(bh); return ret; } /* * Find the prevailing Logical Volume Integrity Descriptor. 
*/ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc) { struct buffer_head *bh, *final_bh; uint16_t ident; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; int indirections = 0; u32 parts, impuselen; while (++indirections <= UDF_MAX_LVID_NESTING) { final_bh = NULL; while (loc.extLength > 0 && (bh = udf_read_tagged(sb, loc.extLocation, loc.extLocation, &ident))) { if (ident != TAG_IDENT_LVID) { brelse(bh); break; } brelse(final_bh); final_bh = bh; loc.extLength -= sb->s_blocksize; loc.extLocation++; } if (!final_bh) return; brelse(sbi->s_lvid_bh); sbi->s_lvid_bh = final_bh; lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; if (lvid->nextIntegrityExt.extLength == 0) goto check; loc = leea_to_cpu(lvid->nextIntegrityExt); } udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", UDF_MAX_LVID_NESTING); out_err: brelse(sbi->s_lvid_bh); sbi->s_lvid_bh = NULL; return; check: parts = le32_to_cpu(lvid->numOfPartitions); impuselen = le32_to_cpu(lvid->lengthOfImpUse); if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize || sizeof(struct logicalVolIntegrityDesc) + impuselen + 2 * parts * sizeof(u32) > sb->s_blocksize) { udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), " "ignoring.\n", parts, impuselen); goto out_err; } } /* * Step for reallocation of table of partition descriptor sequence numbers. * Must be power of 2. */ #define PART_DESC_ALLOC_STEP 32 struct part_desc_seq_scan_data { struct udf_vds_record rec; u32 partnum; }; struct desc_seq_scan_data { struct udf_vds_record vds[VDS_POS_LENGTH]; unsigned int size_part_descs; unsigned int num_part_descs; struct part_desc_seq_scan_data *part_descs_loc; }; static struct udf_vds_record *handle_partition_descriptor( struct buffer_head *bh, struct desc_seq_scan_data *data) { struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; int partnum; int i; partnum = le16_to_cpu(desc->partitionNumber); for (i = 0; i < data->num_part_descs; i++) if (partnum == data->part_descs_loc[i].partnum) return &(data->part_descs_loc[i].rec); if (data->num_part_descs >= data->size_part_descs) { struct part_desc_seq_scan_data *new_loc; unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); if (!new_loc) return ERR_PTR(-ENOMEM); memcpy(new_loc, data->part_descs_loc, data->size_part_descs * sizeof(*new_loc)); kfree(data->part_descs_loc); data->part_descs_loc = new_loc; data->size_part_descs = new_size; } return &(data->part_descs_loc[data->num_part_descs++].rec); } static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident, struct buffer_head *bh, struct desc_seq_scan_data *data) { switch (ident) { case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]); case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]); case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]); case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]); case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ return handle_partition_descriptor(bh, data); } return NULL; } /* * Process a main/reserve volume descriptor sequence. * @block First block of first extent of the sequence. * @lastblock Lastblock of first extent of the sequence. * @fileset There we store extent containing root fileset * * Returns <0 on error, 0 on success. 
-EAGAIN is special - try next descriptor * sequence */ static noinline int udf_process_sequence( struct super_block *sb, sector_t block, sector_t lastblock, struct kernel_lb_addr *fileset) { struct buffer_head *bh = NULL; struct udf_vds_record *curr; struct generic_desc *gd; struct volDescPtr *vdp; bool done = false; uint32_t vdsn; uint16_t ident; int ret; unsigned int indirections = 0; struct desc_seq_scan_data data; unsigned int i; memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); data.size_part_descs = PART_DESC_ALLOC_STEP; data.num_part_descs = 0; data.part_descs_loc = kcalloc(data.size_part_descs, sizeof(*data.part_descs_loc), GFP_KERNEL); if (!data.part_descs_loc) return -ENOMEM; /* * Read the main descriptor sequence and find which descriptors * are in it. */ for (; (!done && block <= lastblock); block++) { bh = udf_read_tagged(sb, block, block, &ident); if (!bh) break; /* Process each descriptor (ISO 13346 3/8.3-8.4) */ gd = (struct generic_desc *)bh->b_data; vdsn = le32_to_cpu(gd->volDescSeqNum); switch (ident) { case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ if (++indirections > UDF_MAX_TD_NESTING) { udf_err(sb, "too many Volume Descriptor " "Pointers (max %u supported)\n", UDF_MAX_TD_NESTING); brelse(bh); ret = -EIO; goto out; } vdp = (struct volDescPtr *)bh->b_data; block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation); lastblock = le32_to_cpu( vdp->nextVolDescSeqExt.extLength) >> sb->s_blocksize_bits; lastblock += block - 1; /* For loop is going to increment 'block' again */ block--; break; case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ curr = get_volume_descriptor_record(ident, bh, &data); if (IS_ERR(curr)) { brelse(bh); ret = PTR_ERR(curr); goto out; } /* Descriptor we don't care about? 
*/ if (!curr) break; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; } break; case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ done = true; break; } brelse(bh); } /* * Now read interesting descriptors again and process them * in a suitable order */ if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) { udf_err(sb, "Primary Volume Descriptor not found!\n"); ret = -EAGAIN; goto out; } ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block); if (ret < 0) goto out; if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) { ret = udf_load_logicalvol(sb, data.vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset); if (ret < 0) goto out; } /* Now handle prevailing Partition Descriptors */ for (i = 0; i < data.num_part_descs; i++) { ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); if (ret < 0) goto out; } ret = 0; out: kfree(data.part_descs_loc); return ret; } /* * Load Volume Descriptor Sequence described by anchor in bh * * Returns <0 on error, 0 on success */ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh, struct kernel_lb_addr *fileset) { struct anchorVolDescPtr *anchor; sector_t main_s, main_e, reserve_s, reserve_e; int ret; anchor = (struct anchorVolDescPtr *)bh->b_data; /* Locate the main sequence */ main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); main_e = main_e >> sb->s_blocksize_bits; main_e += main_s - 1; /* Locate the reserve sequence */ reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); reserve_e = reserve_e >> sb->s_blocksize_bits; reserve_e += reserve_s - 1; /* Process the main & reserve sequences */ /* responsible for finding the PartitionDesc(s) */ ret = udf_process_sequence(sb, main_s, main_e, fileset); if (ret != -EAGAIN) return ret; udf_sb_free_partitions(sb); ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset); if (ret < 0) { udf_sb_free_partitions(sb); /* No sequence was OK, return -EIO */ if (ret == -EAGAIN) ret = -EIO; } return ret; } /* * Check whether there is an anchor block in the given block and * load Volume Descriptor Sequence if so. * * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor * block */ static int udf_check_anchor_block(struct super_block *sb, sector_t block, struct kernel_lb_addr *fileset) { struct buffer_head *bh; uint16_t ident; int ret; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return -EAGAIN; if (ident != TAG_IDENT_AVDP) { brelse(bh); return -EAGAIN; } ret = udf_load_sequence(sb, bh, fileset); brelse(bh); return ret; } /* * Search for an anchor volume descriptor pointer. * * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set * of anchors. */ static int udf_scan_anchors(struct super_block *sb, udf_pblk_t *lastblock, struct kernel_lb_addr *fileset) { udf_pblk_t last[6]; int i; struct udf_sb_info *sbi = UDF_SB(sb); int last_count = 0; int ret; /* First try user provided anchor */ if (sbi->s_anchor) { ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset); if (ret != -EAGAIN) return ret; } /* * according to spec, anchor is in either: * block 256 * lastblock-256 * lastblock * however, if the disc isn't closed, it could be 512. */ ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset); if (ret != -EAGAIN) return ret; /* * The trouble is which block is the last one. Drives often misreport * this so we try various possibilities. 
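 * The candidates tried below are lastblock itself, lastblock - 1, * lastblock + 1, lastblock - 2, lastblock - 150 and lastblock - 152, * and for each candidate that is large enough also the block 256 * sectors before it.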
*/ last[last_count++] = *lastblock; if (*lastblock >= 1) last[last_count++] = *lastblock - 1; last[last_count++] = *lastblock + 1; if (*lastblock >= 2) last[last_count++] = *lastblock - 2; if (*lastblock >= 150) last[last_count++] = *lastblock - 150; if (*lastblock >= 152) last[last_count++] = *lastblock - 152; for (i = 0; i < last_count; i++) { if (last[i] >= sb_bdev_nr_blocks(sb)) continue; ret = udf_check_anchor_block(sb, last[i], fileset); if (ret != -EAGAIN) { if (!ret) *lastblock = last[i]; return ret; } if (last[i] < 256) continue; ret = udf_check_anchor_block(sb, last[i] - 256, fileset); if (ret != -EAGAIN) { if (!ret) *lastblock = last[i]; return ret; } } /* Finally try block 512 in case media is open */ return udf_check_anchor_block(sb, sbi->s_session + 512, fileset); } /* * Check Volume Structure Descriptor, find Anchor block and load Volume * Descriptor Sequence. * * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor * block was not found. */ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, int silent, struct kernel_lb_addr *fileset) { struct udf_sb_info *sbi = UDF_SB(sb); int nsr = 0; int ret; if (!sb_set_blocksize(sb, uopt->blocksize)) { if (!silent) udf_warn(sb, "Bad block size\n"); return -EINVAL; } sbi->s_last_block = uopt->lastblock; if (!uopt->novrs) { /* Check that it is NSR02 compliant */ nsr = udf_check_vsd(sb); if (!nsr) { if (!silent) udf_warn(sb, "No VRS found\n"); return -EINVAL; } if (nsr == -1) udf_debug("Failed to read sector at offset %d. " "Assuming open disc. Skipping validity " "check\n", VSD_FIRST_SECTOR_OFFSET); if (!sbi->s_last_block) sbi->s_last_block = udf_get_last_block(sb); } else { udf_debug("Validity check skipped because of novrs option\n"); } /* Look for anchor block and load Volume Descriptor Sequence */ sbi->s_anchor = uopt->anchor; ret = udf_scan_anchors(sb, &sbi->s_last_block, fileset); if (ret < 0) { if (!silent && ret == -EAGAIN) udf_warn(sb, "No anchor found\n"); return ret; } return 0; } static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid) { struct timespec64 ts; ktime_get_real_ts64(&ts); udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts); lvid->descTag.descCRC = cpu_to_le16( crc_itu_t(0, (char *)lvid + sizeof(struct tag), le16_to_cpu(lvid->descTag.descCRCLength))); lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); } static void udf_open_lvid(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; struct logicalVolIntegrityDescImpUse *lvidiu; if (!bh) return; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sb); if (!lvidiu) return; mutex_lock(&sbi->s_alloc_mutex); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE) lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); else UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT); udf_finalize_lvid(lvid); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; mutex_unlock(&sbi->s_alloc_mutex); /* Make opening of filesystem visible on the media immediately */ sync_dirty_buffer(bh); } static void udf_close_lvid(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; struct logicalVolIntegrityDescImpUse *lvidiu; if (!bh) return; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sb); if (!lvidiu) 
return; mutex_lock(&sbi->s_alloc_mutex); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT)) lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); /* * We set buffer uptodate unconditionally here to avoid spurious * warnings from mark_buffer_dirty() when previous EIO has marked * the buffer as !uptodate */ set_buffer_uptodate(bh); udf_finalize_lvid(lvid); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; mutex_unlock(&sbi->s_alloc_mutex); /* Make closing of filesystem visible on the media immediately */ sync_dirty_buffer(bh); } u64 lvid_get_unique_id(struct super_block *sb) { struct buffer_head *bh; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; struct logicalVolHeaderDesc *lvhd; u64 uniqueID; u64 ret; bh = sbi->s_lvid_bh; if (!bh) return 0; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse; mutex_lock(&sbi->s_alloc_mutex); ret = uniqueID = le64_to_cpu(lvhd->uniqueID); if (!(++uniqueID & 0xFFFFFFFF)) uniqueID += 16; lvhd->uniqueID = cpu_to_le64(uniqueID); udf_updated_lvid(sb); mutex_unlock(&sbi->s_alloc_mutex); return ret; } static int udf_fill_super(struct super_block *sb, void *options, int silent) { int ret = -EINVAL; struct inode *inode = NULL; struct udf_options uopt; struct kernel_lb_addr rootdir, fileset; struct udf_sb_info *sbi; bool lvid_open = false; uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ uopt.uid = make_kuid(current_user_ns(), overflowuid); uopt.gid = make_kgid(current_user_ns(), overflowgid); uopt.umask = 0; uopt.fmode = UDF_INVALID_MODE; uopt.dmode = UDF_INVALID_MODE; uopt.nls_map = NULL; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; mutex_init(&sbi->s_alloc_mutex); if (!udf_parse_options((char *)options, &uopt, false)) goto parse_options_failure; fileset.logicalBlockNum = 0xFFFFFFFF; fileset.partitionReferenceNum = 0xFFFF; sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; sbi->s_gid = uopt.gid; sbi->s_umask = uopt.umask; sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; sbi->s_nls_map = uopt.nls_map; rwlock_init(&sbi->s_cred_lock); if (uopt.session == 0xFFFFFFFF) sbi->s_session = udf_get_last_session(sb); else sbi->s_session = uopt.session; udf_debug("Multi-session=%d\n", sbi->s_session); /* Fill in the rest of the superblock */ sb->s_op = &udf_sb_ops; sb->s_export_op = &udf_export_ops; sb->s_magic = UDF_SUPER_MAGIC; sb->s_time_gran = 1000; if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { ret = udf_load_vrs(sb, &uopt, silent, &fileset); } else { uopt.blocksize = bdev_logical_block_size(sb->s_bdev); while (uopt.blocksize <= 4096) { ret = udf_load_vrs(sb, &uopt, silent, &fileset); if (ret < 0) { if (!silent && ret != -EACCES) { pr_notice("Scanning with blocksize %u failed\n", uopt.blocksize); } brelse(sbi->s_lvid_bh); sbi->s_lvid_bh = NULL; /* * EACCES is special - we want to propagate to * upper layers that we cannot handle RW mount. 
*/ if (ret == -EACCES) break; } else break; uopt.blocksize <<= 1; } } if (ret < 0) { if (ret == -EAGAIN) { udf_warn(sb, "No partition found (1)\n"); ret = -EINVAL; } goto error_out; } udf_debug("Lastblock=%u\n", sbi->s_last_block); if (sbi->s_lvid_bh) { struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb); uint16_t minUDFReadRev; uint16_t minUDFWriteRev; if (!lvidiu) { ret = -EINVAL; goto error_out; } minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); if (minUDFReadRev > UDF_MAX_READ_VERSION) { udf_err(sb, "minUDFReadRev=%x (max is %x)\n", minUDFReadRev, UDF_MAX_READ_VERSION); ret = -EINVAL; goto error_out; } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) { if (!sb_rdonly(sb)) { ret = -EACCES; goto error_out; } UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); } sbi->s_udfrev = minUDFWriteRev; if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); if (minUDFReadRev >= UDF_VERS_USE_STREAMS) UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); } if (!sbi->s_partitions) { udf_warn(sb, "No partition found (2)\n"); ret = -EINVAL; goto error_out; } if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & UDF_PART_FLAG_READ_ONLY) { if (!sb_rdonly(sb)) { ret = -EACCES; goto error_out; } UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); } ret = udf_find_fileset(sb, &fileset, &rootdir); if (ret < 0) { udf_warn(sb, "No fileset found\n"); goto error_out; } if (!silent) { struct timestamp ts; udf_time_to_disk_stamp(&ts, sbi->s_record_time); udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n", sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day, ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone)); } if (!sb_rdonly(sb)) { udf_open_lvid(sb); lvid_open = true; } /* Assign the root inode */ /* assign inodes by physical block number */ /* perhaps it's not extensible enough, but for now ... */ inode = udf_iget(sb, &rootdir); if (IS_ERR(inode)) { udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n", rootdir.logicalBlockNum, rootdir.partitionReferenceNum); ret = PTR_ERR(inode); goto error_out; } /* Allocate a dentry for the root inode */ sb->s_root = d_make_root(inode); if (!sb->s_root) { udf_err(sb, "Couldn't allocate root dentry\n"); ret = -ENOMEM; goto error_out; } sb->s_maxbytes = UDF_MAX_FILESIZE; sb->s_max_links = UDF_MAX_LINKS; return 0; error_out: iput(sbi->s_vat_inode); parse_options_failure: unload_nls(uopt.nls_map); if (lvid_open) udf_close_lvid(sb); brelse(sbi->s_lvid_bh); udf_sb_free_partitions(sb); kfree(sbi); sb->s_fs_info = NULL; return ret; } void _udf_err(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf); va_end(args); } void _udf_warn(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf); va_end(args); } static void udf_put_super(struct super_block *sb) { struct udf_sb_info *sbi; sbi = UDF_SB(sb); iput(sbi->s_vat_inode); unload_nls(sbi->s_nls_map); if (!sb_rdonly(sb)) udf_close_lvid(sb); brelse(sbi->s_lvid_bh); udf_sb_free_partitions(sb); mutex_destroy(&sbi->s_alloc_mutex); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } static int udf_sync_fs(struct super_block *sb, int wait) { struct udf_sb_info *sbi = UDF_SB(sb); mutex_lock(&sbi->s_alloc_mutex); if (sbi->s_lvid_dirty) { struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; udf_finalize_lvid(lvid); /* * Blockdevice will be synced later so we don't have to submit * the buffer for IO */ mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; } mutex_unlock(&sbi->s_alloc_mutex); return 0; } static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDescImpUse *lvidiu; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); lvidiu = udf_sb_lvidiu(sb); buf->f_type = UDF_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; buf->f_bfree = udf_count_free(sb); buf->f_bavail = buf->f_bfree; /* * Let's pretend each free block is also a free 'inode' since UDF does * not have separate preallocated table of inodes. */ buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) + le32_to_cpu(lvidiu->numDirs)) : 0) + buf->f_bfree; buf->f_ffree = buf->f_bfree; buf->f_namelen = UDF_NAME_LEN; buf->f_fsid = u64_to_fsid(id); return 0; } static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap) { struct buffer_head *bh = NULL; unsigned int accum = 0; int index; udf_pblk_t block = 0, newblock; struct kernel_lb_addr loc; uint32_t bytes; uint8_t *ptr; uint16_t ident; struct spaceBitmapDesc *bm; loc.logicalBlockNum = bitmap->s_extPosition; loc.partitionReferenceNum = UDF_SB(sb)->s_partition; bh = udf_read_ptagged(sb, &loc, 0, &ident); if (!bh) { udf_err(sb, "udf_count_free failed\n"); goto out; } else if (ident != TAG_IDENT_SBD) { brelse(bh); udf_err(sb, "udf_count_free failed\n"); goto out; } bm = (struct spaceBitmapDesc *)bh->b_data; bytes = le32_to_cpu(bm->numOfBytes); index = sizeof(struct spaceBitmapDesc); /* offset in first block only */ ptr = (uint8_t *)bh->b_data; while (bytes > 0) { u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index); accum += bitmap_weight((const unsigned long *)(ptr + index), cur_bytes * 8); bytes -= cur_bytes; if (bytes) { brelse(bh); newblock = udf_get_lb_pblock(sb, &loc, ++block); bh = sb_bread(sb, newblock); if (!bh) { udf_debug("read failed\n"); goto out; } index = 0; ptr = (uint8_t *)bh->b_data; } } brelse(bh); out: return accum; } static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table) { unsigned int accum = 0; uint32_t elen; struct kernel_lb_addr eloc; struct extent_position epos; mutex_lock(&UDF_SB(sb)->s_alloc_mutex); epos.block = UDF_I(table)->i_location; epos.offset = sizeof(struct unallocSpaceEntry); epos.bh = NULL; while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1) accum += (elen >> table->i_sb->s_blocksize_bits); brelse(epos.bh); mutex_unlock(&UDF_SB(sb)->s_alloc_mutex); return accum; } static unsigned int udf_count_free(struct super_block *sb) { 
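/* Prefer the free space count cached in the LVID; fall back to counting the partition's space bitmap or space table by hand. */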
unsigned int accum = 0; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; unsigned int part = sbi->s_partition; int ptype = sbi->s_partmaps[part].s_partition_type; if (ptype == UDF_METADATA_MAP25) { part = sbi->s_partmaps[part].s_type_specific.s_metadata. s_phys_partition_ref; } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) { /* * Filesystems with VAT are append-only and we cannot write to * them. Let's just report 0 here. */ return 0; } if (sbi->s_lvid_bh) { struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *) sbi->s_lvid_bh->b_data; if (le32_to_cpu(lvid->numOfPartitions) > part) { accum = le32_to_cpu( lvid->freeSpaceTable[part]); if (accum == 0xFFFFFFFF) accum = 0; } } if (accum) return accum; map = &sbi->s_partmaps[part]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { accum += udf_count_free_bitmap(sb, map->s_uspace.s_bitmap); } if (accum) return accum; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { accum += udf_count_free_table(sb, map->s_uspace.s_table); } return accum; } MODULE_AUTHOR("Ben Fennema"); MODULE_DESCRIPTION("Universal Disk Format Filesystem"); MODULE_LICENSE("GPL"); module_init(init_udf_fs) module_exit(exit_udf_fs)
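/*
 * A minimal standalone sketch (not part of the original file; the function
 * name is made up for illustration) restating the advance rule implemented
 * in lvid_get_unique_id() above: after incrementing, a counter whose low
 * 32 bits wrapped around to zero is bumped by 16 so that the low word never
 * takes the values 0..15.
 */
static u64 __maybe_unused udf_next_unique_id_sketch(u64 id)
{
	id++;
	if (!(id & 0xFFFFFFFF))	/* low 32 bits wrapped around to zero */
		id += 16;	/* keep the low word out of 0..15 */
	return id;
}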
linux-master
fs/udf/super.c
// SPDX-License-Identifier: GPL-2.0-only /* * directory.c * * PURPOSE * Directory related functions * */ #include "udfdecl.h" #include "udf_i.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/bio.h> #include <linux/crc-itu-t.h> #include <linux/iversion.h> static int udf_verify_fi(struct udf_fileident_iter *iter) { unsigned int len; if (iter->fi.descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) { udf_err(iter->dir->i_sb, "directory (ino %lu) has entry at pos %llu with incorrect tag %x\n", iter->dir->i_ino, (unsigned long long)iter->pos, le16_to_cpu(iter->fi.descTag.tagIdent)); return -EFSCORRUPTED; } len = udf_dir_entry_len(&iter->fi); if (le16_to_cpu(iter->fi.lengthOfImpUse) & 3) { udf_err(iter->dir->i_sb, "directory (ino %lu) has entry at pos %llu with unaligned length of impUse field\n", iter->dir->i_ino, (unsigned long long)iter->pos); return -EFSCORRUPTED; } /* * This is in fact allowed by the spec due to long impUse field but * we don't support it. If there is real media with this large impUse * field, support can be added. */ if (len > 1 << iter->dir->i_blkbits) { udf_err(iter->dir->i_sb, "directory (ino %lu) has too big (%u) entry at pos %llu\n", iter->dir->i_ino, len, (unsigned long long)iter->pos); return -EFSCORRUPTED; } if (iter->pos + len > iter->dir->i_size) { udf_err(iter->dir->i_sb, "directory (ino %lu) has entry past directory size at pos %llu\n", iter->dir->i_ino, (unsigned long long)iter->pos); return -EFSCORRUPTED; } if (udf_dir_entry_len(&iter->fi) != sizeof(struct tag) + le16_to_cpu(iter->fi.descTag.descCRCLength)) { udf_err(iter->dir->i_sb, "directory (ino %lu) has entry where CRC length (%u) does not match entry length (%u)\n", iter->dir->i_ino, (unsigned)le16_to_cpu(iter->fi.descTag.descCRCLength), (unsigned)(udf_dir_entry_len(&iter->fi) - sizeof(struct tag))); return -EFSCORRUPTED; } return 0; } static int udf_copy_fi(struct udf_fileident_iter *iter) { struct udf_inode_info *iinfo = UDF_I(iter->dir); u32 blksize = 1 << iter->dir->i_blkbits; u32 off, len, nameoff; int err; /* Skip copying when we are at EOF */ if (iter->pos >= iter->dir->i_size) { iter->name = NULL; return 0; } if (iter->dir->i_size < iter->pos + sizeof(struct fileIdentDesc)) { udf_err(iter->dir->i_sb, "directory (ino %lu) has entry straddling EOF\n", iter->dir->i_ino); return -EFSCORRUPTED; } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { memcpy(&iter->fi, iinfo->i_data + iinfo->i_lenEAttr + iter->pos, sizeof(struct fileIdentDesc)); err = udf_verify_fi(iter); if (err < 0) return err; iter->name = iinfo->i_data + iinfo->i_lenEAttr + iter->pos + sizeof(struct fileIdentDesc) + le16_to_cpu(iter->fi.lengthOfImpUse); return 0; } off = iter->pos & (blksize - 1); len = min_t(u32, sizeof(struct fileIdentDesc), blksize - off); memcpy(&iter->fi, iter->bh[0]->b_data + off, len); if (len < sizeof(struct fileIdentDesc)) memcpy((char *)(&iter->fi) + len, iter->bh[1]->b_data, sizeof(struct fileIdentDesc) - len); err = udf_verify_fi(iter); if (err < 0) return err; /* Handle directory entry name */ nameoff = off + sizeof(struct fileIdentDesc) + le16_to_cpu(iter->fi.lengthOfImpUse); if (off + udf_dir_entry_len(&iter->fi) <= blksize) { iter->name = iter->bh[0]->b_data + nameoff; } else if (nameoff >= blksize) { iter->name = iter->bh[1]->b_data + (nameoff - blksize); } else { iter->name = iter->namebuf; len = blksize - nameoff; memcpy(iter->name, iter->bh[0]->b_data + nameoff, len); memcpy(iter->name + len, iter->bh[1]->b_data, iter->fi.lengthFileIdent - len); } return 0; } /* Readahead 8k once we 
are at 8k boundary */ static void udf_readahead_dir(struct udf_fileident_iter *iter) { unsigned int ralen = 16 >> (iter->dir->i_blkbits - 9); struct buffer_head *tmp, *bha[16]; int i, num; udf_pblk_t blk; if (iter->loffset & (ralen - 1)) return; if (iter->loffset + ralen > (iter->elen >> iter->dir->i_blkbits)) ralen = (iter->elen >> iter->dir->i_blkbits) - iter->loffset; num = 0; for (i = 0; i < ralen; i++) { blk = udf_get_lb_pblock(iter->dir->i_sb, &iter->eloc, iter->loffset + i); tmp = sb_getblk(iter->dir->i_sb, blk); if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp)) bha[num++] = tmp; else brelse(tmp); } if (num) { bh_readahead_batch(num, bha, REQ_RAHEAD); for (i = 0; i < num; i++) brelse(bha[i]); } } static struct buffer_head *udf_fiiter_bread_blk(struct udf_fileident_iter *iter) { udf_pblk_t blk; udf_readahead_dir(iter); blk = udf_get_lb_pblock(iter->dir->i_sb, &iter->eloc, iter->loffset); return sb_bread(iter->dir->i_sb, blk); } /* * Updates loffset to point to next directory block; eloc, elen & epos are * updated if we need to traverse to the next extent as well. */ static int udf_fiiter_advance_blk(struct udf_fileident_iter *iter) { iter->loffset++; if (iter->loffset < DIV_ROUND_UP(iter->elen, 1<<iter->dir->i_blkbits)) return 0; iter->loffset = 0; if (udf_next_aext(iter->dir, &iter->epos, &iter->eloc, &iter->elen, 1) != (EXT_RECORDED_ALLOCATED >> 30)) { if (iter->pos == iter->dir->i_size) { iter->elen = 0; return 0; } udf_err(iter->dir->i_sb, "extent after position %llu not allocated in directory (ino %lu)\n", (unsigned long long)iter->pos, iter->dir->i_ino); return -EFSCORRUPTED; } return 0; } static int udf_fiiter_load_bhs(struct udf_fileident_iter *iter) { int blksize = 1 << iter->dir->i_blkbits; int off = iter->pos & (blksize - 1); int err; struct fileIdentDesc *fi; /* Is there any further extent we can map from? */ if (!iter->bh[0] && iter->elen) { iter->bh[0] = udf_fiiter_bread_blk(iter); if (!iter->bh[0]) { err = -ENOMEM; goto out_brelse; } if (!buffer_uptodate(iter->bh[0])) { err = -EIO; goto out_brelse; } } /* There's no next block so we are done */ if (iter->pos >= iter->dir->i_size) return 0; /* Need to fetch next block as well? */ if (off + sizeof(struct fileIdentDesc) > blksize) goto fetch_next; fi = (struct fileIdentDesc *)(iter->bh[0]->b_data + off); /* Need to fetch next block to get name? */ if (off + udf_dir_entry_len(fi) > blksize) { fetch_next: err = udf_fiiter_advance_blk(iter); if (err) goto out_brelse; iter->bh[1] = udf_fiiter_bread_blk(iter); if (!iter->bh[1]) { err = -ENOMEM; goto out_brelse; } if (!buffer_uptodate(iter->bh[1])) { err = -EIO; goto out_brelse; } } return 0; out_brelse: brelse(iter->bh[0]); brelse(iter->bh[1]); iter->bh[0] = iter->bh[1] = NULL; return err; } int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir, loff_t pos) { struct udf_inode_info *iinfo = UDF_I(dir); int err = 0; iter->dir = dir; iter->bh[0] = iter->bh[1] = NULL; iter->pos = pos; iter->elen = 0; iter->epos.bh = NULL; iter->name = NULL; /* * When directory is verified, we don't expect directory iteration to * fail and it can be difficult to undo without corrupting filesystem. * So just do not allow memory allocation failures here. 
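 * __GFP_NOFAIL below makes the allocation retry indefinitely instead of * returning NULL, which is why the result is used without an error check.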
*/ iter->namebuf = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL | __GFP_NOFAIL); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { err = udf_copy_fi(iter); goto out; } if (inode_bmap(dir, iter->pos >> dir->i_blkbits, &iter->epos, &iter->eloc, &iter->elen, &iter->loffset) != (EXT_RECORDED_ALLOCATED >> 30)) { if (pos == dir->i_size) return 0; udf_err(dir->i_sb, "position %llu not allocated in directory (ino %lu)\n", (unsigned long long)pos, dir->i_ino); err = -EFSCORRUPTED; goto out; } err = udf_fiiter_load_bhs(iter); if (err < 0) goto out; err = udf_copy_fi(iter); out: if (err < 0) udf_fiiter_release(iter); return err; } int udf_fiiter_advance(struct udf_fileident_iter *iter) { unsigned int oldoff, len; int blksize = 1 << iter->dir->i_blkbits; int err; oldoff = iter->pos & (blksize - 1); len = udf_dir_entry_len(&iter->fi); iter->pos += len; if (UDF_I(iter->dir)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (oldoff + len >= blksize) { brelse(iter->bh[0]); iter->bh[0] = NULL; /* Next block already loaded? */ if (iter->bh[1]) { iter->bh[0] = iter->bh[1]; iter->bh[1] = NULL; } else { err = udf_fiiter_advance_blk(iter); if (err < 0) return err; } } err = udf_fiiter_load_bhs(iter); if (err < 0) return err; } return udf_copy_fi(iter); } void udf_fiiter_release(struct udf_fileident_iter *iter) { iter->dir = NULL; brelse(iter->bh[0]); brelse(iter->bh[1]); iter->bh[0] = iter->bh[1] = NULL; kfree(iter->namebuf); iter->namebuf = NULL; } static void udf_copy_to_bufs(void *buf1, int len1, void *buf2, int len2, int off, void *src, int len) { int copy; if (off >= len1) { off -= len1; } else { copy = min(off + len, len1) - off; memcpy(buf1 + off, src, copy); src += copy; len -= copy; off = 0; } if (len > 0) { if (WARN_ON_ONCE(off + len > len2 || !buf2)) return; memcpy(buf2 + off, src, len); } } static uint16_t udf_crc_fi_bufs(void *buf1, int len1, void *buf2, int len2, int off, int len) { int copy; uint16_t crc = 0; if (off >= len1) { off -= len1; } else { copy = min(off + len, len1) - off; crc = crc_itu_t(crc, buf1 + off, copy); len -= copy; off = 0; } if (len > 0) { if (WARN_ON_ONCE(off + len > len2 || !buf2)) return 0; crc = crc_itu_t(crc, buf2 + off, len); } return crc; } static void udf_copy_fi_to_bufs(char *buf1, int len1, char *buf2, int len2, int off, struct fileIdentDesc *fi, uint8_t *impuse, uint8_t *name) { uint16_t crc; int fioff = off; int crcoff = off + sizeof(struct tag); unsigned int crclen = udf_dir_entry_len(fi) - sizeof(struct tag); char zeros[UDF_NAME_PAD] = {}; int endoff = off + udf_dir_entry_len(fi); udf_copy_to_bufs(buf1, len1, buf2, len2, off, fi, sizeof(struct fileIdentDesc)); off += sizeof(struct fileIdentDesc); if (impuse) udf_copy_to_bufs(buf1, len1, buf2, len2, off, impuse, le16_to_cpu(fi->lengthOfImpUse)); off += le16_to_cpu(fi->lengthOfImpUse); if (name) { udf_copy_to_bufs(buf1, len1, buf2, len2, off, name, fi->lengthFileIdent); off += fi->lengthFileIdent; udf_copy_to_bufs(buf1, len1, buf2, len2, off, zeros, endoff - off); } crc = udf_crc_fi_bufs(buf1, len1, buf2, len2, crcoff, crclen); fi->descTag.descCRC = cpu_to_le16(crc); fi->descTag.descCRCLength = cpu_to_le16(crclen); fi->descTag.tagChecksum = udf_tag_checksum(&fi->descTag); udf_copy_to_bufs(buf1, len1, buf2, len2, fioff, fi, sizeof(struct tag)); } void udf_fiiter_write_fi(struct udf_fileident_iter *iter, uint8_t *impuse) { struct udf_inode_info *iinfo = UDF_I(iter->dir); void *buf1, *buf2 = NULL; int len1, len2 = 0, off; int blksize = 1 << iter->dir->i_blkbits; off = iter->pos & (blksize - 1); if (iinfo->i_alloc_type == 
ICBTAG_FLAG_AD_IN_ICB) { buf1 = iinfo->i_data + iinfo->i_lenEAttr; len1 = iter->dir->i_size; } else { buf1 = iter->bh[0]->b_data; len1 = blksize; if (iter->bh[1]) { buf2 = iter->bh[1]->b_data; len2 = blksize; } } udf_copy_fi_to_bufs(buf1, len1, buf2, len2, off, &iter->fi, impuse, iter->name == iter->namebuf ? iter->name : NULL); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { mark_inode_dirty(iter->dir); } else { mark_buffer_dirty_inode(iter->bh[0], iter->dir); if (iter->bh[1]) mark_buffer_dirty_inode(iter->bh[1], iter->dir); } inode_inc_iversion(iter->dir); } void udf_fiiter_update_elen(struct udf_fileident_iter *iter, uint32_t new_elen) { struct udf_inode_info *iinfo = UDF_I(iter->dir); int diff = new_elen - iter->elen; /* Skip update when we already went past the last extent */ if (!iter->elen) return; iter->elen = new_elen; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) iter->epos.offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) iter->epos.offset -= sizeof(struct long_ad); udf_write_aext(iter->dir, &iter->epos, &iter->eloc, iter->elen, 1); iinfo->i_lenExtents += diff; mark_inode_dirty(iter->dir); } /* Append new block to directory. @iter is expected to point at EOF */ int udf_fiiter_append_blk(struct udf_fileident_iter *iter) { struct udf_inode_info *iinfo = UDF_I(iter->dir); int blksize = 1 << iter->dir->i_blkbits; struct buffer_head *bh; sector_t block; uint32_t old_elen = iter->elen; int err; if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)) return -EINVAL; /* Round up last extent in the file */ udf_fiiter_update_elen(iter, ALIGN(iter->elen, blksize)); /* Allocate new block and refresh mapping information */ block = iinfo->i_lenExtents >> iter->dir->i_blkbits; bh = udf_bread(iter->dir, block, 1, &err); if (!bh) { udf_fiiter_update_elen(iter, old_elen); return err; } if (inode_bmap(iter->dir, block, &iter->epos, &iter->eloc, &iter->elen, &iter->loffset) != (EXT_RECORDED_ALLOCATED >> 30)) { udf_err(iter->dir->i_sb, "block %llu not allocated in directory (ino %lu)\n", (unsigned long long)block, iter->dir->i_ino); return -EFSCORRUPTED; } if (!(iter->pos & (blksize - 1))) { brelse(iter->bh[0]); iter->bh[0] = bh; } else { iter->bh[1] = bh; } return 0; } struct short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc) { struct short_ad *sa; if ((!ptr) || (!offset)) { pr_err("%s: invalidparms\n", __func__); return NULL; } if ((*offset + sizeof(struct short_ad)) > maxoffset) return NULL; else { sa = (struct short_ad *)ptr; if (sa->extLength == 0) return NULL; } if (inc) *offset += sizeof(struct short_ad); return sa; } struct long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc) { struct long_ad *la; if ((!ptr) || (!offset)) { pr_err("%s: invalidparms\n", __func__); return NULL; } if ((*offset + sizeof(struct long_ad)) > maxoffset) return NULL; else { la = (struct long_ad *)ptr; if (la->extLength == 0) return NULL; } if (inc) *offset += sizeof(struct long_ad); return la; }
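/*
 * A hedged usage sketch (not part of the original file; the function name
 * is made up) showing how a caller such as readdir is expected to drive the
 * iterator defined above: init, advance until i_size, then release. Once
 * init has succeeded, udf_fiiter_release() must run on every exit path.
 */
static int __maybe_unused udf_dir_walk_sketch(struct inode *dir)
{
	struct udf_fileident_iter iter;
	int ret;

	for (ret = udf_fiiter_init(&iter, dir, 0);
	     !ret && iter.pos < dir->i_size;
	     ret = udf_fiiter_advance(&iter)) {
		if (iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED)
			continue;	/* skip deleted entries */
		/* iter.fi and iter.name now describe one valid entry */
	}
	udf_fiiter_release(&iter);
	return ret;
}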
linux-master
fs/udf/directory.c
// SPDX-License-Identifier: GPL-2.0-only /* * unicode.c * * PURPOSE * Routines for converting between UTF-8 and OSTA Compressed Unicode. * Also handles filename mangling * * DESCRIPTION * OSTA Compressed Unicode is explained in the OSTA UDF specification. * http://www.osta.org/ * UTF-8 is explained in the IETF RFC XXXX. * ftp://ftp.internic.net/rfc/rfcxxxx.txt * */ #include "udfdecl.h" #include <linux/kernel.h> #include <linux/string.h> /* for memset */ #include <linux/nls.h> #include <linux/crc-itu-t.h> #include <linux/slab.h> #include "udf_sb.h" #define PLANE_SIZE 0x10000 #define UNICODE_MAX 0x10ffff #define SURROGATE_MASK 0xfffff800 #define SURROGATE_PAIR 0x0000d800 #define SURROGATE_LOW 0x00000400 #define SURROGATE_CHAR_BITS 10 #define SURROGATE_CHAR_MASK ((1 << SURROGATE_CHAR_BITS) - 1) #define ILLEGAL_CHAR_MARK '_' #define EXT_MARK '.' #define CRC_MARK '#' #define EXT_SIZE 5 /* Number of chars we need to store generated CRC to make filename unique */ #define CRC_LEN 5 static unicode_t get_utf16_char(const uint8_t *str_i, int str_i_max_len, int str_i_idx, int u_ch, unicode_t *ret) { unicode_t c; int start_idx = str_i_idx; /* Expand OSTA compressed Unicode to Unicode */ c = str_i[str_i_idx++]; if (u_ch > 1) c = (c << 8) | str_i[str_i_idx++]; if ((c & SURROGATE_MASK) == SURROGATE_PAIR) { unicode_t next; /* Trailing surrogate char */ if (str_i_idx >= str_i_max_len) { c = UNICODE_MAX + 1; goto out; } /* Low surrogate must follow the high one... */ if (c & SURROGATE_LOW) { c = UNICODE_MAX + 1; goto out; } WARN_ON_ONCE(u_ch != 2); next = str_i[str_i_idx++] << 8; next |= str_i[str_i_idx++]; if ((next & SURROGATE_MASK) != SURROGATE_PAIR || !(next & SURROGATE_LOW)) { c = UNICODE_MAX + 1; goto out; } c = PLANE_SIZE + ((c & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) + (next & SURROGATE_CHAR_MASK); } out: *ret = c; return str_i_idx - start_idx; } static int udf_name_conv_char(uint8_t *str_o, int str_o_max_len, int *str_o_idx, const uint8_t *str_i, int str_i_max_len, int *str_i_idx, int u_ch, int *needsCRC, int (*conv_f)(wchar_t, unsigned char *, int), int translate) { unicode_t c; int illChar = 0; int len, gotch = 0; while (!gotch && *str_i_idx < str_i_max_len) { if (*str_o_idx >= str_o_max_len) { *needsCRC = 1; return gotch; } len = get_utf16_char(str_i, str_i_max_len, *str_i_idx, u_ch, &c); /* These chars cannot be converted. Replace them. */ if (c == 0 || c > UNICODE_MAX || (conv_f && c > MAX_WCHAR_T) || (translate && c == '/')) { illChar = 1; if (!translate) gotch = 1; } else if (illChar) break; else gotch = 1; *str_i_idx += len; } if (illChar) { *needsCRC = 1; c = ILLEGAL_CHAR_MARK; gotch = 1; } if (gotch) { if (conv_f) { len = conv_f(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); } else { len = utf32_to_utf8(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); if (len < 0) len = -ENAMETOOLONG; } /* Valid character? 
*/ if (len >= 0) *str_o_idx += len; else if (len == -ENAMETOOLONG) { *needsCRC = 1; gotch = 0; } else { str_o[(*str_o_idx)++] = ILLEGAL_CHAR_MARK; *needsCRC = 1; } } return gotch; } static int udf_name_from_CS0(struct super_block *sb, uint8_t *str_o, int str_max_len, const uint8_t *ocu, int ocu_len, int translate) { uint32_t c; uint8_t cmp_id; int idx, len; int u_ch; int needsCRC = 0; int ext_i_len, ext_max_len; int str_o_len = 0; /* Length of resulting output */ int ext_o_len = 0; /* Extension output length */ int ext_crc_len = 0; /* Extension output length if used with CRC */ int i_ext = -1; /* Extension position in input buffer */ int o_crc = 0; /* Rightmost possible output pos for CRC+ext */ unsigned short valueCRC; uint8_t ext[EXT_SIZE * NLS_MAX_CHARSET_SIZE + 1]; uint8_t crc[CRC_LEN]; int (*conv_f)(wchar_t, unsigned char *, int); if (str_max_len <= 0) return 0; if (ocu_len == 0) { memset(str_o, 0, str_max_len); return 0; } if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->uni2char; else conv_f = NULL; cmp_id = ocu[0]; if (cmp_id != 8 && cmp_id != 16) { memset(str_o, 0, str_max_len); pr_err("unknown compression code (%u)\n", cmp_id); return -EINVAL; } u_ch = cmp_id >> 3; ocu++; ocu_len--; if (ocu_len % u_ch) { pr_err("incorrect filename length (%d)\n", ocu_len + 1); return -EINVAL; } if (translate) { /* Look for extension */ for (idx = ocu_len - u_ch, ext_i_len = 0; (idx >= 0) && (ext_i_len < EXT_SIZE); idx -= u_ch, ext_i_len++) { c = ocu[idx]; if (u_ch > 1) c = (c << 8) | ocu[idx + 1]; if (c == EXT_MARK) { if (ext_i_len) i_ext = idx; break; } } if (i_ext >= 0) { /* Convert extension */ ext_max_len = min_t(int, sizeof(ext), str_max_len); ext[ext_o_len++] = EXT_MARK; idx = i_ext + u_ch; while (udf_name_conv_char(ext, ext_max_len, &ext_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) { if ((ext_o_len + CRC_LEN) < str_max_len) ext_crc_len = ext_o_len; } } } idx = 0; while (1) { if (translate && (idx == i_ext)) { if (str_o_len > (str_max_len - ext_o_len)) needsCRC = 1; break; } if (!udf_name_conv_char(str_o, str_max_len, &str_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) break; if (translate && (str_o_len <= (str_max_len - ext_o_len - CRC_LEN))) o_crc = str_o_len; } if (translate) { if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' && (str_o_len == 1 || str_o[1] == '.')) needsCRC = 1; if (needsCRC) { str_o_len = o_crc; valueCRC = crc_itu_t(0, ocu, ocu_len); crc[0] = CRC_MARK; crc[1] = hex_asc_upper_hi(valueCRC >> 8); crc[2] = hex_asc_upper_lo(valueCRC >> 8); crc[3] = hex_asc_upper_hi(valueCRC); crc[4] = hex_asc_upper_lo(valueCRC); len = min_t(int, CRC_LEN, str_max_len - str_o_len); memcpy(&str_o[str_o_len], crc, len); str_o_len += len; ext_o_len = ext_crc_len; } if (ext_o_len > 0) { memcpy(&str_o[str_o_len], ext, ext_o_len); str_o_len += ext_o_len; } } return str_o_len; } static int udf_name_to_CS0(struct super_block *sb, uint8_t *ocu, int ocu_max_len, const uint8_t *str_i, int str_len) { int i, len; unsigned int max_val; int u_len, u_ch; unicode_t uni_char; int (*conv_f)(const unsigned char *, int, wchar_t *); if (ocu_max_len <= 0) return 0; if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->char2uni; else conv_f = NULL; memset(ocu, 0, ocu_max_len); ocu[0] = 8; max_val = 0xff; u_ch = 1; try_again: u_len = 1; for (i = 0; i < str_len; i += len) { /* Name didn't fit? 
*/ if (u_len + u_ch > ocu_max_len) return 0; if (conv_f) { wchar_t wchar; len = conv_f(&str_i[i], str_len - i, &wchar); if (len > 0) uni_char = wchar; } else { len = utf8_to_utf32(&str_i[i], str_len - i, &uni_char); } /* Invalid character, deal with it */ if (len <= 0 || uni_char > UNICODE_MAX) { len = 1; uni_char = '?'; } if (uni_char > max_val) { unicode_t c; if (max_val == 0xff) { max_val = 0xffff; ocu[0] = 0x10; u_ch = 2; goto try_again; } /* * Use a UTF-16 surrogate pair for chars outside the BMP that we * cannot encode directly. */ if (u_len + 2 * u_ch > ocu_max_len) return 0; uni_char -= PLANE_SIZE; c = SURROGATE_PAIR | ((uni_char >> SURROGATE_CHAR_BITS) & SURROGATE_CHAR_MASK); ocu[u_len++] = (uint8_t)(c >> 8); ocu[u_len++] = (uint8_t)(c & 0xff); uni_char = SURROGATE_PAIR | SURROGATE_LOW | (uni_char & SURROGATE_CHAR_MASK); } if (max_val == 0xffff) ocu[u_len++] = (uint8_t)(uni_char >> 8); ocu[u_len++] = (uint8_t)(uni_char & 0xff); } return u_len; } /* * Convert CS0 dstring to output charset. Warning: This function may truncate * input string if it is too long as it is used for informational strings only * and it is better to truncate the string than to refuse to mount the medium. */ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) { int s_len = 0; if (i_len > 0) { s_len = ocu_i[i_len - 1]; if (s_len >= i_len) { pr_warn("incorrect dstring lengths (%d/%d)," " truncating\n", s_len, i_len); s_len = i_len - 1; /* 2-byte encoding? Round down to a whole number of 16-bit chars... */ if (ocu_i[0] == 16) s_len -= (s_len - 1) & 1; } } return udf_name_from_CS0(sb, utf_o, o_len, ocu_i, s_len, 0); } int udf_get_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { int ret; if (!slen) return -EIO; if (dlen <= 0) return 0; ret = udf_name_from_CS0(sb, dname, dlen, sname, slen, 1); /* Zero-length filename isn't valid... */ if (ret == 0) ret = -EINVAL; return ret; } int udf_put_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { return udf_name_to_CS0(sb, dname, dlen, sname, slen); }
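/*
 * Illustrative example (compiled out, hypothetical helper; not part of the
 * original file): the surrogate arithmetic used by get_utf16_char() and
 * udf_name_to_CS0() above. For U+1F600, uni_char - PLANE_SIZE = 0xF600; the
 * high ten bits (0x3D) form the leading surrogate 0xD800 | 0x3D = 0xD83D and
 * the low ten bits (0x200) the trailing surrogate 0xDC00 | 0x200 = 0xDE00.
 */
#if 0
static unicode_t surrogate_pair_example(void)
{
	unicode_t high = 0xD83D;	/* leading surrogate of U+1F600 */
	unicode_t low = 0xDE00;		/* trailing surrogate of U+1F600 */

	/* Same decode formula as get_utf16_char(); evaluates to 0x1F600. */
	return PLANE_SIZE +
	       ((high & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) +
	       (low & SURROGATE_CHAR_MASK);
}
#endif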
linux-master
fs/udf/unicode.c
// SPDX-License-Identifier: GPL-2.0-only /* * balloc.c * * PURPOSE * Block allocation handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1999-2001 Ben Fennema * (C) 1999 Stelias Computing Inc * * HISTORY * * 02/24/99 blf Created. * */ #include "udfdecl.h" #include <linux/bitops.h> #include "udf_i.h" #include "udf_sb.h" #define udf_clear_bit __test_and_clear_bit_le #define udf_set_bit __test_and_set_bit_le #define udf_test_bit test_bit_le #define udf_find_next_one_bit find_next_bit_le static int read_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr) { struct buffer_head *bh = NULL; int i; int max_bits, off, count; struct kernel_lb_addr loc; loc.logicalBlockNum = bitmap->s_extPosition; loc.partitionReferenceNum = UDF_SB(sb)->s_partition; bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block)); bitmap->s_block_bitmap[bitmap_nr] = bh; if (!bh) return -EIO; /* Check consistency of Space Bitmap buffer. */ max_bits = sb->s_blocksize * 8; if (!bitmap_nr) { off = sizeof(struct spaceBitmapDesc) << 3; count = min(max_bits - off, bitmap->s_nr_groups); } else { /* * Rough check if bitmap number is too big to have any bitmap * blocks reserved. */ if (bitmap_nr > (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2) return 0; off = 0; count = bitmap->s_nr_groups - bitmap_nr * max_bits + (sizeof(struct spaceBitmapDesc) << 3); count = min(count, max_bits); } for (i = 0; i < count; i++) if (udf_test_bit(i + off, bh->b_data)) return -EFSCORRUPTED; return 0; } static int __load_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block_group) { int retval = 0; int nr_groups = bitmap->s_nr_groups; if (block_group >= nr_groups) { udf_debug("block_group (%u) > nr_groups (%d)\n", block_group, nr_groups); } if (bitmap->s_block_bitmap[block_group]) return block_group; retval = read_block_bitmap(sb, bitmap, block_group, block_group); if (retval < 0) return retval; return block_group; } static inline int load_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block_group) { int slot; slot = __load_block_bitmap(sb, bitmap, block_group); if (slot < 0) return slot; if (!bitmap->s_block_bitmap[slot]) return -EIO; return slot; } static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt) { struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; if (!sbi->s_lvid_bh) return; lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; le32_add_cpu(&lvid->freeSpaceTable[partition], cnt); udf_updated_lvid(sb); } static void udf_bitmap_free_blocks(struct super_block *sb, struct udf_bitmap *bitmap, struct kernel_lb_addr *bloc, uint32_t offset, uint32_t count) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = NULL; struct udf_part_map *partmap; unsigned long block; unsigned long block_group; unsigned long bit; unsigned long i; int bitmap_nr; unsigned long overflow; mutex_lock(&sbi->s_alloc_mutex); partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; if (bloc->logicalBlockNum + count < count || (bloc->logicalBlockNum + count) > partmap->s_partition_len) { udf_debug("%u < %d || %u + %u > %u\n", bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, partmap->s_partition_len); goto error_return; } block = bloc->logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3); do { overflow = 0; block_group = block >> (sb->s_blocksize_bits + 3); bit = block % (sb->s_blocksize << 3); /* * Check to see if we are freeing blocks across a group 
boundary. */ if (bit + count > (sb->s_blocksize << 3)) { overflow = bit + count - (sb->s_blocksize << 3); count -= overflow; } bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto error_return; bh = bitmap->s_block_bitmap[bitmap_nr]; for (i = 0; i < count; i++) { if (udf_set_bit(bit + i, bh->b_data)) { udf_debug("bit %lu already set\n", bit + i); udf_debug("byte=%2x\n", ((__u8 *)bh->b_data)[(bit + i) >> 3]); } } udf_add_free_space(sb, sbi->s_partition, count); mark_buffer_dirty(bh); if (overflow) { block += count; count = overflow; } } while (overflow); error_return: mutex_unlock(&sbi->s_alloc_mutex); } static int udf_bitmap_prealloc_blocks(struct super_block *sb, struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block, uint32_t block_count) { struct udf_sb_info *sbi = UDF_SB(sb); int alloc_count = 0; int bit, block, block_group; int bitmap_nr; struct buffer_head *bh; __u32 part_len; mutex_lock(&sbi->s_alloc_mutex); part_len = sbi->s_partmaps[partition].s_partition_len; if (first_block >= part_len) goto out; if (first_block + block_count > part_len) block_count = part_len - first_block; do { block = first_block + (sizeof(struct spaceBitmapDesc) << 3); block_group = block >> (sb->s_blocksize_bits + 3); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto out; bh = bitmap->s_block_bitmap[bitmap_nr]; bit = block % (sb->s_blocksize << 3); while (bit < (sb->s_blocksize << 3) && block_count > 0) { if (!udf_clear_bit(bit, bh->b_data)) goto out; block_count--; alloc_count++; bit++; block++; } mark_buffer_dirty(bh); } while (block_count > 0); out: udf_add_free_space(sb, partition, -alloc_count); mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } static udf_pblk_t udf_bitmap_new_block(struct super_block *sb, struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err) { struct udf_sb_info *sbi = UDF_SB(sb); int newbit, bit = 0; udf_pblk_t block; int block_group, group_start; int end_goal, nr_groups, bitmap_nr, i; struct buffer_head *bh = NULL; char *ptr; udf_pblk_t newblock = 0; *err = -ENOSPC; mutex_lock(&sbi->s_alloc_mutex); repeat: if (goal >= sbi->s_partmaps[partition].s_partition_len) goal = 0; nr_groups = bitmap->s_nr_groups; block = goal + (sizeof(struct spaceBitmapDesc) << 3); block_group = block >> (sb->s_blocksize_bits + 3); group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto error_return; bh = bitmap->s_block_bitmap[bitmap_nr]; ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { bit = block % (sb->s_blocksize << 3); if (udf_test_bit(bit, bh->b_data)) goto got_block; end_goal = (bit + 63) & ~63; bit = udf_find_next_one_bit(bh->b_data, end_goal, bit); if (bit < end_goal) goto got_block; ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3)); newbit = (ptr - ((char *)bh->b_data)) << 3; if (newbit < sb->s_blocksize << 3) { bit = newbit; goto search_back; } newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit); if (newbit < sb->s_blocksize << 3) { bit = newbit; goto got_block; } } for (i = 0; i < (nr_groups * 2); i++) { block_group++; if (block_group >= nr_groups) block_group = 0; group_start = block_group ? 
0 : sizeof(struct spaceBitmapDesc); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto error_return; bh = bitmap->s_block_bitmap[bitmap_nr]; if (i < nr_groups) { ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { bit = (ptr - ((char *)bh->b_data)) << 3; break; } } else { bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit < sb->s_blocksize << 3) break; } } if (i >= (nr_groups * 2)) { mutex_unlock(&sbi->s_alloc_mutex); return newblock; } if (bit < sb->s_blocksize << 3) goto search_back; else bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit >= sb->s_blocksize << 3) { mutex_unlock(&sbi->s_alloc_mutex); return 0; } search_back: i = 0; while (i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data)) { ++i; --bit; } got_block: newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - (sizeof(struct spaceBitmapDesc) << 3); if (newblock >= sbi->s_partmaps[partition].s_partition_len) { /* * Ran off the end of the bitmap, and bits following are * non-compliant (not all zero) */ udf_err(sb, "bitmap for partition %d corrupted (block %u marked" " as free, partition length is %u)\n", partition, newblock, sbi->s_partmaps[partition].s_partition_len); goto error_return; } if (!udf_clear_bit(bit, bh->b_data)) { udf_debug("bit already cleared for block %d\n", bit); goto repeat; } mark_buffer_dirty(bh); udf_add_free_space(sb, partition, -1); mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; error_return: *err = -EIO; mutex_unlock(&sbi->s_alloc_mutex); return 0; } static void udf_table_free_blocks(struct super_block *sb, struct inode *table, struct kernel_lb_addr *bloc, uint32_t offset, uint32_t count) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *partmap; uint32_t start, end; uint32_t elen; struct kernel_lb_addr eloc; struct extent_position oepos, epos; int8_t etype; struct udf_inode_info *iinfo; mutex_lock(&sbi->s_alloc_mutex); partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; if (bloc->logicalBlockNum + count < count || (bloc->logicalBlockNum + count) > partmap->s_partition_len) { udf_debug("%u < %d || %u + %u > %u\n", bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, partmap->s_partition_len); goto error_return; } iinfo = UDF_I(table); udf_add_free_space(sb, sbi->s_partition, count); start = bloc->logicalBlockNum + offset; end = bloc->logicalBlockNum + offset + count - 1; epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry); elen = 0; epos.block = oepos.block = iinfo->i_location; epos.bh = oepos.bh = NULL; while (count && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) { if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { uint32_t tmp = ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); count -= tmp; start += tmp; elen = (etype << 30) | (0x40000000 - sb->s_blocksize); } else { elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); start += count; count = 0; } udf_write_aext(table, &oepos, &eloc, elen, 1); } else if (eloc.logicalBlockNum == (end + 1)) { if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { uint32_t tmp = ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); count -= tmp; end -= tmp; eloc.logicalBlockNum -= tmp; elen = (etype << 30) | (0x40000000 - sb->s_blocksize); } else { eloc.logicalBlockNum = start; elen = (etype << 30) | (elen 
+ (count << sb->s_blocksize_bits)); end -= count; count = 0; } udf_write_aext(table, &oepos, &eloc, elen, 1); } if (epos.bh != oepos.bh) { oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = 0; } else { oepos.offset = epos.offset; } } if (count) { /* * NOTE: we CANNOT use udf_add_aext here, as it can try to * allocate a new block, and since we hold the super block * lock already very bad things would happen :) * * We copy the behavior of udf_add_aext, but instead of * trying to allocate a new block close to the existing one, * we just steal a block from the extent we are trying to add. * * It would be nice if the blocks were close together, but it * isn't required. */ int adsize; eloc.logicalBlockNum = start; elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else { brelse(oepos.bh); brelse(epos.bh); goto error_return; } if (epos.offset + (2 * adsize) > sb->s_blocksize) { /* Steal a block from the extent being free'd */ udf_setup_indirect_aext(table, eloc.logicalBlockNum, &epos); eloc.logicalBlockNum++; elen -= sb->s_blocksize; } /* It's possible that stealing the block emptied the extent */ if (elen) __udf_add_aext(table, &epos, &eloc, elen, 1); } brelse(epos.bh); brelse(oepos.bh); error_return: mutex_unlock(&sbi->s_alloc_mutex); return; } static int udf_table_prealloc_blocks(struct super_block *sb, struct inode *table, uint16_t partition, uint32_t first_block, uint32_t block_count) { struct udf_sb_info *sbi = UDF_SB(sb); int alloc_count = 0; uint32_t elen, adsize; struct kernel_lb_addr eloc; struct extent_position epos; int8_t etype = -1; struct udf_inode_info *iinfo; if (first_block >= sbi->s_partmaps[partition].s_partition_len) return 0; iinfo = UDF_I(table); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return 0; mutex_lock(&sbi->s_alloc_mutex); epos.offset = sizeof(struct unallocSpaceEntry); epos.block = iinfo->i_location; epos.bh = NULL; eloc.logicalBlockNum = 0xFFFFFFFF; while (first_block != eloc.logicalBlockNum && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { udf_debug("eloc=%u, elen=%u, first_block=%u\n", eloc.logicalBlockNum, elen, first_block); ; /* empty loop body */ } if (first_block == eloc.logicalBlockNum) { epos.offset -= adsize; alloc_count = (elen >> sb->s_blocksize_bits); if (alloc_count > block_count) { alloc_count = block_count; eloc.logicalBlockNum += alloc_count; elen -= (alloc_count << sb->s_blocksize_bits); udf_write_aext(table, &epos, &eloc, (etype << 30) | elen, 1); } else udf_delete_aext(table, epos); } else { alloc_count = 0; } brelse(epos.bh); if (alloc_count) udf_add_free_space(sb, partition, -alloc_count); mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } static udf_pblk_t udf_table_new_block(struct super_block *sb, struct inode *table, uint16_t partition, uint32_t goal, int *err) { struct udf_sb_info *sbi = UDF_SB(sb); uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF; udf_pblk_t newblock = 0; uint32_t adsize; uint32_t elen, goal_elen = 0; struct kernel_lb_addr eloc, goal_eloc; struct extent_position epos, goal_epos; int8_t etype; struct udf_inode_info *iinfo = UDF_I(table); *err = -ENOSPC; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if 
(iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return newblock; mutex_lock(&sbi->s_alloc_mutex); if (goal >= sbi->s_partmaps[partition].s_partition_len) goal = 0; /* We search for the closest matching block to goal. If we find an exact hit, we stop. Otherwise we keep going till we run out of extents. We store the buffer_head, bloc, and extoffset of the current closest match and use that when we are done. */ epos.offset = sizeof(struct unallocSpaceEntry); epos.block = iinfo->i_location; epos.bh = goal_epos.bh = NULL; while (spread && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { if (goal >= eloc.logicalBlockNum) { if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) nspread = 0; else nspread = goal - eloc.logicalBlockNum - (elen >> sb->s_blocksize_bits); } else { nspread = eloc.logicalBlockNum - goal; } if (nspread < spread) { spread = nspread; if (goal_epos.bh != epos.bh) { brelse(goal_epos.bh); goal_epos.bh = epos.bh; get_bh(goal_epos.bh); } goal_epos.block = epos.block; goal_epos.offset = epos.offset - adsize; goal_eloc = eloc; goal_elen = (etype << 30) | elen; } } brelse(epos.bh); if (spread == 0xFFFFFFFF) { brelse(goal_epos.bh); mutex_unlock(&sbi->s_alloc_mutex); return 0; } /* Only allocate blocks from the beginning of the extent. That way, we only delete (empty) extents, never have to insert an extent because of splitting */ /* This works, but very poorly.... */ newblock = goal_eloc.logicalBlockNum; goal_eloc.logicalBlockNum++; goal_elen -= sb->s_blocksize; if (goal_elen) udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); else udf_delete_aext(table, goal_epos); brelse(goal_epos.bh); udf_add_free_space(sb, partition, -1); mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; } void udf_free_blocks(struct super_block *sb, struct inode *inode, struct kernel_lb_addr *bloc, uint32_t offset, uint32_t count) { uint16_t partition = bloc->partitionReferenceNum; struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap, bloc, offset, count); } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { udf_table_free_blocks(sb, map->s_uspace.s_table, bloc, offset, count); } if (inode) { inode_sub_bytes(inode, ((sector_t)count) << sb->s_blocksize_bits); } } inline int udf_prealloc_blocks(struct super_block *sb, struct inode *inode, uint16_t partition, uint32_t first_block, uint32_t block_count) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; int allocated; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) allocated = udf_bitmap_prealloc_blocks(sb, map->s_uspace.s_bitmap, partition, first_block, block_count); else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) allocated = udf_table_prealloc_blocks(sb, map->s_uspace.s_table, partition, first_block, block_count); else return 0; if (inode && allocated > 0) inode_add_bytes(inode, allocated << sb->s_blocksize_bits); return allocated; } inline udf_pblk_t udf_new_block(struct super_block *sb, struct inode *inode, uint16_t partition, uint32_t goal, int *err) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; udf_pblk_t block; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) block = udf_bitmap_new_block(sb, map->s_uspace.s_bitmap, partition, goal, err); else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) block = udf_table_new_block(sb, map->s_uspace.s_table, partition, goal, err); 
else { *err = -EIO; return 0; } if (inode && block) inode_add_bytes(inode, sb->s_blocksize); return block; }
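/*
 * Illustrative example (compiled out, hypothetical helper; not part of the
 * original file): the bitmap geometry used by udf_bitmap_free_blocks() and
 * udf_bitmap_new_block() above. Every bitmap block covers s_blocksize * 8
 * blocks, and the bits occupied by the space bitmap descriptor itself are
 * skipped by adding sizeof(struct spaceBitmapDesc) << 3. Assuming 2048-byte
 * blocks (16384 bits per group), logical block 20000 falls into bitmap
 * block 1.
 */
#if 0
static void bitmap_position_example(struct super_block *sb)
{
	unsigned long block, block_group, bit;

	block = 20000 + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);	/* bitmap block */
	bit = block % (sb->s_blocksize << 3);			/* bit within it */
}
#endif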
linux-master
fs/udf/balloc.c
// SPDX-License-Identifier: GPL-2.0-only /* * partition.c * * PURPOSE * Partition handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1998-2001 Ben Fennema * * HISTORY * * 12/06/98 blf Created file. * */ #include "udfdecl.h" #include "udf_sb.h" #include "udf_i.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/mutex.h> uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; if (partition >= sbi->s_partitions) { udf_debug("block=%u, partition=%u, offset=%u: invalid partition\n", block, partition, offset); return 0xFFFFFFFF; } map = &sbi->s_partmaps[partition]; if (map->s_partition_func) return map->s_partition_func(sb, block, partition, offset); else return map->s_partition_root + block + offset; } uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct buffer_head *bh = NULL; uint32_t newblock; uint32_t index; uint32_t loc; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_virtual_data *vdata; struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode); int err; map = &sbi->s_partmaps[partition]; vdata = &map->s_type_specific.s_virtual; if (block > vdata->s_num_entries) { udf_debug("Trying to access block beyond end of VAT (%u max %u)\n", block, vdata->s_num_entries); return 0xFFFFFFFF; } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { loc = le32_to_cpu(((__le32 *)(iinfo->i_data + vdata->s_start_offset))[block]); goto translate; } index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t); if (block >= index) { block -= index; newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t))); index = block % (sb->s_blocksize / sizeof(uint32_t)); } else { newblock = 0; index = vdata->s_start_offset / sizeof(uint32_t) + block; } bh = udf_bread(sbi->s_vat_inode, newblock, 0, &err); if (!bh) { udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%u,%u)\n", sb, block, partition); return 0xFFFFFFFF; } loc = le32_to_cpu(((__le32 *)bh->b_data)[index]); brelse(bh); translate: if (iinfo->i_location.partitionReferenceNum == partition) { udf_debug("recursive call to udf_get_pblock!\n"); return 0xFFFFFFFF; } return udf_get_pblock(sb, loc, iinfo->i_location.partitionReferenceNum, offset); } inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { return udf_get_pblock_virt15(sb, block, partition, offset); } uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { int i; struct sparingTable *st = NULL; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; uint32_t packet; struct udf_sparing_data *sdata; map = &sbi->s_partmaps[partition]; sdata = &map->s_type_specific.s_sparing; packet = (block + offset) & ~(sdata->s_packet_len - 1); for (i = 0; i < 4; i++) { if (sdata->s_spar_map[i] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[i]->b_data; break; } } if (st) { for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { struct sparingEntry *entry = &st->mapEntry[i]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc >= 0xFFFFFFF0) break; else if (origLoc == packet) return le32_to_cpu(entry->mappedLocation) + ((block + offset) & (sdata->s_packet_len - 1)); else if (origLoc > packet) break; } } return map->s_partition_root + block + offset; } int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) { struct 
udf_sparing_data *sdata; struct sparingTable *st = NULL; struct sparingEntry mapEntry; uint32_t packet; int i, j, k, l; struct udf_sb_info *sbi = UDF_SB(sb); u16 reallocationTableLen; struct buffer_head *bh; int ret = 0; mutex_lock(&sbi->s_alloc_mutex); for (i = 0; i < sbi->s_partitions; i++) { struct udf_part_map *map = &sbi->s_partmaps[i]; if (old_block > map->s_partition_root && old_block < map->s_partition_root + map->s_partition_len) { sdata = &map->s_type_specific.s_sparing; packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1); for (j = 0; j < 4; j++) if (sdata->s_spar_map[j] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[j]->b_data; break; } if (!st) { ret = 1; goto out; } reallocationTableLen = le16_to_cpu(st->reallocationTableLen); for (k = 0; k < reallocationTableLen; k++) { struct sparingEntry *entry = &st->mapEntry[k]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc == 0xFFFFFFFF) { for (; j < 4; j++) { int len; bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *) bh->b_data; entry->origLocation = cpu_to_le32(packet); len = sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct sparingEntry); udf_update_tag((char *)st, len); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc == packet) { *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc > packet) break; } for (l = k; l < reallocationTableLen; l++) { struct sparingEntry *entry = &st->mapEntry[l]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc != 0xFFFFFFFF) continue; for (; j < 4; j++) { bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *)bh->b_data; mapEntry = st->mapEntry[l]; mapEntry.origLocation = cpu_to_le32(packet); memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry)); st->mapEntry[k] = mapEntry; udf_update_tag((char *)st, sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct sparingEntry)); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( st->mapEntry[k].mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } ret = 1; goto out; } /* if old_block */ } if (i == sbi->s_partitions) { /* outside of partitions */ /* for now, fail =) */ ret = 1; } out: mutex_unlock(&sbi->s_alloc_mutex); return ret; } static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block, uint16_t partition, uint32_t offset) { struct super_block *sb = inode->i_sb; struct udf_part_map *map; struct kernel_lb_addr eloc; uint32_t elen; sector_t ext_offset; struct extent_position epos = {}; uint32_t phyblock; if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) != (EXT_RECORDED_ALLOCATED >> 30)) phyblock = 0xFFFFFFFF; else { map = &UDF_SB(sb)->s_partmaps[partition]; /* map to sparable/physical partition desc */ phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, map->s_type_specific.s_metadata.s_phys_partition_ref, ext_offset + offset); } brelse(epos.bh); return phyblock; } uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_meta_data *mdata; uint32_t retblk; struct inode *inode; udf_debug("READING from METADATA\n"); map = &sbi->s_partmaps[partition]; mdata = 
&map->s_type_specific.s_metadata; inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; if (!inode) return 0xFFFFFFFF; retblk = udf_try_read_meta(inode, block, partition, offset); if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, mdata->s_phys_partition_ref); if (IS_ERR(mdata->s_mirror_fe)) mdata->s_mirror_fe = NULL; mdata->s_flags |= MF_MIRROR_FE_LOADED; } inode = mdata->s_mirror_fe; if (!inode) return 0xFFFFFFFF; retblk = udf_try_read_meta(inode, block, partition, offset); } return retblk; }
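/*
 * Illustrative example (compiled out, hypothetical values; not part of the
 * original file): the VAT lookup arithmetic in udf_get_pblock_virt15()
 * above. The first block of the VAT file holds
 * (blocksize - s_start_offset) / 4 entries; every further block holds
 * blocksize / 4. Assuming 2048-byte blocks and s_start_offset == 152,
 * virtual block 600 skips the 474 entries of block 0 and resolves to entry
 * 126 of VAT block 1.
 */
#if 0
static void vat_lookup_example(void)
{
	uint32_t entries_blk0 = (2048 - 152) / sizeof(uint32_t);  /* 474 */
	uint32_t rest = 600 - entries_blk0;                       /* 126 */
	uint32_t newblock = 1 + rest / (2048 / sizeof(uint32_t)); /* 1 */
	uint32_t index = rest % (2048 / sizeof(uint32_t));        /* 126 */
}
#endif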
linux-master
fs/udf/partition.c
// SPDX-License-Identifier: GPL-2.0-only /* * lowlevel.c * * PURPOSE * Low Level Device Routines for the UDF filesystem * * COPYRIGHT * (C) 1999-2001 Ben Fennema * * HISTORY * * 03/26/99 blf Created. */ #include "udfdecl.h" #include <linux/blkdev.h> #include <linux/cdrom.h> #include <linux/uaccess.h> #include "udf_sb.h" unsigned int udf_get_last_session(struct super_block *sb) { struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); struct cdrom_multisession ms_info; if (!cdi) { udf_debug("CDROMMULTISESSION not supported.\n"); return 0; } ms_info.addr_format = CDROM_LBA; if (cdrom_multisession(cdi, &ms_info) == 0) { udf_debug("XA disk: %s, vol_desc_start=%d\n", ms_info.xa_flag ? "yes" : "no", ms_info.addr.lba); if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */ return ms_info.addr.lba; } return 0; } udf_pblk_t udf_get_last_block(struct super_block *sb) { struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); unsigned long lblock = 0; /* * The cdrom layer call failed or returned obviously bogus value? * Try using the device size... */ if (!cdi || cdrom_get_last_written(cdi, &lblock) || lblock == 0) { if (sb_bdev_nr_blocks(sb) > ~(udf_pblk_t)0) return 0; lblock = sb_bdev_nr_blocks(sb); } if (lblock) return lblock - 1; return 0; }
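/*
 * Usage note (compiled out, hypothetical helper; not part of the original
 * file): udf_get_last_block() above prefers the CD-ROM layer's last-written
 * block and only falls back to the block device size when that query fails
 * or yields 0. Either way the returned value is the number of the last
 * readable block (count - 1), which the mount code probes when looking for
 * anchor volume descriptors.
 */
#if 0
static udf_pblk_t last_block_fallback_example(struct super_block *sb)
{
	sector_t nblocks = sb_bdev_nr_blocks(sb); /* device size in fs blocks */

	return nblocks ? nblocks - 1 : 0;         /* mirrors the fallback above */
}
#endif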
linux-master
fs/udf/lowlevel.c
// SPDX-License-Identifier: GPL-2.0-only /* * dir.c * * PURPOSE * Directory handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1998-2004 Ben Fennema * * HISTORY * * 10/05/98 dgb Split directory operations into its own file * Implemented directory reads via do_udf_readdir * 10/06/98 Made directory operations work! * 11/17/98 Rewrote directory to support ICBTAG_FLAG_AD_LONG * 11/25/98 blf Rewrote directory handling (readdir+lookup) to support reading * across blocks. * 12/12/98 Split out the lookup code to namei.c. bulk of directory * code now in directory.c:udf_fileident_read. */ #include "udfdecl.h" #include <linux/string.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/bio.h> #include <linux/iversion.h> #include "udf_i.h" #include "udf_sb.h" static int udf_readdir(struct file *file, struct dir_context *ctx) { struct inode *dir = file_inode(file); loff_t nf_pos, emit_pos = 0; int flen; unsigned char *fname = NULL; int ret = 0; struct super_block *sb = dir->i_sb; bool pos_valid = false; struct udf_fileident_iter iter; if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) return 0; ctx->pos = 1; } nf_pos = (ctx->pos - 1) << 2; if (nf_pos >= dir->i_size) goto out; /* * Something changed since last readdir (either lseek was called or dir * changed)? We need to verify the position correctly points at the * beginning of some dir entry so that the directory parsing code does * not get confused. Since UDF does not have any reliable way of * identifying beginning of dir entry (names are under user control), * we need to scan the directory from the beginning. */ if (!inode_eq_iversion(dir, file->f_version)) { emit_pos = nf_pos; nf_pos = 0; } else { pos_valid = true; } fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) { ret = -ENOMEM; goto out; } for (ret = udf_fiiter_init(&iter, dir, nf_pos); !ret && iter.pos < dir->i_size; ret = udf_fiiter_advance(&iter)) { struct kernel_lb_addr tloc; udf_pblk_t iblock; /* Still not at offset where user asked us to read from? */ if (iter.pos < emit_pos) continue; /* Update file position only if we got past the current one */ pos_valid = true; ctx->pos = (iter.pos >> 2) + 1; if (iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED) { if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) continue; } if (iter.fi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) { if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) continue; } if (iter.fi.fileCharacteristics & FID_FILE_CHAR_PARENT) { if (!dir_emit_dotdot(file, ctx)) goto out_iter; continue; } flen = udf_get_filename(sb, iter.name, iter.fi.lengthFileIdent, fname, UDF_NAME_LEN); if (flen < 0) continue; tloc = lelb_to_cpu(iter.fi.icb.extLocation); iblock = udf_get_lb_pblock(sb, &tloc, 0); if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN)) goto out_iter; } if (!ret) { ctx->pos = (iter.pos >> 2) + 1; pos_valid = true; } out_iter: udf_fiiter_release(&iter); out: if (pos_valid) file->f_version = inode_query_iversion(dir); kfree(fname); return ret; } /* readdir and lookup functions */ const struct file_operations udf_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = udf_readdir, .unlocked_ioctl = udf_ioctl, .fsync = generic_file_fsync, };
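/*
 * Worked example (compiled out; not part of the original file): the f_pos
 * encoding used by udf_readdir() above. UDF file identifier descriptors are
 * 4-byte aligned, so the VFS position is the directory byte offset divided
 * by 4, shifted up by one because position 0 is reserved for emitting ".".
 */
#if 0
static void readdir_pos_example(void)
{
	loff_t pos = 5;			/* ctx->pos as handed in by the VFS */
	loff_t nf_pos = (pos - 1) << 2;	/* byte offset 16 inside the dir */

	pos = (nf_pos >> 2) + 1;	/* round trip back to 5 */
}
#endif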
linux-master
fs/udf/dir.c
// SPDX-License-Identifier: GPL-2.0-only /* * inode.c * * PURPOSE * Inode handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/04/98 dgb Added rudimentary directory functions * 10/07/98 Fully working udf_block_map! It works! * 11/25/98 bmap altered to better support extents * 12/06/98 blf partition support in udf_iget, udf_block_map * and udf_read_inode * 12/12/98 rewrote udf_block_map to handle next extents and descs across * block boundaries (which is not actually allowed) * 12/20/98 added support for strategy 4096 * 03/07/99 rewrote udf_block_map (again) * New funcs, inode_bmap, udf_next_aext * 04/19/99 Support for writing device EA's for major/minor # */ #include "udfdecl.h" #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include <linux/mpage.h> #include <linux/uio.h> #include <linux/bio.h> #include "udf_i.h" #include "udf_sb.h" #define EXTENT_MERGE_SIZE 5 #define FE_MAPPED_PERMS (FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \ FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \ FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC) #define FE_DELETE_PERMS (FE_PERM_U_DELETE | FE_PERM_G_DELETE | \ FE_PERM_O_DELETE) struct udf_map_rq; static umode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static int inode_getblk(struct inode *inode, struct udf_map_rq *map); static int udf_insert_aext(struct inode *, struct extent_position, struct kernel_lb_addr, uint32_t); static void udf_split_extents(struct inode *, int *, int, udf_pblk_t, struct kernel_long_ad *, int *); static void udf_prealloc_extents(struct inode *, int, int, struct kernel_long_ad *, int *); static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *); static int udf_update_extents(struct inode *, struct kernel_long_ad *, int, int, struct extent_position *); static int udf_get_block_wb(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create); static void __udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->cached_extent.lstart != -1) { brelse(iinfo->cached_extent.epos.bh); iinfo->cached_extent.lstart = -1; } } /* Invalidate extent cache */ static void udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); __udf_clear_extent_cache(inode); spin_unlock(&iinfo->i_extent_cache_lock); } /* Return contents of extent cache */ static int udf_read_extent_cache(struct inode *inode, loff_t bcount, loff_t *lbcount, struct extent_position *pos) { struct udf_inode_info *iinfo = UDF_I(inode); int ret = 0; spin_lock(&iinfo->i_extent_cache_lock); if ((iinfo->cached_extent.lstart <= bcount) && (iinfo->cached_extent.lstart != -1)) { /* Cache hit */ *lbcount = iinfo->cached_extent.lstart; memcpy(pos, &iinfo->cached_extent.epos, sizeof(struct extent_position)); if (pos->bh) get_bh(pos->bh); ret = 1; } spin_unlock(&iinfo->i_extent_cache_lock); return ret; } /* Add extent to extent cache */ static void udf_update_extent_cache(struct inode *inode, loff_t estart, struct extent_position *pos) { struct udf_inode_info *iinfo = UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); /* Invalidate 
previously cached extent */ __udf_clear_extent_cache(inode); if (pos->bh) get_bh(pos->bh); memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos)); iinfo->cached_extent.lstart = estart; switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: iinfo->cached_extent.epos.offset -= sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: iinfo->cached_extent.epos.offset -= sizeof(struct long_ad); break; } spin_unlock(&iinfo->i_extent_cache_lock); } void udf_evict_inode(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; if (!is_bad_inode(inode)) { if (!inode->i_nlink) { want_delete = 1; udf_setsize(inode, 0); udf_update_inode(inode, IS_SYNC(inode)); } if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && inode->i_size != iinfo->i_lenExtents) { udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", inode->i_ino, inode->i_mode, (unsigned long long)inode->i_size, (unsigned long long)iinfo->i_lenExtents); } } truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); kfree(iinfo->i_data); iinfo->i_data = NULL; udf_clear_extent_cache(inode); if (want_delete) { udf_free_inode(inode); } } static void udf_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = inode->i_size; if (to > isize) { truncate_pagecache(inode, isize); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } } } static int udf_adinicb_writepage(struct folio *folio, struct writeback_control *wbc, void *data) { struct inode *inode = folio->mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); BUG_ON(!folio_test_locked(folio)); BUG_ON(folio->index != 0); memcpy_from_file_folio(iinfo->i_data + iinfo->i_lenEAttr, folio, 0, i_size_read(inode)); folio_unlock(folio); mark_inode_dirty(inode); return 0; } static int udf_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) return mpage_writepages(mapping, wbc, udf_get_block_wb); return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL); } static void udf_adinicb_readpage(struct page *page) { struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = i_size_read(inode); kaddr = kmap_local_page(page); memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize); memset(kaddr + isize, 0, PAGE_SIZE - isize); flush_dcache_page(page); SetPageUptodate(page); kunmap_local(kaddr); } static int udf_read_folio(struct file *file, struct folio *folio) { struct udf_inode_info *iinfo = UDF_I(file_inode(file)); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { udf_adinicb_readpage(&folio->page); folio_unlock(folio); return 0; } return mpage_read_folio(folio, udf_get_block); } static void udf_readahead(struct readahead_control *rac) { struct udf_inode_info *iinfo = UDF_I(rac->mapping->host); /* * No readahead needed for in-ICB files and udf_get_block() would get * confused for such file anyway. 
*/ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) return; mpage_readahead(rac, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct udf_inode_info *iinfo = UDF_I(file_inode(file)); struct page *page; int ret; if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { ret = block_write_begin(mapping, pos, len, pagep, udf_get_block); if (unlikely(ret)) udf_write_failed(mapping, pos + len); return ret; } if (WARN_ON_ONCE(pos >= PAGE_SIZE)) return -EIO; page = grab_cache_page_write_begin(mapping, 0); if (!page) return -ENOMEM; *pagep = page; if (!PageUptodate(page)) udf_adinicb_readpage(page); return 0; } static int udf_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); loff_t last_pos; if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) return generic_write_end(file, mapping, pos, len, copied, page, fsdata); last_pos = pos + copied; if (last_pos > inode->i_size) i_size_write(inode, last_pos); set_page_dirty(page); unlock_page(page); put_page(page); return copied; } static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; /* Fallback to buffered IO for in-ICB files */ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) return 0; ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block); if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE)) udf_write_failed(mapping, iocb->ki_pos + count); return ret; } static sector_t udf_bmap(struct address_space *mapping, sector_t block) { struct udf_inode_info *iinfo = UDF_I(mapping->host); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) return -EINVAL; return generic_block_bmap(mapping, block, udf_get_block); } const struct address_space_operations udf_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = udf_read_folio, .readahead = udf_readahead, .writepages = udf_writepages, .write_begin = udf_write_begin, .write_end = udf_write_end, .direct_IO = udf_direct_IO, .bmap = udf_bmap, .migrate_folio = buffer_migrate_folio, }; /* * Expand file stored in ICB to a normal one-block-file * * This function requires i_mutex held */ int udf_expand_file_adinicb(struct inode *inode) { struct page *page; struct udf_inode_info *iinfo = UDF_I(inode); int err; WARN_ON_ONCE(!inode_is_locked(inode)); if (!iinfo->i_lenAlloc) { down_write(&iinfo->i_data_sem); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) udf_adinicb_readpage(page); down_write(&iinfo->i_data_sem); memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; set_page_dirty(page); unlock_page(page); up_write(&iinfo->i_data_sem); err = filemap_fdatawrite(inode->i_mapping); if (err) { /* Restore everything back so that we don't lose data... 
*/ lock_page(page); down_write(&iinfo->i_data_sem); memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr, inode->i_size); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; iinfo->i_lenAlloc = inode->i_size; up_write(&iinfo->i_data_sem); } put_page(page); mark_inode_dirty(inode); return err; } #define UDF_MAP_CREATE 0x01 /* Mapping can allocate new blocks */ #define UDF_MAP_NOPREALLOC 0x02 /* Do not preallocate blocks */ #define UDF_BLK_MAPPED 0x01 /* Block was successfully mapped */ #define UDF_BLK_NEW 0x02 /* Block was freshly allocated */ struct udf_map_rq { sector_t lblk; udf_pblk_t pblk; int iflags; /* UDF_MAP_ flags determining behavior */ int oflags; /* UDF_BLK_ flags reporting results */ }; static int udf_map_block(struct inode *inode, struct udf_map_rq *map) { int err; struct udf_inode_info *iinfo = UDF_I(inode); if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)) return -EFSCORRUPTED; map->oflags = 0; if (!(map->iflags & UDF_MAP_CREATE)) { struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; down_read(&iinfo->i_data_sem); if (inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset); map->oflags |= UDF_BLK_MAPPED; } up_read(&iinfo->i_data_sem); brelse(epos.bh); return 0; } down_write(&iinfo->i_data_sem); /* * Block beyond EOF and prealloc extents? Just discard preallocation * as it is not useful and complicates things. */ if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents) udf_discard_prealloc(inode); udf_clear_extent_cache(inode); err = inode_getblk(inode, map); up_write(&iinfo->i_data_sem); return err; } static int __udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int flags) { int err; struct udf_map_rq map = { .lblk = block, .iflags = flags, }; err = udf_map_block(inode, &map); if (err < 0) return err; if (map.oflags & UDF_BLK_MAPPED) { map_bh(bh_result, inode->i_sb, map.pblk); if (map.oflags & UDF_BLK_NEW) set_buffer_new(bh_result); } return 0; } int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { int flags = create ? UDF_MAP_CREATE : 0; /* * We preallocate blocks only for regular files. It also makes sense * for directories but there's a problem when to drop the * preallocation. We might use some delayed work for that but I feel * it's overengineering for a filesystem like UDF. */ if (!S_ISREG(inode->i_mode)) flags |= UDF_MAP_NOPREALLOC; return __udf_get_block(inode, block, bh_result, flags); } /* * We shouldn't be allocating blocks on page writeback since we allocate them * on page fault. We can spot dirty buffers without allocated blocks though * when truncate expands file. These however don't have valid data so we can * safely ignore them. So never allocate blocks from page writeback. 
*/ static int udf_get_block_wb(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { return __udf_get_block(inode, block, bh_result, 0); } /* Extend the file with new blocks totaling 'new_block_bytes', * return the number of extents added */ static int udf_do_extend_file(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, loff_t new_block_bytes) { uint32_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; struct udf_inode_info *iinfo; int err; /* The previous extent is fake and we should not extend by anything * - there's nothing to do... */ if (!new_block_bytes && fake) return 0; iinfo = UDF_I(inode); /* Round the last extent up to a multiple of block size */ if (last_ext->extLength & (sb->s_blocksize - 1)) { last_ext->extLength = (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); } add = 0; /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { add = (1 << 30) - sb->s_blocksize - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); if (add > new_block_bytes) add = new_block_bytes; new_block_bytes -= add; last_ext->extLength += add; } if (fake) { err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err < 0) goto out_err; count++; } else { struct kernel_lb_addr tmploc; uint32_t tmplen; udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); /* * We've rewritten the last extent. If we are going to add * more extents, we may need to enter possible following * empty indirect extent. */ if (new_block_bytes) udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); } iinfo->i_lenExtents += add; /* Managed to do everything necessary? */ if (!new_block_bytes) goto out; /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; add = (1 << 30) - sb->s_blocksize; last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add; /* Create enough extents to cover the whole hole */ while (new_block_bytes > add) { new_block_bytes -= add; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) goto out_err; iinfo->i_lenExtents += add; count++; } if (new_block_bytes) { last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | new_block_bytes; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) goto out_err; iinfo->i_lenExtents += new_block_bytes; count++; } out: /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) last_pos->offset -= sizeof(struct long_ad); else return -EIO; return count; out_err: /* Remove extents we've created so far */ udf_clear_extent_cache(inode); udf_truncate_extents(inode); return err; } /* Extend the final block of the file to final_block_len bytes */ static void udf_do_extend_final_block(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, uint32_t new_elen) { uint32_t added_bytes; /* * Extent already large enough? It may be already rounded up to block * size... 
*/ if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) return; added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLength += added_bytes; UDF_I(inode)->i_lenExtents += added_bytes; udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); } static int udf_extend_file(struct inode *inode, loff_t newsize) { struct extent_position epos; struct kernel_lb_addr eloc; uint32_t elen; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; loff_t new_elen; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err = 0; bool within_last_ext; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); down_write(&iinfo->i_data_sem); /* * When creating hole in file, just don't bother with preserving * preallocation. It likely won't be very useful anyway. */ udf_discard_prealloc(inode); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); within_last_ext = (etype != -1); /* We don't expect extents past EOF... */ WARN_ON_ONCE(within_last_ext && elen > ((loff_t)offset + 1) << inode->i_blkbits); if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { /* File has no extents at all or has empty last * indirect extent! Create a fake extent... */ extent.extLocation.logicalBlockNum = 0; extent.extLocation.partitionReferenceNum = 0; extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; } else { epos.offset -= adsize; etype = udf_next_aext(inode, &epos, &extent.extLocation, &extent.extLength, 0); extent.extLength |= etype << 30; } new_elen = ((loff_t)offset << inode->i_blkbits) | (newsize & (sb->s_blocksize - 1)); /* File has extent covering the new size (could happen when extending * inside a block)? */ if (within_last_ext) { /* Extending file within the last file block */ udf_do_extend_final_block(inode, &epos, &extent, new_elen); } else { err = udf_do_extend_file(inode, &epos, &extent, new_elen); } if (err < 0) goto out; err = 0; out: brelse(epos.bh); up_write(&iinfo->i_data_sem); return err; } static int inode_getblk(struct inode *inode, struct udf_map_rq *map) { struct kernel_long_ad laarr[EXTENT_MERGE_SIZE]; struct extent_position prev_epos, cur_epos, next_epos; int count = 0, startnum = 0, endnum = 0; uint32_t elen = 0, tmpelen; struct kernel_lb_addr eloc, tmpeloc; int c = 1; loff_t lbcount = 0, b_off = 0; udf_pblk_t newblocknum; sector_t offset = 0; int8_t etype; struct udf_inode_info *iinfo = UDF_I(inode); udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; bool isBeyondEOF; int ret = 0; prev_epos.offset = udf_file_entry_alloc_offset(inode); prev_epos.block = iinfo->i_location; prev_epos.bh = NULL; cur_epos = next_epos = prev_epos; b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits; /* find the extent which contains the block we are looking for. 
alternate between laarr[0] and laarr[1] for locations of the current extent, and the previous extent */ do { if (prev_epos.bh != cur_epos.bh) { brelse(prev_epos.bh); get_bh(cur_epos.bh); prev_epos.bh = cur_epos.bh; } if (cur_epos.bh != next_epos.bh) { brelse(cur_epos.bh); get_bh(next_epos.bh); cur_epos.bh = next_epos.bh; } lbcount += elen; prev_epos.block = cur_epos.block; cur_epos.block = next_epos.block; prev_epos.offset = cur_epos.offset; cur_epos.offset = next_epos.offset; etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1); if (etype == -1) break; c = !c; laarr[c].extLength = (etype << 30) | elen; laarr[c].extLocation = eloc; if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) pgoal = eloc.logicalBlockNum + ((elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); count++; } while (lbcount + elen <= b_off); b_off -= lbcount; offset = b_off >> inode->i_sb->s_blocksize_bits; /* * Move prev_epos and cur_epos into indirect extent if we are at * the pointer to it */ udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0); udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0); /* if the extent is allocated and recorded, return the block if the extent is not a multiple of the blocksize, round up */ if (etype == (EXT_RECORDED_ALLOCATED >> 30)) { if (elen & (inode->i_sb->s_blocksize - 1)) { elen = EXT_RECORDED_ALLOCATED | ((elen + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); iinfo->i_lenExtents = ALIGN(iinfo->i_lenExtents, inode->i_sb->s_blocksize); udf_write_aext(inode, &cur_epos, &eloc, elen, 1); } map->oflags = UDF_BLK_MAPPED; map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset); goto out_free; } /* Are we beyond EOF and preallocated extent? */ if (etype == -1) { loff_t hole_len; isBeyondEOF = true; if (count) { if (c) laarr[0] = laarr[1]; startnum = 1; } else { /* Create a fake extent when there's not one */ memset(&laarr[0].extLocation, 0x00, sizeof(struct kernel_lb_addr)); laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; /* Will udf_do_extend_file() create real extent from a fake one? */ startnum = (offset > 0); } /* Create extents for the hole between EOF and offset */ hole_len = (loff_t)offset << inode->i_blkbits; ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len); if (ret < 0) goto out_free; c = 0; offset = 0; count += ret; /* * Is there any real extent? - otherwise we overwrite the fake * one... */ if (count) c = !c; laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | inode->i_sb->s_blocksize; memset(&laarr[c].extLocation, 0x00, sizeof(struct kernel_lb_addr)); count++; endnum = c + 1; lastblock = 1; } else { isBeyondEOF = false; endnum = startnum = ((count > 2) ? 
2 : count); /* if the current extent is in position 0, swap it with the previous */ if (!c && count != 1) { laarr[2] = laarr[0]; laarr[0] = laarr[1]; laarr[1] = laarr[2]; c = 1; } /* if the current block is located in an extent, read the next extent */ etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0); if (etype != -1) { laarr[c + 1].extLength = (etype << 30) | elen; laarr[c + 1].extLocation = eloc; count++; startnum++; endnum++; } else lastblock = 1; } /* if the current extent is not recorded but allocated, get the * block in the extent corresponding to the requested block */ if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) newblocknum = laarr[c].extLocation.logicalBlockNum + offset; else { /* otherwise, allocate a new block */ if (iinfo->i_next_alloc_block == map->lblk) goal = iinfo->i_next_alloc_goal; if (!goal) { if (!(goal = pgoal)) /* XXX: what was intended here? */ goal = iinfo->i_location.logicalBlockNum + 1; } newblocknum = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, goal, &ret); if (!newblocknum) goto out_free; if (isBeyondEOF) iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requested block is located in contains multiple * blocks, split the extent into at most three extents. blocks prior * to requested block, requested block, and blocks after requested * block */ udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); if (!(map->iflags & UDF_MAP_NOPREALLOC)) udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); /* merge any contiguous blocks in laarr */ udf_merge_extents(inode, laarr, &endnum); /* write back the new extents, inserting new extents if the new number * of extents is greater than the old number, and deleting extents if * the new number of extents is less than the old number */ ret = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); if (ret < 0) goto out_free; map->pblk = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); if (!map->pblk) { ret = -EFSCORRUPTED; goto out_free; } map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED; iinfo->i_next_alloc_block = map->lblk + 1; iinfo->i_next_alloc_goal = newblocknum + 1; inode_set_ctime_current(inode); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); ret = 0; out_free: brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); return ret; } static void udf_split_extents(struct inode *inode, int *c, int offset, udf_pblk_t newblocknum, struct kernel_long_ad *laarr, int *endnum) { unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { int curr = *c; int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits; int8_t etype = (laarr[curr].extLength >> 30); if (blen == 1) ; else if (!offset || blen == offset + 1) { laarr[curr + 2] = laarr[curr + 1]; laarr[curr + 1] = laarr[curr]; } else { laarr[curr + 3] = laarr[curr + 1]; laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; } if (offset) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &laarr[curr].extLocation, 0, offset); laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (offset << blocksize_bits); laarr[curr].extLocation.logicalBlockNum = 0; laarr[curr].extLocation. 
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad *laarr, int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >>
				inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				    inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			  inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				currlength);

		if (numalloc) {
			if (start == (c + 1))
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) *
					(*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					    inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
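
/*
 * Merge neighbouring extents in laarr that have the same type and are
 * physically contiguous (or are both sparse); a not-recorded-allocated
 * extent that is not merged with its successor has its blocks freed and is
 * turned into a not-recorded-not-allocated one.
 */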
static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];

		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
			(((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
			((lip1->extLocation.logicalBlockNum -
			  li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
						UDF_EXTENT_LENGTH_MASK) |
						EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
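
/*
 * Write the extents collected in laarr back into the on-disk allocation
 * descriptors starting at epos, inserting descriptors when endnum > startnum
 * and deleting them when endnum < startnum.
 */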
static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int startnum, int endnum,
			      struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;
	int err;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			err = udf_insert_aext(inode, *epos,
					      laarr[i].extLocation,
					      laarr[i].extLength);
			/*
			 * If we fail here, we are likely corrupting the extent
			 * list and leaking blocks. At least stop early to
			 * limit the damage.
			 */
			if (err < 0)
				return err;
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
	return 0;
}
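
/*
 * Map the given file block and return a buffer head for it, optionally
 * allocating the block (without preallocation) when 'create' is set. A newly
 * allocated block is returned zeroed out and already marked dirty.
 */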
struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;
	struct udf_map_rq map = {
		.lblk = block,
		.iflags = UDF_MAP_NOPREALLOC | (create ? UDF_MAP_CREATE : 0),
	};

	*err = udf_map_block(inode, &map);
	if (*err || !(map.oflags & UDF_BLK_MAPPED))
		return NULL;

	bh = sb_getblk(inode->i_sb, map.pblk);
	if (!bh) {
		*err = -ENOMEM;
		return NULL;
	}
	if (map.oflags & UDF_BLK_NEW) {
		lock_buffer(bh);
		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		return bh;
	}

	if (bh_read(bh, 0) >= 0)
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}
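
/*
 * Change the size of the inode: grow it by extending the extent list (or the
 * in-ICB data area), or shrink it by truncating extents, zeroing the tail of
 * the last block as needed.
 */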
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err = 0;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	filemap_invalidate_lock(inode->i_mapping);
	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize >=
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				down_write(&iinfo->i_data_sem);
				iinfo->i_lenAlloc = newsize;
				up_write(&iinfo->i_data_sem);
				goto set_size;
			}
			err = udf_expand_file_adinicb(inode);
			if (err)
				goto out_unlock;
		}
		err = udf_extend_file(inode, newsize);
		if (err)
			goto out_unlock;
set_size:
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			goto out_unlock;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			goto out_unlock;
	}
update_time:
	inode->i_mtime = inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
out_unlock:
	filemap_invalidate_unlock(inode->i_mapping);
	return err;
}

/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024
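
/*
 * Read the (extended) file entry for the inode from disk, following strategy
 * 4096 indirect ICBs up to UDF_MAX_ICB_NESTING levels, and fill in the VFS
 * inode fields after sanity-checking the on-disk data.
 */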
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct kernel_lb_addr *iloc = &iinfo->i_location;
	unsigned int link_count;
	unsigned int indirections = 0;
	int bs = inode->i_sb->s_blocksize;
	int ret = -EIO;
	uint32_t uid, gid;
	struct timespec64 ctime;

reread:
	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
		udf_debug("partition reference: %u > logical volume partitions: %u\n",
			  iloc->partitionReferenceNum, sbi->s_partitions);
		return -EIO;
	}

	if (iloc->logicalBlockNum >=
	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%u, partition=%u out of range\n",
			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
		return -EIO;
	}

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
		return -EIO;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
			inode->i_ino, ident);
		goto out;
	}

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength) {
				brelse(ibh);
				memcpy(&iinfo->i_location, &loc,
				       sizeof(struct kernel_lb_addr));
				if (++indirections > UDF_MAX_ICB_NESTING) {
					udf_err(inode->i_sb,
						"too many ICBs in ICB hierarchy"
						" (max %d supported)\n",
						UDF_MAX_ICB_NESTING);
					goto out;
				}
				brelse(bh);
				goto reread;
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
			le16_to_cpu(fe->icbTag.strategyType));
		goto out;
	}
	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = -EIO;
		goto out;
	}
	iinfo->i_hidden = hidden_inode;
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct extendedFileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       bs - sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       bs - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct unallocSpaceEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       bs - sizeof(struct unallocSpaceEntry));
		return 0;
	}

	ret = -EIO;
	read_lock(&sbi->s_cred_lock);
	uid = le32_to_cpu(fe->uid);
	if (uid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = sbi->s_uid;
	else
		i_uid_write(inode, uid);

	gid = le32_to_cpu(fe->gid);
	if (gid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = sbi->s_gid;
	else
		i_gid_write(inode, gid);

	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;

	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count) {
		if (!hidden_inode) {
			ret = -ESTALE;
			goto out;
		}
		link_count = 1;
	}
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
		udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
		udf_disk_stamp_to_time(&ctime, fe->attrTime);
		inode_set_ctime_to_ts(inode, ctime);

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
		iinfo->i_streamdir = 0;
		iinfo->i_lenStreams = 0;
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
		udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
		udf_disk_stamp_to_time(&ctime, efe->attrTime);
		inode_set_ctime_to_ts(inode, ctime);

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);

		/* Named streams */
		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
		iinfo->i_locStreamdir =
			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
		if (iinfo->i_lenStreams >= inode->i_size)
			iinfo->i_lenStreams -= inode->i_size;
		else
			iinfo->i_lenStreams = 0;
	}
	inode->i_generation = iinfo->i_unique;

	/*
	 * Sanity check length of allocation descriptors and extended attrs to
	 * avoid integer overflows
	 */
	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
		goto out;
	/* Now do exact checks */
	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
		goto out;
	/* Sanity checks for files in ICB so that we don't get confused later */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		/*
		 * For file in ICB data is stored in allocation descriptor
		 * so sizes should match
		 */
		if (iinfo->i_lenAlloc != inode->i_size)
			goto out;
		/* File in ICB has to fit in there... */
		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
			goto out;
	}

	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mode = S_IFLNK | 0777;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
			inode->i_ino, fe->icbTag.fileType);
		goto out;
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}

static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_data)
		return -ENOMEM;
	return 0;
}
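
/*
 * Map the on-disk UDF permission bits and ICB tag flags to a POSIX mode:
 * UDF stores more permission bits per class than POSIX, so read/write/execute
 * are extracted with shifts and the setuid/setgid/sticky flags added on top.
 */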
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & 0007) |
		((permissions >> 2) & 0070) |
		((permissions >> 4) & 0700) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

void udf_update_extra_perms(struct inode *inode, umode_t mode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	/*
	 * UDF 2.01 sec. 3.3.3.3 Note 2:
	 * In Unix, delete permission tracks write
	 */
	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
	if (mode & 0200)
		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
	if (mode & 0020)
		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
	if (mode & 0002)
		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
}

int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
		iinfo->i_crtime = time;
}
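
/*
 * Write the in-core inode back into its (extended) file entry or unallocated
 * space entry on disk, recomputing the descriptor tag and CRC, and optionally
 * waiting for the buffer to hit the media when do_sync is set.
 */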
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = sb_getblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else {
		if (iinfo->i_hidden)
			fe->fileLinkCount = cpu_to_le16(0);
		else
			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
	}

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		udf_adjust_time(iinfo, inode->i_atime);
		udf_adjust_time(iinfo, inode->i_mtime);
		udf_adjust_time(iinfo, inode_get_ctime(inode));

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

finish:
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (iinfo->i_use)
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
	else if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe +
						  sizeof(struct tag), crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}
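
/*
 * Look up (or read from disk) the inode at the given logical block address.
 * A cached inode whose hidden state does not match the caller's expectation
 * is treated as filesystem corruption.
 */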
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
			 bool hidden_inode)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (UDF_I(inode)->i_hidden != hidden_inode) {
			iput(inode);
			return ERR_PTR(-EFSCORRUPTED);
		}
		return inode;
	}

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode, hidden_inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}
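
/*
 * Set up a new allocation extent descriptor block at 'block' and chain it
 * into the extent list at epos, moving the current last descriptor over to
 * the new block when there is no room left for the pointer to it.
 */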
int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
			    struct extent_position *epos)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	struct allocExtDesc *aed;
	struct extent_position nepos;
	struct kernel_lb_addr neloc;
	int ver, adsize;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	neloc.logicalBlockNum = block;
	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;

	bh = sb_getblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0x00, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, inode);

	aed = (struct allocExtDesc *)(bh->b_data);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
		aed->previousAllocExtLocation =
				cpu_to_le32(epos->block.logicalBlockNum);
	}
	aed->lengthAllocDescs = cpu_to_le32(0);
	if (UDF_SB(sb)->s_udfrev >= 0x0200)
		ver = 3;
	else
		ver = 2;
	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
		    sizeof(struct tag));

	nepos.block = neloc;
	nepos.offset = sizeof(struct allocExtDesc);
	nepos.bh = bh;

	/*
	 * Do we have to copy current last extent to make space for indirect
	 * one?
	 */
	if (epos->offset + adsize > sb->s_blocksize) {
		struct kernel_lb_addr cp_loc;
		uint32_t cp_len;
		int cp_type;

		epos->offset -= adsize;
		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
		cp_len |= ((uint32_t)cp_type) << 30;

		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
		udf_write_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	} else {
		__udf_add_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	}

	brelse(epos->bh);
	*epos = nepos;

	return 0;
}

/*
 * Append extent at the given position - should be the first free one in inode
 * / indirect extent. This function assumes there is enough space in the inode
 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
 */
int __udf_add_aext(struct inode *inode, struct extent_position *epos,
		   struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct allocExtDesc *aed;
	int adsize;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (!epos->bh) {
		WARN_ON(iinfo->i_lenAlloc !=
			epos->offset - udf_file_entry_alloc_offset(inode));
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
			epos->offset - sizeof(struct allocExtDesc));
		WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
	}

	udf_write_aext(inode, epos, eloc, elen, inc);

	if (!epos->bh) {
		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
				UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
			udf_update_tag(epos->bh->b_data,
					epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
					sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return 0;
}

/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct super_block *sb = inode->i_sb;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}
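
/*
 * Write one allocation descriptor (short or long form, depending on the
 * inode's allocation type) at epos and update the containing descriptor's
 * tag; with 'inc' set the position is advanced past the written descriptor.
 */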
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}

/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16

int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
		udf_pblk_t block;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = sb_bread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}

int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
							iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}
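
/*
 * Insert a new extent at epos by shifting all following descriptors one slot
 * forward: each existing extent is rewritten at the position of its
 * predecessor and the last one is appended with udf_add_aext().
 */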
static int udf_insert_aext(struct inode *inode, struct extent_position epos,
			   struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;
	int err;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return err;
}

int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
						oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
						sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
						epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
						sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}

int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return etype;
}
linux-master
fs/udf/inode.c