// SPDX-License-Identifier: GPL-2.0
/*
* Utility functions for file contents encryption/decryption on
* block device-based filesystems.
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*/
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
/**
* fscrypt_decrypt_bio() - decrypt the contents of a bio
* @bio: the bio to decrypt
*
* Decrypt the contents of a "read" bio following successful completion of the
* underlying disk read. The bio must be reading a whole number of blocks of an
* encrypted file directly into the page cache. If the bio is reading the
* ciphertext into bounce pages instead of the page cache (for example, because
* the file is also compressed, so decompression is required after decryption),
* then this function isn't applicable. This function may sleep, so it must be
* called from a workqueue rather than from the bio's bi_end_io callback.
*
* Return: %true on success; %false on failure. On failure, bio->bi_status is
* also set to an error status.
*/
bool fscrypt_decrypt_bio(struct bio *bio)
{
struct folio_iter fi;
bio_for_each_folio_all(fi, bio) {
int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
fi.offset);
if (err) {
bio->bi_status = errno_to_blk_status(err);
return false;
}
}
return true;
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
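/*
 * Illustrative sketch (not part of this file): how a filesystem might defer
 * the decryption to a workqueue, as the comment above requires.  The "myfs_"
 * names and the context struct are hypothetical; compare the post-read
 * processing in fs/ext4/readpage.c.
 */
#if 0
struct myfs_read_ctx {
	struct work_struct work;
	struct bio *bio;
};

static void myfs_decrypt_work(struct work_struct *work)
{
	struct myfs_read_ctx *ctx =
		container_of(work, struct myfs_read_ctx, work);

	if (fscrypt_decrypt_bio(ctx->bio)) {
		/* ...mark the bio's folios uptodate... */
	}
	/* ...unlock the folios and free ctx; bi_status holds any error... */
}

static void myfs_read_end_io(struct bio *bio)
{
	struct myfs_read_ctx *ctx = bio->bi_private;

	/*
	 * fscrypt_decrypt_bio() may sleep, and ->bi_end_io may run in
	 * interrupt context, so defer the decryption to a workqueue.
	 */
	INIT_WORK(&ctx->work, myfs_decrypt_work);
	fscrypt_enqueue_decrypt_work(&ctx->work);
}
#endif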
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
pgoff_t lblk, sector_t pblk,
unsigned int len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
struct bio *bio;
int ret, err = 0;
int num_pages = 0;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
GFP_NOFS);
while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);
unsigned int bytes_this_page = blocks_this_page << blockbits;
if (num_pages == 0) {
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
bio->bi_iter.bi_sector =
pblk << (blockbits - SECTOR_SHIFT);
}
ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
if (WARN_ON_ONCE(ret != bytes_this_page)) {
err = -EIO;
goto out;
}
num_pages++;
len -= blocks_this_page;
lblk += blocks_this_page;
pblk += blocks_this_page;
if (num_pages == BIO_MAX_VECS || !len ||
!fscrypt_mergeable_bio(bio, inode, lblk)) {
err = submit_bio_wait(bio);
if (err)
goto out;
bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
num_pages = 0;
}
}
out:
bio_put(bio);
return err;
}
/**
* fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
* @inode: the file's inode
* @lblk: the first file logical block to zero out
* @pblk: the first filesystem physical block to zero out
* @len: number of blocks to zero out
*
* Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
* ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
* both logically and physically contiguous. It's also assumed that the
* filesystem only uses a single block device, ->s_bdev.
*
* Note that since each block uses a different IV, this involves writing a
* different ciphertext to each block; we can't simply reuse the same one.
*
* Return: 0 on success; -errno on failure.
*/
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
unsigned int offset;
struct bio *bio;
int ret, err;
if (len == 0)
return 0;
if (fscrypt_inode_uses_inline_crypto(inode))
return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
(len + blocks_per_page - 1) >> blocks_per_page_bits);
/*
* We need at least one page for ciphertext. Allocate the first one
* from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
*
* Any additional page allocations are allowed to fail, as they only
* help performance, and waiting on the mempool for them could deadlock.
*/
for (i = 0; i < nr_pages; i++) {
pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
GFP_NOWAIT | __GFP_NOWARN);
if (!pages[i])
break;
}
nr_pages = i;
if (WARN_ON_ONCE(nr_pages <= 0))
return -EINVAL;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
do {
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
i = 0;
offset = 0;
do {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), pages[i],
blocksize, offset, GFP_NOFS);
if (err)
goto out;
lblk++;
pblk++;
len--;
offset += blocksize;
if (offset == PAGE_SIZE || len == 0) {
ret = bio_add_page(bio, pages[i++], offset, 0);
if (WARN_ON_ONCE(ret != offset)) {
err = -EIO;
goto out;
}
offset = 0;
}
} while (i != nr_pages && len != 0);
err = submit_bio_wait(bio);
if (err)
goto out;
bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
} while (len != 0);
err = 0;
out:
bio_put(bio);
for (i = 0; i < nr_pages; i++)
fscrypt_free_bounce_page(pages[i]);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
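/*
 * Illustrative sketch (hypothetical caller): zeroing one block during a
 * truncate-style operation.  myfs_get_pblk() is a made-up mapping helper;
 * sb_issue_zeroout() is the fallback for unencrypted files.
 */
#if 0
static int myfs_zero_block(struct inode *inode, pgoff_t lblk)
{
	sector_t pblk = myfs_get_pblk(inode, lblk);

	if (IS_ENCRYPTED(inode))
		/* must write ciphertext that decrypts to all-zeroes */
		return fscrypt_zeroout_range(inode, lblk, pblk, 1);
	return sb_issue_zeroout(inode->i_sb, pblk, 1, GFP_NOFS);
}
#endif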
/* ===== end of fs/crypto/bio.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/crypto/hooks.c
*
* Encryption hooks for higher-level filesystem operations.
*/
#include "fscrypt_private.h"
/**
* fscrypt_file_open() - prepare to open a possibly-encrypted regular file
* @inode: the inode being opened
* @filp: the struct file being set up
*
* Currently, an encrypted regular file can only be opened if its encryption key
* is available; access to the raw encrypted contents is not supported.
* Therefore, we first set up the inode's encryption key (if not already done)
* and return an error if it's unavailable.
*
* We also verify that if the parent directory (from the path via which the file
* is being opened) is encrypted, then the inode being opened uses the same
* encryption policy. This is needed as part of the enforcement that all files
* in an encrypted directory tree use the same encryption policy, as a
* protection against certain types of offline attacks. Note that this check is
* needed even when opening an *unencrypted* file, since it's forbidden to have
* an unencrypted file in an encrypted directory.
*
* Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
*/
int fscrypt_file_open(struct inode *inode, struct file *filp)
{
int err;
struct dentry *dir;
err = fscrypt_require_key(inode);
if (err)
return err;
dir = dget_parent(file_dentry(filp));
if (IS_ENCRYPTED(d_inode(dir)) &&
!fscrypt_has_permitted_context(d_inode(dir), inode)) {
fscrypt_warn(inode,
"Inconsistent encryption context (parent directory: %lu)",
d_inode(dir)->i_ino);
err = -EPERM;
}
dput(dir);
return err;
}
EXPORT_SYMBOL_GPL(fscrypt_file_open);
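/*
 * Illustrative sketch: a filesystem's ->open method typically calls
 * fscrypt_file_open() before anything else (compare ext4_file_open());
 * "myfs_" is hypothetical.
 */
#if 0
static int myfs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;
	return generic_file_open(inode, filp);
}
#endif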
int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
struct dentry *dentry)
{
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
/*
* We don't need to separately check that the directory inode's key is
* available, as it's implied by the dentry not being a no-key name.
*/
if (!fscrypt_has_permitted_context(dir, inode))
return -EXDEV;
return 0;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_link);
int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (fscrypt_is_nokey_name(old_dentry) ||
fscrypt_is_nokey_name(new_dentry))
return -ENOKEY;
/*
* We don't need to separately check that the directory inodes' keys are
* available, as it's implied by the dentries not being no-key names.
*/
if (old_dir != new_dir) {
if (IS_ENCRYPTED(new_dir) &&
!fscrypt_has_permitted_context(new_dir,
d_inode(old_dentry)))
return -EXDEV;
if ((flags & RENAME_EXCHANGE) &&
IS_ENCRYPTED(old_dir) &&
!fscrypt_has_permitted_context(old_dir,
d_inode(new_dentry)))
return -EXDEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_rename);
int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname)
{
int err = fscrypt_setup_filename(dir, &dentry->d_name, 1, fname);
if (err && err != -ENOENT)
return err;
if (fname->is_nokey_name) {
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_NOKEY_NAME;
spin_unlock(&dentry->d_lock);
}
return err;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
/**
* fscrypt_prepare_lookup_partial() - prepare lookup without filename setup
* @dir: the encrypted directory being searched
* @dentry: the dentry being looked up in @dir
*
* This function should be used by the ->lookup and ->atomic_open methods of
* filesystems that handle filename encryption and no-key name encoding
* themselves and thus can't use fscrypt_prepare_lookup(). Like
* fscrypt_prepare_lookup(), this will try to set up the directory's encryption
* key and will set DCACHE_NOKEY_NAME on the dentry if the key is unavailable.
* However, this function doesn't set up a struct fscrypt_name for the filename.
*
* Return: 0 on success; -errno on error. Note that the encryption key being
* unavailable is not considered an error. It is also not an error if
* the encryption policy is unsupported by this kernel; that is treated
* like the key being unavailable, so that files can still be deleted.
*/
int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry)
{
int err = fscrypt_get_encryption_info(dir, true);
if (!err && !fscrypt_has_encryption_key(dir)) {
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_NOKEY_NAME;
spin_unlock(&dentry->d_lock);
}
return err;
}
EXPORT_SYMBOL_GPL(fscrypt_prepare_lookup_partial);
int __fscrypt_prepare_readdir(struct inode *dir)
{
return fscrypt_get_encryption_info(dir, true);
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_readdir);
int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr)
{
if (attr->ia_valid & ATTR_SIZE)
return fscrypt_require_key(d_inode(dentry));
return 0;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_setattr);
/**
* fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS
* @inode: the inode on which flags are being changed
* @oldflags: the old flags
* @flags: the new flags
*
* The caller should be holding i_rwsem for write.
*
* Return: 0 on success; -errno if the flags change isn't allowed or if
* another error occurs.
*/
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags)
{
struct fscrypt_info *ci;
struct fscrypt_master_key *mk;
int err;
/*
* When the CASEFOLD flag is set on an encrypted directory, we must
* derive the secret key needed for the dirhash. This is only possible
* if the directory uses a v2 encryption policy.
*/
if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) {
err = fscrypt_require_key(inode);
if (err)
return err;
ci = inode->i_crypt_info;
if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
return -EINVAL;
mk = ci->ci_master_key;
down_read(&mk->mk_sem);
if (is_master_key_secret_present(&mk->mk_secret))
err = fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
up_read(&mk->mk_sem);
return err;
}
return 0;
}
/**
* fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink
* @dir: directory in which the symlink is being created
* @target: plaintext symlink target
* @len: length of @target excluding null terminator
* @max_len: space the filesystem has available to store the symlink target
* @disk_link: (out) the on-disk symlink target being prepared
*
* This function computes the size the symlink target will require on-disk,
* stores it in @disk_link->len, and validates it against @max_len. An
* encrypted symlink may be longer than the original.
*
* Additionally, @disk_link->name is set to @target if the symlink will be
* unencrypted, but left NULL if the symlink will be encrypted. For encrypted
* symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
* on-disk target later. (The reason for the two-step process is that some
* filesystems need to know the size of the symlink target before creating the
* inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
*
* Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
* -ENOKEY if the encryption key is missing, or another -errno code if a problem
* occurred while setting up the encryption key.
*/
int fscrypt_prepare_symlink(struct inode *dir, const char *target,
unsigned int len, unsigned int max_len,
struct fscrypt_str *disk_link)
{
const union fscrypt_policy *policy;
/*
* To calculate the size of the encrypted symlink target we need to know
* the amount of NUL padding, which is determined by the flags set in
* the encryption policy which will be inherited from the directory.
*/
policy = fscrypt_policy_to_inherit(dir);
if (policy == NULL) {
/* Not encrypted */
disk_link->name = (unsigned char *)target;
disk_link->len = len + 1;
if (disk_link->len > max_len)
return -ENAMETOOLONG;
return 0;
}
if (IS_ERR(policy))
return PTR_ERR(policy);
/*
* Calculate the size of the encrypted symlink and verify it won't
* exceed max_len. Note that for historical reasons, encrypted symlink
* targets are prefixed with the ciphertext length, despite this
* actually being redundant with i_size. This decreases by 2 bytes the
* longest symlink target we can accept.
*
* We could recover 1 byte by not counting a null terminator, but
* counting it (even though it is meaningless for ciphertext) is simpler
* for now since filesystems will assume it is there and subtract it.
*/
if (!__fscrypt_fname_encrypted_size(policy, len,
max_len - sizeof(struct fscrypt_symlink_data) - 1,
&disk_link->len))
return -ENAMETOOLONG;
disk_link->len += sizeof(struct fscrypt_symlink_data) + 1;
disk_link->name = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_prepare_symlink);
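/*
 * Illustrative sketch of the two-step process described above.  The "myfs_"
 * names and MYFS_MAX_SYMLINK_LEN are hypothetical, and error unwinding is
 * elided; myfs_new_inode() is assumed to have gone through
 * fscrypt_prepare_new_inode(), so the key is set up before
 * fscrypt_encrypt_symlink() runs.
 */
#if 0
static int myfs_symlink(struct inode *dir, struct dentry *dentry,
			const char *target)
{
	unsigned int len = strlen(target);
	struct fscrypt_str disk_link;
	struct inode *inode;
	int err;

	/* Step 1: compute and validate the on-disk target size. */
	err = fscrypt_prepare_symlink(dir, target, len, MYFS_MAX_SYMLINK_LEN,
				      &disk_link);
	if (err)
		return err;
	inode = myfs_new_inode(dir, S_IFLNK);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	/* Step 2: disk_link.name is still NULL iff the target is encrypted. */
	if (!disk_link.name) {
		err = fscrypt_encrypt_symlink(inode, target, len, &disk_link);
		if (err)
			return err;
	}
	/* ...write disk_link.len bytes from disk_link.name to disk... */
	return 0;
}
#endif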
int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
unsigned int len, struct fscrypt_str *disk_link)
{
int err;
struct qstr iname = QSTR_INIT(target, len);
struct fscrypt_symlink_data *sd;
unsigned int ciphertext_len;
/*
* fscrypt_prepare_new_inode() should have already set up the new
* symlink inode's encryption key. We don't wait until now to do it,
* since we may be in a filesystem transaction now.
*/
if (WARN_ON_ONCE(!fscrypt_has_encryption_key(inode)))
return -ENOKEY;
if (disk_link->name) {
/* filesystem-provided buffer */
sd = (struct fscrypt_symlink_data *)disk_link->name;
} else {
sd = kmalloc(disk_link->len, GFP_NOFS);
if (!sd)
return -ENOMEM;
}
ciphertext_len = disk_link->len - sizeof(*sd) - 1;
sd->len = cpu_to_le16(ciphertext_len);
err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path,
ciphertext_len);
if (err)
goto err_free_sd;
/*
* Null-terminating the ciphertext doesn't make sense, but we still
* count the null terminator in the length, so we might as well
* initialize it just in case the filesystem writes it out.
*/
sd->encrypted_path[ciphertext_len] = '\0';
/* Cache the plaintext symlink target for later use by get_link() */
err = -ENOMEM;
inode->i_link = kmemdup(target, len + 1, GFP_NOFS);
if (!inode->i_link)
goto err_free_sd;
if (!disk_link->name)
disk_link->name = (unsigned char *)sd;
return 0;
err_free_sd:
if (!disk_link->name)
kfree(sd);
return err;
}
EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink);
/**
* fscrypt_get_symlink() - get the target of an encrypted symlink
* @inode: the symlink inode
* @caddr: the on-disk contents of the symlink
* @max_size: size of @caddr buffer
* @done: if successful, will be set up to free the returned target if needed
*
* If the symlink's encryption key is available, we decrypt its target.
* Otherwise, we encode its target for presentation.
*
* This may sleep, so the filesystem must have dropped out of RCU mode already.
*
* Return: the presentable symlink target or an ERR_PTR()
*/
const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done)
{
const struct fscrypt_symlink_data *sd;
struct fscrypt_str cstr, pstr;
bool has_key;
int err;
/* This is for encrypted symlinks only */
if (WARN_ON_ONCE(!IS_ENCRYPTED(inode)))
return ERR_PTR(-EINVAL);
/* If the decrypted target is already cached, just return it. */
pstr.name = READ_ONCE(inode->i_link);
if (pstr.name)
return pstr.name;
/*
* Try to set up the symlink's encryption key, but we can continue
* regardless of whether the key is available or not.
*/
err = fscrypt_get_encryption_info(inode, false);
if (err)
return ERR_PTR(err);
has_key = fscrypt_has_encryption_key(inode);
/*
* For historical reasons, encrypted symlink targets are prefixed with
* the ciphertext length, even though this is redundant with i_size.
*/
if (max_size < sizeof(*sd) + 1)
return ERR_PTR(-EUCLEAN);
sd = caddr;
cstr.name = (unsigned char *)sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
if (cstr.len == 0)
return ERR_PTR(-EUCLEAN);
if (cstr.len + sizeof(*sd) > max_size)
return ERR_PTR(-EUCLEAN);
err = fscrypt_fname_alloc_buffer(cstr.len, &pstr);
if (err)
return ERR_PTR(err);
err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
if (err)
goto err_kfree;
err = -EUCLEAN;
if (pstr.name[0] == '\0')
goto err_kfree;
pstr.name[pstr.len] = '\0';
/*
* Cache decrypted symlink targets in i_link for later use. Don't cache
* symlink targets encoded without the key, since those become outdated
* once the key is added. This pairs with the READ_ONCE() above and in
* the VFS path lookup code.
*/
if (!has_key ||
cmpxchg_release(&inode->i_link, NULL, pstr.name) != NULL)
set_delayed_call(done, kfree_link, pstr.name);
return pstr.name;
err_kfree:
kfree(pstr.name);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
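/*
 * Illustrative sketch of an encrypted-symlink-aware ->get_link method
 * (compare ext4_encrypted_get_link()); myfs_read_symlink_block() is a
 * made-up helper that reads the symlink's data block.
 */
#if 0
static const char *myfs_get_link(struct dentry *dentry, struct inode *inode,
				 struct delayed_call *done)
{
	struct buffer_head *bh;
	const char *link;

	if (!dentry)	/* RCU-walk: fscrypt_get_symlink() may sleep */
		return ERR_PTR(-ECHILD);
	bh = myfs_read_symlink_block(inode);
	if (IS_ERR(bh))
		return ERR_CAST(bh);
	link = fscrypt_get_symlink(inode, bh->b_data,
				   inode->i_sb->s_blocksize, done);
	brelse(bh);
	return link;
}
#endif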
/**
* fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks
* @path: the path for the encrypted symlink being queried
* @stat: the struct being filled with the symlink's attributes
*
* Override st_size of encrypted symlinks to be the length of the decrypted
* symlink target (or the no-key encoded symlink target, if the key is
* unavailable) rather than the length of the encrypted symlink target. This is
* necessary for st_size to match the symlink target that userspace actually
* sees. POSIX requires this, and some userspace programs depend on it.
*
* This requires reading the symlink target from disk if needed, setting up the
* inode's encryption key if possible, and then decrypting or encoding the
* symlink target. This makes lstat() more heavyweight than is normally the
* case. However, decrypted symlink targets will be cached in ->i_link, so
* usually the symlink won't have to be read and decrypted again later if/when
* it is actually followed, readlink() is called, or lstat() is called again.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat)
{
struct dentry *dentry = path->dentry;
struct inode *inode = d_inode(dentry);
const char *link;
DEFINE_DELAYED_CALL(done);
/*
* To get the symlink target that userspace will see (whether it's the
* decrypted target or the no-key encoded target), we can just get it in
* the same way the VFS does during path resolution and readlink().
*/
link = READ_ONCE(inode->i_link);
if (!link) {
link = inode->i_op->get_link(dentry, inode, &done);
if (IS_ERR(link))
return PTR_ERR(link);
}
stat->size = strlen(link);
do_delayed_call(&done);
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr);
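/*
 * Illustrative sketch ("myfs_" hypothetical; compare ext4_getattr()):
 * delegating the st_size fixup for encrypted symlinks.
 */
#if 0
static int myfs_getattr(struct mnt_idmap *idmap, const struct path *path,
			struct kstat *stat, u32 request_mask,
			unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* ...fill in *stat as usual... */
	if (IS_ENCRYPTED(inode) && S_ISLNK(inode->i_mode))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}
#endif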
/* ===== end of fs/crypto/hooks.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0
/*
* Encryption policy functions for per-file encryption support.
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility.
*
* Originally written by Michael Halcrow, 2015.
* Modified by Jaegeuk Kim, 2015.
* Modified by Eric Biggers, 2019 for v2 policy support.
*/
#include <linux/fs_context.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/mount.h>
#include "fscrypt_private.h"
/**
* fscrypt_policies_equal() - check whether two encryption policies are the same
* @policy1: the first policy
* @policy2: the second policy
*
* Return: %true if equal, else %false
*/
bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
const union fscrypt_policy *policy2)
{
if (policy1->version != policy2->version)
return false;
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy,
struct fscrypt_key_specifier *key_spec)
{
switch (policy->version) {
case FSCRYPT_POLICY_V1:
key_spec->type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
memcpy(key_spec->u.descriptor, policy->v1.master_key_descriptor,
FSCRYPT_KEY_DESCRIPTOR_SIZE);
return 0;
case FSCRYPT_POLICY_V2:
key_spec->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
memcpy(key_spec->u.identifier, policy->v2.master_key_identifier,
FSCRYPT_KEY_IDENTIFIER_SIZE);
return 0;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
}
const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb)
{
if (!sb->s_cop->get_dummy_policy)
return NULL;
return sb->s_cop->get_dummy_policy(sb);
}
/*
* Return %true if the given combination of encryption modes is supported for v1
* (and later) encryption policies.
*
* Do *not* add anything new here, since v1 encryption policies are deprecated.
* New combinations of modes should go in fscrypt_valid_enc_modes_v2() only.
*/
static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode)
{
if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
filenames_mode == FSCRYPT_MODE_AES_128_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
filenames_mode == FSCRYPT_MODE_ADIANTUM)
return true;
return false;
}
static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode)
{
if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
filenames_mode == FSCRYPT_MODE_AES_256_HCTR2)
return true;
if (contents_mode == FSCRYPT_MODE_SM4_XTS &&
filenames_mode == FSCRYPT_MODE_SM4_CTS)
return true;
return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode);
}
static bool supported_direct_key_modes(const struct inode *inode,
u32 contents_mode, u32 filenames_mode)
{
const struct fscrypt_mode *mode;
if (contents_mode != filenames_mode) {
fscrypt_warn(inode,
"Direct key flag not allowed with different contents and filenames modes");
return false;
}
mode = &fscrypt_modes[contents_mode];
if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
fscrypt_warn(inode, "Direct key flag not allowed with %s",
mode->friendly_name);
return false;
}
return true;
}
static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
const struct inode *inode,
const char *type,
int max_ino_bits, int max_lblk_bits)
{
struct super_block *sb = inode->i_sb;
int ino_bits = 64, lblk_bits = 64;
/*
* IV_INO_LBLK_* exist only because of hardware limitations, and
* currently the only known use case for them involves AES-256-XTS.
* That's also all we test currently. For these reasons, for now only
* allow AES-256-XTS here. This can be relaxed later if a use case for
* IV_INO_LBLK_* with other encryption modes arises.
*/
if (policy->contents_encryption_mode != FSCRYPT_MODE_AES_256_XTS) {
fscrypt_warn(inode,
"Can't use %s policy with contents mode other than AES-256-XTS",
type);
return false;
}
/*
* It's unsafe to include inode numbers in the IVs if the filesystem can
* potentially renumber inodes, e.g. via filesystem shrinking.
*/
if (!sb->s_cop->has_stable_inodes ||
!sb->s_cop->has_stable_inodes(sb)) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers",
type, sb->s_id);
return false;
}
if (sb->s_cop->get_ino_and_lblk_bits)
sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
if (ino_bits > max_ino_bits) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because its inode numbers are too long",
type, sb->s_id);
return false;
}
if (lblk_bits > max_lblk_bits) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because its block numbers are too long",
type, sb->s_id);
return false;
}
return true;
}
static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy,
const struct inode *inode)
{
if (!fscrypt_valid_enc_modes_v1(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
if (IS_CASEFOLDED(inode)) {
/* With v1, there's no way to derive dirhash keys. */
fscrypt_warn(inode,
"v1 policies can't be used on casefolded directories");
return false;
}
return true;
}
static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
const struct inode *inode)
{
int count = 0;
if (!fscrypt_valid_enc_modes_v2(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
policy->contents_encryption_mode,
policy->filenames_encryption_mode);
return false;
}
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY |
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
policy->flags);
return false;
}
count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY);
count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64);
count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32);
if (count > 1) {
fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)",
policy->flags);
return false;
}
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
!supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64",
32, 32))
return false;
/*
* IV_INO_LBLK_32 hashes the inode number, so in principle it can
* support any ino_bits. However, currently the inode number is taken
* from inode::i_ino which is 'unsigned long'. So for now the
* implementation limit is 32 bits.
*/
if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
!supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
32, 32))
return false;
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
fscrypt_warn(inode, "Reserved bits set in encryption policy");
return false;
}
return true;
}
/**
* fscrypt_supported_policy() - check whether an encryption policy is supported
* @policy_u: the encryption policy
* @inode: the inode on which the policy will be used
*
* Given an encryption policy, check whether all its encryption modes and other
* settings are supported by this kernel on the given inode. (But we
* currently don't check for crypto API support here, so attempting to use an
* algorithm not configured into the crypto API will still fail later.)
*
* Return: %true if supported, else %false
*/
bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
const struct inode *inode)
{
switch (policy_u->version) {
case FSCRYPT_POLICY_V1:
return fscrypt_supported_v1_policy(&policy_u->v1, inode);
case FSCRYPT_POLICY_V2:
return fscrypt_supported_v2_policy(&policy_u->v2, inode);
}
return false;
}
/**
* fscrypt_new_context() - create a new fscrypt_context
* @ctx_u: output context
* @policy_u: input policy
* @nonce: nonce to use
*
* Create an fscrypt_context for an inode that is being assigned the given
* encryption policy. @nonce must be a new random nonce.
*
* Return: the size of the new context in bytes.
*/
static int fscrypt_new_context(union fscrypt_context *ctx_u,
const union fscrypt_policy *policy_u,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE])
{
memset(ctx_u, 0, sizeof(*ctx_u));
switch (policy_u->version) {
case FSCRYPT_POLICY_V1: {
const struct fscrypt_policy_v1 *policy = &policy_u->v1;
struct fscrypt_context_v1 *ctx = &ctx_u->v1;
ctx->version = FSCRYPT_CONTEXT_V1;
ctx->contents_encryption_mode =
policy->contents_encryption_mode;
ctx->filenames_encryption_mode =
policy->filenames_encryption_mode;
ctx->flags = policy->flags;
memcpy(ctx->master_key_descriptor,
policy->master_key_descriptor,
sizeof(ctx->master_key_descriptor));
memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE);
return sizeof(*ctx);
}
case FSCRYPT_POLICY_V2: {
const struct fscrypt_policy_v2 *policy = &policy_u->v2;
struct fscrypt_context_v2 *ctx = &ctx_u->v2;
ctx->version = FSCRYPT_CONTEXT_V2;
ctx->contents_encryption_mode =
policy->contents_encryption_mode;
ctx->filenames_encryption_mode =
policy->filenames_encryption_mode;
ctx->flags = policy->flags;
memcpy(ctx->master_key_identifier,
policy->master_key_identifier,
sizeof(ctx->master_key_identifier));
memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE);
return sizeof(*ctx);
}
}
BUG();
}
/**
* fscrypt_policy_from_context() - convert an fscrypt_context to
* an fscrypt_policy
* @policy_u: output policy
* @ctx_u: input context
* @ctx_size: size of input context in bytes
*
* Given an fscrypt_context, build the corresponding fscrypt_policy.
*
* Return: 0 on success, or -EINVAL if the fscrypt_context has an unrecognized
* version number or size.
*
* This does *not* validate the settings within the policy itself, e.g. the
* modes, flags, and reserved bits. Use fscrypt_supported_policy() for that.
*/
int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
const union fscrypt_context *ctx_u,
int ctx_size)
{
memset(policy_u, 0, sizeof(*policy_u));
if (!fscrypt_context_is_valid(ctx_u, ctx_size))
return -EINVAL;
switch (ctx_u->version) {
case FSCRYPT_CONTEXT_V1: {
const struct fscrypt_context_v1 *ctx = &ctx_u->v1;
struct fscrypt_policy_v1 *policy = &policy_u->v1;
policy->version = FSCRYPT_POLICY_V1;
policy->contents_encryption_mode =
ctx->contents_encryption_mode;
policy->filenames_encryption_mode =
ctx->filenames_encryption_mode;
policy->flags = ctx->flags;
memcpy(policy->master_key_descriptor,
ctx->master_key_descriptor,
sizeof(policy->master_key_descriptor));
return 0;
}
case FSCRYPT_CONTEXT_V2: {
const struct fscrypt_context_v2 *ctx = &ctx_u->v2;
struct fscrypt_policy_v2 *policy = &policy_u->v2;
policy->version = FSCRYPT_POLICY_V2;
policy->contents_encryption_mode =
ctx->contents_encryption_mode;
policy->filenames_encryption_mode =
ctx->filenames_encryption_mode;
policy->flags = ctx->flags;
memcpy(policy->__reserved, ctx->__reserved,
sizeof(policy->__reserved));
memcpy(policy->master_key_identifier,
ctx->master_key_identifier,
sizeof(policy->master_key_identifier));
return 0;
}
}
/* unreachable */
return -EINVAL;
}
/* Retrieve an inode's encryption policy */
static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy)
{
const struct fscrypt_info *ci;
union fscrypt_context ctx;
int ret;
ci = fscrypt_get_info(inode);
if (ci) {
/* key available, use the cached policy */
*policy = ci->ci_policy;
return 0;
}
if (!IS_ENCRYPTED(inode))
return -ENODATA;
ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (ret < 0)
return (ret == -ERANGE) ? -EINVAL : ret;
return fscrypt_policy_from_context(policy, &ctx, ret);
}
static int set_encryption_policy(struct inode *inode,
const union fscrypt_policy *policy)
{
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
union fscrypt_context ctx;
int ctxsize;
int err;
if (!fscrypt_supported_policy(policy, inode))
return -EINVAL;
switch (policy->version) {
case FSCRYPT_POLICY_V1:
/*
* The original encryption policy version provided no way of
* verifying that the correct master key was supplied, which was
* insecure in scenarios where multiple users have access to the
* same encrypted files (even just read-only access). The new
* encryption policy version fixes this and also implies use of
* an improved key derivation function and allows non-root users
* to securely remove keys. So as long as compatibility with
* old kernels isn't required, it is recommended to use the new
* policy version for all new encrypted directories.
*/
pr_warn_once("%s (pid %d) is setting deprecated v1 encryption policy; recommend upgrading to v2.\n",
current->comm, current->pid);
break;
case FSCRYPT_POLICY_V2:
err = fscrypt_verify_key_added(inode->i_sb,
policy->v2.master_key_identifier);
if (err)
return err;
if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. This should only be used if there are certain hardware limitations.\n",
current->comm, current->pid);
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
get_random_bytes(nonce, FSCRYPT_FILE_NONCE_SIZE);
ctxsize = fscrypt_new_context(&ctx, policy, nonce);
return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, NULL);
}
int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
{
union fscrypt_policy policy;
union fscrypt_policy existing_policy;
struct inode *inode = file_inode(filp);
u8 version;
int size;
int ret;
if (get_user(policy.version, (const u8 __user *)arg))
return -EFAULT;
size = fscrypt_policy_size(&policy);
if (size <= 0)
return -EINVAL;
/*
* We should just copy the remaining 'size - 1' bytes here, but a
* bizarre bug in gcc 7 and earlier (fixed by gcc r255731) causes gcc to
* think that size can be 0 here (despite the check above!) *and* that
* it's a compile-time constant. Thus it would think copy_from_user()
* is passed compile-time constant ULONG_MAX, causing the compile-time
* buffer overflow check to fail, breaking the build. This only occurred
* when building an i386 kernel with -Os and branch profiling enabled.
*
* Work around it by just copying the first byte again...
*/
version = policy.version;
if (copy_from_user(&policy, arg, size))
return -EFAULT;
policy.version = version;
if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
ret = fscrypt_get_policy(inode, &existing_policy);
if (ret == -ENODATA) {
if (!S_ISDIR(inode->i_mode))
ret = -ENOTDIR;
else if (IS_DEADDIR(inode))
ret = -ENOENT;
else if (!inode->i_sb->s_cop->empty_dir(inode))
ret = -ENOTEMPTY;
else
ret = set_encryption_policy(inode, &policy);
} else if (ret == -EINVAL ||
(ret == 0 && !fscrypt_policies_equal(&policy,
&existing_policy))) {
/* The file already uses a different encryption policy. */
ret = -EEXIST;
}
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
EXPORT_SYMBOL(fscrypt_ioctl_set_policy);
/* Original ioctl version; can only get the original policy version */
int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
union fscrypt_policy policy;
int err;
err = fscrypt_get_policy(file_inode(filp), &policy);
if (err)
return err;
if (policy.version != FSCRYPT_POLICY_V1)
return -EINVAL;
if (copy_to_user(arg, &policy, sizeof(policy.v1)))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
/* Extended ioctl version; can get policies of any version */
int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg)
{
struct fscrypt_get_policy_ex_arg arg;
union fscrypt_policy *policy = (union fscrypt_policy *)&arg.policy;
size_t policy_size;
int err;
/* arg is policy_size, then policy */
BUILD_BUG_ON(offsetof(typeof(arg), policy_size) != 0);
BUILD_BUG_ON(offsetofend(typeof(arg), policy_size) !=
offsetof(typeof(arg), policy));
BUILD_BUG_ON(sizeof(arg.policy) != sizeof(*policy));
err = fscrypt_get_policy(file_inode(filp), policy);
if (err)
return err;
policy_size = fscrypt_policy_size(policy);
if (copy_from_user(&arg, uarg, sizeof(arg.policy_size)))
return -EFAULT;
if (policy_size > arg.policy_size)
return -EOVERFLOW;
arg.policy_size = policy_size;
if (copy_to_user(uarg, &arg, sizeof(arg.policy_size) + policy_size))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex);
/* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
{
struct inode *inode = file_inode(filp);
union fscrypt_context ctx;
int ret;
ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (ret < 0)
return ret;
if (!fscrypt_context_is_valid(&ctx, ret))
return -EINVAL;
if (copy_to_user(arg, fscrypt_context_nonce(&ctx),
FSCRYPT_FILE_NONCE_SIZE))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce);
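/*
 * Illustrative sketch: filesystems wire the four helpers above directly into
 * their ioctl handler (compare ext4_ioctl()); "myfs_" is hypothetical.
 */
#if 0
static long myfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
#endif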
/**
* fscrypt_has_permitted_context() - is a file's encryption policy permitted
* within its directory?
*
* @parent: inode for parent directory
* @child: inode for file being looked up, opened, or linked into @parent
*
* Filesystems must call this before permitting access to an inode in a
* situation where the parent directory is encrypted (either before allowing
* ->lookup() to succeed, or for a regular file before allowing it to be opened)
* and before any operation that involves linking an inode into an encrypted
* directory, including link, rename, and cross rename. It enforces the
* constraint that within a given encrypted directory tree, all files use the
* same encryption policy. The pre-access check is needed to detect potentially
* malicious offline violations of this constraint, while the link and rename
* checks are needed to prevent online violations of this constraint.
*
* Return: 1 if permitted, 0 if forbidden.
*/
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
union fscrypt_policy parent_policy, child_policy;
int err, err1, err2;
/* No restrictions on file types which are never encrypted */
if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
!S_ISLNK(child->i_mode))
return 1;
/* No restrictions if the parent directory is unencrypted */
if (!IS_ENCRYPTED(parent))
return 1;
/* Encrypted directories must not contain unencrypted files */
if (!IS_ENCRYPTED(child))
return 0;
/*
* Both parent and child are encrypted, so verify they use the same
* encryption policy. Compare the fscrypt_info structs if the keys are
* available, otherwise retrieve and compare the fscrypt_contexts.
*
* Note that the fscrypt_context retrieval will be required frequently
* when accessing an encrypted directory tree without the key.
* Performance-wise this is not a big deal because we already don't
* really optimize for file access without the key (to the extent that
* such access is even possible), given that any attempted access
* already causes a fscrypt_context retrieval and keyring search.
*
* In any case, if an unexpected error occurs, fall back to "forbidden".
*/
err = fscrypt_get_encryption_info(parent, true);
if (err)
return 0;
err = fscrypt_get_encryption_info(child, true);
if (err)
return 0;
err1 = fscrypt_get_policy(parent, &parent_policy);
err2 = fscrypt_get_policy(child, &child_policy);
/*
* Allow the case where the parent and child both have an unrecognized
* encryption policy, so that files with an unrecognized encryption
* policy can be deleted.
*/
if (err1 == -EINVAL && err2 == -EINVAL)
return 1;
if (err1 || err2)
return 0;
return fscrypt_policies_equal(&parent_policy, &child_policy);
}
EXPORT_SYMBOL(fscrypt_has_permitted_context);
/*
* Return the encryption policy that new files in the directory will inherit, or
* NULL if none, or an ERR_PTR() on error. If the directory is encrypted, also
* ensure that its key is set up, so that the new filename can be encrypted.
*/
const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
{
int err;
if (IS_ENCRYPTED(dir)) {
err = fscrypt_require_key(dir);
if (err)
return ERR_PTR(err);
return &dir->i_crypt_info->ci_policy;
}
return fscrypt_get_dummy_policy(dir->i_sb);
}
/**
* fscrypt_context_for_new_inode() - create an encryption context for a new inode
* @ctx: where context should be written
* @inode: inode from which to fetch policy and nonce
*
* Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode,
* generate a new context and write it to ctx. ctx _must_ be at least
* FSCRYPT_SET_CONTEXT_MAX_SIZE bytes.
*
* Return: size of the resulting context or a negative error code.
*/
int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
BUILD_BUG_ON(sizeof(union fscrypt_context) !=
FSCRYPT_SET_CONTEXT_MAX_SIZE);
/* fscrypt_prepare_new_inode() should have set up the key already. */
if (WARN_ON_ONCE(!ci))
return -ENOKEY;
return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce);
}
EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
/**
* fscrypt_set_context() - Set the fscrypt context of a new inode
* @inode: a new inode
* @fs_data: private data given by FS and passed to ->set_context()
*
* This should be called after fscrypt_prepare_new_inode(), generally during a
* filesystem transaction. Everything here must be %GFP_NOFS-safe.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_set_context(struct inode *inode, void *fs_data)
{
struct fscrypt_info *ci = inode->i_crypt_info;
union fscrypt_context ctx;
int ctxsize;
ctxsize = fscrypt_context_for_new_inode(&ctx, inode);
if (ctxsize < 0)
return ctxsize;
/*
* This may be the first time the inode number is available, so do any
* delayed key setup that requires the inode number.
*/
if (ci->ci_policy.version == FSCRYPT_POLICY_V2 &&
(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
fscrypt_hash_inode_number(ci, ci->ci_master_key);
return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data);
}
EXPORT_SYMBOL_GPL(fscrypt_set_context);
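/*
 * Illustrative sketch of where fscrypt_set_context() fits into inode creation
 * ("myfs_" hypothetical): the key is set up before the filesystem
 * transaction, and the context is persisted inside it.
 */
#if 0
static struct inode *myfs_create_inode(struct inode *dir, umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);
	bool encrypt;
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);
	err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
	if (err)
		goto fail;
	/* ...start transaction, allocate the on-disk inode... */
	if (encrypt) {
		err = fscrypt_set_context(inode, NULL);
		if (err)
			goto fail;
	}
	return inode;
fail:
	iput(inode);
	return ERR_PTR(err);
}
#endif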
/**
* fscrypt_parse_test_dummy_encryption() - parse the test_dummy_encryption mount option
* @param: the mount option
* @dummy_policy: (input/output) the place to write the dummy policy that will
* result from parsing the option. Zero-initialize this. If a policy is
* already set here (due to test_dummy_encryption being given multiple
* times), then this function will verify that the policies are the same.
*
* Return: 0 on success; -EINVAL if the argument is invalid; -EEXIST if the
* argument conflicts with one already specified; or -ENOMEM.
*/
int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
struct fscrypt_dummy_policy *dummy_policy)
{
const char *arg = "v2";
union fscrypt_policy *policy;
int err;
if (param->type == fs_value_is_string && *param->string)
arg = param->string;
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy)
return -ENOMEM;
if (!strcmp(arg, "v1")) {
policy->version = FSCRYPT_POLICY_V1;
policy->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
policy->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
memset(policy->v1.master_key_descriptor, 0x42,
FSCRYPT_KEY_DESCRIPTOR_SIZE);
} else if (!strcmp(arg, "v2")) {
policy->version = FSCRYPT_POLICY_V2;
policy->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
policy->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
err = fscrypt_get_test_dummy_key_identifier(
policy->v2.master_key_identifier);
if (err)
goto out;
} else {
err = -EINVAL;
goto out;
}
if (dummy_policy->policy) {
if (fscrypt_policies_equal(policy, dummy_policy->policy))
err = 0;
else
err = -EEXIST;
goto out;
}
dummy_policy->policy = policy;
policy = NULL;
err = 0;
out:
kfree(policy);
return err;
}
EXPORT_SYMBOL_GPL(fscrypt_parse_test_dummy_encryption);
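/*
 * A filesystem that plumbs this through its mount-option parsing can then be
 * mounted with "-o test_dummy_encryption" (or "=v1"/"=v2"), causing new files
 * and directories that would otherwise be unencrypted to use a dummy
 * encryption policy backed by a test key; see
 * Documentation/filesystems/fscrypt.rst.
 */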
/**
* fscrypt_dummy_policies_equal() - check whether two dummy policies are equal
* @p1: the first test dummy policy (may be unset)
* @p2: the second test dummy policy (may be unset)
*
* Return: %true if the dummy policies are both set and equal, or both unset.
*/
bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
const struct fscrypt_dummy_policy *p2)
{
if (!p1->policy && !p2->policy)
return true;
if (!p1->policy || !p2->policy)
return false;
return fscrypt_policies_equal(p1->policy, p2->policy);
}
EXPORT_SYMBOL_GPL(fscrypt_dummy_policies_equal);
/**
* fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption'
* @seq: the seq_file to print the option to
* @sep: the separator character to use
* @sb: the filesystem whose options are being shown
*
* Show the test_dummy_encryption mount option, if it was specified.
* This is mainly used for /proc/mounts.
*/
void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
struct super_block *sb)
{
const union fscrypt_policy *policy = fscrypt_get_dummy_policy(sb);
int vers;
if (!policy)
return;
vers = policy->version;
if (vers == FSCRYPT_POLICY_V1) /* Handle numbering quirk */
vers = 1;
seq_printf(seq, "%ctest_dummy_encryption=v%d", sep, vers);
}
EXPORT_SYMBOL_GPL(fscrypt_show_test_dummy_encryption);
/* ===== end of fs/crypto/policy.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
* Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
* "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
*
* This is used to derive keys from the fscrypt master keys.
*
* Copyright 2019 Google LLC
*/
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include "fscrypt_private.h"
/*
* HKDF supports any unkeyed cryptographic hash algorithm, but fscrypt uses
* SHA-512 because it is well-established, secure, and reasonably efficient.
*
* HKDF-SHA256 was also considered, as its 256-bit security strength would be
* sufficient here. A 512-bit security strength is "nice to have", though.
* Also, on 64-bit CPUs, SHA-512 is usually just as fast as SHA-256. In the
* common case of deriving an AES-256-XTS key (512 bits), that can result in
* HKDF-SHA512 being much faster than HKDF-SHA256, as the longer digest size of
* SHA-512 causes HKDF-Expand to only need to do one iteration rather than two.
*/
#define HKDF_HMAC_ALG "hmac(sha512)"
#define HKDF_HASHLEN SHA512_DIGEST_SIZE
/*
* HKDF consists of two steps:
*
* 1. HKDF-Extract: extract a pseudorandom key of length HKDF_HASHLEN bytes from
* the input keying material and optional salt.
* 2. HKDF-Expand: expand the pseudorandom key into output keying material of
* any length, parameterized by an application-specific info string.
*
* HKDF-Extract can be skipped if the input is already a pseudorandom key of
* length HKDF_HASHLEN bytes. However, cipher modes other than AES-256-XTS take
* shorter keys, and we don't want to force users of those modes to provide
* unnecessarily long master keys. Thus fscrypt still does HKDF-Extract. No
* salt is used, since fscrypt master keys should already be pseudorandom and
* there's no way to persist a random salt per master key from kernel mode.
*/
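/*
 * In RFC 5869 notation, with HMAC = HMAC-SHA512, the functions below compute:
 *
 *	PRK  = HMAC(salt = zeroes, IKM = master_key)	(HKDF-Extract)
 *	T(1) = HMAC(PRK, "fscrypt\0" || context || info || 0x01)
 *	T(n) = HMAC(PRK, T(n-1) || "fscrypt\0" || context || info || n)
 *	OKM  = first okmlen bytes of T(1) || T(2) || ...	(HKDF-Expand)
 */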
/* HKDF-Extract (RFC 5869 section 2.2), unsalted */
static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
{
static const u8 default_salt[HKDF_HASHLEN];
int err;
err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
if (err)
return err;
return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
}
/*
* Compute HKDF-Extract using the given master key as the input keying material,
* and prepare an HMAC transform object keyed by the resulting pseudorandom key.
*
* Afterwards, the keyed HMAC transform object can be used for HKDF-Expand many
* times without having to recompute HKDF-Extract each time.
*/
int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
unsigned int master_key_size)
{
struct crypto_shash *hmac_tfm;
u8 prk[HKDF_HASHLEN];
int err;
hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
if (IS_ERR(hmac_tfm)) {
fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
PTR_ERR(hmac_tfm));
return PTR_ERR(hmac_tfm);
}
if (WARN_ON_ONCE(crypto_shash_digestsize(hmac_tfm) != sizeof(prk))) {
err = -EINVAL;
goto err_free_tfm;
}
err = hkdf_extract(hmac_tfm, master_key, master_key_size, prk);
if (err)
goto err_free_tfm;
err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk));
if (err)
goto err_free_tfm;
hkdf->hmac_tfm = hmac_tfm;
goto out;
err_free_tfm:
crypto_free_shash(hmac_tfm);
out:
memzero_explicit(prk, sizeof(prk));
return err;
}
/*
* HKDF-Expand (RFC 5869 section 2.3). This expands the pseudorandom key, which
* was already keyed into 'hkdf->hmac_tfm' by fscrypt_init_hkdf(), into 'okmlen'
* bytes of output keying material parameterized by the application-specific
* 'info' of length 'infolen' bytes, prefixed by "fscrypt\0" and the 'context'
* byte. This is thread-safe and may be called by multiple threads in parallel.
*
* ('context' isn't part of the HKDF specification; it's just a prefix fscrypt
* adds to its application-specific info strings to guarantee that it doesn't
* accidentally repeat an info string when using HKDF for different purposes.)
*/
int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen)
{
SHASH_DESC_ON_STACK(desc, hkdf->hmac_tfm);
u8 prefix[9];
unsigned int i;
int err;
const u8 *prev = NULL;
u8 counter = 1;
u8 tmp[HKDF_HASHLEN];
if (WARN_ON_ONCE(okmlen > 255 * HKDF_HASHLEN))
return -EINVAL;
desc->tfm = hkdf->hmac_tfm;
memcpy(prefix, "fscrypt\0", 8);
prefix[8] = context;
for (i = 0; i < okmlen; i += HKDF_HASHLEN) {
err = crypto_shash_init(desc);
if (err)
goto out;
if (prev) {
err = crypto_shash_update(desc, prev, HKDF_HASHLEN);
if (err)
goto out;
}
err = crypto_shash_update(desc, prefix, sizeof(prefix));
if (err)
goto out;
err = crypto_shash_update(desc, info, infolen);
if (err)
goto out;
BUILD_BUG_ON(sizeof(counter) != 1);
if (okmlen - i < HKDF_HASHLEN) {
err = crypto_shash_finup(desc, &counter, 1, tmp);
if (err)
goto out;
memcpy(&okm[i], tmp, okmlen - i);
memzero_explicit(tmp, sizeof(tmp));
} else {
err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
if (err)
goto out;
}
counter++;
prev = &okm[i];
}
err = 0;
out:
if (unlikely(err))
memzero_explicit(okm, okmlen); /* so caller doesn't need to */
shash_desc_zero(desc);
return err;
}
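/*
 * Example of internal use (paraphrasing the dirhash key derivation in
 * fs/crypto/keysetup.c): deriving a per-file SipHash key from the master
 * key's HKDF state, with the file's nonce as the application-specific info.
 */
#if 0
	err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
				  ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
				  (u8 *)&ci->ci_dirhash_key,
				  sizeof(ci->ci_dirhash_key));
#endif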
void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf)
{
crypto_free_shash(hkdf->hmac_tfm);
}
/* ===== end of fs/crypto/hkdf.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* This contains encryption functions for per-file encryption.
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*
* Written by Michael Halcrow, 2014.
*
* Filename encryption additions
* Uday Savagaonkar, 2014
* Encryption policy handling additions
* Ildar Muslukhov, 2014
* Add fscrypt_pullback_bio_page()
* Jaegeuk Kim, 2015.
*
* This has not yet undergone a rigorous security audit.
*
* The usage of AES-XTS should conform to recommendations in NIST
* Special Publication 800-38E and IEEE P1619/D16.
*/
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
/**
* fscrypt_free_bounce_page() - free a ciphertext bounce page
* @bounce_page: the bounce page to free, or NULL
*
* Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
* or by fscrypt_alloc_bounce_page() directly.
*/
void fscrypt_free_bounce_page(struct page *bounce_page)
{
if (!bounce_page)
return;
set_page_private(bounce_page, (unsigned long)NULL);
ClearPagePrivate(bounce_page);
mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
/*
* Generate the IV for the given logical block number within the given file.
* For filenames encryption, lblk_num == 0.
*
* Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
* needs to know about any IV generation methods where the low bits of IV don't
* simply contain the lblk_num (e.g., IV_INO_LBLK_32).
*/
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
u8 flags = fscrypt_policy_flags(&ci->ci_policy);
memset(iv, 0, ci->ci_mode->ivsize);
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
WARN_ON_ONCE(lblk_num > U32_MAX);
WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
lblk_num |= (u64)ci->ci_inode->i_ino << 32;
} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
WARN_ON_ONCE(lblk_num > U32_MAX);
lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
}
iv->lblk_num = cpu_to_le64(lblk_num);
}
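/*
 * For reference, the IV layouts produced above (iv->lblk_num is
 * little-endian; the remaining IV bytes stay zero unless noted):
 *
 *	default:	 iv = lblk_num
 *	IV_INO_LBLK_64:	 iv = (i_ino << 32) | lblk_num	(32 bits each)
 *	IV_INO_LBLK_32:	 iv = (u32)(ci_hashed_ino + lblk_num)
 *	DIRECT_KEY:	 iv = lblk_num, plus the 16-byte file nonce copied
 *			 into iv->nonce
 */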
/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
unsigned int offs, gfp_t gfp_flags)
{
union fscrypt_iv iv;
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
int res = 0;
if (WARN_ON_ONCE(len <= 0))
return -EINVAL;
if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
return -EINVAL;
fscrypt_generate_iv(&iv, lblk_num, ci);
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req)
return -ENOMEM;
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
else
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
fscrypt_err(inode, "%scryption failed for block %llu: %d",
(rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
return res;
}
return 0;
}
/**
* fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
* pagecache page
* @page: The locked pagecache page containing the block(s) to encrypt
* @len: Total size of the block(s) to encrypt. Must be a nonzero
* multiple of the filesystem's block size.
* @offs: Byte offset within @page of the first block to encrypt. Must be
* a multiple of the filesystem's block size.
* @gfp_flags: Memory allocation flags. See details below.
*
* A new bounce page is allocated, and the specified block(s) are encrypted into
* it. In the bounce page, the ciphertext block(s) will be located at the same
* offsets at which the plaintext block(s) were located in the source page; any
* other parts of the bounce page will be left uninitialized. However, normally
* blocksize == PAGE_SIZE and the whole page is encrypted at once.
*
* This is for use by the filesystem's ->writepages() method.
*
* The bounce page allocation is mempool-backed, so it will always succeed when
* @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. However,
* only the first page of each bio can be allocated this way. To prevent
* deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
*
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
gfp_t gfp_flags)
{
const struct inode *inode = page->mapping->host;
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
struct page *ciphertext_page;
u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
(offs >> blockbits);
unsigned int i;
int err;
if (WARN_ON_ONCE(!PageLocked(page)))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
return ERR_PTR(-EINVAL);
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
if (!ciphertext_page)
return ERR_PTR(-ENOMEM);
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
page, ciphertext_page,
blocksize, i, gfp_flags);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
}
}
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)page);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
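/*
 * Illustrative sketch (not part of this file): how a filesystem's
 * ->writepages() path might consume the bounce page returned above.
 * example_submit_write() is a hypothetical stand-in for the filesystem's
 * bio submission; the fscrypt calls are the real API.
 */
#if 0
static int example_write_encrypted_page(struct page *page)
{
	struct page *bounce_page;

	/* Encrypt the whole pagecache page into a bounce page. */
	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       GFP_NOFS);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	/* Submit the ciphertext; the pagecache page itself stays plaintext. */
	example_submit_write(bounce_page);

	/* After the write has completed, release the bounce page. */
	fscrypt_free_bounce_page(bounce_page);
	return 0;
}
#endif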
/**
* fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to encrypt
* @len: Size of block to encrypt. This must be a multiple of
* FSCRYPT_CONTENTS_ALIGNMENT.
* @offs: Byte offset within @page at which the block to encrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
* @gfp_flags: Memory allocation flags
*
* Encrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
* fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
* pagecache folio
* @folio: The locked pagecache folio containing the block(s) to decrypt
* @len: Total size of the block(s) to decrypt. Must be a nonzero
* multiple of the filesystem's block size.
* @offs: Byte offset within @folio of the first block to decrypt. Must be
* a multiple of the filesystem's block size.
*
* The specified block(s) are decrypted in-place within the pagecache folio,
* which must still be locked and not uptodate.
*
* This is for use by the filesystem's ->readahead() method.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs)
{
const struct inode *inode = folio->mapping->host;
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
(offs >> blockbits);
size_t i;
int err;
if (WARN_ON_ONCE(!folio_test_locked(folio)))
return -EINVAL;
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
return -EINVAL;
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
struct page *page = folio_page(folio, i >> PAGE_SHIFT);
err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
page, blocksize, i & ~PAGE_MASK,
GFP_NOFS);
if (err)
return err;
}
return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
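/*
 * Illustrative sketch (not part of this file): decrypting a just-read folio
 * from a filesystem's post-read processing.  This must run in process
 * context (e.g. from a workqueue), since the decryption may sleep; error
 * handling beyond the uptodate decision is elided.
 */
#if 0
static void example_postread_decrypt(struct folio *folio)
{
	int err = fscrypt_decrypt_pagecache_blocks(folio, folio_size(folio),
						   0);

	if (!err)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
}
#endif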
/**
* fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to decrypt
* @len: Size of block to decrypt. This must be a multiple of
* FSCRYPT_CONTENTS_ALIGNMENT.
* @offs: Byte offset within @page at which the block to decrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
*
* Decrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @sb: the filesystem superblock
*
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_initialize(struct super_block *sb)
{
int err = 0;
mempool_t *pool;
/* pairs with smp_store_release() below */
if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
return 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
return 0;
mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
goto out_unlock;
err = -ENOMEM;
pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!pool)
goto out_unlock;
/* pairs with smp_load_acquire() above */
smp_store_release(&fscrypt_bounce_page_pool, pool);
err = 0;
out_unlock:
mutex_unlock(&fscrypt_init_mutex);
return err;
}
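/*
 * The function above uses a standard one-time-initialization idiom: a
 * lockless smp_load_acquire() fast path, then a mutex-protected slow path
 * that re-checks the pointer before allocating and finally publishes it
 * with smp_store_release().  A minimal generic sketch of the same idiom,
 * with hypothetical example_* names:
 */
#if 0
static struct foo *example_obj;
static DEFINE_MUTEX(example_mutex);

static struct foo *example_get_obj(void)
{
	struct foo *p = smp_load_acquire(&example_obj); /* fast path */

	if (p)
		return p;
	mutex_lock(&example_mutex);
	p = example_obj;
	if (!p) {
		p = example_alloc_obj();	/* hypothetical allocator */
		if (p)	/* publish only a fully initialized object */
			smp_store_release(&example_obj, p);
	}
	mutex_unlock(&example_mutex);
	return p;
}
#endif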
void fscrypt_msg(const struct inode *inode, const char *level,
const char *fmt, ...)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
struct va_format vaf;
va_list args;
if (!__ratelimit(&rs))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (inode && inode->i_ino)
printk("%sfscrypt (%s, inode %lu): %pV\n",
level, inode->i_sb->s_id, inode->i_ino, &vaf);
else if (inode)
printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
else
printk("%sfscrypt: %pV\n", level, &vaf);
va_end(args);
}
/**
* fscrypt_init() - Set up for fs encryption.
*
* Return: 0 on success; -errno on failure
*/
static int __init fscrypt_init(void)
{
int err = -ENOMEM;
/*
* Use an unbound workqueue to allow bios to be decrypted in parallel
* even when they happen to complete on the same CPU. This sacrifices
* locality, but it's worthwhile since decryption is CPU-intensive.
*
* Also use a high-priority workqueue to prioritize decryption work,
* which blocks reads from completing, over regular application tasks.
*/
fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
WQ_UNBOUND | WQ_HIGHPRI,
num_online_cpus());
if (!fscrypt_read_workqueue)
goto fail;
fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
if (!fscrypt_info_cachep)
goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
goto fail_free_info;
return 0;
fail_free_info:
kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
return err;
}
late_initcall(fscrypt_init)
| linux-master | fs/crypto/crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Key setup facility for FS encryption support.
*
* Copyright (C) 2015, Google, Inc.
*
* Originally written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar.
* Heavily modified since then.
*/
#include <crypto/skcipher.h>
#include <linux/random.h>
#include "fscrypt_private.h"
struct fscrypt_mode fscrypt_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
.cipher_str = "xts(aes)",
.keysize = 64,
.security_strength = 32,
.ivsize = 16,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
},
[FSCRYPT_MODE_AES_256_CTS] = {
.friendly_name = "AES-256-CTS-CBC",
.cipher_str = "cts(cbc(aes))",
.keysize = 32,
.security_strength = 32,
.ivsize = 16,
},
[FSCRYPT_MODE_AES_128_CBC] = {
.friendly_name = "AES-128-CBC-ESSIV",
.cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.security_strength = 16,
.ivsize = 16,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
},
[FSCRYPT_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
.security_strength = 16,
.ivsize = 16,
},
[FSCRYPT_MODE_SM4_XTS] = {
.friendly_name = "SM4-XTS",
.cipher_str = "xts(sm4)",
.keysize = 32,
.security_strength = 16,
.ivsize = 16,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_SM4_XTS,
},
[FSCRYPT_MODE_SM4_CTS] = {
.friendly_name = "SM4-CTS-CBC",
.cipher_str = "cts(cbc(sm4))",
.keysize = 16,
.security_strength = 16,
.ivsize = 16,
},
[FSCRYPT_MODE_ADIANTUM] = {
.friendly_name = "Adiantum",
.cipher_str = "adiantum(xchacha12,aes)",
.keysize = 32,
.security_strength = 32,
.ivsize = 32,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM,
},
[FSCRYPT_MODE_AES_256_HCTR2] = {
.friendly_name = "AES-256-HCTR2",
.cipher_str = "hctr2(aes)",
.keysize = 32,
.security_strength = 32,
.ivsize = 32,
},
};
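/*
 * Note (illustrative): .keysize is in bytes and counts the full transform
 * key, so AES-256-XTS is 64 (XTS uses two 256-bit AES keys), while
 * .security_strength reflects the actual cryptographic strength (32 bytes
 * for AES-256-XTS).  .ivsize must match what the crypto API reports for
 * the corresponding .cipher_str; this is checked when the tfm is allocated.
 */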
static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex);
static struct fscrypt_mode *
select_encryption_mode(const union fscrypt_policy *policy,
const struct inode *inode)
{
BUILD_BUG_ON(ARRAY_SIZE(fscrypt_modes) != FSCRYPT_MODE_MAX + 1);
if (S_ISREG(inode->i_mode))
return &fscrypt_modes[fscrypt_policy_contents_mode(policy)];
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)];
WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
inode->i_ino, (inode->i_mode & S_IFMT));
return ERR_PTR(-EINVAL);
}
/* Create a symmetric cipher object for the given encryption mode and key */
static struct crypto_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode)
{
struct crypto_skcipher *tfm;
int err;
tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
fscrypt_warn(inode,
"Missing crypto API support for %s (API name: \"%s\")",
mode->friendly_name, mode->cipher_str);
return ERR_PTR(-ENOPKG);
}
fscrypt_err(inode, "Error allocating '%s' transform: %ld",
mode->cipher_str, PTR_ERR(tfm));
return tfm;
}
if (!xchg(&mode->logged_cryptoapi_impl, 1)) {
/*
* fscrypt performance can vary greatly depending on which
* crypto algorithm implementation is used. Help people debug
* performance problems by logging the ->cra_driver_name the
* first time a mode is used.
*/
pr_info("fscrypt: %s using implementation \"%s\"\n",
mode->friendly_name, crypto_skcipher_driver_name(tfm));
}
if (WARN_ON_ONCE(crypto_skcipher_ivsize(tfm) != mode->ivsize)) {
err = -EINVAL;
goto err_free_tfm;
}
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
if (err)
goto err_free_tfm;
return tfm;
err_free_tfm:
crypto_free_skcipher(tfm);
return ERR_PTR(err);
}
/*
* Prepare the crypto transform object or blk-crypto key in @prep_key, given the
* raw key, encryption mode (@ci->ci_mode), flag indicating which encryption
* implementation (fs-layer or blk-crypto) will be used (@ci->ci_inlinecrypt),
* and IV generation method (@ci->ci_policy.flags).
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, const struct fscrypt_info *ci)
{
struct crypto_skcipher *tfm;
if (fscrypt_using_inline_encryption(ci))
return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/*
* Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
* I.e., here we publish ->tfm with a RELEASE barrier so that
* concurrent tasks can ACQUIRE it. Note that this concurrency is only
* possible for per-mode keys, not for per-file keys.
*/
smp_store_release(&prep_key->tfm, tfm);
return 0;
}
/* Destroy a crypto transform object and/or blk-crypto key. */
void fscrypt_destroy_prepared_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key)
{
crypto_free_skcipher(prep_key->tfm);
fscrypt_destroy_inline_crypt_key(sb, prep_key);
memzero_explicit(prep_key, sizeof(*prep_key));
}
/* Given a per-file encryption key, set up the file's crypto transform object */
int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
{
ci->ci_owns_key = true;
return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci);
}
static int setup_per_mode_enc_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
{
const struct inode *inode = ci->ci_inode;
const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
const u8 mode_num = mode - fscrypt_modes;
struct fscrypt_prepared_key *prep_key;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
unsigned int hkdf_infolen = 0;
int err;
if (WARN_ON_ONCE(mode_num > FSCRYPT_MODE_MAX))
return -EINVAL;
prep_key = &keys[mode_num];
if (fscrypt_is_key_prepared(prep_key, ci)) {
ci->ci_enc_key = *prep_key;
return 0;
}
mutex_lock(&fscrypt_mode_key_setup_mutex);
if (fscrypt_is_key_prepared(prep_key, ci))
goto done_unlock;
BUILD_BUG_ON(sizeof(mode_num) != 1);
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
hkdf_info[hkdf_infolen++] = mode_num;
if (include_fs_uuid) {
memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
sizeof(sb->s_uuid));
hkdf_infolen += sizeof(sb->s_uuid);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
goto out_unlock;
err = fscrypt_prepare_key(prep_key, mode_key, ci);
memzero_explicit(mode_key, mode->keysize);
if (err)
goto out_unlock;
done_unlock:
ci->ci_enc_key = *prep_key;
err = 0;
out_unlock:
mutex_unlock(&fscrypt_mode_key_setup_mutex);
return err;
}
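/*
 * Worked example of the HKDF info built above (illustrative): with
 * include_fs_uuid and mode_num 1 (AES-256-XTS), hkdf_info is the single
 * byte 0x01 followed by the 16-byte filesystem UUID, 17 bytes total.
 * Without the UUID it is just the one mode_num byte.  Distinct modes and
 * (optionally) distinct filesystems therefore derive distinct subkeys
 * from the same master key.
 */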
/*
* Derive a SipHash key from the given fscrypt master key and the given
* application-specific information string.
*
* Note that the KDF produces a byte array, but the SipHash APIs expect the key
* as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
* endianness swap in order to get the same results as on little endian CPUs.
*/
static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
u8 context, const u8 *info,
unsigned int infolen, siphash_key_t *key)
{
int err;
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
(u8 *)key, sizeof(*key));
if (err)
return err;
BUILD_BUG_ON(sizeof(*key) != 16);
BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
le64_to_cpus(&key->key[0]);
le64_to_cpus(&key->key[1]);
return 0;
}
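/*
 * Worked example of the endianness fixup above (illustrative): if the KDF
 * emits the bytes 01 02 03 04 05 06 07 08 for key->key[0], the SipHash
 * word must be 0x0807060504030201 on every architecture.  On little
 * endian CPUs le64_to_cpus() is a no-op; on big endian CPUs it swaps the
 * loaded word so the hash output matches the little endian result.
 */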
int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
const struct fscrypt_master_key *mk)
{
int err;
err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
&ci->ci_dirhash_key);
if (err)
return err;
ci->ci_dirhash_key_initialized = true;
return 0;
}
void fscrypt_hash_inode_number(struct fscrypt_info *ci,
const struct fscrypt_master_key *mk)
{
WARN_ON_ONCE(ci->ci_inode->i_ino == 0);
WARN_ON_ONCE(!mk->mk_ino_hash_key_initialized);
ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino,
&mk->mk_ino_hash_key);
}
static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk)
{
int err;
err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_32_keys,
HKDF_CONTEXT_IV_INO_LBLK_32_KEY, true);
if (err)
return err;
/* pairs with smp_store_release() below */
if (!smp_load_acquire(&mk->mk_ino_hash_key_initialized)) {
mutex_lock(&fscrypt_mode_key_setup_mutex);
if (mk->mk_ino_hash_key_initialized)
goto unlock;
err = fscrypt_derive_siphash_key(mk,
HKDF_CONTEXT_INODE_HASH_KEY,
NULL, 0, &mk->mk_ino_hash_key);
if (err)
goto unlock;
/* pairs with smp_load_acquire() above */
smp_store_release(&mk->mk_ino_hash_key_initialized, true);
unlock:
mutex_unlock(&fscrypt_mode_key_setup_mutex);
if (err)
return err;
}
/*
* New inodes may not have an inode number assigned yet.
* Hashing their inode number is delayed until later.
*/
if (ci->ci_inode->i_ino)
fscrypt_hash_inode_number(ci, mk);
return 0;
}
static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk,
bool need_dirhash_key)
{
int err;
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
* DIRECT_KEY: instead of deriving per-file encryption keys, the
* per-file nonce will be included in all the IVs. But unlike
* v1 policies, for v2 policies in this case we don't encrypt
* with the master key directly but rather derive a per-mode
* encryption key. This ensures that the master key is
* consistently used only for HKDF, avoiding key reuse issues.
*/
err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys,
HKDF_CONTEXT_DIRECT_KEY, false);
} else if (ci->ci_policy.v2.flags &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
/*
* IV_INO_LBLK_64: encryption keys are derived from (master_key,
* mode_num, filesystem_uuid), and inode number is included in
* the IVs. This format is optimized for use with inline
* encryption hardware compliant with the UFS standard.
*/
err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys,
HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
true);
} else if (ci->ci_policy.v2.flags &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk);
} else {
u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
HKDF_CONTEXT_PER_FILE_ENC_KEY,
ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
derived_key, ci->ci_mode->keysize);
if (err)
return err;
err = fscrypt_set_per_file_enc_key(ci, derived_key);
memzero_explicit(derived_key, ci->ci_mode->keysize);
}
if (err)
return err;
/* Derive a secret dirhash key for directories that need it. */
if (need_dirhash_key) {
err = fscrypt_derive_dirhash_key(ci, mk);
if (err)
return err;
}
return 0;
}
/*
* Check whether the size of the given master key (@mk) is appropriate for the
* encryption settings which a particular file will use (@ci).
*
* If the file uses a v1 encryption policy, then the master key must be at least
* as long as the derived key, as this is a requirement of the v1 KDF.
*
* Otherwise, the KDF can accept any size key, so we enforce a slightly looser
* requirement: we require that the size of the master key be at least the
* maximum security strength of any algorithm whose key will be derived from it
* (but in practice we only need to consider @ci->ci_mode, since any other
* possible subkeys such as DIRHASH and INODE_HASH will never increase the
* required key size over @ci->ci_mode). This allows AES-256-XTS keys to be
* derived from a 256-bit master key, which is cryptographically sufficient,
* rather than requiring a 512-bit master key which is unnecessarily long. (We
* still allow 512-bit master keys if the user chooses to use them, though.)
*/
static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
const struct fscrypt_info *ci)
{
unsigned int min_keysize;
if (ci->ci_policy.version == FSCRYPT_POLICY_V1)
min_keysize = ci->ci_mode->keysize;
else
min_keysize = ci->ci_mode->security_strength;
if (mk->mk_secret.size < min_keysize) {
fscrypt_warn(NULL,
"key with %s %*phN is too short (got %u bytes, need %u+ bytes)",
master_key_spec_type(&mk->mk_spec),
master_key_spec_len(&mk->mk_spec),
(u8 *)&mk->mk_spec.u,
mk->mk_secret.size, min_keysize);
return false;
}
return true;
}
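/*
 * Worked example (illustrative): AES-256-XTS has keysize 64 but
 * security_strength 32.  A v1 policy therefore requires a master key of
 * at least 64 bytes, while a v2 policy accepts anything from 32 bytes up,
 * since the HKDF-based v2 KDF doesn't need the master key to be as long
 * as the derived key.
 */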
/*
* Find the master key, then set up the inode's actual encryption key.
*
* If the master key is found in the filesystem-level keyring, then it is
* returned in *mk_ret with its semaphore read-locked. This is needed to ensure
* that only one task links the fscrypt_info into ->mk_decrypted_inodes (as
* multiple tasks may race to create an fscrypt_info for the same inode), and to
* synchronize the master key being removed with a new inode starting to use it.
*/
static int setup_file_encryption_key(struct fscrypt_info *ci,
bool need_dirhash_key,
struct fscrypt_master_key **mk_ret)
{
struct super_block *sb = ci->ci_inode->i_sb;
struct fscrypt_key_specifier mk_spec;
struct fscrypt_master_key *mk;
int err;
err = fscrypt_select_encryption_impl(ci);
if (err)
return err;
err = fscrypt_policy_to_key_spec(&ci->ci_policy, &mk_spec);
if (err)
return err;
mk = fscrypt_find_master_key(sb, &mk_spec);
if (unlikely(!mk)) {
const union fscrypt_policy *dummy_policy =
fscrypt_get_dummy_policy(sb);
/*
* Add the test_dummy_encryption key on-demand. In principle,
* it should be added at mount time. Do it here instead so that
* the individual filesystems don't need to worry about adding
* this key at mount time and cleaning up on mount failure.
*/
if (dummy_policy &&
fscrypt_policies_equal(dummy_policy, &ci->ci_policy)) {
err = fscrypt_add_test_dummy_key(sb, &mk_spec);
if (err)
return err;
mk = fscrypt_find_master_key(sb, &mk_spec);
}
}
if (unlikely(!mk)) {
if (ci->ci_policy.version != FSCRYPT_POLICY_V1)
return -ENOKEY;
/*
* As a legacy fallback for v1 policies, search for the key in
* the current task's subscribed keyrings too. Don't move this
* to before the search of ->s_master_keys, since users
* shouldn't be able to override filesystem-level keys.
*/
return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci);
}
down_read(&mk->mk_sem);
/* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
if (!is_master_key_secret_present(&mk->mk_secret)) {
err = -ENOKEY;
goto out_release_key;
}
if (!fscrypt_valid_master_key_size(mk, ci)) {
err = -ENOKEY;
goto out_release_key;
}
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
break;
case FSCRYPT_POLICY_V2:
err = fscrypt_setup_v2_file_key(ci, mk, need_dirhash_key);
break;
default:
WARN_ON_ONCE(1);
err = -EINVAL;
break;
}
if (err)
goto out_release_key;
*mk_ret = mk;
return 0;
out_release_key:
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
return err;
}
static void put_crypt_info(struct fscrypt_info *ci)
{
struct fscrypt_master_key *mk;
if (!ci)
return;
if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
else if (ci->ci_owns_key)
fscrypt_destroy_prepared_key(ci->ci_inode->i_sb,
&ci->ci_enc_key);
mk = ci->ci_master_key;
if (mk) {
/*
* Remove this inode from the list of inodes that were unlocked
* with the master key. In addition, if we're removing the last
* inode from a master key struct that already had its secret
* removed, then complete the full removal of the struct.
*/
spin_lock(&mk->mk_decrypted_inodes_lock);
list_del(&ci->ci_master_key_link);
spin_unlock(&mk->mk_decrypted_inodes_lock);
fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk);
}
memzero_explicit(ci, sizeof(*ci));
kmem_cache_free(fscrypt_info_cachep, ci);
}
static int
fscrypt_setup_encryption_info(struct inode *inode,
const union fscrypt_policy *policy,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
bool need_dirhash_key)
{
struct fscrypt_info *crypt_info;
struct fscrypt_mode *mode;
struct fscrypt_master_key *mk = NULL;
int res;
res = fscrypt_initialize(inode->i_sb);
if (res)
return res;
crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_KERNEL);
if (!crypt_info)
return -ENOMEM;
crypt_info->ci_inode = inode;
crypt_info->ci_policy = *policy;
memcpy(crypt_info->ci_nonce, nonce, FSCRYPT_FILE_NONCE_SIZE);
mode = select_encryption_mode(&crypt_info->ci_policy, inode);
if (IS_ERR(mode)) {
res = PTR_ERR(mode);
goto out;
}
WARN_ON_ONCE(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)
goto out;
/*
* For existing inodes, multiple tasks may race to set ->i_crypt_info.
* So use cmpxchg_release(). This pairs with the smp_load_acquire() in
* fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a
* RELEASE barrier so that other tasks can ACQUIRE it.
*/
if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
/*
* We won the race and set ->i_crypt_info to our crypt_info.
* Now link it into the master key's inode list.
*/
if (mk) {
crypt_info->ci_master_key = mk;
refcount_inc(&mk->mk_active_refs);
spin_lock(&mk->mk_decrypted_inodes_lock);
list_add(&crypt_info->ci_master_key_link,
&mk->mk_decrypted_inodes);
spin_unlock(&mk->mk_decrypted_inodes_lock);
}
crypt_info = NULL;
}
res = 0;
out:
if (mk) {
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
}
put_crypt_info(crypt_info);
return res;
}
/**
* fscrypt_get_encryption_info() - set up an inode's encryption key
* @inode: the inode to set up the key for. Must be encrypted.
* @allow_unsupported: if %true, treat an unsupported encryption policy (or
* unrecognized encryption context) the same way as the key
* being unavailable, instead of returning an error. Use
* %false unless the operation being performed is needed in
* order for files (or directories) to be deleted.
*
* Set up ->i_crypt_info, if it hasn't already been done.
*
* Note: unless ->i_crypt_info is already set, this isn't %GFP_NOFS-safe. So
* generally this shouldn't be called from within a filesystem transaction.
*
* Return: 0 if ->i_crypt_info was set or was already set, *or* if the
* encryption key is unavailable. (Use fscrypt_has_encryption_key() to
* distinguish these cases.) Also can return another -errno code.
*/
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
{
int res;
union fscrypt_context ctx;
union fscrypt_policy policy;
if (fscrypt_has_encryption_key(inode))
return 0;
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (res < 0) {
if (res == -ERANGE && allow_unsupported)
return 0;
fscrypt_warn(inode, "Error %d getting encryption context", res);
return res;
}
res = fscrypt_policy_from_context(&policy, &ctx, res);
if (res) {
if (allow_unsupported)
return 0;
fscrypt_warn(inode,
"Unrecognized or corrupt encryption context");
return res;
}
if (!fscrypt_supported_policy(&policy, inode)) {
if (allow_unsupported)
return 0;
return -EINVAL;
}
res = fscrypt_setup_encryption_info(inode, &policy,
fscrypt_context_nonce(&ctx),
IS_CASEFOLDED(inode) &&
S_ISDIR(inode->i_mode));
if (res == -ENOPKG && allow_unsupported) /* Algorithm unavailable? */
res = 0;
if (res == -ENOKEY)
res = 0;
return res;
}
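/*
 * Illustrative caller pattern (not from this file): since a missing key is
 * reported as success, callers that require the key must follow up with
 * fscrypt_has_encryption_key():
 */
#if 0
static int example_require_key(struct inode *inode)
{
	int err = fscrypt_get_encryption_info(inode, false);

	if (err)
		return err;
	return fscrypt_has_encryption_key(inode) ? 0 : -ENOKEY;
}
#endif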
/**
* fscrypt_prepare_new_inode() - prepare to create a new inode in a directory
* @dir: a possibly-encrypted directory
* @inode: the new inode. ->i_mode must be set already.
* ->i_ino doesn't need to be set yet.
* @encrypt_ret: (output) set to %true if the new inode will be encrypted
*
* If the directory is encrypted, set up its ->i_crypt_info in preparation for
* encrypting the name of the new file. Also, if the new inode will be
* encrypted, set up its ->i_crypt_info and set *encrypt_ret=true.
*
* This isn't %GFP_NOFS-safe, and therefore it should be called before starting
* any filesystem transaction to create the inode. For this reason, ->i_ino
* isn't required to be set yet, as the filesystem may not have set it yet.
*
* This doesn't persist the new inode's encryption context. That still needs to
* be done later by calling fscrypt_set_context().
*
* Return: 0 on success, -ENOKEY if the encryption key is missing, or another
* -errno code
*/
int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
bool *encrypt_ret)
{
const union fscrypt_policy *policy;
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
policy = fscrypt_policy_to_inherit(dir);
if (policy == NULL)
return 0;
if (IS_ERR(policy))
return PTR_ERR(policy);
if (WARN_ON_ONCE(inode->i_mode == 0))
return -EINVAL;
/*
* Only regular files, directories, and symlinks are encrypted.
* Special files like device nodes and named pipes aren't.
*/
if (!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode) &&
!S_ISLNK(inode->i_mode))
return 0;
*encrypt_ret = true;
get_random_bytes(nonce, FSCRYPT_FILE_NONCE_SIZE);
return fscrypt_setup_encryption_info(inode, policy, nonce,
IS_CASEFOLDED(dir) &&
S_ISDIR(inode->i_mode));
}
EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode);
/**
* fscrypt_put_encryption_info() - free most of an inode's fscrypt data
* @inode: an inode being evicted
*
* Free the inode's fscrypt_info. Filesystems must call this when the inode is
* being evicted. An RCU grace period need not have elapsed yet.
*/
void fscrypt_put_encryption_info(struct inode *inode)
{
put_crypt_info(inode->i_crypt_info);
inode->i_crypt_info = NULL;
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
/**
* fscrypt_free_inode() - free an inode's fscrypt data requiring RCU delay
* @inode: an inode being freed
*
* Free the inode's cached decrypted symlink target, if any. Filesystems must
* call this after an RCU grace period, just before they free the inode.
*/
void fscrypt_free_inode(struct inode *inode)
{
if (IS_ENCRYPTED(inode) && S_ISLNK(inode->i_mode)) {
kfree(inode->i_link);
inode->i_link = NULL;
}
}
EXPORT_SYMBOL(fscrypt_free_inode);
/**
* fscrypt_drop_inode() - check whether the inode's master key has been removed
* @inode: an inode being considered for eviction
*
* Filesystems supporting fscrypt must call this from their ->drop_inode()
* method so that encrypted inodes are evicted as soon as they're no longer in
* use and their master key has been removed.
*
* Return: 1 if fscrypt wants the inode to be evicted now, otherwise 0
*/
int fscrypt_drop_inode(struct inode *inode)
{
const struct fscrypt_info *ci = fscrypt_get_info(inode);
/*
* If ci is NULL, then the inode doesn't have an encryption key set up
* so it's irrelevant. If ci_master_key is NULL, then the master key
* was provided via the legacy mechanism of the process-subscribed
* keyrings, so we don't know whether it's been removed or not.
*/
if (!ci || !ci->ci_master_key)
return 0;
/*
* With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
* protected by the key were cleaned by sync_filesystem(). But if
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
if (inode->i_state & I_DIRTY_ALL)
return 0;
/*
* Note: since we aren't holding the key semaphore, the result here can
* immediately become outdated. But there's no correctness problem with
* unnecessarily evicting. Nor is there a correctness problem with not
* evicting while iput() is racing with the key being removed, since
* then the thread removing the key will either evict the inode itself
* or will correctly detect that it wasn't evicted due to the race.
*/
return !is_master_key_secret_present(&ci->ci_master_key->mk_secret);
}
EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
| linux-master | fs/crypto/keysetup.c |
// SPDX-License-Identifier: GPL-2.0-only
/* inode.c: /proc/openprom handling routines
*
* Copyright (C) 1996-1999 Jakub Jelinek ([email protected])
* Copyright (C) 1998 Eddie C. Dost ([email protected])
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
static DEFINE_MUTEX(op_mutex);
#define OPENPROM_ROOT_INO 0
enum op_inode_type {
op_inode_node,
op_inode_prop,
};
union op_inode_data {
struct device_node *node;
struct property *prop;
};
struct op_inode_info {
struct inode vfs_inode;
enum op_inode_type type;
union op_inode_data u;
};
static struct inode *openprom_iget(struct super_block *sb, ino_t ino);
static inline struct op_inode_info *OP_I(struct inode *inode)
{
return container_of(inode, struct op_inode_info, vfs_inode);
}
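/*
 * Heuristic: treat the property value as text if every byte is printable,
 * allowing NUL separators anywhere past the first byte (OpenPROM
 * properties may hold multiple NUL-terminated strings back to back).
 */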
static int is_string(unsigned char *p, int len)
{
int i;
for (i = 0; i < len; i++) {
unsigned char val = p[i];
if ((i && !val) ||
(val >= ' ' && val <= '~'))
continue;
return 0;
}
return 1;
}
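/*
 * Example (illustrative): a value like "SUNW,Ultra-5_10\0" passes the test
 * above (printable bytes, NUL only past the first position), so
 * property_show() renders it as text; a 4-byte big endian integer such as
 * 00 00 00 01 fails at the leading 0x00 byte and is dumped in hex instead.
 */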
static int property_show(struct seq_file *f, void *v)
{
struct property *prop = f->private;
void *pval;
int len;
len = prop->length;
pval = prop->value;
if (is_string(pval, len)) {
while (len > 0) {
int n = strlen(pval);
seq_printf(f, "%s", (char *) pval);
/* Skip over the NULL byte too. */
pval += n + 1;
len -= n + 1;
if (len > 0)
seq_printf(f, " + ");
}
} else {
if (len & 3) {
while (len) {
len--;
if (len)
seq_printf(f, "%02x.",
*(unsigned char *) pval);
else
seq_printf(f, "%02x",
*(unsigned char *) pval);
pval++;
}
} else {
while (len >= 4) {
len -= 4;
if (len)
seq_printf(f, "%08x.",
*(unsigned int *) pval);
else
seq_printf(f, "%08x",
*(unsigned int *) pval);
pval += 4;
}
}
}
seq_printf(f, "\n");
return 0;
}
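/*
 * The property is rendered as a single seq_file record: ->start yields a
 * token only at pos 0, and ->next always ends the iteration, so ->show
 * runs exactly once per read of the file.
 */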
static void *property_start(struct seq_file *f, loff_t *pos)
{
if (*pos == 0)
return pos;
return NULL;
}
static void *property_next(struct seq_file *f, void *v, loff_t *pos)
{
(*pos)++;
return NULL;
}
static void property_stop(struct seq_file *f, void *v)
{
/* Nothing to do */
}
static const struct seq_operations property_op = {
.start = property_start,
.next = property_next,
.stop = property_stop,
.show = property_show
};
static int property_open(struct inode *inode, struct file *file)
{
struct op_inode_info *oi = OP_I(inode);
int ret;
BUG_ON(oi->type != op_inode_prop);
ret = seq_open(file, &property_op);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = oi->u.prop;
}
return ret;
}
static const struct file_operations openpromfs_prop_ops = {
.open = property_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int openpromfs_readdir(struct file *, struct dir_context *);
static const struct file_operations openprom_operations = {
.read = generic_read_dir,
.iterate_shared = openpromfs_readdir,
.llseek = generic_file_llseek,
};
static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, unsigned int);
static const struct inode_operations openprom_inode_operations = {
.lookup = openpromfs_lookup,
};
static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct op_inode_info *ent_oi, *oi = OP_I(dir);
struct device_node *dp, *child;
struct property *prop;
enum op_inode_type ent_type;
union op_inode_data ent_data;
const char *name;
struct inode *inode;
unsigned int ino;
int len;
BUG_ON(oi->type != op_inode_node);
dp = oi->u.node;
name = dentry->d_name.name;
len = dentry->d_name.len;
mutex_lock(&op_mutex);
child = dp->child;
while (child) {
const char *node_name = kbasename(child->full_name);
int n = strlen(node_name);
if (len == n &&
!strncmp(node_name, name, len)) {
ent_type = op_inode_node;
ent_data.node = child;
ino = child->unique_id;
goto found;
}
child = child->sibling;
}
prop = dp->properties;
while (prop) {
int n = strlen(prop->name);
if (len == n && !strncmp(prop->name, name, len)) {
ent_type = op_inode_prop;
ent_data.prop = prop;
ino = prop->unique_id;
goto found;
}
prop = prop->next;
}
mutex_unlock(&op_mutex);
return ERR_PTR(-ENOENT);
found:
inode = openprom_iget(dir->i_sb, ino);
mutex_unlock(&op_mutex);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (inode->i_state & I_NEW) {
inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;
ent_oi->u = ent_data;
switch (ent_type) {
case op_inode_node:
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
inode->i_op = &openprom_inode_operations;
inode->i_fop = &openprom_operations;
set_nlink(inode, 2);
break;
case op_inode_prop:
if (of_node_name_eq(dp, "options") && (len == 17) &&
!strncmp (name, "security-password", 17))
inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
else
inode->i_mode = S_IFREG | S_IRUGO;
inode->i_fop = &openpromfs_prop_ops;
set_nlink(inode, 1);
inode->i_size = ent_oi->u.prop->length;
break;
}
unlock_new_inode(inode);
}
return d_splice_alias(inode, dentry);
}
static int openpromfs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct op_inode_info *oi = OP_I(inode);
struct device_node *dp = oi->u.node;
struct device_node *child;
struct property *prop;
int i;
mutex_lock(&op_mutex);
if (ctx->pos == 0) {
if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
goto out;
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!dir_emit(ctx, "..", 2,
(dp->parent == NULL ?
OPENPROM_ROOT_INO :
dp->parent->unique_id), DT_DIR))
goto out;
ctx->pos = 2;
}
i = ctx->pos - 2;
/* First, the children nodes as directories. */
child = dp->child;
while (i && child) {
child = child->sibling;
i--;
}
while (child) {
if (!dir_emit(ctx,
kbasename(child->full_name),
strlen(kbasename(child->full_name)),
child->unique_id, DT_DIR))
goto out;
ctx->pos++;
child = child->sibling;
}
/* Next, the properties as files. */
prop = dp->properties;
while (i && prop) {
prop = prop->next;
i--;
}
while (prop) {
if (!dir_emit(ctx, prop->name, strlen(prop->name),
prop->unique_id, DT_REG))
goto out;
ctx->pos++;
prop = prop->next;
}
out:
mutex_unlock(&op_mutex);
return 0;
}
static struct kmem_cache *op_inode_cachep;
static struct inode *openprom_alloc_inode(struct super_block *sb)
{
struct op_inode_info *oi;
oi = alloc_inode_sb(sb, op_inode_cachep, GFP_KERNEL);
if (!oi)
return NULL;
return &oi->vfs_inode;
}
static void openprom_free_inode(struct inode *inode)
{
kmem_cache_free(op_inode_cachep, OP_I(inode));
}
static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
{
struct inode *inode = iget_locked(sb, ino);
if (!inode)
inode = ERR_PTR(-ENOMEM);
return inode;
}
static int openprom_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
*flags |= SB_NOATIME;
return 0;
}
static const struct super_operations openprom_sops = {
.alloc_inode = openprom_alloc_inode,
.free_inode = openprom_free_inode,
.statfs = simple_statfs,
.remount_fs = openprom_remount,
};
static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
{
struct inode *root_inode;
struct op_inode_info *oi;
int ret;
s->s_flags |= SB_NOATIME;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = OPENPROM_SUPER_MAGIC;
s->s_op = &openprom_sops;
s->s_time_gran = 1;
root_inode = openprom_iget(s, OPENPROM_ROOT_INO);
if (IS_ERR(root_inode)) {
ret = PTR_ERR(root_inode);
goto out_no_root;
}
root_inode->i_mtime = root_inode->i_atime = inode_set_ctime_current(root_inode);
root_inode->i_op = &openprom_inode_operations;
root_inode->i_fop = &openprom_operations;
root_inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
oi = OP_I(root_inode);
oi->type = op_inode_node;
oi->u.node = of_find_node_by_path("/");
unlock_new_inode(root_inode);
s->s_root = d_make_root(root_inode);
if (!s->s_root)
goto out_no_root_dentry;
return 0;
out_no_root_dentry:
ret = -ENOMEM;
out_no_root:
printk("openprom_fill_super: get root inode failed\n");
return ret;
}
static int openpromfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, openprom_fill_super);
}
static const struct fs_context_operations openpromfs_context_ops = {
.get_tree = openpromfs_get_tree,
};
static int openpromfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &openpromfs_context_ops;
return 0;
}
static struct file_system_type openprom_fs_type = {
.owner = THIS_MODULE,
.name = "openpromfs",
.init_fs_context = openpromfs_init_fs_context,
.kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("openpromfs");
static void op_inode_init_once(void *data)
{
struct op_inode_info *oi = (struct op_inode_info *) data;
inode_init_once(&oi->vfs_inode);
}
static int __init init_openprom_fs(void)
{
int err;
op_inode_cachep = kmem_cache_create("op_inode_cache",
sizeof(struct op_inode_info),
0,
(SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD | SLAB_ACCOUNT),
op_inode_init_once);
if (!op_inode_cachep)
return -ENOMEM;
err = register_filesystem(&openprom_fs_type);
if (err)
kmem_cache_destroy(op_inode_cachep);
return err;
}
static void __exit exit_openprom_fs(void)
{
unregister_filesystem(&openprom_fs_type);
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(op_inode_cachep);
}
module_init(init_openprom_fs)
module_exit(exit_openprom_fs)
MODULE_LICENSE("GPL");
| linux-master | fs/openpromfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
va_end(args);
}
void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_info("(device %s): %pV", sb->s_id, &vaf);
va_end(args);
}
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
size_t len = 1 << EROFS_SB(sb)->blkszbits;
struct erofs_super_block *dsb;
u32 expected_crc, crc;
if (len > EROFS_SUPER_OFFSET)
len -= EROFS_SUPER_OFFSET;
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
if (!dsb)
return -ENOMEM;
expected_crc = le32_to_cpu(dsb->checksum);
dsb->checksum = 0;
/* to allow for x86 boot sectors and other oddities. */
crc = crc32c(~0, dsb, len);
kfree(dsb);
if (crc != expected_crc) {
erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
crc, expected_crc);
return -EBADMSG;
}
return 0;
}
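/*
 * Illustrative counterpart (not part of this file): an image builder would
 * generate the on-disk value the same way, zeroing the checksum field and
 * running crc32c with a ~0 seed over the superblock bytes starting at
 * EROFS_SUPER_OFFSET:
 */
#if 0
static __le32 example_compute_sb_checksum(struct erofs_super_block *dsb,
					  size_t len)
{
	dsb->checksum = 0;
	return cpu_to_le32(crc32c(~0, dsb, len));
}
#endif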
static void erofs_inode_init_once(void *ptr)
{
struct erofs_inode *vi = ptr;
inode_init_once(&vi->vfs_inode);
}
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
struct erofs_inode *vi =
alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
if (!vi)
return NULL;
/* zero out everything except vfs_inode */
memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
return &vi->vfs_inode;
}
static void erofs_free_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
if (inode->i_op == &erofs_fast_symlink_iops)
kfree(inode->i_link);
kfree(vi->xattr_shared_xattrs);
kmem_cache_free(erofs_inode_cachep, vi);
}
static bool check_layout_compatibility(struct super_block *sb,
struct erofs_super_block *dsb)
{
const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
EROFS_SB(sb)->feature_incompat = feature;
/* check if current kernel meets all mandatory requirements */
if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
feature & ~EROFS_ALL_FEATURE_INCOMPAT);
return false;
}
return true;
}
/* read variable-sized metadata, offset will be aligned by 4-byte */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
erofs_off_t *offset, int *lengthp)
{
u8 *buffer, *ptr;
int len, i, cnt;
*offset = round_up(*offset, 4);
ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
if (IS_ERR(ptr))
return ptr;
len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
if (!len)
len = U16_MAX + 1;
buffer = kmalloc(len, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
*offset += sizeof(__le16);
*lengthp = len;
for (i = 0; i < len; i += cnt) {
cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
len - i);
ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
if (IS_ERR(ptr)) {
kfree(buffer);
return ptr;
}
memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
*offset += cnt;
}
return buffer;
}
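/*
 * Illustrative on-disk layout of the variable-sized metadata read above:
 * at a 4-byte-aligned offset there is a __le16 length prefix (0 encodes
 * U16_MAX + 1 bytes), immediately followed by the payload, which may
 * straddle block boundaries and is therefore copied out block by block.
 */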
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_load_compr_cfgs(struct super_block *sb,
struct erofs_super_block *dsb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
unsigned int algs, alg;
erofs_off_t offset;
int size, ret = 0;
sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
return -EINVAL;
}
erofs_init_metabuf(&buf, sb);
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
void *data;
if (!(algs & 1))
continue;
data = erofs_read_metadata(sb, &buf, &offset, &size);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
switch (alg) {
case Z_EROFS_COMPRESSION_LZ4:
ret = z_erofs_load_lz4_config(sb, dsb, data, size);
break;
case Z_EROFS_COMPRESSION_LZMA:
ret = z_erofs_load_lzma_config(sb, dsb, data, size);
break;
case Z_EROFS_COMPRESSION_DEFLATE:
ret = z_erofs_load_deflate_config(sb, dsb, data, size);
break;
default:
DBG_BUGON(1);
ret = -EFAULT;
}
kfree(data);
if (ret)
break;
}
erofs_put_metabuf(&buf);
return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
struct erofs_super_block *dsb)
{
if (dsb->u1.available_compr_algs) {
erofs_err(sb, "try to load compressed fs when compression is disabled");
return -EINVAL;
}
return 0;
}
#endif
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_device_info *dif, erofs_off_t *pos)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
struct erofs_deviceslot *dis;
struct block_device *bdev;
void *ptr;
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
dis = ptr + erofs_blkoff(sb, *pos);
if (!dif->path) {
if (!dis->tag[0]) {
erofs_err(sb, "empty device tag @ pos %llu", *pos);
return -EINVAL;
}
dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
if (!dif->path)
return -ENOMEM;
}
if (erofs_is_fscache_mode(sb)) {
fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
if (IS_ERR(fscache))
return PTR_ERR(fscache);
dif->fscache = fscache;
} else if (!sbi->devs->flatdev) {
bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
dif->bdev = bdev;
dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
NULL, NULL);
}
dif->blocks = le32_to_cpu(dis->blocks);
dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
sbi->total_blocks += dif->blocks;
*pos += EROFS_DEVT_SLOT_SIZE;
return 0;
}
static int erofs_scan_devices(struct super_block *sb,
struct erofs_super_block *dsb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
unsigned int ondisk_extradevs;
erofs_off_t pos;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_device_info *dif;
int id, err = 0;
sbi->total_blocks = sbi->primarydevice_blocks;
if (!erofs_sb_has_device_table(sbi))
ondisk_extradevs = 0;
else
ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
if (sbi->devs->extra_devices &&
ondisk_extradevs != sbi->devs->extra_devices) {
erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
ondisk_extradevs, sbi->devs->extra_devices);
return -EINVAL;
}
if (!ondisk_extradevs)
return 0;
if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
sbi->devs->flatdev = true;
sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
down_read(&sbi->devs->rwsem);
if (sbi->devs->extra_devices) {
idr_for_each_entry(&sbi->devs->tree, dif, id) {
err = erofs_init_device(&buf, sb, dif, &pos);
if (err)
break;
}
} else {
for (id = 0; id < ondisk_extradevs; id++) {
dif = kzalloc(sizeof(*dif), GFP_KERNEL);
if (!dif) {
err = -ENOMEM;
break;
}
err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
if (err < 0) {
kfree(dif);
break;
}
++sbi->devs->extra_devices;
err = erofs_init_device(&buf, sb, dif, &pos);
if (err)
break;
}
}
up_read(&sbi->devs->rwsem);
erofs_put_metabuf(&buf);
return err;
}
static int erofs_read_superblock(struct super_block *sb)
{
struct erofs_sb_info *sbi;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_super_block *dsb;
void *data;
int ret;
data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
if (IS_ERR(data)) {
erofs_err(sb, "cannot read erofs superblock");
return PTR_ERR(data);
}
sbi = EROFS_SB(sb);
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
ret = -EINVAL;
if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
erofs_err(sb, "cannot find valid erofs superblock");
goto out;
}
sbi->blkszbits = dsb->blkszbits;
if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
goto out;
}
if (dsb->dirblkbits) {
erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
goto out;
}
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
if (erofs_sb_has_sb_chksum(sbi)) {
ret = erofs_superblock_csum_verify(sb, data);
if (ret)
goto out;
}
ret = -EINVAL;
if (!check_layout_compatibility(sb, dsb))
goto out;
sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
sbi->sb_size);
goto out;
}
sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
sbi->xattr_prefix_count = dsb->xattr_prefix_count;
sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
sbi->root_nid = le16_to_cpu(dsb->root_nid);
sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
sbi->inos = le64_to_cpu(dsb->inos);
sbi->build_time = le64_to_cpu(dsb->build_time);
sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
ret = strscpy(sbi->volume_name, dsb->volume_name,
sizeof(dsb->volume_name));
if (ret < 0) { /* -E2BIG */
erofs_err(sb, "bad volume name without NIL terminator");
ret = -EFSCORRUPTED;
goto out;
}
/* parse on-disk compression configurations */
if (erofs_sb_has_compr_cfgs(sbi))
ret = erofs_load_compr_cfgs(sb, dsb);
else
ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
if (ret < 0)
goto out;
/* handle multiple devices */
ret = erofs_scan_devices(sb, dsb);
if (erofs_is_fscache_mode(sb))
erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
out:
erofs_put_metabuf(&buf);
return ret;
}
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
ctx->opt.max_sync_decompress_pages = 3;
ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
set_opt(&ctx->opt, POSIX_ACL);
#endif
}
enum {
Opt_user_xattr,
Opt_acl,
Opt_cache_strategy,
Opt_dax,
Opt_dax_enum,
Opt_device,
Opt_fsid,
Opt_domain_id,
Opt_err
};
static const struct constant_table erofs_param_cache_strategy[] = {
{"disabled", EROFS_ZIP_CACHE_DISABLED},
{"readahead", EROFS_ZIP_CACHE_READAHEAD},
{"readaround", EROFS_ZIP_CACHE_READAROUND},
{}
};
static const struct constant_table erofs_dax_param_enums[] = {
{"always", EROFS_MOUNT_DAX_ALWAYS},
{"never", EROFS_MOUNT_DAX_NEVER},
{}
};
static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_flag_no("user_xattr", Opt_user_xattr),
fsparam_flag_no("acl", Opt_acl),
fsparam_enum("cache_strategy", Opt_cache_strategy,
erofs_param_cache_strategy),
fsparam_flag("dax", Opt_dax),
fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
fsparam_string("device", Opt_device),
fsparam_string("fsid", Opt_fsid),
fsparam_string("domain_id", Opt_domain_id),
{}
};
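/*
 * Example (illustrative) mount invocation exercising the parameters above:
 *
 *   mount -t erofs -o cache_strategy=readaround,dax=never \
 *         -o device=/dev/sdb /dev/sda /mnt
 *
 * "fsid=" and "domain_id=" only take effect with CONFIG_EROFS_FS_ONDEMAND
 * enabled.
 */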
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
struct erofs_fs_context *ctx = fc->fs_private;
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
set_opt(&ctx->opt, DAX_ALWAYS);
clear_opt(&ctx->opt, DAX_NEVER);
return true;
case EROFS_MOUNT_DAX_NEVER:
set_opt(&ctx->opt, DAX_NEVER);
clear_opt(&ctx->opt, DAX_ALWAYS);
return true;
default:
DBG_BUGON(1);
return false;
}
#else
errorfc(fc, "dax options not supported");
return false;
#endif
}
static int erofs_fc_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
struct erofs_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
struct erofs_device_info *dif;
int opt, ret;
opt = fs_parse(fc, erofs_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
if (result.boolean)
set_opt(&ctx->opt, XATTR_USER);
else
clear_opt(&ctx->opt, XATTR_USER);
#else
errorfc(fc, "{,no}user_xattr options not supported");
#endif
break;
case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (result.boolean)
set_opt(&ctx->opt, POSIX_ACL);
else
clear_opt(&ctx->opt, POSIX_ACL);
#else
errorfc(fc, "{,no}acl options not supported");
#endif
break;
case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
ctx->opt.cache_strategy = result.uint_32;
#else
errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
break;
case Opt_dax:
if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
return -EINVAL;
break;
case Opt_dax_enum:
if (!erofs_fc_set_dax_mode(fc, result.uint_32))
return -EINVAL;
break;
case Opt_device:
dif = kzalloc(sizeof(*dif), GFP_KERNEL);
if (!dif)
return -ENOMEM;
dif->path = kstrdup(param->string, GFP_KERNEL);
if (!dif->path) {
kfree(dif);
return -ENOMEM;
}
down_write(&ctx->devs->rwsem);
ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
up_write(&ctx->devs->rwsem);
if (ret < 0) {
kfree(dif->path);
kfree(dif);
return ret;
}
++ctx->devs->extra_devices;
break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
case Opt_fsid:
kfree(ctx->fsid);
ctx->fsid = kstrdup(param->string, GFP_KERNEL);
if (!ctx->fsid)
return -ENOMEM;
break;
case Opt_domain_id:
kfree(ctx->domain_id);
ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
if (!ctx->domain_id)
return -ENOMEM;
break;
#else
case Opt_fsid:
case Opt_domain_id:
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
break;
#endif
default:
return -ENOPARAM;
}
return 0;
}
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
return erofs_iget(sb, ino);
}
static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
erofs_nfs_get_inode);
}
static struct dentry *erofs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
erofs_nfs_get_inode);
}
static struct dentry *erofs_get_parent(struct dentry *child)
{
erofs_nid_t nid;
unsigned int d_type;
int err;
err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
if (err)
return ERR_PTR(err);
return d_obtain_alias(erofs_iget(child->d_sb, nid));
}
static const struct export_operations erofs_export_ops = {
.fh_to_dentry = erofs_fh_to_dentry,
.fh_to_parent = erofs_fh_to_parent,
.get_parent = erofs_get_parent,
};
static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr empty_descr = {""};
return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
}
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct erofs_sb_info *sbi;
struct erofs_fs_context *ctx = fc->fs_private;
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
sb->s_flags |= SB_RDONLY | SB_NOATIME;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &erofs_sops;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
sbi->opt = ctx->opt;
sbi->devs = ctx->devs;
ctx->devs = NULL;
sbi->fsid = ctx->fsid;
ctx->fsid = NULL;
sbi->domain_id = ctx->domain_id;
ctx->domain_id = NULL;
sbi->blkszbits = PAGE_SHIFT;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
err = erofs_fscache_register_fs(sb);
if (err)
return err;
err = super_setup_bdi(sb);
if (err)
return err;
} else {
if (!sb_set_blocksize(sb, PAGE_SIZE)) {
errorfc(fc, "failed to set initial blksize");
return -EINVAL;
}
sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
&sbi->dax_part_off,
NULL, NULL);
}
err = erofs_read_superblock(sb);
if (err)
return err;
if (sb->s_blocksize_bits != sbi->blkszbits) {
if (erofs_is_fscache_mode(sb)) {
errorfc(fc, "unsupported blksize for fscache mode");
return -EINVAL;
}
if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
errorfc(fc, "failed to set erofs blksize");
return -EINVAL;
}
}
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
if (!sbi->dax_dev) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
clear_opt(&sbi->opt, DAX_ALWAYS);
} else if (sbi->blkszbits != PAGE_SHIFT) {
errorfc(fc, "unsupported blocksize for DAX");
clear_opt(&sbi->opt, DAX_ALWAYS);
}
}
sb->s_time_gran = 1;
sb->s_xattr = erofs_xattr_handlers;
sb->s_export_op = &erofs_export_ops;
if (test_opt(&sbi->opt, POSIX_ACL))
sb->s_flags |= SB_POSIXACL;
else
sb->s_flags &= ~SB_POSIXACL;
#ifdef CONFIG_EROFS_FS_ZIP
xa_init(&sbi->managed_pslots);
#endif
inode = erofs_iget(sb, ROOT_NID(sbi));
if (IS_ERR(inode))
return PTR_ERR(inode);
if (!S_ISDIR(inode->i_mode)) {
erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
ROOT_NID(sbi), inode->i_mode);
iput(inode);
return -EINVAL;
}
sb->s_root = d_make_root(inode);
if (!sb->s_root)
return -ENOMEM;
erofs_shrinker_register(sb);
if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
if (IS_ERR(sbi->packed_inode)) {
err = PTR_ERR(sbi->packed_inode);
sbi->packed_inode = NULL;
return err;
}
}
err = erofs_init_managed_cache(sb);
if (err)
return err;
err = erofs_xattr_prefixes_init(sb);
if (err)
return err;
err = erofs_register_sysfs(sb);
if (err)
return err;
erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
return 0;
}
static int erofs_fc_anon_get_tree(struct fs_context *fc)
{
return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
}
static int erofs_fc_get_tree(struct fs_context *fc)
{
struct erofs_fs_context *ctx = fc->fs_private;
if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
return get_tree_bdev(fc, erofs_fc_fill_super);
}
static int erofs_fc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fs_context *ctx = fc->fs_private;
DBG_BUGON(!sb_rdonly(sb));
if (ctx->fsid || ctx->domain_id)
erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
if (test_opt(&ctx->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
fc->sb_flags &= ~SB_POSIXACL;
sbi->opt = ctx->opt;
fc->sb_flags |= SB_RDONLY;
return 0;
}
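/*
 * Since EROFS is read-only, reconfiguration only swaps mount options in
 * and re-asserts SB_RDONLY. An illustrative remount (option names as
 * printed by erofs_show_options() below):
 *
 *   $ mount -o remount,noacl /mnt/erofs
 */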
static int erofs_release_device_info(int id, void *ptr, void *data)
{
struct erofs_device_info *dif = ptr;
fs_put_dax(dif->dax_dev, NULL);
if (dif->bdev)
blkdev_put(dif->bdev, &erofs_fs_type);
erofs_fscache_unregister_cookie(dif->fscache);
dif->fscache = NULL;
kfree(dif->path);
kfree(dif);
return 0;
}
static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
if (!devs)
return;
idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
idr_destroy(&devs->tree);
kfree(devs);
}
static void erofs_fc_free(struct fs_context *fc)
{
struct erofs_fs_context *ctx = fc->fs_private;
erofs_free_dev_context(ctx->devs);
kfree(ctx->fsid);
kfree(ctx->domain_id);
kfree(ctx);
}
static const struct fs_context_operations erofs_context_ops = {
.parse_param = erofs_fc_parse_param,
.get_tree = erofs_fc_get_tree,
.reconfigure = erofs_fc_reconfigure,
.free = erofs_fc_free,
};
static const struct fs_context_operations erofs_anon_context_ops = {
.get_tree = erofs_fc_anon_get_tree,
};
static int erofs_init_fs_context(struct fs_context *fc)
{
struct erofs_fs_context *ctx;
/* pseudo mount for anon inodes */
if (fc->sb_flags & SB_KERNMOUNT) {
fc->ops = &erofs_anon_context_ops;
return 0;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
if (!ctx->devs) {
kfree(ctx);
return -ENOMEM;
}
fc->fs_private = ctx;
idr_init(&ctx->devs->tree);
init_rwsem(&ctx->devs->rwsem);
erofs_default_options(ctx);
fc->ops = &erofs_context_ops;
return 0;
}
static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi;
/* pseudo mount for anon inodes */
if (sb->s_flags & SB_KERNMOUNT) {
kill_anon_super(sb);
return;
}
if (erofs_is_fscache_mode(sb))
kill_anon_super(sb);
else
kill_block_super(sb);
sbi = EROFS_SB(sb);
if (!sbi)
return;
erofs_free_dev_context(sbi->devs);
fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
kfree(sbi->fsid);
kfree(sbi->domain_id);
kfree(sbi);
sb->s_fs_info = NULL;
}
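/*
 * erofs_kill_sb() above must cope with mounts that failed before
 * sb->s_root existed, hence the NULL sbi check; erofs_put_super() below
 * only runs for fully set-up superblocks and clears the fields it
 * releases (e.g. ->devs), so the later kill_sb calls become no-ops.
 */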
static void erofs_put_super(struct super_block *sb)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
DBG_BUGON(!sbi);
erofs_unregister_sysfs(sb);
erofs_shrinker_unregister(sb);
erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
iput(sbi->managed_cache);
sbi->managed_cache = NULL;
#endif
iput(sbi->packed_inode);
sbi->packed_inode = NULL;
erofs_free_dev_context(sbi->devs);
sbi->devs = NULL;
erofs_fscache_unregister_fs(sb);
}
struct file_system_type erofs_fs_type = {
.owner = THIS_MODULE,
.name = "erofs",
.init_fs_context = erofs_init_fs_context,
.kill_sb = erofs_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");
static int __init erofs_module_init(void)
{
int err;
erofs_check_ondisk_layout_definitions();
erofs_inode_cachep = kmem_cache_create("erofs_inode",
sizeof(struct erofs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
erofs_inode_init_once);
if (!erofs_inode_cachep)
return -ENOMEM;
err = erofs_init_shrinker();
if (err)
goto shrinker_err;
err = z_erofs_lzma_init();
if (err)
goto lzma_err;
err = z_erofs_deflate_init();
if (err)
goto deflate_err;
erofs_pcpubuf_init();
err = z_erofs_init_zip_subsystem();
if (err)
goto zip_err;
err = erofs_init_sysfs();
if (err)
goto sysfs_err;
err = register_filesystem(&erofs_fs_type);
if (err)
goto fs_err;
return 0;
fs_err:
erofs_exit_sysfs();
sysfs_err:
z_erofs_exit_zip_subsystem();
zip_err:
z_erofs_deflate_exit();
deflate_err:
z_erofs_lzma_exit();
lzma_err:
erofs_exit_shrinker();
shrinker_err:
kmem_cache_destroy(erofs_inode_cachep);
return err;
}
static void __exit erofs_module_exit(void)
{
unregister_filesystem(&erofs_fs_type);
/* Ensure all RCU-freed inodes / pclusters are safe to be destroyed. */
rcu_barrier();
erofs_exit_sysfs();
z_erofs_exit_zip_subsystem();
z_erofs_deflate_exit();
z_erofs_lzma_exit();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
erofs_pcpubuf_exit();
}
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
u64 id = 0;
if (!erofs_is_fscache_mode(sb))
id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = sb->s_magic;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->total_blocks;
buf->f_bfree = buf->f_bavail = 0;
buf->f_files = ULLONG_MAX;
buf->f_ffree = ULLONG_MAX - sbi->inos;
buf->f_namelen = EROFS_NAME_LEN;
buf->f_fsid = u64_to_fsid(id);
return 0;
}
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
struct erofs_mount_opts *opt = &sbi->opt;
#ifdef CONFIG_EROFS_FS_XATTR
if (test_opt(opt, XATTR_USER))
seq_puts(seq, ",user_xattr");
else
seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (test_opt(opt, POSIX_ACL))
seq_puts(seq, ",acl");
else
seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
seq_puts(seq, ",cache_strategy=disabled");
else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
seq_puts(seq, ",cache_strategy=readahead");
else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
seq_puts(seq, ",cache_strategy=readaround");
#endif
if (test_opt(opt, DAX_ALWAYS))
seq_puts(seq, ",dax=always");
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (sbi->fsid)
seq_printf(seq, ",fsid=%s", sbi->fsid);
if (sbi->domain_id)
seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
return 0;
}
const struct super_operations erofs_sops = {
.put_super = erofs_put_super,
.alloc_inode = erofs_alloc_inode,
.free_inode = erofs_free_inode,
.statfs = erofs_statfs,
.show_options = erofs_show_options,
};
module_init(erofs_module_init);
module_exit(erofs_module_exit);
MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");
| linux-master | fs/erofs/super.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022, Alibaba Cloud
* Copyright (C) 2022, Bytedance Inc. All rights reserved.
*/
#include <linux/fscache.h>
#include "internal.h"
static DEFINE_MUTEX(erofs_domain_list_lock);
static DEFINE_MUTEX(erofs_domain_cookies_lock);
static LIST_HEAD(erofs_domain_list);
static LIST_HEAD(erofs_domain_cookies_list);
static struct vfsmount *erofs_pseudo_mnt;
struct erofs_fscache_request {
struct erofs_fscache_request *primary;
struct netfs_cache_resources cache_resources;
struct address_space *mapping; /* The mapping being accessed */
loff_t start; /* Start position */
size_t len; /* Length of the request */
size_t submitted; /* Length submitted so far */
short error; /* 0 or error that occurred */
refcount_t ref;
};
static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping,
loff_t start, size_t len)
{
struct erofs_fscache_request *req;
req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
req->mapping = mapping;
req->start = start;
req->len = len;
refcount_set(&req->ref, 1);
return req;
}
static struct erofs_fscache_request *erofs_fscache_req_chain(struct erofs_fscache_request *primary,
size_t len)
{
struct erofs_fscache_request *req;
/* use primary request for the first submission */
if (!primary->submitted) {
refcount_inc(&primary->ref);
return primary;
}
req = erofs_fscache_req_alloc(primary->mapping,
primary->start + primary->submitted, len);
if (!IS_ERR(req)) {
req->primary = primary;
refcount_inc(&primary->ref);
}
return req;
}
static void erofs_fscache_req_complete(struct erofs_fscache_request *req)
{
struct folio *folio;
bool failed = req->error;
pgoff_t start_page = req->start / PAGE_SIZE;
pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
XA_STATE(xas, &req->mapping->i_pages, start_page);
rcu_read_lock();
xas_for_each(&xas, folio, last_page) {
if (xas_retry(&xas, folio))
continue;
if (!failed)
folio_mark_uptodate(folio);
folio_unlock(folio);
}
rcu_read_unlock();
}
static void erofs_fscache_req_put(struct erofs_fscache_request *req)
{
if (refcount_dec_and_test(&req->ref)) {
if (req->cache_resources.ops)
req->cache_resources.ops->end_operation(&req->cache_resources);
if (!req->primary)
erofs_fscache_req_complete(req);
else
erofs_fscache_req_put(req->primary);
kfree(req);
}
}
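/*
 * Request lifetime sketch: the submitter holds the initial reference and
 * each in-flight subrequest takes one more; the final put either marks
 * the primary request's folios uptodate and unlocks them, or, for a
 * chained request, drops its reference on ->primary instead.
 */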
static void erofs_fscache_subreq_complete(void *priv,
ssize_t transferred_or_error, bool was_async)
{
struct erofs_fscache_request *req = priv;
if (IS_ERR_VALUE(transferred_or_error)) {
if (req->primary)
req->primary->error = transferred_or_error;
else
req->error = transferred_or_error;
}
erofs_fscache_req_put(req);
}
/*
* Read data from fscache (cookie, pstart, len), and fill the read data into
 * page cache described by (req->mapping, lstart, len). @pstart describes the
* start physical address in the cache file.
*/
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
struct erofs_fscache_request *req, loff_t pstart, size_t len)
{
enum netfs_io_source source;
struct super_block *sb = req->mapping->host->i_sb;
struct netfs_cache_resources *cres = &req->cache_resources;
struct iov_iter iter;
loff_t lstart = req->start + req->submitted;
size_t done = 0;
int ret;
DBG_BUGON(len > req->len - req->submitted);
ret = fscache_begin_read_operation(cres, cookie);
if (ret)
return ret;
while (done < len) {
loff_t sstart = pstart + done;
size_t slen = len - done;
unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
source = cres->ops->prepare_ondemand_read(cres,
sstart, &slen, LLONG_MAX, &flags, 0);
if (WARN_ON(slen == 0))
source = NETFS_INVALID_READ;
if (source != NETFS_READ_FROM_CACHE) {
erofs_err(sb, "failed to fscache prepare_read (source %d)", source);
return -EIO;
}
refcount_inc(&req->ref);
iov_iter_xarray(&iter, ITER_DEST, &req->mapping->i_pages,
lstart + done, slen);
ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL,
erofs_fscache_subreq_complete, req);
if (ret == -EIOCBQUEUED)
ret = 0;
if (ret) {
erofs_err(sb, "failed to fscache_read (ret %d)", ret);
return ret;
}
done += slen;
}
DBG_BUGON(done != len);
return 0;
}
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
int ret;
struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private;
struct erofs_fscache_request *req;
req = erofs_fscache_req_alloc(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
if (IS_ERR(req)) {
folio_unlock(folio);
return PTR_ERR(req);
}
ret = erofs_fscache_read_folios_async(ctx->cookie, req,
folio_pos(folio), folio_size(folio));
if (ret)
req->error = ret;
erofs_fscache_req_put(req);
return ret;
}
static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
{
struct address_space *mapping = primary->mapping;
struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb;
struct erofs_fscache_request *req;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
struct iov_iter iter;
loff_t pos = primary->start + primary->submitted;
size_t count;
int ret;
map.m_la = pos;
ret = erofs_map_blocks(inode, &map);
if (ret)
return ret;
if (map.m_flags & EROFS_MAP_META) {
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
erofs_blk_t blknr;
size_t offset, size;
void *src;
/* For tail packing layout, the offset may be non-zero. */
offset = erofs_blkoff(sb, map.m_pa);
blknr = erofs_blknr(sb, map.m_pa);
size = map.m_llen;
src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
if (IS_ERR(src))
return PTR_ERR(src);
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
if (copy_to_iter(src + offset, size, &iter) != size) {
erofs_put_metabuf(&buf);
return -EFAULT;
}
iov_iter_zero(PAGE_SIZE - size, &iter);
erofs_put_metabuf(&buf);
primary->submitted += PAGE_SIZE;
return 0;
}
count = primary->len - primary->submitted;
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
iov_iter_zero(count, &iter);
primary->submitted += count;
return 0;
}
count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
DBG_BUGON(!count || count % PAGE_SIZE);
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
};
ret = erofs_map_dev(sb, &mdev);
if (ret)
return ret;
req = erofs_fscache_req_chain(primary, count);
if (IS_ERR(req))
return PTR_ERR(req);
ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
req, mdev.m_pa + (pos - map.m_la), count);
erofs_fscache_req_put(req);
primary->submitted += count;
return ret;
}
static int erofs_fscache_data_read(struct erofs_fscache_request *req)
{
int ret;
do {
ret = erofs_fscache_data_read_slice(req);
if (ret)
req->error = ret;
} while (!ret && req->submitted < req->len);
return ret;
}
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
struct erofs_fscache_request *req;
int ret;
req = erofs_fscache_req_alloc(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
if (IS_ERR(req)) {
folio_unlock(folio);
return PTR_ERR(req);
}
ret = erofs_fscache_data_read(req);
erofs_fscache_req_put(req);
return ret;
}
static void erofs_fscache_readahead(struct readahead_control *rac)
{
struct erofs_fscache_request *req;
if (!readahead_count(rac))
return;
req = erofs_fscache_req_alloc(rac->mapping,
readahead_pos(rac), readahead_length(rac));
if (IS_ERR(req))
return;
/* The request completion will drop refs on the folios. */
while (readahead_folio(rac))
;
erofs_fscache_data_read(req);
erofs_fscache_req_put(req);
}
static const struct address_space_operations erofs_fscache_meta_aops = {
.read_folio = erofs_fscache_meta_read_folio,
};
const struct address_space_operations erofs_fscache_access_aops = {
.read_folio = erofs_fscache_read_folio,
.readahead = erofs_fscache_readahead,
};
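/*
 * Two sets of aops are exposed above: the meta variant reads straight
 * from a blob cookie, while the data variant walks erofs_map_blocks()
 * slice by slice so that inline (tail-packed) extents, holes and mapped
 * extents are each filled into the page cache appropriately.
 */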
static void erofs_fscache_domain_put(struct erofs_domain *domain)
{
mutex_lock(&erofs_domain_list_lock);
if (refcount_dec_and_test(&domain->ref)) {
list_del(&domain->list);
if (list_empty(&erofs_domain_list)) {
kern_unmount(erofs_pseudo_mnt);
erofs_pseudo_mnt = NULL;
}
fscache_relinquish_volume(domain->volume, NULL, false);
mutex_unlock(&erofs_domain_list_lock);
kfree(domain->domain_id);
kfree(domain);
return;
}
mutex_unlock(&erofs_domain_list_lock);
}
static int erofs_fscache_register_volume(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
char *domain_id = sbi->domain_id;
struct fscache_volume *volume;
char *name;
int ret = 0;
name = kasprintf(GFP_KERNEL, "erofs,%s",
domain_id ? domain_id : sbi->fsid);
if (!name)
return -ENOMEM;
volume = fscache_acquire_volume(name, NULL, NULL, 0);
if (IS_ERR_OR_NULL(volume)) {
erofs_err(sb, "failed to register volume for %s", name);
ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
volume = NULL;
}
sbi->volume = volume;
kfree(name);
return ret;
}
static int erofs_fscache_init_domain(struct super_block *sb)
{
int err;
struct erofs_domain *domain;
struct erofs_sb_info *sbi = EROFS_SB(sb);
domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
if (!domain)
return -ENOMEM;
domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
if (!domain->domain_id) {
kfree(domain);
return -ENOMEM;
}
err = erofs_fscache_register_volume(sb);
if (err)
goto out;
if (!erofs_pseudo_mnt) {
erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
if (IS_ERR(erofs_pseudo_mnt)) {
err = PTR_ERR(erofs_pseudo_mnt);
goto out;
}
}
domain->volume = sbi->volume;
refcount_set(&domain->ref, 1);
list_add(&domain->list, &erofs_domain_list);
sbi->domain = domain;
return 0;
out:
kfree(domain->domain_id);
kfree(domain);
return err;
}
static int erofs_fscache_register_domain(struct super_block *sb)
{
int err;
struct erofs_domain *domain;
struct erofs_sb_info *sbi = EROFS_SB(sb);
mutex_lock(&erofs_domain_list_lock);
list_for_each_entry(domain, &erofs_domain_list, list) {
if (!strcmp(domain->domain_id, sbi->domain_id)) {
sbi->domain = domain;
sbi->volume = domain->volume;
refcount_inc(&domain->ref);
mutex_unlock(&erofs_domain_list_lock);
return 0;
}
}
err = erofs_fscache_init_domain(sb);
mutex_unlock(&erofs_domain_list_lock);
return err;
}
static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
char *name, unsigned int flags)
{
struct fscache_volume *volume = EROFS_SB(sb)->volume;
struct erofs_fscache *ctx;
struct fscache_cookie *cookie;
struct super_block *isb;
struct inode *inode;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ctx->node);
refcount_set(&ctx->ref, 1);
cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
name, strlen(name), NULL, 0, 0);
if (!cookie) {
erofs_err(sb, "failed to get cookie for %s", name);
ret = -EINVAL;
goto err;
}
fscache_use_cookie(cookie, false);
/*
* Allocate anonymous inode in global pseudo mount for shareable blobs,
* so that they are accessible among erofs fs instances.
*/
isb = flags & EROFS_REG_COOKIE_SHARE ? erofs_pseudo_mnt->mnt_sb : sb;
inode = new_inode(isb);
if (!inode) {
erofs_err(sb, "failed to get anon inode for %s", name);
ret = -ENOMEM;
goto err_cookie;
}
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
inode->i_blkbits = EROFS_SB(sb)->blkszbits;
inode->i_private = ctx;
ctx->cookie = cookie;
ctx->inode = inode;
return ctx;
err_cookie:
fscache_unuse_cookie(cookie, NULL, NULL);
fscache_relinquish_cookie(cookie, false);
err:
kfree(ctx);
return ERR_PTR(ret);
}
static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
{
fscache_unuse_cookie(ctx->cookie, NULL, NULL);
fscache_relinquish_cookie(ctx->cookie, false);
iput(ctx->inode);
kfree(ctx->name);
kfree(ctx);
}
static struct erofs_fscache *erofs_domain_init_cookie(struct super_block *sb,
char *name, unsigned int flags)
{
struct erofs_fscache *ctx;
struct erofs_domain *domain = EROFS_SB(sb)->domain;
ctx = erofs_fscache_acquire_cookie(sb, name, flags);
if (IS_ERR(ctx))
return ctx;
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name) {
erofs_fscache_relinquish_cookie(ctx);
return ERR_PTR(-ENOMEM);
}
refcount_inc(&domain->ref);
ctx->domain = domain;
list_add(&ctx->node, &erofs_domain_cookies_list);
return ctx;
}
static struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
char *name, unsigned int flags)
{
struct erofs_fscache *ctx;
struct erofs_domain *domain = EROFS_SB(sb)->domain;
flags |= EROFS_REG_COOKIE_SHARE;
mutex_lock(&erofs_domain_cookies_lock);
list_for_each_entry(ctx, &erofs_domain_cookies_list, node) {
if (ctx->domain != domain || strcmp(ctx->name, name))
continue;
if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
refcount_inc(&ctx->ref);
} else {
erofs_err(sb, "%s already exists in domain %s", name,
domain->domain_id);
ctx = ERR_PTR(-EEXIST);
}
mutex_unlock(&erofs_domain_cookies_lock);
return ctx;
}
ctx = erofs_domain_init_cookie(sb, name, flags);
mutex_unlock(&erofs_domain_cookies_lock);
return ctx;
}
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
char *name,
unsigned int flags)
{
if (EROFS_SB(sb)->domain_id)
return erofs_domain_register_cookie(sb, name, flags);
return erofs_fscache_acquire_cookie(sb, name, flags);
}
void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
{
struct erofs_domain *domain = NULL;
if (!ctx)
return;
if (!ctx->domain)
return erofs_fscache_relinquish_cookie(ctx);
mutex_lock(&erofs_domain_cookies_lock);
if (refcount_dec_and_test(&ctx->ref)) {
domain = ctx->domain;
list_del(&ctx->node);
erofs_fscache_relinquish_cookie(ctx);
}
mutex_unlock(&erofs_domain_cookies_lock);
if (domain)
erofs_fscache_domain_put(domain);
}
int erofs_fscache_register_fs(struct super_block *sb)
{
int ret;
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
unsigned int flags = 0;
if (sbi->domain_id)
ret = erofs_fscache_register_domain(sb);
else
ret = erofs_fscache_register_volume(sb);
if (ret)
return ret;
/*
* When shared domain is enabled, using NEED_NOEXIST to guarantee
* the primary data blob (aka fsid) is unique in the shared domain.
*
* For non-shared-domain case, fscache_acquire_volume() invoked by
* erofs_fscache_register_volume() has already guaranteed
* the uniqueness of primary data blob.
*
* Acquired domain/volume will be relinquished in kill_sb() on error.
*/
if (sbi->domain_id)
flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
if (IS_ERR(fscache))
return PTR_ERR(fscache);
sbi->s_fscache = fscache;
return 0;
}
void erofs_fscache_unregister_fs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
erofs_fscache_unregister_cookie(sbi->s_fscache);
if (sbi->domain)
erofs_fscache_domain_put(sbi->domain);
else
fscache_relinquish_volume(sbi->volume, NULL, false);
sbi->s_fscache = NULL;
sbi->volume = NULL;
sbi->domain = NULL;
}
| linux-master | fs/erofs/fscache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C), 2008-2021, OPPO Mobile Comm Corp., Ltd.
* https://www.oppo.com/
*/
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include "internal.h"
enum {
attr_feature,
attr_pointer_ui,
attr_pointer_bool,
};
enum {
struct_erofs_sb_info,
struct_erofs_mount_opts,
};
struct erofs_attr {
struct attribute attr;
short attr_id;
int struct_type, offset;
};
#define EROFS_ATTR(_name, _mode, _id) \
static struct erofs_attr erofs_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.attr_id = attr_##_id, \
}
#define EROFS_ATTR_FUNC(_name, _mode) EROFS_ATTR(_name, _mode, _name)
#define EROFS_ATTR_FEATURE(_name) EROFS_ATTR(_name, 0444, feature)
#define EROFS_ATTR_OFFSET(_name, _mode, _id, _struct) \
static struct erofs_attr erofs_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.attr_id = attr_##_id, \
.struct_type = struct_##_struct, \
.offset = offsetof(struct _struct, _name),\
}
#define EROFS_ATTR_RW(_name, _id, _struct) \
EROFS_ATTR_OFFSET(_name, 0644, _id, _struct)
#define EROFS_RO_ATTR(_name, _id, _struct) \
EROFS_ATTR_OFFSET(_name, 0444, _id, _struct)
#define EROFS_ATTR_RW_UI(_name, _struct) \
EROFS_ATTR_RW(_name, pointer_ui, _struct)
#define EROFS_ATTR_RW_BOOL(_name, _struct) \
EROFS_ATTR_RW(_name, pointer_bool, _struct)
#define ATTR_LIST(name) (&erofs_attr_##name.attr)
#ifdef CONFIG_EROFS_FS_ZIP
EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts);
#endif
static struct attribute *erofs_attrs[] = {
#ifdef CONFIG_EROFS_FS_ZIP
ATTR_LIST(sync_decompress),
#endif
NULL,
};
ATTRIBUTE_GROUPS(erofs);
/* Features this copy of erofs supports */
EROFS_ATTR_FEATURE(zero_padding);
EROFS_ATTR_FEATURE(compr_cfgs);
EROFS_ATTR_FEATURE(big_pcluster);
EROFS_ATTR_FEATURE(chunked_file);
EROFS_ATTR_FEATURE(device_table);
EROFS_ATTR_FEATURE(compr_head2);
EROFS_ATTR_FEATURE(sb_chksum);
EROFS_ATTR_FEATURE(ztailpacking);
EROFS_ATTR_FEATURE(fragments);
EROFS_ATTR_FEATURE(dedupe);
static struct attribute *erofs_feat_attrs[] = {
ATTR_LIST(zero_padding),
ATTR_LIST(compr_cfgs),
ATTR_LIST(big_pcluster),
ATTR_LIST(chunked_file),
ATTR_LIST(device_table),
ATTR_LIST(compr_head2),
ATTR_LIST(sb_chksum),
ATTR_LIST(ztailpacking),
ATTR_LIST(fragments),
ATTR_LIST(dedupe),
NULL,
};
ATTRIBUTE_GROUPS(erofs_feat);
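/*
 * Given the kset/kobject wiring below, these groups surface as
 * /sys/fs/erofs/features/<name> (per-feature, read-only) and
 * /sys/fs/erofs/<device or fsid>/<name> (per-filesystem tunables).
 */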
static unsigned char *__struct_ptr(struct erofs_sb_info *sbi,
int struct_type, int offset)
{
if (struct_type == struct_erofs_sb_info)
return (unsigned char *)sbi + offset;
if (struct_type == struct_erofs_mount_opts)
return (unsigned char *)&sbi->opt + offset;
return NULL;
}
static ssize_t erofs_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
s_kobj);
struct erofs_attr *a = container_of(attr, struct erofs_attr, attr);
unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset);
switch (a->attr_id) {
case attr_feature:
return sysfs_emit(buf, "supported\n");
case attr_pointer_ui:
if (!ptr)
return 0;
return sysfs_emit(buf, "%u\n", *(unsigned int *)ptr);
case attr_pointer_bool:
if (!ptr)
return 0;
return sysfs_emit(buf, "%d\n", *(bool *)ptr);
}
return 0;
}
static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t len)
{
struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
s_kobj);
struct erofs_attr *a = container_of(attr, struct erofs_attr, attr);
unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset);
unsigned long t;
int ret;
switch (a->attr_id) {
case attr_pointer_ui:
if (!ptr)
return 0;
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
if (t != (unsigned int)t)
return -ERANGE;
#ifdef CONFIG_EROFS_FS_ZIP
if (!strcmp(a->attr.name, "sync_decompress") &&
(t > EROFS_SYNC_DECOMPRESS_FORCE_OFF))
return -EINVAL;
#endif
*(unsigned int *)ptr = t;
return len;
case attr_pointer_bool:
if (!ptr)
return 0;
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
if (t != 0 && t != 1)
return -EINVAL;
*(bool *)ptr = !!t;
return len;
}
return 0;
}
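/*
 * Illustrative usage from userspace (the value range is checked above;
 * 2 corresponds to EROFS_SYNC_DECOMPRESS_FORCE_OFF):
 *
 *   $ echo 2 > /sys/fs/erofs/<device>/sync_decompress
 */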
static void erofs_sb_release(struct kobject *kobj)
{
struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
s_kobj);
complete(&sbi->s_kobj_unregister);
}
static const struct sysfs_ops erofs_attr_ops = {
.show = erofs_attr_show,
.store = erofs_attr_store,
};
static const struct kobj_type erofs_sb_ktype = {
.default_groups = erofs_groups,
.sysfs_ops = &erofs_attr_ops,
.release = erofs_sb_release,
};
static const struct kobj_type erofs_ktype = {
.sysfs_ops = &erofs_attr_ops,
};
static struct kset erofs_root = {
.kobj = {.ktype = &erofs_ktype},
};
static const struct kobj_type erofs_feat_ktype = {
.default_groups = erofs_feat_groups,
.sysfs_ops = &erofs_attr_ops,
};
static struct kobject erofs_feat = {
.kset = &erofs_root,
};
int erofs_register_sysfs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
char *name;
char *str = NULL;
int err;
if (erofs_is_fscache_mode(sb)) {
if (sbi->domain_id) {
str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id,
sbi->fsid);
if (!str)
return -ENOMEM;
name = str;
} else {
name = sbi->fsid;
}
} else {
name = sb->s_id;
}
sbi->s_kobj.kset = &erofs_root;
init_completion(&sbi->s_kobj_unregister);
err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", name);
kfree(str);
if (err)
goto put_sb_kobj;
return 0;
put_sb_kobj:
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
return err;
}
void erofs_unregister_sysfs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
if (sbi->s_kobj.state_in_sysfs) {
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
}
}
int __init erofs_init_sysfs(void)
{
int ret;
kobject_set_name(&erofs_root.kobj, "erofs");
erofs_root.kobj.parent = fs_kobj;
ret = kset_register(&erofs_root);
if (ret)
goto root_err;
ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype,
NULL, "features");
if (ret)
goto feat_err;
return ret;
feat_err:
kobject_put(&erofs_feat);
kset_unregister(&erofs_root);
root_err:
return ret;
}
void erofs_exit_sysfs(void)
{
kobject_put(&erofs_feat);
kset_unregister(&erofs_root);
}
| linux-master | fs/erofs/sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2021-2022, Alibaba Cloud
*/
#include <linux/security.h>
#include <linux/xxhash.h>
#include "xattr.h"
struct erofs_xattr_iter {
struct super_block *sb;
struct erofs_buf buf;
erofs_off_t pos;
void *kaddr;
char *buffer;
int buffer_size, buffer_ofs;
/* getxattr */
int index, infix_len;
struct qstr name;
/* listxattr */
struct dentry *dentry;
};
static int erofs_init_inode_xattrs(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct erofs_xattr_iter it;
unsigned int i;
struct erofs_xattr_ibody_header *ih;
struct super_block *sb = inode->i_sb;
int ret = 0;
/* In most cases, the xattrs of this inode have already been initialized. */
if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
/*
* paired with smp_mb() at the end of the function to ensure
* fields will only be observed after the bit is set.
*/
smp_mb();
return 0;
}
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
/* someone has initialized xattrs for us? */
if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
goto out_unlock;
/*
* bypass all xattr operations if ->xattr_isize is not greater than
* sizeof(struct erofs_xattr_ibody_header), in detail:
 * 1) it is not large enough to contain erofs_xattr_ibody_header, in which
 * case ->xattr_isize should be 0 (meaning no xattrs);
 * 2) it contains exactly one erofs_xattr_ibody_header, whose on-disk
 * layout is undefined right now (it may be used later with some new sb
 * feature).
*/
if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
erofs_err(sb,
"xattr_isize %d of nid %llu is not supported yet",
vi->xattr_isize, vi->nid);
ret = -EOPNOTSUPP;
goto out_unlock;
} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
if (vi->xattr_isize) {
erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
DBG_BUGON(1);
ret = -EFSCORRUPTED;
goto out_unlock; /* xattr ondisk layout error */
}
ret = -ENOATTR;
goto out_unlock;
}
it.buf = __EROFS_BUF_INITIALIZER;
erofs_init_metabuf(&it.buf, sb);
it.pos = erofs_iloc(inode) + vi->inode_isize;
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP);
if (IS_ERR(it.kaddr)) {
ret = PTR_ERR(it.kaddr);
goto out_unlock;
}
ih = it.kaddr + erofs_blkoff(sb, it.pos);
vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
vi->xattr_shared_count = ih->h_shared_count;
vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
sizeof(uint), GFP_KERNEL);
if (!vi->xattr_shared_xattrs) {
erofs_put_metabuf(&it.buf);
ret = -ENOMEM;
goto out_unlock;
}
/* let's skip ibody header */
it.pos += sizeof(struct erofs_xattr_ibody_header);
for (i = 0; i < vi->xattr_shared_count; ++i) {
it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos),
EROFS_KMAP);
if (IS_ERR(it.kaddr)) {
kfree(vi->xattr_shared_xattrs);
vi->xattr_shared_xattrs = NULL;
ret = PTR_ERR(it.kaddr);
goto out_unlock;
}
vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)
(it.kaddr + erofs_blkoff(sb, it.pos)));
it.pos += sizeof(__le32);
}
erofs_put_metabuf(&it.buf);
/* paired with smp_mb() at the beginning of the function. */
smp_mb();
set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
return ret;
}
static bool erofs_xattr_user_list(struct dentry *dentry)
{
return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
return capable(CAP_SYS_ADMIN);
}
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
if (handler->flags == EROFS_XATTR_INDEX_USER &&
!test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
return -EOPNOTSUPP;
return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
const struct xattr_handler erofs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.flags = EROFS_XATTR_INDEX_USER,
.list = erofs_xattr_user_list,
.get = erofs_xattr_generic_get,
};
const struct xattr_handler erofs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.flags = EROFS_XATTR_INDEX_TRUSTED,
.list = erofs_xattr_trusted_list,
.get = erofs_xattr_generic_get,
};
#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.flags = EROFS_XATTR_INDEX_SECURITY,
.get = erofs_xattr_generic_get,
};
#endif
const struct xattr_handler *erofs_xattr_handlers[] = {
&erofs_xattr_user_handler,
&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
&erofs_xattr_security_handler,
#endif
NULL,
};
static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
unsigned int len)
{
unsigned int slice, processed;
struct super_block *sb = it->sb;
void *src;
for (processed = 0; processed < len; processed += slice) {
it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
EROFS_KMAP);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
src = it->kaddr + erofs_blkoff(sb, it->pos);
slice = min_t(unsigned int, sb->s_blocksize -
erofs_blkoff(sb, it->pos), len - processed);
memcpy(it->buffer + it->buffer_ofs, src, slice);
it->buffer_ofs += slice;
it->pos += slice;
}
return 0;
}
static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
{
struct erofs_xattr_entry entry;
unsigned int base_index, name_total, prefix_len, infix_len = 0;
const char *prefix, *infix = NULL;
int err;
/* 1. handle xattr entry */
entry = *(struct erofs_xattr_entry *)
(it->kaddr + erofs_blkoff(it->sb, it->pos));
it->pos += sizeof(struct erofs_xattr_entry);
base_index = entry.e_name_index;
if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
struct erofs_sb_info *sbi = EROFS_SB(it->sb);
struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
return 0;
infix = pf->prefix->infix;
infix_len = pf->infix_len;
base_index = pf->prefix->base_index;
}
prefix = erofs_xattr_prefix(base_index, it->dentry);
if (!prefix)
return 0;
prefix_len = strlen(prefix);
name_total = prefix_len + infix_len + entry.e_name_len + 1;
if (!it->buffer) {
it->buffer_ofs += name_total;
return 0;
}
if (it->buffer_ofs + name_total > it->buffer_size)
return -ERANGE;
memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
it->buffer_ofs += prefix_len + infix_len;
/* 2. handle xattr name */
err = erofs_xattr_copy_to_buffer(it, entry.e_name_len);
if (err)
return err;
it->buffer[it->buffer_ofs++] = '\0';
return 0;
}
static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
{
struct super_block *sb = it->sb;
struct erofs_xattr_entry entry;
unsigned int slice, processed, value_sz;
/* 1. handle xattr entry */
entry = *(struct erofs_xattr_entry *)
(it->kaddr + erofs_blkoff(sb, it->pos));
it->pos += sizeof(struct erofs_xattr_entry);
value_sz = le16_to_cpu(entry.e_value_size);
/* should also match the infix for long name prefixes */
if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
return -ENOATTR;
if (it->index != pf->prefix->base_index ||
it->name.len != entry.e_name_len + pf->infix_len)
return -ENOATTR;
if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
return -ENOATTR;
it->infix_len = pf->infix_len;
} else {
if (it->index != entry.e_name_index ||
it->name.len != entry.e_name_len)
return -ENOATTR;
it->infix_len = 0;
}
/* 2. handle xattr name */
for (processed = 0; processed < entry.e_name_len; processed += slice) {
it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
EROFS_KMAP);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
slice = min_t(unsigned int,
sb->s_blocksize - erofs_blkoff(sb, it->pos),
entry.e_name_len - processed);
if (memcmp(it->name.name + it->infix_len + processed,
it->kaddr + erofs_blkoff(sb, it->pos), slice))
return -ENOATTR;
it->pos += slice;
}
/* 3. handle xattr value */
if (!it->buffer) {
it->buffer_ofs = value_sz;
return 0;
}
if (it->buffer_size < value_sz)
return -ERANGE;
return erofs_xattr_copy_to_buffer(it, value_sz);
}
static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
struct inode *inode, bool getxattr)
{
struct erofs_inode *const vi = EROFS_I(inode);
unsigned int xattr_header_sz, remaining, entry_sz;
erofs_off_t next_pos;
int ret;
xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
sizeof(u32) * vi->xattr_shared_count;
if (xattr_header_sz >= vi->xattr_isize) {
DBG_BUGON(xattr_header_sz > vi->xattr_isize);
return -ENOATTR;
}
remaining = vi->xattr_isize - xattr_header_sz;
it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
while (remaining) {
it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos),
EROFS_KMAP);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
entry_sz = erofs_xattr_entry_size(it->kaddr +
erofs_blkoff(it->sb, it->pos));
/* xattr on-disk corruption: xattr entry beyond xattr_isize */
if (remaining < entry_sz) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
remaining -= entry_sz;
next_pos = it->pos + entry_sz;
if (getxattr)
ret = erofs_getxattr_foreach(it);
else
ret = erofs_listxattr_foreach(it);
if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
break;
it->pos = next_pos;
}
return ret;
}
static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
struct inode *inode, bool getxattr)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = it->sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
unsigned int i;
int ret = -ENOATTR;
for (i = 0; i < vi->xattr_shared_count; ++i) {
it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
vi->xattr_shared_xattrs[i] * sizeof(__le32);
it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
EROFS_KMAP);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
if (getxattr)
ret = erofs_getxattr_foreach(it);
else
ret = erofs_listxattr_foreach(it);
if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
break;
}
return ret;
}
int erofs_getxattr(struct inode *inode, int index, const char *name,
void *buffer, size_t buffer_size)
{
int ret;
unsigned int hashbit;
struct erofs_xattr_iter it;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
if (!name)
return -EINVAL;
ret = erofs_init_inode_xattrs(inode);
if (ret)
return ret;
/* reserved flag is non-zero if there's any change of on-disk format */
if (erofs_sb_has_xattr_filter(sbi) && !sbi->xattr_filter_reserved) {
hashbit = xxh32(name, strlen(name),
EROFS_XATTR_FILTER_SEED + index);
hashbit &= EROFS_XATTR_FILTER_BITS - 1;
if (vi->xattr_name_filter & (1U << hashbit))
return -ENOATTR;
}
it.index = index;
it.name = (struct qstr)QSTR_INIT(name, strlen(name));
if (it.name.len > EROFS_NAME_LEN)
return -ERANGE;
it.sb = inode->i_sb;
it.buf = __EROFS_BUF_INITIALIZER;
erofs_init_metabuf(&it.buf, it.sb);
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
ret = erofs_xattr_iter_inline(&it, inode, true);
if (ret == -ENOATTR)
ret = erofs_xattr_iter_shared(&it, inode, true);
erofs_put_metabuf(&it.buf);
return ret ? ret : it.buffer_ofs;
}
ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int ret;
struct erofs_xattr_iter it;
struct inode *inode = d_inode(dentry);
ret = erofs_init_inode_xattrs(inode);
if (ret == -ENOATTR)
return 0;
if (ret)
return ret;
it.sb = dentry->d_sb;
it.buf = __EROFS_BUF_INITIALIZER;
erofs_init_metabuf(&it.buf, it.sb);
it.dentry = dentry;
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
ret = erofs_xattr_iter_inline(&it, inode, false);
if (!ret || ret == -ENOATTR)
ret = erofs_xattr_iter_shared(&it, inode, false);
if (ret == -ENOATTR)
ret = 0;
erofs_put_metabuf(&it.buf);
return ret ? ret : it.buffer_ofs;
}
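/*
 * Both lookups above follow the usual two-call xattr convention: pass a
 * NULL buffer to learn the required size (accumulated in buffer_ofs),
 * then call again with a buffer at least that large, otherwise -ERANGE
 * is returned.
 */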
void erofs_xattr_prefixes_cleanup(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
int i;
if (sbi->xattr_prefixes) {
for (i = 0; i < sbi->xattr_prefix_count; i++)
kfree(sbi->xattr_prefixes[i].prefix);
kfree(sbi->xattr_prefixes);
sbi->xattr_prefixes = NULL;
}
}
int erofs_xattr_prefixes_init(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
struct erofs_xattr_prefix_item *pfs;
int ret = 0, i, len;
if (!sbi->xattr_prefix_count)
return 0;
pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
if (!pfs)
return -ENOMEM;
if (sbi->packed_inode)
buf.inode = sbi->packed_inode;
else
erofs_init_metabuf(&buf, sb);
for (i = 0; i < sbi->xattr_prefix_count; i++) {
void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
break;
} else if (len < sizeof(*pfs->prefix) ||
len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
kfree(ptr);
ret = -EFSCORRUPTED;
break;
}
pfs[i].prefix = ptr;
pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
}
erofs_put_metabuf(&buf);
sbi->xattr_prefixes = pfs;
if (ret)
erofs_xattr_prefixes_cleanup(sb);
return ret;
}
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
struct posix_acl *acl;
int prefix, rc;
char *value = NULL;
if (rcu)
return ERR_PTR(-ECHILD);
switch (type) {
case ACL_TYPE_ACCESS:
prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
break;
default:
return ERR_PTR(-EINVAL);
}
rc = erofs_getxattr(inode, prefix, "", NULL, 0);
if (rc > 0) {
value = kmalloc(rc, GFP_KERNEL);
if (!value)
return ERR_PTR(-ENOMEM);
rc = erofs_getxattr(inode, prefix, "", value, rc);
}
if (rc == -ENOATTR)
acl = NULL;
else if (rc < 0)
acl = ERR_PTR(rc);
else
acl = posix_acl_from_xattr(&init_user_ns, value, rc);
kfree(value);
return acl;
}
#endif
| linux-master | fs/erofs/xattr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2022 Alibaba Cloud
*/
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS 2
/*
 * leave a dedicated type here in case another
 * tagged pointer is introduced later.
*/
typedef void *z_erofs_next_pcluster_t;
struct z_erofs_bvec {
struct page *page;
int offset;
unsigned int end;
};
#define __Z_EROFS_BVSET(name, total) \
struct name { \
/* point to the next page which contains the following bvecs */ \
struct page *nextpage; \
struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
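/*
 * A bvset is therefore a page-sized array of bvecs prefixed by a pointer
 * to the next such page, so per-pcluster bvec storage can grow one page
 * at a time; the inline variant bootstraps the chain with
 * Z_EROFS_INLINE_BVECS entries embedded in the pcluster itself.
 */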
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only
* for everyone else;
*
* L: Field should be protected by the pcluster lock;
*
* A: Field should be accessed / updated in atomic for parallelized code.
*/
struct z_erofs_pcluster {
struct erofs_workgroup obj;
struct mutex lock;
/* A: point to next chained pcluster or TAILs */
z_erofs_next_pcluster_t next;
/* L: the maximum decompression size of this round */
unsigned int length;
/* L: total number of bvecs */
unsigned int vcnt;
/* I: page offset of start position of decompression */
unsigned short pageofs_out;
/* I: page offset of inline compressed data */
unsigned short pageofs_in;
union {
/* L: inline a certain number of bvecs for bootstrap */
struct z_erofs_bvset_inline bvset;
/* I: can be used to free the pcluster by RCU. */
struct rcu_head rcu;
};
union {
/* I: physical cluster size in pages */
unsigned short pclusterpages;
/* I: tailpacking inline compressed size */
unsigned short tailpacking_size;
};
/* I: compression algorithm format */
unsigned char algorithmformat;
/* L: whether partial decompression or not */
bool partial;
/* L: indicate several pageofs_outs or not */
bool multibases;
/* A: compressed bvecs (can be cached or inplaced pages) */
struct z_erofs_bvec compressed_bvecs[];
};
/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
#define Z_EROFS_PCLUSTER_NIL (NULL)
struct z_erofs_decompressqueue {
struct super_block *sb;
atomic_t pending_bios;
z_erofs_next_pcluster_t head;
union {
struct completion done;
struct work_struct work;
struct kthread_work kthread_work;
} u;
bool eio, sync;
};
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
return !pcl->obj.index;
}
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
if (z_erofs_is_inline_pcluster(pcl))
return 1;
return pcl->pclusterpages;
}
/*
* bit 30: I/O error occurred on this page
* bit 0 - 29: remaining parts to complete this page
*/
#define Z_EROFS_PAGE_EIO (1 << 30)
static inline void z_erofs_onlinepage_init(struct page *page)
{
union {
atomic_t o;
unsigned long v;
} u = { .o = ATOMIC_INIT(1) };
set_page_private(page, u.v);
smp_wmb();
SetPagePrivate(page);
}
static inline void z_erofs_onlinepage_split(struct page *page)
{
atomic_inc((atomic_t *)&page->private);
}
static void z_erofs_onlinepage_endio(struct page *page, int err)
{
int orig, v;
DBG_BUGON(!PagePrivate(page));
do {
orig = atomic_read((atomic_t *)&page->private);
v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0);
} while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig);
if (!(v & ~Z_EROFS_PAGE_EIO)) {
set_page_private(page, 0);
ClearPagePrivate(page);
if (!(v & Z_EROFS_PAGE_EIO))
SetPageUptodate(page);
unlock_page(page);
}
}
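/*
 * Worked example of the scheme above: a page covered by three
 * decompression parts starts with page->private == 1, is split twice
 * (counter == 3), and each endio drops one part; once the counter hits
 * zero the page is unlocked and marked uptodate unless Z_EROFS_PAGE_EIO
 * was ever set.
 */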
#define Z_EROFS_ONSTACK_PAGES 32
/*
 * since the pcluster size is variable with the big pcluster feature,
 * introduce slab pools for the different pcluster sizes.
*/
struct z_erofs_pcluster_slab {
struct kmem_cache *slab;
unsigned int maxpages;
char name[48];
};
#define _PCLP(n) { .maxpages = n }
static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};
struct z_erofs_bvec_iter {
struct page *bvpage;
struct z_erofs_bvset *bvset;
unsigned int nr, cur;
};
static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
if (iter->bvpage)
kunmap_local(iter->bvset);
return iter->bvpage;
}
static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
/* have to access nextpage in advance, otherwise it will be unmapped */
struct page *nextpage = iter->bvset->nextpage;
struct page *oldpage;
DBG_BUGON(!nextpage);
oldpage = z_erofs_bvec_iter_end(iter);
iter->bvpage = nextpage;
iter->bvset = kmap_local_page(nextpage);
iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
iter->cur = 0;
return oldpage;
}
static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvset_inline *bvset,
unsigned int bootstrap_nr,
unsigned int cur)
{
*iter = (struct z_erofs_bvec_iter) {
.nr = bootstrap_nr,
.bvset = (struct z_erofs_bvset *)bvset,
};
while (cur > iter->nr) {
cur -= iter->nr;
z_erofs_bvset_flip(iter);
}
iter->cur = cur;
}
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec,
struct page **candidate_bvpage,
struct page **pagepool)
{
if (iter->cur >= iter->nr) {
struct page *nextpage = *candidate_bvpage;
if (!nextpage) {
nextpage = erofs_allocpage(pagepool, GFP_NOFS);
if (!nextpage)
return -ENOMEM;
set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
}
DBG_BUGON(iter->bvset->nextpage);
iter->bvset->nextpage = nextpage;
z_erofs_bvset_flip(iter);
iter->bvset->nextpage = NULL;
*candidate_bvpage = NULL;
}
iter->bvset->bvec[iter->cur++] = *bvec;
return 0;
}
static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec,
struct page **old_bvpage)
{
if (iter->cur == iter->nr)
*old_bvpage = z_erofs_bvset_flip(iter);
else
*old_bvpage = NULL;
*bvec = iter->bvset->bvec[iter->cur++];
}
static void z_erofs_destroy_pcluster_pool(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
if (!pcluster_pool[i].slab)
continue;
kmem_cache_destroy(pcluster_pool[i].slab);
pcluster_pool[i].slab = NULL;
}
}
static int z_erofs_create_pcluster_pool(void)
{
struct z_erofs_pcluster_slab *pcs;
struct z_erofs_pcluster *a;
unsigned int size;
for (pcs = pcluster_pool;
pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
size = struct_size(a, compressed_bvecs, pcs->maxpages);
sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
pcs->slab = kmem_cache_create(pcs->name, size, 0,
SLAB_RECLAIM_ACCOUNT, NULL);
if (pcs->slab)
continue;
z_erofs_destroy_pcluster_pool();
return -ENOMEM;
}
return 0;
}
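/*
 * Allocation below picks the smallest slab whose maxpages covers the
 * request, e.g. a 3-page pcluster is served from "erofs_pcluster-4";
 * anything beyond Z_EROFS_PCLUSTER_MAX_PAGES fails with -EINVAL.
 */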
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
struct z_erofs_pcluster *pcl;
if (nrpages > pcs->maxpages)
continue;
pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
if (!pcl)
return ERR_PTR(-ENOMEM);
pcl->pclusterpages = nrpages;
return pcl;
}
return ERR_PTR(-EINVAL);
}
static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
int i;
for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
if (pclusterpages > pcs->maxpages)
continue;
kmem_cache_free(pcs->slab, pcl);
return;
}
DBG_BUGON(1);
}
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;
static void erofs_destroy_percpu_workers(void)
{
struct kthread_worker *worker;
unsigned int cpu;
for_each_possible_cpu(cpu) {
worker = rcu_dereference_protected(
z_erofs_pcpu_workers[cpu], 1);
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
if (worker)
kthread_destroy_worker(worker);
}
kfree(z_erofs_pcpu_workers);
}
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
struct kthread_worker *worker =
kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
if (IS_ERR(worker))
return worker;
if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
sched_set_fifo_low(worker->task);
return worker;
}
static int erofs_init_percpu_workers(void)
{
struct kthread_worker *worker;
unsigned int cpu;
z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
sizeof(struct kthread_worker *), GFP_ATOMIC);
if (!z_erofs_pcpu_workers)
return -ENOMEM;
for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
worker = erofs_init_percpu_worker(cpu);
if (!IS_ERR(worker))
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
}
return 0;
}
#else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
#endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;
static int erofs_cpu_online(unsigned int cpu)
{
struct kthread_worker *worker, *old;
worker = erofs_init_percpu_worker(cpu);
if (IS_ERR(worker))
return PTR_ERR(worker);
spin_lock(&z_erofs_pcpu_worker_lock);
old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
lockdep_is_held(&z_erofs_pcpu_worker_lock));
if (!old)
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
spin_unlock(&z_erofs_pcpu_worker_lock);
if (old)
kthread_destroy_worker(worker);
return 0;
}
static int erofs_cpu_offline(unsigned int cpu)
{
struct kthread_worker *worker;
spin_lock(&z_erofs_pcpu_worker_lock);
worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
lockdep_is_held(&z_erofs_pcpu_worker_lock));
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
spin_unlock(&z_erofs_pcpu_worker_lock);
synchronize_rcu();
if (worker)
kthread_destroy_worker(worker);
return 0;
}
static int erofs_cpu_hotplug_init(void)
{
int state;
state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
if (state < 0)
return state;
erofs_cpuhp_state = state;
return 0;
}
static void erofs_cpu_hotplug_destroy(void)
{
if (erofs_cpuhp_state)
cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif
void z_erofs_exit_zip_subsystem(void)
{
erofs_cpu_hotplug_destroy();
erofs_destroy_percpu_workers();
destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool();
}
int __init z_erofs_init_zip_subsystem(void)
{
int err = z_erofs_create_pcluster_pool();
if (err)
goto out_error_pcluster_pool;
z_erofs_workqueue = alloc_workqueue("erofs_worker",
WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
if (!z_erofs_workqueue) {
err = -ENOMEM;
goto out_error_workqueue_init;
}
err = erofs_init_percpu_workers();
if (err)
goto out_error_pcpu_worker;
err = erofs_cpu_hotplug_init();
if (err < 0)
goto out_error_cpuhp_init;
return err;
out_error_cpuhp_init:
erofs_destroy_percpu_workers();
out_error_pcpu_worker:
destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init:
z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool:
return err;
}
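/*
 * The error unwind above releases resources in exact reverse order of
 * acquisition and mirrors z_erofs_exit_zip_subsystem(); keep both in
 * sync when adding a new initialization step.
 */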
enum z_erofs_pclustermode {
Z_EROFS_PCLUSTER_INFLIGHT,
/*
 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
 * could be dispatched into the bypass queue later due to up-to-date
 * managed pages. All related online pages cannot be reused for in-place
 * I/O (or bvpage) since the pcluster can be decoded without I/O submission.
*/
Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
* The pcluster was just linked to a decompression chain by us. It can
* also be linked with the remaining pclusters, which means if the
* processing page is the tail page of a pcluster, this pcluster can
* safely use the whole page (since the previous pcluster is within the
* same chain) for in-place I/O, as illustrated below:
* ___________________________________________________
* | tail (partial) page | head (partial) page |
* | (of the current pcl) | (of the previous pcl) |
* |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
*
* [ (*) the page above can be used as inplace I/O. ]
*/
Z_EROFS_PCLUSTER_FOLLOWED,
};
struct z_erofs_decompress_frontend {
struct inode *const inode;
struct erofs_map_blocks map;
struct z_erofs_bvec_iter biter;
struct page *pagepool;
struct page *candidate_bvpage;
struct z_erofs_pcluster *pcl;
z_erofs_next_pcluster_t owned_head;
enum z_erofs_pclustermode mode;
erofs_off_t headoffset;
/* an index cursor used to pick up in-place I/O pages */
unsigned int icur;
};
#define DECOMPRESS_FRONTEND_INIT(__i) { \
.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
.mode = Z_EROFS_PCLUSTER_FOLLOWED }
static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
return false;
if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
return true;
if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
fe->map.m_la < fe->headoffset)
return true;
return false;
}
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl;
bool shouldalloc = z_erofs_should_alloc_cache(fe);
bool standalone = true;
/*
 * optimistic allocation without direct reclaim, since in-place I/O
 * can be used under low memory instead.
*/
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
unsigned int i;
if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
return;
for (i = 0; i < pcl->pclusterpages; ++i) {
struct page *page;
void *t; /* mark pages just found for debugging */
struct page *newpage = NULL;
/* the compressed page was loaded before */
if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
page = find_get_page(mc, pcl->obj.index + i);
if (page) {
t = (void *)((unsigned long)page | 1);
} else {
/* I/O is needed, not possible to decompress directly */
standalone = false;
if (!shouldalloc)
continue;
/*
* try to use cached I/O if page allocation
 * succeeds, or fall back to in-place I/O instead
* to avoid any direct reclaim.
*/
newpage = erofs_allocpage(&fe->pagepool, gfp);
if (!newpage)
continue;
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
t = (void *)((unsigned long)newpage | 1);
}
if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
continue;
if (page)
put_page(page);
else if (newpage)
erofs_pagepool_add(&fe->pagepool, newpage);
}
/*
* don't do inplace I/O if all compressed pages are available in
* managed cache since it can be moved to the bypass queue instead.
*/
if (standalone)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp)
{
struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj);
int i;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
/*
 * the workgroup refcount is now frozen at 0,
 * so there is no need to worry about concurrent decompression users.
*/
for (i = 0; i < pcl->pclusterpages; ++i) {
struct page *page = pcl->compressed_bvecs[i].page;
if (!page)
continue;
/* block other users from reclaiming or migrating the page */
if (!trylock_page(page))
return -EBUSY;
if (!erofs_page_is_managed(sbi, page))
continue;
/* barrier is implied in the following 'unlock_page' */
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
detach_page_private(page);
unlock_page(page);
}
return 0;
}
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
struct z_erofs_pcluster *pcl = folio_get_private(folio);
bool ret;
int i;
if (!folio_test_private(folio))
return true;
ret = false;
spin_lock(&pcl->obj.lockref.lock);
if (pcl->obj.lockref.count > 0)
goto out;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
for (i = 0; i < pcl->pclusterpages; ++i) {
if (pcl->compressed_bvecs[i].page == &folio->page) {
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
ret = true;
break;
}
}
if (ret)
folio_detach_private(folio);
out:
spin_unlock(&pcl->obj.lockref.lock);
return ret;
}
/*
 * This is called only on inode eviction. In case some decompression
 * requests are still in progress, wait and reschedule for a bit here.
 * An extra lock could be introduced instead, but it seems unnecessary.
 */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
size_t offset, size_t length)
{
const size_t stop = length + offset;
/* Check for potential overflow in debug mode */
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == folio_size(folio))
while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
cond_resched();
}
static const struct address_space_operations z_erofs_cache_aops = {
.release_folio = z_erofs_cache_release_folio,
.invalidate_folio = z_erofs_cache_invalidate_folio,
};
int erofs_init_managed_cache(struct super_block *sb)
{
struct inode *const inode = new_inode(sb);
if (!inode)
return -ENOMEM;
set_nlink(inode, 1);
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &z_erofs_cache_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
EROFS_SB(sb)->managed_cache = inode;
return 0;
}
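/*
 * A minimal usage sketch (assuming MNGD_MAPPING() resolves to the
 * managed cache inode's ->i_mapping and that this helper is wired up
 * during fill_super):
 *
 *	err = erofs_init_managed_cache(sb);	// at mount time
 *	mc = MNGD_MAPPING(EROFS_SB(sb));	// per-sb cache mapping
 *
 * The pseudo-inode then backs all cached compressed pages so they
 * can be reclaimed through z_erofs_cache_aops above.
 */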
static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
struct z_erofs_bvec *bvec)
{
struct z_erofs_pcluster *const pcl = fe->pcl;
while (fe->icur > 0) {
if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
NULL, bvec->page)) {
pcl->compressed_bvecs[fe->icur] = *bvec;
return true;
}
}
return false;
}
/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
struct z_erofs_bvec *bvec, bool exclusive)
{
int ret;
if (exclusive) {
		/* give priority to inplace I/O so that file pages are used first */
if (z_erofs_try_inplace_io(fe, bvec))
return 0;
/* otherwise, check if it can be used as a bvpage */
if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
!fe->candidate_bvpage)
fe->candidate_bvpage = bvec->page;
}
ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
&fe->pagepool);
fe->pcl->vcnt += (ret >= 0);
return ret;
}
static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
struct z_erofs_pcluster *pcl = f->pcl;
z_erofs_next_pcluster_t *owned_head = &f->owned_head;
/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
*owned_head) == Z_EROFS_PCLUSTER_NIL) {
*owned_head = &pcl->next;
/* so we can attach this pcluster to our submission chain. */
f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
return;
}
/* type 2, it belongs to an ongoing chain */
f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}
static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
bool ztailpacking = map->m_flags & EROFS_MAP_META;
struct z_erofs_pcluster *pcl;
struct erofs_workgroup *grp;
int err;
if (!(map->m_flags & EROFS_MAP_ENCODED) ||
(!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
/* no available pcluster, let's allocate one */
pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
map->m_plen >> PAGE_SHIFT);
if (IS_ERR(pcl))
return PTR_ERR(pcl);
spin_lock_init(&pcl->obj.lockref.lock);
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
/* new pclusters should be claimed as type 1, primary and followed */
pcl->next = fe->owned_head;
pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
	/*
	 * lock all primary followed works before they become visible to
	 * others; mutex_trylock *never* fails for a new pcluster.
	 */
mutex_init(&pcl->lock);
DBG_BUGON(!mutex_trylock(&pcl->lock));
if (ztailpacking) {
pcl->obj.index = 0; /* which indicates ztailpacking */
pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
pcl->tailpacking_size = map->m_plen;
} else {
pcl->obj.index = map->m_pa >> PAGE_SHIFT;
grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
if (IS_ERR(grp)) {
err = PTR_ERR(grp);
goto err_out;
}
if (grp != &pcl->obj) {
fe->pcl = container_of(grp,
struct z_erofs_pcluster, obj);
err = -EEXIST;
goto err_out;
}
}
fe->owned_head = &pcl->next;
fe->pcl = pcl;
return 0;
err_out:
mutex_unlock(&pcl->lock);
z_erofs_free_pcluster(pcl);
return err;
}
static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct erofs_workgroup *grp = NULL;
int ret;
DBG_BUGON(fe->pcl);
/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
if (!(map->m_flags & EROFS_MAP_META)) {
grp = erofs_find_workgroup(sb, blknr);
} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
if (grp) {
fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
ret = -EEXIST;
} else {
ret = z_erofs_register_pcluster(fe);
}
if (ret == -EEXIST) {
mutex_lock(&fe->pcl->lock);
z_erofs_try_to_claim_pcluster(fe);
} else if (ret) {
return ret;
}
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
if (!z_erofs_is_inline_pcluster(fe->pcl)) {
/* bind cache first when cached decompression is preferred */
z_erofs_bind_cache(fe);
} else {
void *mptr;
mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
if (IS_ERR(mptr)) {
ret = PTR_ERR(mptr);
erofs_err(sb, "failed to get inline data %d", ret);
return ret;
}
get_page(map->buf.page);
WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
/* file-backed inplace I/O pages are traversed in reverse order */
fe->icur = z_erofs_pclusterpages(fe->pcl);
return 0;
}
/*
 * keep in mind that referenced pclusters will be freed
 * only after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
z_erofs_free_pcluster(container_of(head,
struct z_erofs_pcluster, rcu));
}
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj);
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{
struct z_erofs_pcluster *pcl = fe->pcl;
if (!pcl)
return;
z_erofs_bvec_iter_end(&fe->biter);
mutex_unlock(&pcl->lock);
if (fe->candidate_bvpage)
fe->candidate_bvpage = NULL;
	/*
	 * once all pending pages are added, don't hold the pcluster
	 * reference any longer if it isn't hosted by ourselves.
	 */
if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
erofs_workgroup_put(&pcl->obj);
fe->pcl = NULL;
}
static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
unsigned int cur, unsigned int end, erofs_off_t pos)
{
struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
unsigned int cnt;
u8 *src;
if (!packed_inode)
return -EFSCORRUPTED;
buf.inode = packed_inode;
for (; cur < end; cur += cnt, pos += cnt) {
cnt = min_t(unsigned int, end - cur,
sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
}
memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
}
erofs_put_metabuf(&buf);
return 0;
}
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page)
{
struct inode *const inode = fe->inode;
struct erofs_map_blocks *const map = &fe->map;
const loff_t offset = page_offset(page);
bool tight = true, exclusive;
unsigned int cur, end, len, split;
int err = 0;
z_erofs_onlinepage_init(page);
split = 0;
end = PAGE_SIZE;
repeat:
if (offset + end - 1 < map->m_la ||
offset + end - 1 >= map->m_la + map->m_llen) {
z_erofs_pcluster_end(fe);
map->m_la = offset + end - 1;
map->m_llen = 0;
err = z_erofs_map_blocks_iter(inode, map, 0);
if (err)
goto out;
}
cur = offset > map->m_la ? 0 : map->m_la - offset;
/* bump split parts first to avoid several separate cases */
++split;
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
zero_user_segment(page, cur, end);
tight = false;
goto next_part;
}
if (map->m_flags & EROFS_MAP_FRAGMENT) {
erofs_off_t fpos = offset + cur - map->m_la;
len = min_t(unsigned int, map->m_llen - fpos, end - cur);
err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
EROFS_I(inode)->z_fragmentoff + fpos);
if (err)
goto out;
tight = false;
goto next_part;
}
if (!fe->pcl) {
err = z_erofs_pcluster_begin(fe);
if (err)
goto out;
}
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio (bypass) chain,
	 * since those chains are handled asynchronously and thus the page
	 * cannot be used for inplace I/O or bvpage (it should be processed
	 * in a strict order).
	 */
tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
exclusive = (!cur && ((split <= 1) || tight));
if (cur)
tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
.page = page,
.offset = offset - map->m_la,
.end = end,
}), exclusive);
if (err)
goto out;
z_erofs_onlinepage_split(page);
if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
fe->pcl->multibases = true;
if (fe->pcl->length < offset + end - map->m_la) {
fe->pcl->length = offset + end - map->m_la;
fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
!(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
fe->pcl->length == map->m_llen)
fe->pcl->partial = false;
next_part:
/* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la;
map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
end = cur;
if (end > 0)
goto repeat;
out:
z_erofs_onlinepage_endio(page, err);
return err;
}
static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
unsigned int readahead_pages)
{
/* auto: enable for read_folio, disable for readahead */
if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
!readahead_pages)
return true;
if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
(readahead_pages <= sbi->opt.max_sync_decompress_pages))
return true;
return false;
}
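/*
 * Decision table for illustration: read_folio passes 0 readahead
 * pages, so EROFS_SYNC_DECOMPRESS_AUTO decompresses synchronously
 * there while deferring readahead to a worker; with
 * EROFS_SYNC_DECOMPRESS_FORCE_ON, small readahead batches
 * (readahead_pages <= max_sync_decompress_pages) also run in the
 * caller context.
 */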
static bool z_erofs_page_is_invalidated(struct page *page)
{
return !page->mapping && !z_erofs_is_shortlived_page(page);
}
struct z_erofs_decompress_backend {
struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
struct super_block *sb;
struct z_erofs_pcluster *pcl;
/* pages with the longest decompressed length for deduplication */
struct page **decompressed_pages;
/* pages to keep the compressed data */
struct page **compressed_pages;
struct list_head decompressed_secondary_bvecs;
struct page **pagepool;
unsigned int onstack_used, nr_pages;
};
struct z_erofs_bvec_item {
struct z_erofs_bvec bvec;
struct list_head list;
};
static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
struct z_erofs_bvec *bvec)
{
struct z_erofs_bvec_item *item;
unsigned int pgnr;
if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
(bvec->end == PAGE_SIZE ||
bvec->offset + bvec->end == be->pcl->length)) {
pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
DBG_BUGON(pgnr >= be->nr_pages);
if (!be->decompressed_pages[pgnr]) {
be->decompressed_pages[pgnr] = bvec->page;
return;
}
}
/* (cold path) one pcluster is requested multiple times */
item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
item->bvec = *bvec;
list_add(&item->list, &be->decompressed_secondary_bvecs);
}
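/*
 * Worked example (assuming PAGE_SIZE == 4096): with pageofs_out ==
 * 512, a bvec at offset 3584 ending at PAGE_SIZE starts exactly on a
 * decompressed page boundary ((3584 + 512) & ~PAGE_MASK == 0), so it
 * lands in decompressed_pages[(3584 + 512) >> PAGE_SHIFT], i.e.
 * slot 1; a second bvec hitting an occupied slot takes the cold
 * kmalloc'ed-item path above instead.
 */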
static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
int err)
{
unsigned int off0 = be->pcl->pageofs_out;
struct list_head *p, *n;
list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
struct z_erofs_bvec_item *bvi;
unsigned int end, cur;
void *dst, *src;
bvi = container_of(p, struct z_erofs_bvec_item, list);
cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
bvi->bvec.end);
dst = kmap_local_page(bvi->bvec.page);
while (cur < end) {
unsigned int pgnr, scur, len;
pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
DBG_BUGON(pgnr >= be->nr_pages);
scur = bvi->bvec.offset + cur -
((pgnr << PAGE_SHIFT) - off0);
len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
if (!be->decompressed_pages[pgnr]) {
err = -EFSCORRUPTED;
cur += len;
continue;
}
src = kmap_local_page(be->decompressed_pages[pgnr]);
memcpy(dst + cur, src + scur, len);
kunmap_local(src);
cur += len;
}
kunmap_local(dst);
z_erofs_onlinepage_endio(bvi->bvec.page, err);
list_del(p);
kfree(bvi);
}
}
static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
struct z_erofs_pcluster *pcl = be->pcl;
struct z_erofs_bvec_iter biter;
struct page *old_bvpage;
int i;
z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
for (i = 0; i < pcl->vcnt; ++i) {
struct z_erofs_bvec bvec;
z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
if (old_bvpage)
z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
z_erofs_do_decompressed_bvec(be, &bvec);
}
old_bvpage = z_erofs_bvec_iter_end(&biter);
if (old_bvpage)
z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}
static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
bool *overlapped)
{
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
int i, err = 0;
*overlapped = false;
for (i = 0; i < pclusterpages; ++i) {
struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
struct page *page = bvec->page;
/* compressed pages ought to be present before decompressing */
if (!page) {
DBG_BUGON(1);
continue;
}
be->compressed_pages[i] = page;
if (z_erofs_is_inline_pcluster(pcl)) {
if (!PageUptodate(page))
err = -EIO;
continue;
}
DBG_BUGON(z_erofs_page_is_invalidated(page));
if (!z_erofs_is_shortlived_page(page)) {
if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
if (!PageUptodate(page))
err = -EIO;
continue;
}
z_erofs_do_decompressed_bvec(be, bvec);
*overlapped = true;
}
}
if (err)
return err;
return 0;
}
static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
int err)
{
struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
const struct z_erofs_decompressor *decompressor =
&erofs_decompressors[pcl->algorithmformat];
unsigned int i, inputsize;
int err2;
struct page *page;
bool overlapped;
mutex_lock(&pcl->lock);
be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
	/* allocate (de)compressed page arrays if they cannot be kept on stack */
be->decompressed_pages = NULL;
be->compressed_pages = NULL;
be->onstack_used = 0;
if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
be->decompressed_pages = be->onstack_pages;
be->onstack_used = be->nr_pages;
memset(be->decompressed_pages, 0,
sizeof(struct page *) * be->nr_pages);
}
if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
be->compressed_pages = be->onstack_pages + be->onstack_used;
if (!be->decompressed_pages)
be->decompressed_pages =
kvcalloc(be->nr_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_NOFAIL);
if (!be->compressed_pages)
be->compressed_pages =
kvcalloc(pclusterpages, sizeof(struct page *),
GFP_KERNEL | __GFP_NOFAIL);
z_erofs_parse_out_bvecs(be);
err2 = z_erofs_parse_in_bvecs(be, &overlapped);
if (err2)
err = err2;
if (err)
goto out;
if (z_erofs_is_inline_pcluster(pcl))
inputsize = pcl->tailpacking_size;
else
inputsize = pclusterpages * PAGE_SIZE;
err = decompressor->decompress(&(struct z_erofs_decompress_req) {
.sb = be->sb,
.in = be->compressed_pages,
.out = be->decompressed_pages,
.pageofs_in = pcl->pageofs_in,
.pageofs_out = pcl->pageofs_out,
.inputsize = inputsize,
.outputsize = pcl->length,
.alg = pcl->algorithmformat,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
.fillgaps = pcl->multibases,
}, be->pagepool);
out:
/* must handle all compressed pages before actual file pages */
if (z_erofs_is_inline_pcluster(pcl)) {
page = pcl->compressed_bvecs[0].page;
WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
put_page(page);
} else {
for (i = 0; i < pclusterpages; ++i) {
page = pcl->compressed_bvecs[i].page;
if (erofs_page_is_managed(sbi, page))
continue;
/* recycle all individual short-lived pages */
(void)z_erofs_put_shortlivedpage(be->pagepool, page);
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
}
}
if (be->compressed_pages < be->onstack_pages ||
be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
kvfree(be->compressed_pages);
z_erofs_fill_other_copies(be, err);
for (i = 0; i < be->nr_pages; ++i) {
page = be->decompressed_pages[i];
if (!page)
continue;
DBG_BUGON(z_erofs_page_is_invalidated(page));
/* recycle all individual short-lived pages */
if (z_erofs_put_shortlivedpage(be->pagepool, page))
continue;
z_erofs_onlinepage_endio(page, err);
}
if (be->decompressed_pages != be->onstack_pages)
kvfree(be->decompressed_pages);
pcl->length = 0;
pcl->partial = true;
pcl->multibases = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
/* pcluster lock MUST be taken before the following line */
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
mutex_unlock(&pcl->lock);
return err;
}
static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct page **pagepool)
{
struct z_erofs_decompress_backend be = {
.sb = io->sb,
.pagepool = pagepool,
.decompressed_secondary_bvecs =
LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
};
z_erofs_next_pcluster_t owned = io->head;
while (owned != Z_EROFS_PCLUSTER_TAIL) {
DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
be.pcl = container_of(owned, struct z_erofs_pcluster, next);
owned = READ_ONCE(be.pcl->next);
z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
if (z_erofs_is_inline_pcluster(be.pcl))
z_erofs_free_pcluster(be.pcl);
else
erofs_workgroup_put(&be.pcl->obj);
}
}
static void z_erofs_decompressqueue_work(struct work_struct *work)
{
struct z_erofs_decompressqueue *bgq =
container_of(work, struct z_erofs_decompressqueue, u.work);
struct page *pagepool = NULL;
DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
z_erofs_decompress_queue(bgq, &pagepool);
erofs_release_pages(&pagepool);
kvfree(bgq);
}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
int bios)
{
struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
/* wake up the caller thread for sync decompression */
if (io->sync) {
if (!atomic_add_return(bios, &io->pending_bios))
complete(&io->u.done);
return;
}
if (atomic_add_return(bios, &io->pending_bios))
return;
/* Use (kthread_)work and sync decompression for atomic contexts only */
if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
struct kthread_worker *worker;
rcu_read_lock();
worker = rcu_dereference(
z_erofs_pcpu_workers[raw_smp_processor_id()]);
if (!worker) {
INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
queue_work(z_erofs_workqueue, &io->u.work);
} else {
kthread_queue_work(worker, &io->u.kthread_work);
}
rcu_read_unlock();
#else
queue_work(z_erofs_workqueue, &io->u.work);
#endif
/* enable sync decompression for readahead */
if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
return;
}
z_erofs_decompressqueue_work(&io->u.work);
}
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr,
struct page **pagepool,
struct address_space *mc)
{
const pgoff_t index = pcl->obj.index;
gfp_t gfp = mapping_gfp_mask(mc);
bool tocache = false;
struct address_space *mapping;
struct page *oldpage, *page;
int justfound;
repeat:
page = READ_ONCE(pcl->compressed_bvecs[nr].page);
oldpage = page;
if (!page)
goto out_allocpage;
justfound = (unsigned long)page & 1UL;
page = (struct page *)((unsigned long)page & ~1UL);
	/*
	 * a preallocated cached page is used to avoid direct reclaim;
	 * otherwise, it would go down the inplace I/O path instead.
	 */
if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
set_page_private(page, 0);
tocache = true;
goto out_tocache;
}
mapping = READ_ONCE(page->mapping);
	/*
	 * file-backed online pages in the pcluster are all locked steadily,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
if (mapping && mapping != mc)
/* ought to be unmanaged pages */
goto out;
/* directly return for shortlived page as well */
if (z_erofs_is_shortlived_page(page))
goto out;
lock_page(page);
/* only true if page reclaim goes wrong, should never happen */
DBG_BUGON(justfound && PagePrivate(page));
	/* the page is still in the managed cache */
if (page->mapping == mc) {
WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
if (!PagePrivate(page)) {
			/*
			 * under the current restriction, the page
			 * cannot be !PagePrivate(page) if it is
			 * already in compressed_bvecs[].
			 */
DBG_BUGON(!justfound);
justfound = 0;
set_page_private(page, (unsigned long)pcl);
SetPagePrivate(page);
}
/* no need to submit io if it is already up-to-date */
if (PageUptodate(page)) {
unlock_page(page);
page = NULL;
}
goto out;
}
	/*
	 * the managed page has been truncated; it's unsafe to
	 * reuse it, so let's allocate a new cache-managed page.
	 */
DBG_BUGON(page->mapping);
DBG_BUGON(!justfound);
tocache = true;
unlock_page(page);
put_page(page);
out_allocpage:
page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
oldpage, page)) {
erofs_pagepool_add(pagepool, page);
cond_resched();
goto repeat;
}
out_tocache:
if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn it into a temporary page if that fails (1 ref) */
set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
goto out;
}
attach_page_private(page, pcl);
/* drop a refcount added by allocpage (then we have 2 refs here) */
put_page(page);
out: /* the only exit (for tracing and debugging) */
return page;
}
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
struct z_erofs_decompressqueue *fgq, bool *fg)
{
struct z_erofs_decompressqueue *q;
if (fg && !*fg) {
q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
if (!q) {
*fg = true;
goto fg_out;
}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
kthread_init_work(&q->u.kthread_work,
z_erofs_decompressqueue_kthread_work);
#else
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
} else {
fg_out:
q = fgq;
init_completion(&fgq->u.done);
atomic_set(&fgq->pending_bios, 0);
q->eio = false;
q->sync = true;
}
q->sb = sb;
q->head = Z_EROFS_PCLUSTER_TAIL;
return q;
}
/* define decompression jobqueue types */
enum {
JQ_BYPASS,
JQ_SUBMIT,
NR_JOBQUEUES,
};
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
z_erofs_next_pcluster_t qtail[],
z_erofs_next_pcluster_t owned_head)
{
z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
WRITE_ONCE(*submit_qtail, owned_head);
WRITE_ONCE(*bypass_qtail, &pcl->next);
qtail[JQ_BYPASS] = &pcl->next;
}
static void z_erofs_decompressqueue_endio(struct bio *bio)
{
struct z_erofs_decompressqueue *q = bio->bi_private;
blk_status_t err = bio->bi_status;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
DBG_BUGON(PageUptodate(page));
DBG_BUGON(z_erofs_page_is_invalidated(page));
if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
if (!err)
SetPageUptodate(page);
unlock_page(page);
}
}
if (err)
q->eio = true;
z_erofs_decompress_kickoff(q, -1);
bio_put(bio);
}
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
struct z_erofs_decompressqueue *fgq,
bool *force_fg, bool readahead)
{
struct super_block *sb = f->inode->i_sb;
struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
z_erofs_next_pcluster_t owned_head = f->owned_head;
/* bio is NULL initially, so no need to initialize last_{index,bdev} */
pgoff_t last_index;
struct block_device *last_bdev;
unsigned int nr_bios = 0;
struct bio *bio = NULL;
unsigned long pflags;
int memstall = 0;
	/*
	 * if the managed cache is enabled, a bypass jobqueue is needed:
	 * pclusters in that queue don't need to be read from the device.
	 */
q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
q[JQ_SUBMIT]->head = owned_head;
do {
struct erofs_map_dev mdev;
struct z_erofs_pcluster *pcl;
pgoff_t cur, end;
unsigned int i = 0;
bool bypass = true;
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
pcl = container_of(owned_head, struct z_erofs_pcluster, next);
owned_head = READ_ONCE(pcl->next);
if (z_erofs_is_inline_pcluster(pcl)) {
move_to_bypass_jobqueue(pcl, qtail, owned_head);
continue;
}
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
.m_pa = erofs_pos(sb, pcl->obj.index),
};
(void)erofs_map_dev(sb, &mdev);
cur = erofs_blknr(sb, mdev.m_pa);
end = cur + pcl->pclusterpages;
do {
struct page *page;
page = pickup_page_for_submission(pcl, i++,
&f->pagepool, mc);
if (!page)
continue;
if (bio && (cur != last_index + 1 ||
last_bdev != mdev.m_bdev)) {
submit_bio_retry:
submit_bio(bio);
if (memstall) {
psi_memstall_leave(&pflags);
memstall = 0;
}
bio = NULL;
}
if (unlikely(PageWorkingset(page)) && !memstall) {
psi_memstall_enter(&pflags);
memstall = 1;
}
if (!bio) {
bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_decompressqueue_endio;
last_bdev = mdev.m_bdev;
bio->bi_iter.bi_sector = (sector_t)cur <<
(sb->s_blocksize_bits - 9);
bio->bi_private = q[JQ_SUBMIT];
if (readahead)
bio->bi_opf |= REQ_RAHEAD;
++nr_bios;
}
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
goto submit_bio_retry;
last_index = cur;
bypass = false;
} while (++cur < end);
if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
move_to_bypass_jobqueue(pcl, qtail, owned_head);
} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
if (bio) {
submit_bio(bio);
if (memstall)
psi_memstall_leave(&pflags);
}
	/*
	 * although background decompression is preferred, nothing is
	 * pending for submission; don't kick off decompression but drop
	 * the queue directly instead.
	 */
if (!*force_fg && !nr_bios) {
kvfree(q[JQ_SUBMIT]);
return;
}
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
bool force_fg, bool ra)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
return;
z_erofs_submit_queue(f, io, &force_fg, ra);
/* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
if (!force_fg)
return;
/* wait until all bios are completed */
wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}
/*
 * Since partial uptodate is still unimplemented, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
struct readahead_control *rac, bool backmost)
{
struct inode *inode = f->inode;
struct erofs_map_blocks *map = &f->map;
erofs_off_t cur, end, headoffset = f->headoffset;
int err;
if (backmost) {
if (rac)
end = headoffset + readahead_length(rac) - 1;
else
end = headoffset + PAGE_SIZE - 1;
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
if (err)
return;
/* expand ra for the trailing edge if readahead */
if (rac) {
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
readahead_expand(rac, headoffset, cur - headoffset);
return;
}
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
if (!map->m_llen)
return;
}
cur = map->m_la + map->m_llen - 1;
while ((cur >= end) && (cur < i_size_read(inode))) {
pgoff_t index = cur >> PAGE_SHIFT;
struct page *page;
page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
if (page) {
if (PageUptodate(page))
unlock_page(page);
else
(void)z_erofs_do_read_page(f, page);
put_page(page);
}
if (cur < PAGE_SIZE)
break;
cur = (index << PAGE_SHIFT) - 1;
}
}
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
struct inode *const inode = folio->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
int err;
trace_erofs_read_folio(folio, false);
f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_do_read_page(&f, &folio->page);
z_erofs_pcluster_readmore(&f, NULL, false);
z_erofs_pcluster_end(&f);
	/* if some compressed clusters are ready, we need to submit them anyway */
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
if (err && err != -EINTR)
erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
err, folio->index, EROFS_I(inode)->nid);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
return err;
}
static void z_erofs_readahead(struct readahead_control *rac)
{
struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct folio *head = NULL, *folio;
unsigned int nr_folios;
int err;
f.headoffset = readahead_pos(rac);
z_erofs_pcluster_readmore(&f, rac, true);
nr_folios = readahead_count(rac);
trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
while ((folio = readahead_folio(rac))) {
folio->private = head;
head = folio;
}
/* traverse in reverse order for best metadata I/O performance */
while (head) {
folio = head;
head = folio_get_private(folio);
err = z_erofs_do_read_page(&f, &folio->page);
if (err && err != -EINTR)
erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
folio->index, EROFS_I(inode)->nid);
}
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
const struct address_space_operations z_erofs_aops = {
.read_folio = z_erofs_read_folio,
.readahead = z_erofs_readahead,
};
| linux-master | fs/erofs/zdata.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2022, Alibaba Cloud
*/
#include "internal.h"
static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
void *dentry_blk, struct erofs_dirent *de,
unsigned int nameoff, unsigned int maxsize)
{
const struct erofs_dirent *end = dentry_blk + nameoff;
while (de < end) {
const char *de_name;
unsigned int de_namelen;
unsigned char d_type;
d_type = fs_ftype_to_dtype(de->file_type);
nameoff = le16_to_cpu(de->nameoff);
de_name = (char *)dentry_blk + nameoff;
/* the last dirent in the block? */
if (de + 1 >= end)
de_namelen = strnlen(de_name, maxsize - nameoff);
else
de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
/* a corrupted entry is found */
if (nameoff + de_namelen > maxsize ||
de_namelen > EROFS_NAME_LEN) {
erofs_err(dir->i_sb, "bogus dirent @ nid %llu",
EROFS_I(dir)->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
if (!dir_emit(ctx, de_name, de_namelen,
le64_to_cpu(de->nid), d_type))
return 1;
++de;
ctx->pos += sizeof(struct erofs_dirent);
}
return 0;
}
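/*
 * On-disk layout example (numbers for illustration): dirents are
 * packed at the block start with all names following at
 * de[0].nameoff, so a name's length is just the gap between
 * consecutive nameoff values, e.g.
 *
 *	de[0].nameoff = 24, de[1].nameoff = 27  =>  de_namelen = 3
 *
 * Only the very last entry needs strnlen() against maxsize since it
 * has no successor to diff against.
 */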
static int erofs_readdir(struct file *f, struct dir_context *ctx)
{
struct inode *dir = file_inode(f);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct super_block *sb = dir->i_sb;
unsigned long bsz = sb->s_blocksize;
const size_t dirsize = i_size_read(dir);
unsigned int i = erofs_blknr(sb, ctx->pos);
unsigned int ofs = erofs_blkoff(sb, ctx->pos);
int err = 0;
bool initial = true;
buf.inode = dir;
while (ctx->pos < dirsize) {
struct erofs_dirent *de;
unsigned int nameoff, maxsize;
de = erofs_bread(&buf, i, EROFS_KMAP);
if (IS_ERR(de)) {
erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
i, EROFS_I(dir)->nid);
err = PTR_ERR(de);
break;
}
nameoff = le16_to_cpu(de->nameoff);
if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) {
erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu",
nameoff, EROFS_I(dir)->nid);
err = -EFSCORRUPTED;
break;
}
maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz);
		/* search dirents from an arbitrary (resumed) position */
if (initial) {
initial = false;
ofs = roundup(ofs, sizeof(struct erofs_dirent));
ctx->pos = erofs_pos(sb, i) + ofs;
if (ofs >= nameoff)
goto skip_this;
}
err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs,
nameoff, maxsize);
if (err)
break;
skip_this:
ctx->pos = erofs_pos(sb, i) + maxsize;
++i;
ofs = 0;
}
erofs_put_metabuf(&buf);
return err < 0 ? err : 0;
}
const struct file_operations erofs_dir_fops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = erofs_readdir,
};
| linux-master | fs/erofs/dir.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018-2019 HUAWEI, Inc.
* https://www.huawei.com/
*/
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>
struct z_erofs_maprecorder {
struct inode *inode;
struct erofs_map_blocks *map;
void *kaddr;
unsigned long lcn;
/* compression extent information gathered */
u8 type, headtype;
u16 clusterofs;
u16 delta[2];
erofs_blk_t pblk, compressedblks;
erofs_off_t nextpackoff;
bool partialref;
};
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
unsigned long lcn)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
vi->inode_isize + vi->xattr_isize) +
lcn * sizeof(struct z_erofs_lcluster_index);
struct z_erofs_lcluster_index *di;
unsigned int advise, type;
m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
if (IS_ERR(m->kaddr))
return PTR_ERR(m->kaddr);
m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
m->lcn = lcn;
di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
advise = le16_to_cpu(di->di_advise);
type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
switch (type) {
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
m->clusterofs = 1 << vi->z_logical_clusterbits;
m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->compressedblks = m->delta[0] &
~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
}
m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
break;
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
case Z_EROFS_LCLUSTER_TYPE_HEAD2:
if (advise & Z_EROFS_LI_PARTIAL_REF)
m->partialref = true;
m->clusterofs = le16_to_cpu(di->di_clusterofs);
if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->pblk = le32_to_cpu(di->di_u.blkaddr);
break;
default:
DBG_BUGON(1);
return -EOPNOTSUPP;
}
m->type = type;
return 0;
}
static unsigned int decode_compactedbits(unsigned int lobits,
unsigned int lomask,
u8 *in, unsigned int pos, u8 *type)
{
const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
const unsigned int lo = v & lomask;
*type = (v >> lobits) & 3;
return lo;
}
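/*
 * Worked example (values for illustration): with lclusterbits == 12
 * (lomask == 0xfff) and pos == 28, 32 bits are loaded from byte
 * 28 / 8 == 3 and shifted right by 28 & 7 == 4; the low 12 bits of
 * the result yield `lo' (clusterofs or delta) and bits 12-13 yield
 * the 2-bit lcluster type.
 */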
static int get_compacted_la_distance(unsigned int lclusterbits,
unsigned int encodebits,
unsigned int vcnt, u8 *in, int i)
{
const unsigned int lomask = (1 << lclusterbits) - 1;
unsigned int lo, d1 = 0;
u8 type;
DBG_BUGON(i >= vcnt);
do {
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
return d1;
++d1;
} while (++i < vcnt);
/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
d1 += lo - 1;
return d1;
}
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
unsigned int amortizedshift,
erofs_off_t pos, bool lookahead)
{
struct erofs_inode *const vi = EROFS_I(m->inode);
const unsigned int lclusterbits = vi->z_logical_clusterbits;
const unsigned int lomask = (1 << lclusterbits) - 1;
unsigned int vcnt, base, lo, encodebits, nblk, eofs;
int i;
u8 *in, type;
bool big_pcluster;
if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits == 12)
vcnt = 16;
else
return -EOPNOTSUPP;
	/* this doesn't equal round_up(..): it differs when pos is already aligned */
m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
(vcnt << amortizedshift);
big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
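	/*
	 * e.g. (for illustration) vcnt == 16, amortizedshift == 1: a
	 * 32-byte pack minus its trailing __le32 blkaddr leaves
	 * (32 - 4) * 8 / 16 == 14 encoded bits per lcluster; with
	 * vcnt == 2, amortizedshift == 2: (8 - 4) * 8 / 2 == 16 bits.
	 */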
eofs = erofs_blkoff(m->inode->i_sb, pos);
base = round_down(eofs, vcnt << amortizedshift);
in = m->kaddr + base;
i = (eofs - base) >> amortizedshift;
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
m->type = type;
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
m->clusterofs = 1 << lclusterbits;
/* figure out lookahead_distance: delta[1] if needed */
if (lookahead)
m->delta[1] = get_compacted_la_distance(lclusterbits,
encodebits, vcnt, in, i);
if (lo & Z_EROFS_LI_D0_CBLKCNT) {
if (!big_pcluster) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
return 0;
} else if (i + 1 != (int)vcnt) {
m->delta[0] = lo;
return 0;
}
		/*
		 * The last lcluster in the pack is special: its lo saves
		 * delta[1] rather than delta[0]. Hence, get delta[0]
		 * indirectly from the previous lcluster.
		 */
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * (i - 1), &type);
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
lo = 0;
else if (lo & Z_EROFS_LI_D0_CBLKCNT)
lo = 1;
m->delta[0] = lo + 1;
return 0;
}
m->clusterofs = lo;
m->delta[0] = 0;
	/* figure out the blkaddr (pblk) for HEAD lclusters */
if (!big_pcluster) {
nblk = 1;
while (i > 0) {
--i;
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
i -= lo;
if (i >= 0)
++nblk;
}
} else {
nblk = 0;
while (i > 0) {
--i;
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
if (lo & Z_EROFS_LI_D0_CBLKCNT) {
--i;
nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
continue;
}
/* bigpcluster shouldn't have plain d0 == 1 */
if (lo <= 1) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
i -= lo - 2;
continue;
}
++nblk;
}
}
in += (vcnt << amortizedshift) - sizeof(__le32);
m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
return 0;
}
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
unsigned long lcn, bool lookahead)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
unsigned int totalidx = erofs_iblks(inode);
unsigned int compacted_4b_initial, compacted_2b;
unsigned int amortizedshift;
erofs_off_t pos;
if (lcn >= totalidx)
return -EINVAL;
m->lcn = lcn;
	/* used to pad up to the 32-byte (compacted_2b) alignment */
compacted_4b_initial = (32 - ebase % 32) / 4;
if (compacted_4b_initial == 32 / 4)
compacted_4b_initial = 0;
if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
compacted_4b_initial < totalidx)
compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
else
compacted_2b = 0;
pos = ebase;
if (lcn < compacted_4b_initial) {
amortizedshift = 2;
goto out;
}
pos += compacted_4b_initial * 4;
lcn -= compacted_4b_initial;
if (lcn < compacted_2b) {
amortizedshift = 1;
goto out;
}
pos += compacted_2b * 2;
lcn -= compacted_2b;
amortizedshift = 2;
out:
pos += lcn * (1 << amortizedshift);
m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
if (IS_ERR(m->kaddr))
return PTR_ERR(m->kaddr);
return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}
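/*
 * Layout example (numbers for illustration): with ebase % 32 == 8,
 * the first compacted_4b_initial == (32 - 8) / 4 == 6 four-byte packs
 * pad the index area up to a 32-byte boundary; a multiple-of-16 run
 * of two-byte (compacted_2b) packs follows if advertised, and any
 * remaining lclusters use four-byte packs again.
 */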
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn, bool lookahead)
{
switch (EROFS_I(m->inode)->datalayout) {
case EROFS_INODE_COMPRESSED_FULL:
return z_erofs_load_full_lcluster(m, lcn);
case EROFS_INODE_COMPRESSED_COMPACT:
return z_erofs_load_compact_lcluster(m, lcn, lookahead);
default:
return -EINVAL;
}
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance)
{
struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
const unsigned int lclusterbits = vi->z_logical_clusterbits;
while (m->lcn >= lookback_distance) {
unsigned long lcn = m->lcn - lookback_distance;
int err;
err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
switch (m->type) {
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
lookback_distance = m->delta[0];
if (!lookback_distance)
goto err_bogus;
continue;
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
case Z_EROFS_LCLUSTER_TYPE_HEAD2:
m->headtype = m->type;
m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
return 0;
default:
erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
m->type, lcn, vi->nid);
DBG_BUGON(1);
return -EOPNOTSUPP;
}
}
err_bogus:
erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
lookback_distance, m->lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned int initial_lcn)
{
struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
struct erofs_map_blocks *const map = m->map;
const unsigned int lclusterbits = vi->z_logical_clusterbits;
unsigned long lcn;
int err;
DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
DBG_BUGON(m->type != m->headtype);
if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
map->m_plen = 1ULL << lclusterbits;
return 0;
}
lcn = m->lcn + 1;
if (m->compressedblks)
goto out;
err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it means at least it mustn't be
	 * CBLKCNT, or an internal implementation error is detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in debugging mode only so that developers notice it.
	 */
DBG_BUGON(lcn == initial_lcn &&
m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
switch (m->type) {
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
case Z_EROFS_LCLUSTER_TYPE_HEAD2:
/*
* if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
* rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
*/
m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
break;
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
goto err_bonus_cblkcnt;
if (m->compressedblks)
break;
fallthrough;
default:
erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn,
vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
out:
map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
err_bonus_cblkcnt:
erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
struct inode *inode = m->inode;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_map_blocks *map = m->map;
unsigned int lclusterbits = vi->z_logical_clusterbits;
u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
int err;
do {
/* handle the last EOF pcluster (no next HEAD lcluster) */
if ((lcn << lclusterbits) >= inode->i_size) {
map->m_llen = inode->i_size - map->m_la;
return 0;
}
err = z_erofs_load_lcluster_from_disk(m, lcn, true);
if (err)
return err;
if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
DBG_BUGON(!m->delta[1] &&
m->clusterofs != 1 << lclusterbits);
} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
/* go on until the next HEAD lcluster */
if (lcn != headlcn)
break;
m->delta[1] = 1;
} else {
erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
m->type, lcn, vi->nid);
DBG_BUGON(1);
return -EOPNOTSUPP;
}
lcn += m->delta[1];
} while (m->delta[1]);
map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
return 0;
}
static int z_erofs_do_map_blocks(struct inode *inode,
struct erofs_map_blocks *map, int flags)
{
struct erofs_inode *const vi = EROFS_I(inode);
bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
struct z_erofs_maprecorder m = {
.inode = inode,
.map = map,
};
int err = 0;
unsigned int lclusterbits, endoff;
unsigned long initial_lcn;
unsigned long long ofs, end;
lclusterbits = vi->z_logical_clusterbits;
ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
initial_lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
if (err)
goto unmap_out;
if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
vi->z_idataoff = m.nextpackoff;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
end = (m.lcn + 1ULL) << lclusterbits;
switch (m.type) {
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
case Z_EROFS_LCLUSTER_TYPE_HEAD2:
if (endoff >= m.clusterofs) {
m.headtype = m.type;
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
/*
* For ztailpacking files, in order to inline data more
* effectively, special EOF lclusters are now supported
* which can have three parts at most.
*/
if (ztailpacking && end > inode->i_size)
end = inode->i_size;
break;
}
/* m.lcn should be >= 1 if endoff < m.clusterofs */
if (!m.lcn) {
erofs_err(inode->i_sb,
"invalid logical cluster 0 at nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto unmap_out;
}
end = (m.lcn << lclusterbits) | m.clusterofs;
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1;
fallthrough;
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
goto unmap_out;
break;
default:
erofs_err(inode->i_sb,
"unknown type %u @ offset %llu of nid %llu",
m.type, ofs, vi->nid);
err = -EOPNOTSUPP;
goto unmap_out;
}
if (m.partialref)
map->m_flags |= EROFS_MAP_PARTIAL_REF;
map->m_llen = end - map->m_la;
if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
vi->z_tailextent_headlcn = m.lcn;
/* for non-compact indexes, fragmentoff is 64 bits */
if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
vi->z_fragmentoff |= (u64)m.pblk << 32;
}
if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_META;
map->m_pa = vi->z_idataoff;
map->m_plen = vi->z_idata_size;
} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_FRAGMENT;
} else {
map->m_pa = erofs_pos(inode->i_sb, m.pblk);
err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
if (err)
goto unmap_out;
}
if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
if (map->m_llen > map->m_plen) {
DBG_BUGON(1);
err = -EFSCORRUPTED;
goto unmap_out;
}
if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
map->m_algorithmformat =
Z_EROFS_COMPRESSION_INTERLACED;
else
map->m_algorithmformat =
Z_EROFS_COMPRESSION_SHIFTED;
} else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
map->m_algorithmformat = vi->z_algorithmtype[1];
} else {
map->m_algorithmformat = vi->z_algorithmtype[0];
}
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
(map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE) &&
map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
map->m_flags |= EROFS_MAP_FULL_MAPPED;
}
unmap_out:
erofs_unmap_metabuf(&m.map->buf);
return err;
}
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
int err, headnr;
erofs_off_t pos;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
void *kaddr;
struct z_erofs_map_header *h;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
/*
* paired with smp_mb() at the end of the function to ensure
* fields will only be observed after the bit is set.
*/
smp_mb();
return 0;
}
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
err = 0;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
goto out_unlock;
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out_unlock;
}
h = kaddr + erofs_blkoff(sb, pos);
	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
	 */
if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
vi->z_tailextent_headlcn = 0;
goto done;
}
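	/*
	 * e.g. (illustrative) for the fragment case above: a raw LE
	 * header of 0x8000000000001000 has bit 63 set, so the whole
	 * file lives in the packed inode and z_fragmentoff ==
	 * 0x8000000000001000 ^ (1ULL << 63) == 0x1000.
	 */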
vi->z_advise = le16_to_cpu(h->h_advise);
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
headnr = 0;
if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
err = -EOPNOTSUPP;
goto out_put_metabuf;
}
vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_idata_size = le16_to_cpu(h->h_idata_size);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (!map.m_plen ||
erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
erofs_err(sb, "invalid tail-packing pclustersize %llu",
map.m_plen);
err = -EFSCORRUPTED;
}
if (err < 0)
goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
!(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
struct erofs_map_blocks map = {
.buf = __EROFS_BUF_INITIALIZER
};
vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
err = z_erofs_do_map_blocks(inode, &map,
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (err < 0)
goto out_put_metabuf;
}
done:
/* paired with smp_mb() at the beginning of the function */
smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
erofs_put_metabuf(&buf);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
return err;
}
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags)
{
struct erofs_inode *const vi = EROFS_I(inode);
int err = 0;
trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
/* when trying to read beyond EOF, leave it unmapped */
if (map->m_la >= inode->i_size) {
map->m_llen = map->m_la + 1 - inode->i_size;
map->m_la = inode->i_size;
map->m_flags = 0;
goto out;
}
err = z_erofs_fill_inode_lazy(inode);
if (err)
goto out;
if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
!vi->z_tailextent_headlcn) {
map->m_la = 0;
map->m_llen = inode->i_size;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
EROFS_MAP_FRAGMENT;
goto out;
}
err = z_erofs_do_map_blocks(inode, map, flags);
out:
trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
return err;
}
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
int ret;
struct erofs_map_blocks map = { .m_la = offset };
ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
erofs_put_metabuf(&map.buf);
if (ret < 0)
return ret;
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = map.m_la;
iomap->length = map.m_llen;
if (map.m_flags & EROFS_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
IOMAP_NULL_ADDR : map.m_pa;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, yet we need to do it as below; otherwise, iomap
		 * itself will get into an endless loop past EOF.
		 *
		 * Calculate the effective offset by subtracting the extent
		 * start (map.m_la) from the requested offset, and add it to
		 * the length. (NB: offset >= map.m_la always.)
		 */
if (iomap->offset >= inode->i_size)
iomap->length = length + offset - map.m_la;
}
iomap->flags = 0;
return 0;
}
const struct iomap_ops z_erofs_iomap_report_ops = {
.iomap_begin = z_erofs_iomap_begin_report,
};
| linux-master | fs/erofs/zmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/xz.h>
#include <linux/module.h>
#include "compress.h"
struct z_erofs_lzma {
struct z_erofs_lzma *next;
struct xz_dec_microlzma *state;
struct xz_buf buf;
u8 bounce[PAGE_SIZE];
};
/* considering LZMA performance, there's no need for a lockless list for now */
static DEFINE_SPINLOCK(z_erofs_lzma_lock);
static unsigned int z_erofs_lzma_max_dictsize;
static unsigned int z_erofs_lzma_nstrms, z_erofs_lzma_avail_strms;
static struct z_erofs_lzma *z_erofs_lzma_head;
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);
void z_erofs_lzma_exit(void)
{
/* there should be no running fs instance */
while (z_erofs_lzma_avail_strms) {
struct z_erofs_lzma *strm;
spin_lock(&z_erofs_lzma_lock);
strm = z_erofs_lzma_head;
if (!strm) {
spin_unlock(&z_erofs_lzma_lock);
DBG_BUGON(1);
return;
}
z_erofs_lzma_head = NULL;
spin_unlock(&z_erofs_lzma_lock);
while (strm) {
struct z_erofs_lzma *n = strm->next;
if (strm->state)
xz_dec_microlzma_end(strm->state);
kfree(strm);
--z_erofs_lzma_avail_strms;
strm = n;
}
}
}
int __init z_erofs_lzma_init(void)
{
unsigned int i;
/* by default, use # of possible CPUs instead */
if (!z_erofs_lzma_nstrms)
z_erofs_lzma_nstrms = num_possible_cpus();
for (i = 0; i < z_erofs_lzma_nstrms; ++i) {
struct z_erofs_lzma *strm = kzalloc(sizeof(*strm), GFP_KERNEL);
if (!strm) {
z_erofs_lzma_exit();
return -ENOMEM;
}
spin_lock(&z_erofs_lzma_lock);
strm->next = z_erofs_lzma_head;
z_erofs_lzma_head = strm;
spin_unlock(&z_erofs_lzma_lock);
++z_erofs_lzma_avail_strms;
}
return 0;
}
int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lzma_cfgs *lzma, int size)
{
static DEFINE_MUTEX(lzma_resize_mutex);
unsigned int dict_size, i;
struct z_erofs_lzma *strm, *head = NULL;
int err;
if (!lzma || size < sizeof(struct z_erofs_lzma_cfgs)) {
erofs_err(sb, "invalid lzma cfgs, size=%u", size);
return -EINVAL;
}
if (lzma->format) {
erofs_err(sb, "unidentified lzma format %x, please check kernel version",
le16_to_cpu(lzma->format));
return -EINVAL;
}
dict_size = le32_to_cpu(lzma->dict_size);
if (dict_size > Z_EROFS_LZMA_MAX_DICT_SIZE || dict_size < 4096) {
erofs_err(sb, "unsupported lzma dictionary size %u",
dict_size);
return -EINVAL;
}
erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!");
	/* in case two z_erofs_load_lzma_config() calls race, to avoid deadlock */
mutex_lock(&lzma_resize_mutex);
if (z_erofs_lzma_max_dictsize >= dict_size) {
mutex_unlock(&lzma_resize_mutex);
return 0;
}
/* 1. collect/isolate all streams for the following check */
for (i = 0; i < z_erofs_lzma_avail_strms; ++i) {
struct z_erofs_lzma *last;
again:
spin_lock(&z_erofs_lzma_lock);
strm = z_erofs_lzma_head;
if (!strm) {
spin_unlock(&z_erofs_lzma_lock);
wait_event(z_erofs_lzma_wq,
READ_ONCE(z_erofs_lzma_head));
goto again;
}
z_erofs_lzma_head = NULL;
spin_unlock(&z_erofs_lzma_lock);
for (last = strm; last->next; last = last->next)
++i;
last->next = head;
head = strm;
}
err = 0;
/* 2. walk each isolated stream and grow max dict_size if needed */
for (strm = head; strm; strm = strm->next) {
if (strm->state)
xz_dec_microlzma_end(strm->state);
strm->state = xz_dec_microlzma_alloc(XZ_PREALLOC, dict_size);
if (!strm->state)
err = -ENOMEM;
}
/* 3. push back all to the global list and update max dict_size */
spin_lock(&z_erofs_lzma_lock);
DBG_BUGON(z_erofs_lzma_head);
z_erofs_lzma_head = head;
spin_unlock(&z_erofs_lzma_lock);
wake_up_all(&z_erofs_lzma_wq);
z_erofs_lzma_max_dictsize = dict_size;
mutex_unlock(&lzma_resize_mutex);
return err;
}
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int nrpages_in =
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
unsigned int inlen, outlen, pageofs;
struct z_erofs_lzma *strm;
u8 *kin;
bool bounced = false;
int no, ni, j, err = 0;
/* 1. get the exact LZMA compressed size */
kin = kmap(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
rq->sb->s_blocksize - rq->pageofs_in));
if (err) {
kunmap(*rq->in);
return err;
}
/* 2. get an available lzma context */
again:
spin_lock(&z_erofs_lzma_lock);
strm = z_erofs_lzma_head;
if (!strm) {
spin_unlock(&z_erofs_lzma_lock);
wait_event(z_erofs_lzma_wq, READ_ONCE(z_erofs_lzma_head));
goto again;
}
z_erofs_lzma_head = strm->next;
spin_unlock(&z_erofs_lzma_lock);
/* 3. multi-call decompress */
inlen = rq->inputsize;
outlen = rq->outputsize;
xz_dec_microlzma_reset(strm->state, inlen, outlen,
!rq->partial_decoding);
pageofs = rq->pageofs_out;
strm->buf.in = kin + rq->pageofs_in;
strm->buf.in_pos = 0;
strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in);
inlen -= strm->buf.in_size;
strm->buf.out = NULL;
strm->buf.out_pos = 0;
strm->buf.out_size = 0;
for (ni = 0, no = -1;;) {
enum xz_ret xz_err;
if (strm->buf.out_pos == strm->buf.out_size) {
if (strm->buf.out) {
kunmap(rq->out[no]);
strm->buf.out = NULL;
}
if (++no >= nrpages_out || !outlen) {
erofs_err(rq->sb, "decompressed buf out of bound");
err = -EFSCORRUPTED;
break;
}
strm->buf.out_pos = 0;
strm->buf.out_size = min_t(u32, outlen,
PAGE_SIZE - pageofs);
outlen -= strm->buf.out_size;
if (!rq->out[no] && rq->fillgaps) /* deduped */
rq->out[no] = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
if (rq->out[no])
strm->buf.out = kmap(rq->out[no]) + pageofs;
pageofs = 0;
} else if (strm->buf.in_pos == strm->buf.in_size) {
kunmap(rq->in[ni]);
if (++ni >= nrpages_in || !inlen) {
erofs_err(rq->sb, "compressed buf out of bound");
err = -EFSCORRUPTED;
break;
}
strm->buf.in_pos = 0;
strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
inlen -= strm->buf.in_size;
kin = kmap(rq->in[ni]);
strm->buf.in = kin;
bounced = false;
}
		/*
		 * Handle overlapping: use the bounce buffer if the compressed
		 * data is still being processed; otherwise, use short-lived
		 * pages from the on-stack pagepool, which are shared within
		 * the same request.
		 */
if (!bounced && rq->out[no] == rq->in[ni]) {
memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
strm->buf.in = strm->bounce;
bounced = true;
}
for (j = ni + 1; j < nrpages_in; ++j) {
struct page *tmppage;
if (rq->out[no] != rq->in[j])
continue;
DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
rq->in[j]));
tmppage = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);
if (xz_err != XZ_OK) {
if (xz_err == XZ_STREAM_END && !outlen)
break;
erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]",
xz_err, rq->inputsize, rq->outputsize);
err = -EFSCORRUPTED;
break;
}
}
if (no < nrpages_out && strm->buf.out)
kunmap(rq->out[no]);
if (ni < nrpages_in)
kunmap(rq->in[ni]);
/* 4. push back LZMA stream context to the global list */
spin_lock(&z_erofs_lzma_lock);
strm->next = z_erofs_lzma_head;
z_erofs_lzma_head = strm;
spin_unlock(&z_erofs_lzma_lock);
wake_up(&z_erofs_lzma_wq);
return err;
}
| linux-master | fs/erofs/decompressor_lzma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Gao Xiang <[email protected]>
*
 * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
 * per-CPU virtual memory (in pages) in advance to store such in-place I/O
 * data if in-place decompression fails (e.g. due to an unmet in-place
 * margin).
*/
#include "internal.h"
struct erofs_pcpubuf {
raw_spinlock_t lock;
void *ptr;
struct page **pages;
unsigned int nrpages;
};
static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
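/*
 * An illustrative usage sketch (not taken from in-tree callers): pair
 * erofs_get_pcpubuf() with erofs_put_pcpubuf() and don't sleep in
 * between, since preemption is disabled and pcb->lock is held:
 *
 *	void *buf = erofs_get_pcpubuf(nrpages);
 *
 *	if (buf) {
 *		memcpy(buf, src, len);
 *		erofs_put_pcpubuf(buf);
 *	}
 */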
void *erofs_get_pcpubuf(unsigned int requiredpages)
__acquires(pcb->lock)
{
struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
raw_spin_lock(&pcb->lock);
/* check if the per-CPU buffer is too small */
if (requiredpages > pcb->nrpages) {
raw_spin_unlock(&pcb->lock);
put_cpu_var(erofs_pcb);
/* (for sparse checker) pretend pcb->lock is still taken */
__acquire(pcb->lock);
return NULL;
}
return pcb->ptr;
}
void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
{
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
DBG_BUGON(pcb->ptr != ptr);
raw_spin_unlock(&pcb->lock);
put_cpu_var(erofs_pcb);
}
/* the next step: support per-CPU page buffers hotplug */
int erofs_pcpubuf_growsize(unsigned int nrpages)
{
static DEFINE_MUTEX(pcb_resize_mutex);
static unsigned int pcb_nrpages;
struct page *pagepool = NULL;
int delta, cpu, ret, i;
mutex_lock(&pcb_resize_mutex);
delta = nrpages - pcb_nrpages;
ret = 0;
	/* avoid shrinking the pcpubuf, since it's unknown how many fses rely on it */
if (delta <= 0)
goto out;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
struct page **pages, **oldpages;
void *ptr, *old_ptr;
pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
break;
}
for (i = 0; i < nrpages; ++i) {
pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
if (!pages[i]) {
ret = -ENOMEM;
oldpages = pages;
goto free_pagearray;
}
}
ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
if (!ptr) {
ret = -ENOMEM;
oldpages = pages;
goto free_pagearray;
}
raw_spin_lock(&pcb->lock);
old_ptr = pcb->ptr;
pcb->ptr = ptr;
oldpages = pcb->pages;
pcb->pages = pages;
i = pcb->nrpages;
pcb->nrpages = nrpages;
raw_spin_unlock(&pcb->lock);
if (!oldpages) {
DBG_BUGON(old_ptr);
continue;
}
if (old_ptr)
vunmap(old_ptr);
free_pagearray:
while (i)
erofs_pagepool_add(&pagepool, oldpages[--i]);
kfree(oldpages);
if (ret)
break;
}
pcb_nrpages = nrpages;
erofs_release_pages(&pagepool);
out:
mutex_unlock(&pcb_resize_mutex);
return ret;
}
void __init erofs_pcpubuf_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
raw_spin_lock_init(&pcb->lock);
}
}
void erofs_pcpubuf_exit(void)
{
int cpu, i;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
if (pcb->ptr) {
vunmap(pcb->ptr);
pcb->ptr = NULL;
}
if (!pcb->pages)
continue;
for (i = 0; i < pcb->nrpages; ++i)
if (pcb->pages[i])
put_page(pcb->pages[i]);
kfree(pcb->pages);
pcb->pages = NULL;
}
}
| linux-master | fs/erofs/pcpubuf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#include "xattr.h"
#include <trace/events/erofs.h>
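/*
 * EROFS keeps two on-disk inode layouts: a compact form that reuses the
 * superblock-wide build time as its timestamp, and an extended form with
 * per-inode 64-bit sizes and timestamps.  erofs_read_inode() below
 * decodes either one, copying an extended inode through a temporary
 * buffer when it straddles a block boundary.
 */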
static void *erofs_read_inode(struct erofs_buf *buf,
struct inode *inode, unsigned int *ofs)
{
struct super_block *sb = inode->i_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_inode *vi = EROFS_I(inode);
const erofs_off_t inode_loc = erofs_iloc(inode);
erofs_blk_t blkaddr, nblks = 0;
void *kaddr;
struct erofs_inode_compact *dic;
struct erofs_inode_extended *die, *copied = NULL;
unsigned int ifmt;
int err;
blkaddr = erofs_blknr(sb, inode_loc);
*ofs = erofs_blkoff(sb, inode_loc);
kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
if (IS_ERR(kaddr)) {
erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
vi->nid, PTR_ERR(kaddr));
return kaddr;
}
dic = kaddr + *ofs;
ifmt = le16_to_cpu(dic->i_format);
if (ifmt & ~EROFS_I_ALL) {
erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
ifmt, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
vi->datalayout = erofs_inode_datalayout(ifmt);
if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
vi->datalayout, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
switch (erofs_inode_version(ifmt)) {
case EROFS_INODE_LAYOUT_EXTENDED:
vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
if (*ofs + vi->inode_isize <= sb->s_blocksize) {
*ofs += vi->inode_isize;
die = (struct erofs_inode_extended *)dic;
} else {
const unsigned int gotten = sb->s_blocksize - *ofs;
copied = kmalloc(vi->inode_isize, GFP_NOFS);
if (!copied) {
err = -ENOMEM;
goto err_out;
}
memcpy(copied, dic, gotten);
kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
EROFS_KMAP);
if (IS_ERR(kaddr)) {
erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
vi->nid, PTR_ERR(kaddr));
kfree(copied);
return kaddr;
}
*ofs = vi->inode_isize - gotten;
memcpy((u8 *)copied + gotten, kaddr, *ofs);
die = copied;
}
vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
inode->i_mode = le16_to_cpu(die->i_mode);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
case S_IFLNK:
vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
break;
case S_IFCHR:
case S_IFBLK:
inode->i_rdev =
new_decode_dev(le32_to_cpu(die->i_u.rdev));
break;
case S_IFIFO:
case S_IFSOCK:
inode->i_rdev = 0;
break;
default:
goto bogusimode;
}
i_uid_write(inode, le32_to_cpu(die->i_uid));
i_gid_write(inode, le32_to_cpu(die->i_gid));
set_nlink(inode, le32_to_cpu(die->i_nlink));
/* extended inode has its own timestamp */
inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
le32_to_cpu(die->i_mtime_nsec));
inode->i_size = le64_to_cpu(die->i_size);
/* total blocks for compressed files */
if (erofs_inode_is_data_compressed(vi->datalayout))
nblks = le32_to_cpu(die->i_u.compressed_blocks);
else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
/* fill chunked inode summary info */
vi->chunkformat = le16_to_cpu(die->i_u.c.format);
kfree(copied);
copied = NULL;
break;
case EROFS_INODE_LAYOUT_COMPACT:
vi->inode_isize = sizeof(struct erofs_inode_compact);
*ofs += vi->inode_isize;
vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
inode->i_mode = le16_to_cpu(dic->i_mode);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
case S_IFLNK:
vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
break;
case S_IFCHR:
case S_IFBLK:
inode->i_rdev =
new_decode_dev(le32_to_cpu(dic->i_u.rdev));
break;
case S_IFIFO:
case S_IFSOCK:
inode->i_rdev = 0;
break;
default:
goto bogusimode;
}
i_uid_write(inode, le16_to_cpu(dic->i_uid));
i_gid_write(inode, le16_to_cpu(dic->i_gid));
set_nlink(inode, le16_to_cpu(dic->i_nlink));
/* use build time for compact inodes */
inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);
inode->i_size = le32_to_cpu(dic->i_size);
if (erofs_inode_is_data_compressed(vi->datalayout))
nblks = le32_to_cpu(dic->i_u.compressed_blocks);
else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
break;
default:
erofs_err(inode->i_sb,
"unsupported on-disk inode version %u of nid %llu",
erofs_inode_version(ifmt), vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
erofs_err(inode->i_sb,
"unsupported chunk format %x of nid %llu",
vi->chunkformat, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
inode->i_mtime = inode->i_atime = inode_get_ctime(inode);
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
(vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
vi->datalayout == EROFS_INODE_CHUNK_BASED))
inode->i_flags |= S_DAX;
if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
else
inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
return kaddr;
bogusimode:
erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
inode->i_mode, vi->nid);
err = -EFSCORRUPTED;
err_out:
DBG_BUGON(1);
kfree(copied);
erofs_put_metabuf(buf);
return ERR_PTR(err);
}
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
unsigned int m_pofs)
{
struct erofs_inode *vi = EROFS_I(inode);
unsigned int bsz = i_blocksize(inode);
char *lnk;
/* if it cannot be handled with fast symlink scheme */
if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
inode->i_size >= bsz || inode->i_size < 0) {
inode->i_op = &erofs_symlink_iops;
return 0;
}
lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
if (!lnk)
return -ENOMEM;
m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
if (m_pofs + inode->i_size > bsz) {
kfree(lnk);
erofs_err(inode->i_sb,
"inline data cross block boundary @ nid %llu",
vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
memcpy(lnk, kaddr + m_pofs, inode->i_size);
lnk[inode->i_size] = '\0';
inode->i_link = lnk;
inode->i_op = &erofs_fast_symlink_iops;
return 0;
}
static int erofs_fill_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
void *kaddr;
unsigned int ofs;
int err = 0;
trace_erofs_fill_inode(inode);
/* read inode base data from disk */
kaddr = erofs_read_inode(&buf, inode, &ofs);
if (IS_ERR(kaddr))
return PTR_ERR(kaddr);
/* setup the new inode */
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &erofs_generic_iops;
if (erofs_inode_is_data_compressed(vi->datalayout))
inode->i_fop = &generic_ro_fops;
else
inode->i_fop = &erofs_file_fops;
break;
case S_IFDIR:
inode->i_op = &erofs_dir_iops;
inode->i_fop = &erofs_dir_fops;
inode_nohighmem(inode);
break;
case S_IFLNK:
err = erofs_fill_symlink(inode, kaddr, ofs);
if (err)
goto out_unlock;
inode_nohighmem(inode);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
inode->i_op = &erofs_generic_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
goto out_unlock;
default:
err = -EFSCORRUPTED;
goto out_unlock;
}
if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
if (!erofs_is_fscache_mode(inode->i_sb) &&
inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
inode->i_mapping->a_ops = &z_erofs_aops;
err = 0;
goto out_unlock;
}
#endif
err = -EOPNOTSUPP;
goto out_unlock;
}
inode->i_mapping->a_ops = &erofs_raw_access_aops;
mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (erofs_is_fscache_mode(inode->i_sb))
inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif
out_unlock:
erofs_put_metabuf(&buf);
return err;
}
/*
 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
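 *
 * For example (hypothetical values): with a 32-bit ino_t, nid
 * 0x200000005 folds to 0x5 ^ 0x2 = 0x7.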
*/
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
ino_t ino = (ino_t)nid;
if (sizeof(ino_t) < sizeof(erofs_nid_t))
ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
return ino;
}
static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}
static int erofs_iget5_set(struct inode *inode, void *opaque)
{
const erofs_nid_t nid = *(erofs_nid_t *)opaque;
inode->i_ino = erofs_squash_ino(nid);
EROFS_I(inode)->nid = nid;
return 0;
}
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
struct inode *inode;
inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
erofs_iget5_set, &nid);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
int err = erofs_fill_inode(inode);
if (err) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
}
return inode;
}
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
struct inode *const inode = d_inode(path->dentry);
if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
stat->attributes |= STATX_ATTR_COMPRESSED;
stat->attributes |= STATX_ATTR_IMMUTABLE;
stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE);
generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
const struct inode_operations erofs_generic_iops = {
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
.get_inode_acl = erofs_get_acl,
.fiemap = erofs_fiemap,
};
const struct inode_operations erofs_symlink_iops = {
.get_link = page_get_link,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
.get_inode_acl = erofs_get_acl,
};
const struct inode_operations erofs_fast_symlink_iops = {
.get_link = simple_get_link,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
.get_inode_acl = erofs_get_acl,
};
| linux-master | fs/erofs/inode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
*/
#include "internal.h"
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
struct page *page = *pagepool;
if (page) {
DBG_BUGON(page_ref_count(page) != 1);
*pagepool = (struct page *)page_private(page);
} else {
page = alloc_page(gfp);
}
return page;
}
void erofs_release_pages(struct page **pagepool)
{
while (*pagepool) {
struct page *page = *pagepool;
*pagepool = (struct page *)page_private(page);
put_page(page);
}
}
#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
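/*
 * Workgroup lifetime (a summary of the lockref logic below): a workgroup
 * whose lockref count drops to zero stays cached and is accounted in
 * erofs_global_shrink_cnt so that the shrinker can reclaim it; taking a
 * new reference on such a zero-count workgroup removes it from the
 * shrink accounting again.
 */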
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
if (lockref_get_not_zero(&grp->lockref))
return true;
spin_lock(&grp->lockref.lock);
if (__lockref_is_dead(&grp->lockref)) {
spin_unlock(&grp->lockref.lock);
return false;
}
if (!grp->lockref.count++)
atomic_long_dec(&erofs_global_shrink_cnt);
spin_unlock(&grp->lockref.lock);
return true;
}
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
pgoff_t index)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_workgroup *grp;
repeat:
rcu_read_lock();
grp = xa_load(&sbi->managed_pslots, index);
if (grp) {
if (!erofs_workgroup_get(grp)) {
/* prefer to relax rcu read side */
rcu_read_unlock();
goto repeat;
}
DBG_BUGON(index != grp->index);
}
rcu_read_unlock();
return grp;
}
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
struct erofs_workgroup *grp)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
struct erofs_workgroup *pre;
	/*
	 * Bump up the reference count before making this visible to others
	 * in the XArray, in order to avoid a potential UAF when not
	 * serialized by xa_lock.
	 */
lockref_get(&grp->lockref);
repeat:
xa_lock(&sbi->managed_pslots);
pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
NULL, grp, GFP_NOFS);
if (pre) {
if (xa_is_err(pre)) {
pre = ERR_PTR(xa_err(pre));
} else if (!erofs_workgroup_get(pre)) {
/* try to legitimize the current in-tree one */
xa_unlock(&sbi->managed_pslots);
cond_resched();
goto repeat;
}
lockref_put_return(&grp->lockref);
grp = pre;
}
xa_unlock(&sbi->managed_pslots);
return grp;
}
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
atomic_long_dec(&erofs_global_shrink_cnt);
erofs_workgroup_free_rcu(grp);
}
void erofs_workgroup_put(struct erofs_workgroup *grp)
{
if (lockref_put_or_lock(&grp->lockref))
return;
DBG_BUGON(__lockref_is_dead(&grp->lockref));
if (grp->lockref.count == 1)
atomic_long_inc(&erofs_global_shrink_cnt);
--grp->lockref.count;
spin_unlock(&grp->lockref.lock);
}
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp)
{
int free = false;
spin_lock(&grp->lockref.lock);
if (grp->lockref.count)
goto out;
/*
* Note that all cached pages should be detached before deleted from
* the XArray. Otherwise some cached pages could be still attached to
* the orphan old workgroup when the new one is available in the tree.
*/
if (erofs_try_to_free_all_cached_pages(sbi, grp))
goto out;
	/*
	 * It's impossible to fail after the workgroup is frozen;
	 * however, in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
lockref_mark_dead(&grp->lockref);
free = true;
out:
spin_unlock(&grp->lockref.lock);
if (free)
__erofs_workgroup_free(grp);
return free;
}
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned long nr_shrink)
{
struct erofs_workgroup *grp;
unsigned int freed = 0;
unsigned long index;
xa_lock(&sbi->managed_pslots);
xa_for_each(&sbi->managed_pslots, index, grp) {
/* try to shrink each valid workgroup */
if (!erofs_try_to_release_workgroup(sbi, grp))
continue;
xa_unlock(&sbi->managed_pslots);
++freed;
if (!--nr_shrink)
return freed;
xa_lock(&sbi->managed_pslots);
}
xa_unlock(&sbi->managed_pslots);
return freed;
}
/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;
/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
void erofs_shrinker_register(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
mutex_init(&sbi->umount_mutex);
spin_lock(&erofs_sb_list_lock);
list_add(&sbi->list, &erofs_sb_list);
spin_unlock(&erofs_sb_list_lock);
}
void erofs_shrinker_unregister(struct super_block *sb)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
/* clean up all remaining workgroups in memory */
erofs_shrink_workstation(sbi, ~0UL);
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
spin_unlock(&erofs_sb_list_lock);
mutex_unlock(&sbi->umount_mutex);
}
static unsigned long erofs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
return atomic_long_read(&erofs_global_shrink_cnt);
}
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct erofs_sb_info *sbi;
struct list_head *p;
unsigned long nr = sc->nr_to_scan;
unsigned int run_no;
unsigned long freed = 0;
spin_lock(&erofs_sb_list_lock);
do {
run_no = ++shrinker_run_no;
} while (run_no == 0);
/* Iterate over all mounted superblocks and try to shrink them */
p = erofs_sb_list.next;
while (p != &erofs_sb_list) {
sbi = list_entry(p, struct erofs_sb_info, list);
/*
* We move the ones we do to the end of the list, so we stop
* when we see one we have already done.
*/
if (sbi->shrinker_run_no == run_no)
break;
if (!mutex_trylock(&sbi->umount_mutex)) {
p = p->next;
continue;
}
spin_unlock(&erofs_sb_list_lock);
sbi->shrinker_run_no = run_no;
freed += erofs_shrink_workstation(sbi, nr - freed);
spin_lock(&erofs_sb_list_lock);
/* Get the next list element before we move this one */
p = p->next;
/*
* Move this one to the end of the list to provide some
* fairness.
*/
list_move_tail(&sbi->list, &erofs_sb_list);
mutex_unlock(&sbi->umount_mutex);
if (freed >= nr)
break;
}
spin_unlock(&erofs_sb_list_lock);
return freed;
}
static struct shrinker erofs_shrinker_info = {
.scan_objects = erofs_shrink_scan,
.count_objects = erofs_shrink_count,
.seeks = DEFAULT_SEEKS,
};
int __init erofs_init_shrinker(void)
{
return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
}
void erofs_exit_shrinker(void)
{
unregister_shrinker(&erofs_shrinker_info);
}
#endif /* CONFIG_EROFS_FS_ZIP */
| linux-master | fs/erofs/utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2022, Alibaba Cloud
*/
#include "xattr.h"
#include <trace/events/erofs.h>
struct erofs_qstr {
const unsigned char *name;
const unsigned char *end;
};
/* assumes that qn->end is accurate and that qn has a trailing '\0' */
static inline int erofs_dirnamecmp(const struct erofs_qstr *qn,
const struct erofs_qstr *qd,
unsigned int *matched)
{
unsigned int i = *matched;
	/*
	 * For on-disk errors, only BUG_ON in debugging mode; otherwise,
	 * return 1 to simply skip the invalid name and go on (in
	 * consideration of lookup performance).
	 */
DBG_BUGON(qd->name > qd->end);
	/*
	 * qd may not have a trailing '\0'; however, accesses below
	 * qd->end are absolutely safe.
	 */
while (qd->name + i < qd->end && qd->name[i] != '\0') {
if (qn->name[i] != qd->name[i]) {
*matched = i;
return qn->name[i] > qd->name[i] ? 1 : -1;
}
++i;
}
*matched = i;
/* See comments in __d_alloc on the terminating NUL character */
return qn->name[i] == '\0' ? 0 : 1;
}
#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
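/*
 * Directory lookup is a two-level binary search: erofs_find_target_block()
 * below bisects over directory blocks, then find_target_dirent() bisects
 * over the dirents inside the chosen block.  startprfx/endprfx track the
 * longest prefixes already known to match at the lower/upper bounds, so
 * each comparison can skip min(startprfx, endprfx) leading bytes.
 */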
static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
u8 *data,
unsigned int dirblksize,
const int ndirents)
{
int head, back;
unsigned int startprfx, endprfx;
struct erofs_dirent *const de = (struct erofs_dirent *)data;
/* since the 1st dirent has been evaluated previously */
head = 1;
back = ndirents - 1;
startprfx = endprfx = 0;
while (head <= back) {
const int mid = head + (back - head) / 2;
const int nameoff = nameoff_from_disk(de[mid].nameoff,
dirblksize);
unsigned int matched = min(startprfx, endprfx);
struct erofs_qstr dname = {
.name = data + nameoff,
.end = mid >= ndirents - 1 ?
data + dirblksize :
data + nameoff_from_disk(de[mid + 1].nameoff,
dirblksize)
};
		/* string comparison skipping the already-matched prefix */
int ret = erofs_dirnamecmp(name, &dname, &matched);
if (!ret) {
return de + mid;
} else if (ret > 0) {
head = mid + 1;
startprfx = matched;
} else {
back = mid - 1;
endprfx = matched;
}
}
return ERR_PTR(-ENOENT);
}
static void *erofs_find_target_block(struct erofs_buf *target,
struct inode *dir, struct erofs_qstr *name, int *_ndirents)
{
unsigned int bsz = i_blocksize(dir);
int head = 0, back = erofs_iblks(dir) - 1;
unsigned int startprfx = 0, endprfx = 0;
void *candidate = ERR_PTR(-ENOENT);
while (head <= back) {
const int mid = head + (back - head) / 2;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_dirent *de;
buf.inode = dir;
de = erofs_bread(&buf, mid, EROFS_KMAP);
if (!IS_ERR(de)) {
const int nameoff = nameoff_from_disk(de->nameoff, bsz);
const int ndirents = nameoff / sizeof(*de);
int diff;
unsigned int matched;
struct erofs_qstr dname;
if (!ndirents) {
erofs_put_metabuf(&buf);
erofs_err(dir->i_sb,
"corrupted dir block %d @ nid %llu",
mid, EROFS_I(dir)->nid);
DBG_BUGON(1);
de = ERR_PTR(-EFSCORRUPTED);
goto out;
}
matched = min(startprfx, endprfx);
dname.name = (u8 *)de + nameoff;
if (ndirents == 1)
dname.end = (u8 *)de + bsz;
else
dname.end = (u8 *)de +
nameoff_from_disk(de[1].nameoff, bsz);
			/* string comparison skipping the already-matched prefix */
diff = erofs_dirnamecmp(name, &dname, &matched);
if (!diff) {
*_ndirents = 0;
goto out;
} else if (diff > 0) {
head = mid + 1;
startprfx = matched;
if (!IS_ERR(candidate))
erofs_put_metabuf(target);
*target = buf;
candidate = de;
*_ndirents = ndirents;
} else {
erofs_put_metabuf(&buf);
back = mid - 1;
endprfx = matched;
}
continue;
}
out: /* release the candidate metabuf if one was kept */
if (!IS_ERR(candidate))
erofs_put_metabuf(target);
return de;
}
return candidate;
}
int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
unsigned int *d_type)
{
int ndirents;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_dirent *de;
struct erofs_qstr qn;
if (!dir->i_size)
return -ENOENT;
qn.name = name->name;
qn.end = name->name + name->len;
buf.inode = dir;
ndirents = 0;
de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
if (IS_ERR(de))
return PTR_ERR(de);
if (ndirents)
de = find_target_dirent(&qn, (u8 *)de, i_blocksize(dir),
ndirents);
if (!IS_ERR(de)) {
*nid = le64_to_cpu(de->nid);
*d_type = de->file_type;
}
erofs_put_metabuf(&buf);
return PTR_ERR_OR_ZERO(de);
}
static struct dentry *erofs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
int err;
erofs_nid_t nid;
unsigned int d_type;
struct inode *inode;
trace_erofs_lookup(dir, dentry, flags);
if (dentry->d_name.len > EROFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
if (err == -ENOENT)
/* negative dentry */
inode = NULL;
else if (err)
inode = ERR_PTR(err);
else
inode = erofs_iget(dir->i_sb, nid);
return d_splice_alias(inode, dentry);
}
const struct inode_operations erofs_dir_iops = {
.lookup = erofs_lookup,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
.get_inode_acl = erofs_get_acl,
.fiemap = erofs_fiemap,
};
| linux-master | fs/erofs/namei.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/
*/
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>
#ifndef LZ4_DISTANCE_MAX /* history window size */
#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
#endif
#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
#endif
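/*
 * A worked example for the margin above: for srcsize = 4096,
 * LZ4_DECOMPRESS_INPLACE_MARGIN() yields (4096 >> 8) + 32 = 48 bytes;
 * in-place decompression is only attempted when at least that much slack
 * remains past the decoded end (see the omargin check below).
 */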
struct z_erofs_lz4_decompress_ctx {
struct z_erofs_decompress_req *rq;
/* # of encoded, decoded pages */
unsigned int inpages, outpages;
/* decoded block total length (used for in-place decompression) */
unsigned int oend;
};
int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lz4_cfgs *lz4, int size)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
u16 distance;
if (lz4) {
if (size < sizeof(struct z_erofs_lz4_cfgs)) {
erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
return -EINVAL;
}
distance = le16_to_cpu(lz4->max_distance);
sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
if (!sbi->lz4.max_pclusterblks) {
sbi->lz4.max_pclusterblks = 1; /* reserved case */
} else if (sbi->lz4.max_pclusterblks >
erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
erofs_err(sb, "too large lz4 pclusterblks %u",
sbi->lz4.max_pclusterblks);
return -EINVAL;
}
} else {
distance = le16_to_cpu(dsb->u1.lz4_max_distance);
sbi->lz4.max_pclusterblks = 1;
}
sbi->lz4.max_distance_pages = distance ?
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
LZ4_MAX_DISTANCE_PAGES;
return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
struct page **pagepool)
{
struct z_erofs_decompress_req *rq = ctx->rq;
struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
BITS_PER_LONG)] = { 0 };
unsigned int lz4_max_distance_pages =
EROFS_SB(rq->sb)->lz4.max_distance_pages;
void *kaddr = NULL;
unsigned int i, j, top;
top = 0;
for (i = j = 0; i < ctx->outpages; ++i, ++j) {
struct page *const page = rq->out[i];
struct page *victim;
if (j >= lz4_max_distance_pages)
j = 0;
		/* the 'bounced' bits are only valid after a complete round */
if (!rq->fillgaps && test_bit(j, bounced)) {
DBG_BUGON(i < lz4_max_distance_pages);
DBG_BUGON(top >= lz4_max_distance_pages);
availables[top++] = rq->out[i - lz4_max_distance_pages];
}
if (page) {
__clear_bit(j, bounced);
if (!PageHighMem(page)) {
if (!i) {
kaddr = page_address(page);
continue;
}
if (kaddr &&
kaddr + PAGE_SIZE == page_address(page)) {
kaddr += PAGE_SIZE;
continue;
}
}
kaddr = NULL;
continue;
}
kaddr = NULL;
__set_bit(j, bounced);
if (top) {
victim = availables[--top];
get_page(victim);
} else {
victim = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
}
rq->out[i] = victim;
}
return kaddr ? 1 : 0;
}
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
void *inpage, unsigned int *inputmargin, int *maptype,
bool may_inplace)
{
struct z_erofs_decompress_req *rq = ctx->rq;
unsigned int omargin, total, i, j;
struct page **in;
void *src, *tmp;
if (rq->inplace_io) {
omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
if (rq->partial_decoding || !may_inplace ||
omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
goto docopy;
for (i = 0; i < ctx->inpages; ++i) {
DBG_BUGON(rq->in[i] == NULL);
for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
if (rq->out[j] == rq->in[i])
goto docopy;
}
}
if (ctx->inpages <= 1) {
*maptype = 0;
return inpage;
}
kunmap_local(inpage);
might_sleep();
src = erofs_vm_map_ram(rq->in, ctx->inpages);
if (!src)
return ERR_PTR(-ENOMEM);
*maptype = 1;
return src;
docopy:
	/* otherwise, copy the (possibly overlapped) compressed data to the per-CPU buffer */
in = rq->in;
src = erofs_get_pcpubuf(ctx->inpages);
if (!src) {
DBG_BUGON(1);
kunmap_local(inpage);
return ERR_PTR(-EFAULT);
}
tmp = src;
total = rq->inputsize;
while (total) {
unsigned int page_copycnt =
min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
if (!inpage)
inpage = kmap_local_page(*in);
memcpy(tmp, inpage + *inputmargin, page_copycnt);
kunmap_local(inpage);
inpage = NULL;
tmp += page_copycnt;
total -= page_copycnt;
++in;
*inputmargin = 0;
}
*maptype = 2;
return src;
}
/*
* Get the exact inputsize with zero_padding feature.
* - For LZ4, it should work if zero_padding feature is on (5.3+);
* - For MicroLZMA, it'd be enabled all the time.
*/
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
unsigned int padbufsize)
{
const char *padend;
padend = memchr_inv(padbuf, 0, padbufsize);
if (!padend)
return -EFSCORRUPTED;
rq->inputsize -= padend - padbuf;
rq->pageofs_in += padend - padbuf;
return 0;
}
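/*
 * A worked example with hypothetical on-disk bytes: with zero_padding,
 * the compressed data is preceded by zero bytes within the block, e.g.
 * padbuf = 00 00 00 5d .. makes memchr_inv() return &padbuf[3], so
 * rq->inputsize shrinks by 3 and rq->pageofs_in advances by 3.
 */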
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
u8 *out)
{
struct z_erofs_decompress_req *rq = ctx->rq;
bool support_0padding = false, may_inplace = false;
unsigned int inputmargin;
u8 *headpage, *src;
int ret, maptype;
DBG_BUGON(*rq->in == NULL);
headpage = kmap_local_page(*rq->in);
/* LZ4 decompression inplace is only safe if zero_padding is enabled */
if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
support_0padding = true;
ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
rq->sb->s_blocksize - rq->pageofs_in));
if (ret) {
kunmap_local(headpage);
return ret;
}
may_inplace = !((rq->pageofs_in + rq->inputsize) &
(rq->sb->s_blocksize - 1));
}
inputmargin = rq->pageofs_in;
src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
&maptype, may_inplace);
if (IS_ERR(src))
return PTR_ERR(src);
/* legacy format could compress extra data in a pcluster. */
if (rq->partial_decoding || !support_0padding)
ret = LZ4_decompress_safe_partial(src + inputmargin, out,
rq->inputsize, rq->outputsize, rq->outputsize);
else
ret = LZ4_decompress_safe(src + inputmargin, out,
rq->inputsize, rq->outputsize);
if (ret != rq->outputsize) {
erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
ret, rq->inputsize, inputmargin, rq->outputsize);
print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
16, 1, src + inputmargin, rq->inputsize, true);
print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
16, 1, out, rq->outputsize, true);
if (ret >= 0)
memset(out + ret, 0, rq->outputsize - ret);
ret = -EIO;
} else {
ret = 0;
}
if (maptype == 0) {
kunmap_local(headpage);
} else if (maptype == 1) {
vm_unmap_ram(src, ctx->inpages);
} else if (maptype == 2) {
erofs_put_pcpubuf(src);
} else {
DBG_BUGON(1);
return -EFAULT;
}
return ret;
}
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
struct z_erofs_lz4_decompress_ctx ctx;
unsigned int dst_maptype;
void *dst;
int ret;
ctx.rq = rq;
ctx.oend = rq->pageofs_out + rq->outputsize;
ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	/* an optimized fast path, currently only for non-bigpcluster cases */
if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
DBG_BUGON(!*rq->out);
dst = kmap_local_page(*rq->out);
dst_maptype = 0;
goto dstmap_out;
}
/* general decoding path which can be used for all cases */
ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
if (ret < 0) {
return ret;
} else if (ret > 0) {
dst = page_address(*rq->out);
dst_maptype = 1;
} else {
dst = erofs_vm_map_ram(rq->out, ctx.outpages);
if (!dst)
return -ENOMEM;
dst_maptype = 2;
}
dstmap_out:
ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
if (!dst_maptype)
kunmap_local(dst);
else if (dst_maptype == 2)
vm_unmap_ram(dst, ctx.outpages);
return ret;
}
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
const unsigned int outpages =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
PAGE_SIZE - rq->pageofs_out);
const unsigned int lefthalf = rq->outputsize - righthalf;
const unsigned int interlaced_offset =
rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
u8 *src;
if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
if (rq->out[0] == *rq->in) {
DBG_BUGON(rq->pageofs_out);
return 0;
}
src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
if (rq->out[0])
memcpy_to_page(rq->out[0], rq->pageofs_out,
src + interlaced_offset, righthalf);
if (outpages > inpages) {
DBG_BUGON(!rq->out[outpages - 1]);
if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
memcpy_to_page(rq->out[outpages - 1], 0, src +
(interlaced_offset ? 0 : righthalf),
lefthalf);
} else if (!interlaced_offset) {
memmove(src, src + righthalf, lefthalf);
flush_dcache_page(rq->in[inpages - 1]);
}
}
kunmap_local(src);
return 0;
}
const struct z_erofs_decompressor erofs_decompressors[] = {
[Z_EROFS_COMPRESSION_SHIFTED] = {
.decompress = z_erofs_transform_plain,
.name = "shifted"
},
[Z_EROFS_COMPRESSION_INTERLACED] = {
.decompress = z_erofs_transform_plain,
.name = "interlaced"
},
[Z_EROFS_COMPRESSION_LZ4] = {
.decompress = z_erofs_lz4_decompress,
.name = "lz4"
},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
[Z_EROFS_COMPRESSION_LZMA] = {
.decompress = z_erofs_lzma_decompress,
.name = "lzma"
},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
[Z_EROFS_COMPRESSION_DEFLATE] = {
.decompress = z_erofs_deflate_decompress,
.name = "deflate"
},
#endif
};
| linux-master | fs/erofs/decompressor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
if (buf->kmap_type == EROFS_KMAP)
kunmap_local(buf->base);
buf->base = NULL;
buf->kmap_type = EROFS_NO_KMAP;
}
void erofs_put_metabuf(struct erofs_buf *buf)
{
if (!buf->page)
return;
erofs_unmap_metabuf(buf);
put_page(buf->page);
buf->page = NULL;
}
/*
 * Derive the block size from inode->i_blkbits to stay compatible with the
 * anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
enum erofs_kmap_type type)
{
struct inode *inode = buf->inode;
erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
pgoff_t index = offset >> PAGE_SHIFT;
struct page *page = buf->page;
struct folio *folio;
unsigned int nofs_flag;
if (!page || page->index != index) {
erofs_put_metabuf(buf);
nofs_flag = memalloc_nofs_save();
folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(folio))
return folio;
/* should already be PageUptodate, no need to lock page */
page = folio_file_page(folio, index);
buf->page = page;
}
if (buf->kmap_type == EROFS_NO_KMAP) {
if (type == EROFS_KMAP)
buf->base = kmap_local_page(page);
buf->kmap_type = type;
} else if (buf->kmap_type != type) {
DBG_BUGON(1);
return ERR_PTR(-EFAULT);
}
if (type == EROFS_NO_KMAP)
return NULL;
return buf->base + (offset & ~PAGE_MASK);
}
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
if (erofs_is_fscache_mode(sb))
buf->inode = EROFS_SB(sb)->s_fscache->inode;
else
buf->inode = sb->s_bdev->bd_inode;
}
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
erofs_init_metabuf(buf, sb);
return erofs_bread(buf, blkaddr, type);
}
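/*
 * Typical metabuf usage (an illustrative sketch mirroring callers in
 * this file):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (!IS_ERR(ptr)) {
 *		... read metadata through ptr ...
 *		erofs_put_metabuf(&buf);
 *	}
 */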
static int erofs_map_blocks_flatmode(struct inode *inode,
struct erofs_map_blocks *map)
{
erofs_blk_t nblocks, lastblk;
u64 offset = map->m_la;
struct erofs_inode *vi = EROFS_I(inode);
struct super_block *sb = inode->i_sb;
bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
nblocks = erofs_iblks(inode);
lastblk = nblocks - tailendpacking;
/* there is no hole in flatmode */
map->m_flags = EROFS_MAP_MAPPED;
if (offset < erofs_pos(sb, lastblk)) {
map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
map->m_plen = erofs_pos(sb, lastblk) - offset;
} else if (tailendpacking) {
map->m_pa = erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize + erofs_blkoff(sb, offset);
map->m_plen = inode->i_size - offset;
/* inline data should be located in the same meta block */
if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
erofs_err(sb, "inline data cross block boundary @ nid %llu",
vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
map->m_flags |= EROFS_MAP_META;
} else {
erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
vi->nid, inode->i_size, map->m_la);
DBG_BUGON(1);
return -EIO;
}
return 0;
}
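/*
 * For chunk-based inodes, the function below locates the per-chunk
 * mapping entry on disk: the entries (either 4-byte block addresses or
 * 8-byte chunk indexes carrying a device id) start right after the inode
 * and its xattrs, aligned to the entry size, so entry #chunknr lives at
 * pos = ALIGN(iloc + inode_isize + xattr_isize, unit) + unit * chunknr.
 */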
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
struct super_block *sb = inode->i_sb;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_inode_chunk_index *idx;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
u64 chunknr;
unsigned int unit;
erofs_off_t pos;
void *kaddr;
int err = 0;
trace_erofs_map_blocks_enter(inode, map, 0);
map->m_deviceid = 0;
if (map->m_la >= inode->i_size) {
/* leave out-of-bound access unmapped */
map->m_flags = 0;
map->m_plen = 0;
goto out;
}
if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
err = erofs_map_blocks_flatmode(inode, map);
goto out;
}
if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
unit = sizeof(*idx); /* chunk index */
else
unit = EROFS_BLOCK_MAP_ENTRY_SIZE; /* block map */
chunknr = map->m_la >> vi->chunkbits;
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize, unit) + unit * chunknr;
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out;
}
map->m_la = chunknr << vi->chunkbits;
map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
round_up(inode->i_size - map->m_la, sb->s_blocksize));
/* handle block map */
if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
map->m_flags = 0;
} else {
map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
}
goto out_unlock;
}
/* parse chunk indexes */
idx = kaddr + erofs_blkoff(sb, pos);
switch (le32_to_cpu(idx->blkaddr)) {
case EROFS_NULL_ADDR:
map->m_flags = 0;
break;
default:
map->m_deviceid = le16_to_cpu(idx->device_id) &
EROFS_SB(sb)->device_id_mask;
map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
break;
}
out_unlock:
erofs_put_metabuf(&buf);
out:
if (!err)
map->m_llen = map->m_plen;
trace_erofs_map_blocks_exit(inode, map, 0, err);
return err;
}
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
struct erofs_device_info *dif;
int id;
map->m_bdev = sb->s_bdev;
map->m_daxdev = EROFS_SB(sb)->dax_dev;
map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
map->m_fscache = EROFS_SB(sb)->s_fscache;
if (map->m_deviceid) {
down_read(&devs->rwsem);
dif = idr_find(&devs->tree, map->m_deviceid - 1);
if (!dif) {
up_read(&devs->rwsem);
return -ENODEV;
}
if (devs->flatdev) {
map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
up_read(&devs->rwsem);
return 0;
}
map->m_bdev = dif->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
up_read(&devs->rwsem);
} else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
idr_for_each_entry(&devs->tree, dif, id) {
erofs_off_t startoff, length;
if (!dif->mapped_blkaddr)
continue;
startoff = erofs_pos(sb, dif->mapped_blkaddr);
length = erofs_pos(sb, dif->blocks);
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
map->m_bdev = dif->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
break;
}
}
up_read(&devs->rwsem);
}
return 0;
}
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
int ret;
struct super_block *sb = inode->i_sb;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
map.m_la = offset;
map.m_llen = length;
ret = erofs_map_blocks(inode, &map);
if (ret < 0)
return ret;
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
};
ret = erofs_map_dev(sb, &mdev);
if (ret)
return ret;
iomap->offset = map.m_la;
if (flags & IOMAP_DAX)
iomap->dax_dev = mdev.m_daxdev;
else
iomap->bdev = mdev.m_bdev;
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
if (!iomap->length)
iomap->length = length;
return 0;
}
if (map.m_flags & EROFS_MAP_META) {
void *ptr;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
iomap->type = IOMAP_INLINE;
ptr = erofs_read_metabuf(&buf, sb,
erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
iomap->private = buf.base;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
if (flags & IOMAP_DAX)
iomap->addr += mdev.m_dax_part_off;
}
return 0;
}
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
ssize_t written, unsigned int flags, struct iomap *iomap)
{
void *ptr = iomap->private;
if (ptr) {
struct erofs_buf buf = {
.page = kmap_to_page(ptr),
.base = ptr,
.kmap_type = EROFS_KMAP,
};
DBG_BUGON(iomap->type != IOMAP_INLINE);
erofs_put_metabuf(&buf);
} else {
DBG_BUGON(iomap->type == IOMAP_INLINE);
}
return written;
}
static const struct iomap_ops erofs_iomap_ops = {
.iomap_begin = erofs_iomap_begin,
.iomap_end = erofs_iomap_end,
};
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
return iomap_fiemap(inode, fieinfo, start, len,
&z_erofs_iomap_report_ops);
#else
return -EOPNOTSUPP;
#endif
}
return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}
/*
 * Since there are no write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
return iomap_read_folio(folio, &erofs_iomap_ops);
}
static void erofs_readahead(struct readahead_control *rac)
{
return iomap_readahead(rac, &erofs_iomap_ops);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
return iomap_bmap(mapping, block, &erofs_iomap_ops);
}
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
	/* no need to take the (shared) inode lock since it's a ro filesystem */
if (!iov_iter_count(to))
return 0;
#ifdef CONFIG_FS_DAX
if (IS_DAX(inode))
return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
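	/*
	 * Direct I/O requires the position, the byte count and the user
	 * buffer to all be aligned to the logical block size of the
	 * backing device (or to the filesystem block size without one).
	 */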
if (iocb->ki_flags & IOCB_DIRECT) {
struct block_device *bdev = inode->i_sb->s_bdev;
unsigned int blksize_mask;
if (bdev)
blksize_mask = bdev_logical_block_size(bdev) - 1;
else
blksize_mask = i_blocksize(inode) - 1;
if ((iocb->ki_pos | iov_iter_count(to) |
iov_iter_alignment(to)) & blksize_mask)
return -EINVAL;
return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
NULL, 0, NULL, 0);
}
return filemap_read(iocb, to, 0);
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
.read_folio = erofs_read_folio,
.readahead = erofs_readahead,
.bmap = erofs_bmap,
.direct_IO = noop_direct_IO,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
};
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
unsigned int order)
{
return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}
static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
return erofs_dax_huge_fault(vmf, 0);
}
static const struct vm_operations_struct erofs_dax_vm_ops = {
.fault = erofs_dax_fault,
.huge_fault = erofs_dax_huge_fault,
};
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
if (!IS_DAX(file_inode(file)))
return generic_file_readonly_mmap(file, vma);
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
vma->vm_ops = &erofs_dax_vm_ops;
vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}
#else
#define erofs_file_mmap generic_file_readonly_mmap
#endif
const struct file_operations erofs_file_fops = {
.llseek = generic_file_llseek,
.read_iter = erofs_file_read_iter,
.mmap = erofs_file_mmap,
.splice_read = filemap_splice_read,
};
| linux-master | fs/erofs/data.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/zlib.h>
#include "compress.h"
struct z_erofs_deflate {
struct z_erofs_deflate *next;
struct z_stream_s z;
u8 bounce[PAGE_SIZE];
};
static DEFINE_SPINLOCK(z_erofs_deflate_lock);
static unsigned int z_erofs_deflate_nstrms, z_erofs_deflate_avail_strms;
static struct z_erofs_deflate *z_erofs_deflate_head;
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);
module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);
void z_erofs_deflate_exit(void)
{
/* there should be no running fs instance */
while (z_erofs_deflate_avail_strms) {
struct z_erofs_deflate *strm;
spin_lock(&z_erofs_deflate_lock);
strm = z_erofs_deflate_head;
if (!strm) {
spin_unlock(&z_erofs_deflate_lock);
continue;
}
z_erofs_deflate_head = NULL;
spin_unlock(&z_erofs_deflate_lock);
while (strm) {
struct z_erofs_deflate *n = strm->next;
vfree(strm->z.workspace);
kfree(strm);
--z_erofs_deflate_avail_strms;
strm = n;
}
}
}
int __init z_erofs_deflate_init(void)
{
	/* if left unset, default to the number of possible CPUs */
if (!z_erofs_deflate_nstrms)
z_erofs_deflate_nstrms = num_possible_cpus();
for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
++z_erofs_deflate_avail_strms) {
struct z_erofs_deflate *strm;
strm = kzalloc(sizeof(*strm), GFP_KERNEL);
if (!strm)
goto out_failed;
/* XXX: in-kernel zlib cannot shrink windowbits currently */
strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
if (!strm->z.workspace) {
kfree(strm);
goto out_failed;
}
spin_lock(&z_erofs_deflate_lock);
strm->next = z_erofs_deflate_head;
z_erofs_deflate_head = strm;
spin_unlock(&z_erofs_deflate_lock);
}
return 0;
out_failed:
pr_err("failed to allocate zlib workspace\n");
z_erofs_deflate_exit();
return -ENOMEM;
}
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_deflate_cfgs *dfl, int size)
{
if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
erofs_err(sb, "invalid deflate cfgs, size=%u", size);
return -EINVAL;
}
if (dfl->windowbits > MAX_WBITS) {
erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
return -EOPNOTSUPP;
}
erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
return 0;
}
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int nrpages_in =
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
struct super_block *sb = rq->sb;
unsigned int insz, outsz, pofs;
struct z_erofs_deflate *strm;
u8 *kin, *kout = NULL;
bool bounced = false;
int no = -1, ni = 0, j = 0, zerr, err;
/* 1. get the exact DEFLATE compressed size */
kin = kmap_local_page(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
sb->s_blocksize - rq->pageofs_in));
if (err) {
kunmap_local(kin);
return err;
}
/* 2. get an available DEFLATE context */
again:
spin_lock(&z_erofs_deflate_lock);
strm = z_erofs_deflate_head;
if (!strm) {
spin_unlock(&z_erofs_deflate_lock);
wait_event(z_erofs_deflate_wq, READ_ONCE(z_erofs_deflate_head));
goto again;
}
z_erofs_deflate_head = strm->next;
spin_unlock(&z_erofs_deflate_lock);
/* 3. multi-call decompress */
insz = rq->inputsize;
outsz = rq->outputsize;
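	/* negative windowBits selects raw DEFLATE (no zlib header/trailer) */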
zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
if (zerr != Z_OK) {
err = -EIO;
goto failed_zinit;
}
pofs = rq->pageofs_out;
strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
insz -= strm->z.avail_in;
strm->z.next_in = kin + rq->pageofs_in;
strm->z.avail_out = 0;
while (1) {
if (!strm->z.avail_out) {
if (++no >= nrpages_out || !outsz) {
erofs_err(sb, "insufficient space for decompressed data");
err = -EFSCORRUPTED;
break;
}
if (kout)
kunmap_local(kout);
strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
outsz -= strm->z.avail_out;
if (!rq->out[no]) {
rq->out[no] = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
kout = kmap_local_page(rq->out[no]);
strm->z.next_out = kout + pofs;
pofs = 0;
}
if (!strm->z.avail_in && insz) {
if (++ni >= nrpages_in) {
erofs_err(sb, "invalid compressed data");
err = -EFSCORRUPTED;
break;
}
			if (kout) { /* kmap_local mappings must be unmapped in LIFO order */
j = strm->z.next_out - kout;
kunmap_local(kout);
}
kunmap_local(kin);
strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
insz -= strm->z.avail_in;
kin = kmap_local_page(rq->in[ni]);
strm->z.next_in = kin;
bounced = false;
if (kout) {
kout = kmap_local_page(rq->out[no]);
strm->z.next_out = kout + j;
}
}
		/*
		 * Handle overlapping: use the bounce buffer if the compressed
		 * data is still being processed; otherwise, use short-lived
		 * pages from the on-stack pagepool, which are shared within
		 * the same request, so that not _all_ in-place I/O pages need
		 * to be doubled.
		 */
if (!bounced && rq->out[no] == rq->in[ni]) {
memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
strm->z.next_in = strm->bounce;
bounced = true;
}
for (j = ni + 1; j < nrpages_in; ++j) {
struct page *tmppage;
if (rq->out[no] != rq->in[j])
continue;
DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
rq->in[j]));
tmppage = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
if (zerr != Z_OK || !(outsz + strm->z.avail_out)) {
if (zerr == Z_OK && rq->partial_decoding)
break;
if (zerr == Z_STREAM_END && !outsz)
break;
erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
zerr, rq->inputsize, rq->outputsize);
err = -EFSCORRUPTED;
break;
}
}
if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
err = -EIO;
if (kout)
kunmap_local(kout);
failed_zinit:
kunmap_local(kin);
/* 4. push back DEFLATE stream context to the global list */
spin_lock(&z_erofs_deflate_lock);
strm->next = z_erofs_deflate_head;
z_erofs_deflate_head = strm;
spin_unlock(&z_erofs_deflate_lock);
wake_up(&z_erofs_deflate_wq);
return err;
}
| linux-master | fs/erofs/decompressor_deflate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* super.c
*/
/*
* This file implements code to read the superblock, read and initialise
* in-memory structures at mount time, and all the VFS glue code to register
* the filesystem.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "decompressor.h"
#include "xattr.h"
static struct file_system_type squashfs_fs_type;
static const struct super_operations squashfs_super_ops;
enum Opt_errors {
Opt_errors_continue,
Opt_errors_panic,
};
enum squashfs_param {
Opt_errors,
Opt_threads,
};
struct squashfs_mount_opts {
enum Opt_errors errors;
const struct squashfs_decompressor_thread_ops *thread_ops;
int thread_num;
};
static const struct constant_table squashfs_param_errors[] = {
{"continue", Opt_errors_continue },
{"panic", Opt_errors_panic },
{}
};
static const struct fs_parameter_spec squashfs_fs_parameters[] = {
fsparam_enum("errors", Opt_errors, squashfs_param_errors),
fsparam_string("threads", Opt_threads),
{}
};
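/*
 * For illustration, this table accepts option strings such as
 * "errors=continue", "errors=panic", "threads=single|multi|percpu"
 * (with CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT) or "threads=<number>"
 * (with CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS), e.g.
 *
 *   mount -t squashfs -o errors=panic,threads=4 image.squashfs /mnt
 */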
static int squashfs_parse_param_threads_str(const char *str, struct squashfs_mount_opts *opts)
{
#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
if (strcmp(str, "single") == 0) {
opts->thread_ops = &squashfs_decompressor_single;
return 0;
}
if (strcmp(str, "multi") == 0) {
opts->thread_ops = &squashfs_decompressor_multi;
return 0;
}
if (strcmp(str, "percpu") == 0) {
opts->thread_ops = &squashfs_decompressor_percpu;
return 0;
}
#endif
return -EINVAL;
}
static int squashfs_parse_param_threads_num(const char *str, struct squashfs_mount_opts *opts)
{
#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
int ret;
unsigned long num;
ret = kstrtoul(str, 0, &num);
if (ret != 0)
return -EINVAL;
if (num > 1) {
opts->thread_ops = &squashfs_decompressor_multi;
if (num > opts->thread_ops->max_decompressors())
return -EINVAL;
opts->thread_num = (int)num;
return 0;
}
#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
if (num == 1) {
opts->thread_ops = &squashfs_decompressor_single;
opts->thread_num = 1;
return 0;
}
#endif
#endif /* !CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS */
return -EINVAL;
}
static int squashfs_parse_param_threads(const char *str, struct squashfs_mount_opts *opts)
{
int ret = squashfs_parse_param_threads_str(str, opts);
if (ret == 0)
return ret;
return squashfs_parse_param_threads_num(str, opts);
}
static int squashfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct squashfs_mount_opts *opts = fc->fs_private;
struct fs_parse_result result;
int opt;
opt = fs_parse(fc, squashfs_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_errors:
opts->errors = result.uint_32;
break;
case Opt_threads:
if (squashfs_parse_param_threads(param->string, opts) != 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct squashfs_decompressor *supported_squashfs_filesystem(
struct fs_context *fc,
short major, short minor, short id)
{
const struct squashfs_decompressor *decompressor;
if (major < SQUASHFS_MAJOR) {
errorf(fc, "Major/Minor mismatch, older Squashfs %d.%d "
"filesystems are unsupported", major, minor);
return NULL;
} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
errorf(fc, "Major/Minor mismatch, trying to mount newer "
"%d.%d filesystem", major, minor);
errorf(fc, "Please update your kernel");
return NULL;
}
decompressor = squashfs_lookup_decompressor(id);
if (!decompressor->supported) {
errorf(fc, "Filesystem uses \"%s\" compression. This is not supported",
decompressor->name);
return NULL;
}
return decompressor;
}
static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct squashfs_mount_opts *opts = fc->fs_private;
struct squashfs_sb_info *msblk;
struct squashfs_super_block *sblk = NULL;
struct inode *root;
long long root_inode;
unsigned short flags;
unsigned int fragments;
u64 lookup_table_start, xattr_id_table_start, next_table;
int err;
TRACE("Entered squashfs_fill_superblock\n");
sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
if (sb->s_fs_info == NULL) {
ERROR("Failed to allocate squashfs_sb_info\n");
return -ENOMEM;
}
msblk = sb->s_fs_info;
msblk->thread_ops = opts->thread_ops;
msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
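	/*
	 * ffz(~x) is the index of the lowest set bit of x, i.e. log2(x)
	 * for a power-of-two block size: e.g. devblksize 4096 gives 12.
	 */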
msblk->devblksize_log2 = ffz(~msblk->devblksize);
mutex_init(&msblk->meta_index_mutex);
/*
* msblk->bytes_used is checked in squashfs_read_table to ensure reads
* are not beyond filesystem end. But as we're using
* squashfs_read_table here to read the superblock (including the value
* of bytes_used) we need to set it to an initial sensible dummy value
*/
msblk->bytes_used = sizeof(*sblk);
sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
if (IS_ERR(sblk)) {
errorf(fc, "unable to read squashfs_super_block");
err = PTR_ERR(sblk);
sblk = NULL;
goto failed_mount;
}
err = -EINVAL;
/* Check it is a SQUASHFS superblock */
sb->s_magic = le32_to_cpu(sblk->s_magic);
if (sb->s_magic != SQUASHFS_MAGIC) {
if (!(fc->sb_flags & SB_SILENT))
errorf(fc, "Can't find a SQUASHFS superblock on %pg",
sb->s_bdev);
goto failed_mount;
}
if (opts->thread_num == 0) {
msblk->max_thread_num = msblk->thread_ops->max_decompressors();
} else {
msblk->max_thread_num = opts->thread_num;
}
/* Check the MAJOR & MINOR versions and lookup compression type */
msblk->decompressor = supported_squashfs_filesystem(
fc,
le16_to_cpu(sblk->s_major),
le16_to_cpu(sblk->s_minor),
le16_to_cpu(sblk->compression));
if (msblk->decompressor == NULL)
goto failed_mount;
	/*
	 * Check the filesystem does not extend beyond the end of the
	 * block device.
	 */
msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
if (msblk->bytes_used < 0 ||
msblk->bytes_used > bdev_nr_bytes(sb->s_bdev))
goto failed_mount;
/* Check block size for sanity */
msblk->block_size = le32_to_cpu(sblk->block_size);
if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
goto insanity;
/*
* Check the system page size is not larger than the filesystem
* block size (by default 128K). This is currently not supported.
*/
if (PAGE_SIZE > msblk->block_size) {
errorf(fc, "Page size > filesystem block size (%d). This is "
"currently not supported!", msblk->block_size);
goto failed_mount;
}
/* Check block log for sanity */
msblk->block_log = le16_to_cpu(sblk->block_log);
if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
goto failed_mount;
/* Check that block_size and block_log match */
if (msblk->block_size != (1 << msblk->block_log))
goto insanity;
/* Check the root inode for sanity */
root_inode = le64_to_cpu(sblk->root_inode);
if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
goto insanity;
msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
msblk->inodes = le32_to_cpu(sblk->inodes);
msblk->fragments = le32_to_cpu(sblk->fragments);
msblk->ids = le16_to_cpu(sblk->no_ids);
flags = le16_to_cpu(sblk->flags);
TRACE("Found valid superblock on %pg\n", sb->s_bdev);
TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
? "un" : "");
TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
? "un" : "");
TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
TRACE("Block size %d\n", msblk->block_size);
TRACE("Number of inodes %d\n", msblk->inodes);
TRACE("Number of fragments %d\n", msblk->fragments);
TRACE("Number of ids %d\n", msblk->ids);
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
TRACE("sblk->fragment_table_start %llx\n",
(u64) le64_to_cpu(sblk->fragment_table_start));
TRACE("sblk->id_table_start %llx\n",
(u64) le64_to_cpu(sblk->id_table_start));
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
err = -ENOMEM;
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
if (msblk->block_cache == NULL)
goto failed_mount;
/* Allocate read_page block */
msblk->read_page = squashfs_cache_init("data",
msblk->max_thread_num, msblk->block_size);
if (msblk->read_page == NULL) {
errorf(fc, "Failed to allocate read_page block");
goto failed_mount;
}
if (msblk->devblksize == PAGE_SIZE) {
struct inode *cache = new_inode(sb);
if (cache == NULL)
goto failed_mount;
set_nlink(cache, 1);
cache->i_size = OFFSET_MAX;
mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);
msblk->cache_mapping = cache->i_mapping;
}
msblk->stream = squashfs_decompressor_setup(sb, flags);
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
msblk->stream = NULL;
goto insanity;
}
/* Handle xattrs */
sb->s_xattr = squashfs_xattr_handlers;
xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
next_table = msblk->bytes_used;
goto allocate_id_index_table;
}
/* Allocate and read xattr id lookup table */
msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
if (IS_ERR(msblk->xattr_id_table)) {
errorf(fc, "unable to read xattr id index table");
err = PTR_ERR(msblk->xattr_id_table);
msblk->xattr_id_table = NULL;
if (err != -ENOTSUPP)
goto failed_mount;
}
next_table = msblk->xattr_table;
allocate_id_index_table:
/* Allocate and read id index table */
msblk->id_table = squashfs_read_id_index_table(sb,
le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
if (IS_ERR(msblk->id_table)) {
errorf(fc, "unable to read id index table");
err = PTR_ERR(msblk->id_table);
msblk->id_table = NULL;
goto failed_mount;
}
next_table = le64_to_cpu(msblk->id_table[0]);
/* Handle inode lookup table */
lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
if (lookup_table_start == SQUASHFS_INVALID_BLK)
goto handle_fragments;
/* Allocate and read inode lookup table */
msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
lookup_table_start, next_table, msblk->inodes);
if (IS_ERR(msblk->inode_lookup_table)) {
errorf(fc, "unable to read inode lookup table");
err = PTR_ERR(msblk->inode_lookup_table);
msblk->inode_lookup_table = NULL;
goto failed_mount;
}
next_table = le64_to_cpu(msblk->inode_lookup_table[0]);
sb->s_export_op = &squashfs_export_ops;
handle_fragments:
fragments = msblk->fragments;
if (fragments == 0)
goto check_directory_table;
msblk->fragment_cache = squashfs_cache_init("fragment",
SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
if (msblk->fragment_cache == NULL) {
err = -ENOMEM;
goto failed_mount;
}
/* Allocate and read fragment index table */
msblk->fragment_index = squashfs_read_fragment_index_table(sb,
le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
if (IS_ERR(msblk->fragment_index)) {
errorf(fc, "unable to read fragment index table");
err = PTR_ERR(msblk->fragment_index);
msblk->fragment_index = NULL;
goto failed_mount;
}
next_table = le64_to_cpu(msblk->fragment_index[0]);
check_directory_table:
/* Sanity check directory_table */
if (msblk->directory_table > next_table) {
err = -EINVAL;
goto insanity;
}
/* Sanity check inode_table */
if (msblk->inode_table >= msblk->directory_table) {
err = -EINVAL;
goto insanity;
}
/* allocate root */
root = new_inode(sb);
if (!root) {
err = -ENOMEM;
goto failed_mount;
}
err = squashfs_read_inode(root, root_inode);
if (err) {
make_bad_inode(root);
iput(root);
goto failed_mount;
}
insert_inode_hash(root);
sb->s_root = d_make_root(root);
if (sb->s_root == NULL) {
ERROR("Root inode create failed\n");
err = -ENOMEM;
goto failed_mount;
}
TRACE("Leaving squashfs_fill_super\n");
kfree(sblk);
return 0;
insanity:
errorf(fc, "squashfs image failed sanity check");
failed_mount:
squashfs_cache_delete(msblk->block_cache);
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
if (msblk->cache_mapping)
iput(msblk->cache_mapping->host);
msblk->thread_ops->destroy(msblk);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
kfree(msblk->xattr_id_table);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
kfree(sblk);
return err;
}
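/*
 * For reference, the next_table bootstrapping above relies on the
 * on-disk table order (each table preceding its successor), roughly:
 *
 *   superblock | data & fragments | inode table | directory table |
 *   fragment table | export (lookup) table | id table | xattr table
 *
 * so every table start can be bounds-checked against the start of the
 * table that follows it.
 */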
static int squashfs_get_tree(struct fs_context *fc)
{
return get_tree_bdev(fc, squashfs_fill_super);
}
static int squashfs_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct squashfs_mount_opts *opts = fc->fs_private;
sync_filesystem(fc->root->d_sb);
fc->sb_flags |= SB_RDONLY;
msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
return 0;
}
static void squashfs_free_fs_context(struct fs_context *fc)
{
kfree(fc->fs_private);
}
static const struct fs_context_operations squashfs_context_ops = {
.get_tree = squashfs_get_tree,
.free = squashfs_free_fs_context,
.parse_param = squashfs_parse_param,
.reconfigure = squashfs_reconfigure,
};
static int squashfs_show_options(struct seq_file *s, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
if (msblk->panic_on_errors)
seq_puts(s, ",errors=panic");
else
seq_puts(s, ",errors=continue");
#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
if (msblk->thread_ops == &squashfs_decompressor_single) {
seq_puts(s, ",threads=single");
return 0;
}
if (msblk->thread_ops == &squashfs_decompressor_percpu) {
seq_puts(s, ",threads=percpu");
return 0;
}
#endif
#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
seq_printf(s, ",threads=%d", msblk->max_thread_num);
#endif
return 0;
}
static int squashfs_init_fs_context(struct fs_context *fc)
{
struct squashfs_mount_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return -ENOMEM;
#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
opts->thread_ops = &squashfs_decompressor_single;
#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI)
opts->thread_ops = &squashfs_decompressor_multi;
#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU)
opts->thread_ops = &squashfs_decompressor_percpu;
#else
#error "fail: unknown squashfs decompression thread mode?"
#endif
opts->thread_num = 0;
fc->fs_private = opts;
fc->ops = &squashfs_context_ops;
return 0;
}
static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);
TRACE("Entered squashfs_statfs\n");
buf->f_type = SQUASHFS_MAGIC;
buf->f_bsize = msblk->block_size;
buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1;
buf->f_bfree = buf->f_bavail = 0;
buf->f_files = msblk->inodes;
buf->f_ffree = 0;
buf->f_namelen = SQUASHFS_NAME_LEN;
buf->f_fsid = u64_to_fsid(id);
return 0;
}
static void squashfs_put_super(struct super_block *sb)
{
if (sb->s_fs_info) {
struct squashfs_sb_info *sbi = sb->s_fs_info;
squashfs_cache_delete(sbi->block_cache);
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
if (sbi->cache_mapping)
iput(sbi->cache_mapping->host);
sbi->thread_ops->destroy(sbi);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
kfree(sbi->inode_lookup_table);
kfree(sbi->xattr_id_table);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
}
static struct kmem_cache *squashfs_inode_cachep;
static void init_once(void *foo)
{
struct squashfs_inode_info *ei = foo;
inode_init_once(&ei->vfs_inode);
}
static int __init init_inodecache(void)
{
squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
sizeof(struct squashfs_inode_info), 0,
SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
init_once);
return squashfs_inode_cachep ? 0 : -ENOMEM;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(squashfs_inode_cachep);
}
static int __init init_squashfs_fs(void)
{
int err = init_inodecache();
if (err)
return err;
err = register_filesystem(&squashfs_fs_type);
if (err) {
destroy_inodecache();
return err;
}
pr_info("version 4.0 (2009/01/31) Phillip Lougher\n");
return 0;
}
static void __exit exit_squashfs_fs(void)
{
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
}
static struct inode *squashfs_alloc_inode(struct super_block *sb)
{
struct squashfs_inode_info *ei =
alloc_inode_sb(sb, squashfs_inode_cachep, GFP_KERNEL);
return ei ? &ei->vfs_inode : NULL;
}
static void squashfs_free_inode(struct inode *inode)
{
kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
}
static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
.name = "squashfs",
.init_fs_context = squashfs_init_fs_context,
.parameters = squashfs_fs_parameters,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("squashfs");
static const struct super_operations squashfs_super_ops = {
.alloc_inode = squashfs_alloc_inode,
.free_inode = squashfs_free_inode,
.statfs = squashfs_statfs,
.put_super = squashfs_put_super,
.show_options = squashfs_show_options,
};
module_init(init_squashfs_fs);
module_exit(exit_squashfs_fs);
MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
MODULE_AUTHOR("Phillip Lougher <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | fs/squashfs/super.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* fragment.c
*/
/*
* This file implements code to handle compressed fragments (tail-end packed
* datablocks).
*
* Regular files contain a fragment index which is mapped to a fragment
* location on disk and compressed size using a fragment lookup table.
* Like everything in Squashfs this fragment lookup table is itself stored
 * compressed into metadata blocks. A second index table is used to locate
 * these. For speed of access (and because it is small), this second index
 * table is read at mount time and cached in memory.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
/*
* Look-up fragment using the fragment index table. Return the on disk
* location of the fragment and its compressed size
*/
int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
u64 *fragment_block)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block, offset, size;
struct squashfs_fragment_entry fragment_entry;
u64 start_block;
if (fragment >= msblk->fragments)
return -EIO;
block = SQUASHFS_FRAGMENT_INDEX(fragment);
offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
start_block = le64_to_cpu(msblk->fragment_index[block]);
size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
&offset, sizeof(fragment_entry));
if (size < 0)
return size;
*fragment_block = le64_to_cpu(fragment_entry.start_block);
return squashfs_block_size(fragment_entry.size);
}
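/*
 * For illustration, SQUASHFS_FRAGMENT_INDEX() and
 * SQUASHFS_FRAGMENT_INDEX_OFFSET() simply split the fragment number into
 * a metadata-block slot and a byte offset within it, in effect:
 *
 *   block  = (fragment * sizeof(entry)) / SQUASHFS_METADATA_SIZE;
 *   offset = (fragment * sizeof(entry)) % SQUASHFS_METADATA_SIZE;
 *
 * where entry is struct squashfs_fragment_entry.
 */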
/*
* Read the uncompressed fragment lookup table indexes off disk into memory
*/
__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
u64 fragment_table_start, u64 next_table, unsigned int fragments)
{
unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
__le64 *table;
/*
* Sanity check, length bytes should not extend into the next table -
* this check also traps instances where fragment_table_start is
* incorrectly larger than the next table start
*/
if (fragment_table_start + length > next_table)
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, fragment_table_start, length);
/*
* table[0] points to the first fragment table metadata block, this
* should be less than fragment_table_start
*/
if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) {
kfree(table);
return ERR_PTR(-EINVAL);
}
return table;
}
| linux-master | fs/squashfs/fragment.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* block.c
*/
/*
* This file implements the low-level routines to read and decompress
* datablocks and metadata blocks.
*/
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/bio.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
/*
* Returns the amount of bytes copied to the page actor.
*/
static int copy_bio_to_actor(struct bio *bio,
struct squashfs_page_actor *actor,
int offset, int req_length)
{
void *actor_addr;
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int copied_bytes = 0;
int actor_offset = 0;
squashfs_actor_nobuff(actor);
actor_addr = squashfs_first_page(actor);
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
return 0;
while (copied_bytes < req_length) {
int bytes_to_copy = min_t(int, bvec->bv_len - offset,
PAGE_SIZE - actor_offset);
bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
if (!IS_ERR(actor_addr))
memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
offset, bytes_to_copy);
actor_offset += bytes_to_copy;
copied_bytes += bytes_to_copy;
offset += bytes_to_copy;
if (actor_offset >= PAGE_SIZE) {
actor_addr = squashfs_next_page(actor);
if (!actor_addr)
break;
actor_offset = 0;
}
if (offset >= bvec->bv_len) {
if (!bio_next_segment(bio, &iter_all))
break;
offset = 0;
}
}
squashfs_finish_page(actor);
return copied_bytes;
}
static int squashfs_bio_read_cached(struct bio *fullbio,
struct address_space *cache_mapping, u64 index, int length,
u64 read_start, u64 read_end, int page_count)
{
struct page *head_to_cache = NULL, *tail_to_cache = NULL;
struct block_device *bdev = fullbio->bi_bdev;
int start_idx = 0, end_idx = 0;
struct bvec_iter_all iter_all;
struct bio *bio = NULL;
struct bio_vec *bv;
int idx = 0;
int err = 0;
bio_for_each_segment_all(bv, fullbio, iter_all) {
struct page *page = bv->bv_page;
if (page->mapping == cache_mapping) {
idx++;
continue;
}
/*
* We only use this when the device block size is the same as
* the page size, so read_start and read_end cover full pages.
*
* Compare these to the original required index and length to
* only cache pages which were requested partially, since these
* are the ones which are likely to be needed when reading
* adjacent blocks.
*/
if (idx == 0 && index != read_start)
head_to_cache = page;
else if (idx == page_count - 1 && index + length != read_end)
tail_to_cache = page;
if (!bio || idx != end_idx) {
struct bio *new = bio_alloc_clone(bdev, fullbio,
GFP_NOIO, &fs_bio_set);
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
start_idx = idx;
}
idx++;
end_idx = idx;
}
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
err = submit_bio_wait(bio);
bio_put(bio);
}
if (err)
return err;
if (head_to_cache) {
int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
read_start >> PAGE_SHIFT,
GFP_NOIO);
if (!ret) {
SetPageUptodate(head_to_cache);
unlock_page(head_to_cache);
}
}
if (tail_to_cache) {
int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
(read_end >> PAGE_SHIFT) - 1,
GFP_NOIO);
if (!ret) {
SetPageUptodate(tail_to_cache);
unlock_page(tail_to_cache);
}
}
return 0;
}
static struct page *squashfs_get_cache_page(struct address_space *mapping,
pgoff_t index)
{
struct page *page;
if (!mapping)
return NULL;
page = find_get_page(mapping, index);
if (!page)
return NULL;
if (!PageUptodate(page)) {
put_page(page);
return NULL;
}
return page;
}
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
struct bio **biop, int *block_offset)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct address_space *cache_mapping = msblk->cache_mapping;
const u64 read_start = round_down(index, msblk->devblksize);
const sector_t block = read_start >> msblk->devblksize_log2;
const u64 read_end = round_up(index + length, msblk->devblksize);
const sector_t block_end = read_end >> msblk->devblksize_log2;
int offset = read_start - round_down(index, PAGE_SIZE);
int total_len = (block_end - block) << msblk->devblksize_log2;
const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
int error, i;
struct bio *bio;
bio = bio_kmalloc(page_count, GFP_NOIO);
if (!bio)
return -ENOMEM;
bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
for (i = 0; i < page_count; ++i) {
unsigned int len =
min_t(unsigned int, PAGE_SIZE - offset, total_len);
pgoff_t index = (read_start >> PAGE_SHIFT) + i;
struct page *page;
page = squashfs_get_cache_page(cache_mapping, index);
if (!page)
page = alloc_page(GFP_NOIO);
if (!page) {
error = -ENOMEM;
goto out_free_bio;
}
/*
* Use the __ version to avoid merging since we need each page
* to be separate when we check for and avoid cached pages.
*/
__bio_add_page(bio, page, len, offset);
offset = 0;
total_len -= len;
}
if (cache_mapping)
error = squashfs_bio_read_cached(bio, cache_mapping, index,
length, read_start, read_end,
page_count);
else
error = submit_bio_wait(bio);
if (error)
goto out_free_bio;
*biop = bio;
*block_offset = index & ((1 << msblk->devblksize_log2) - 1);
return 0;
out_free_bio:
bio_free_pages(bio);
bio_uninit(bio);
kfree(bio);
return error;
}
/*
* Read and decompress a metadata block or datablock. Length is non-zero
* if a datablock is being read (the size is stored elsewhere in the
* filesystem), otherwise the length is obtained from the first two bytes of
* the metadata block. A bit in the length field indicates if the block
* is stored uncompressed in the filesystem (usually because compression
* generated a larger block - this does occasionally happen with compression
* algorithms).
*/
int squashfs_read_data(struct super_block *sb, u64 index, int length,
u64 *next_index, struct squashfs_page_actor *output)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct bio *bio = NULL;
int compressed;
int res;
int offset;
if (length) {
/*
* Datablock.
*/
compressed = SQUASHFS_COMPRESSED_BLOCK(length);
length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
index, compressed ? "" : "un", length, output->length);
} else {
/*
* Metadata block.
*/
const u8 *data;
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
if (index + 2 > msblk->bytes_used) {
res = -EIO;
goto out;
}
res = squashfs_bio_read(sb, index, 2, &bio, &offset);
if (res)
goto out;
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
res = -EIO;
goto out_free_bio;
}
/* Extract the length of the metadata block */
data = bvec_virt(bvec);
length = data[offset];
if (offset < bvec->bv_len - 1) {
length |= data[offset + 1] << 8;
} else {
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
res = -EIO;
goto out_free_bio;
}
data = bvec_virt(bvec);
length |= data[0] << 8;
}
bio_free_pages(bio);
bio_uninit(bio);
kfree(bio);
compressed = SQUASHFS_COMPRESSED(length);
length = SQUASHFS_COMPRESSED_SIZE(length);
index += 2;
TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
compressed ? "" : "un", length);
}
if (length < 0 || length > output->length ||
(index + length) > msblk->bytes_used) {
res = -EIO;
goto out;
}
if (next_index)
*next_index = index + length;
res = squashfs_bio_read(sb, index, length, &bio, &offset);
if (res)
goto out;
if (compressed) {
if (!msblk->stream) {
res = -EIO;
goto out_free_bio;
}
res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
} else {
res = copy_bio_to_actor(bio, output, offset, length);
}
out_free_bio:
bio_free_pages(bio);
bio_uninit(bio);
kfree(bio);
out:
if (res < 0) {
ERROR("Failed to read block 0x%llx: %d\n", index, res);
if (msblk->panic_on_errors)
panic("squashfs read failed");
}
return res;
}
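/*
 * For illustration, the two-byte metadata header read above decodes as
 * follows (assuming the usual squashfs convention that a set top bit
 * marks a block stored uncompressed):
 */
#if 0	/* sketch only; "hdr" stands for the two header bytes */
	u16 word = hdr[0] | (hdr[1] << 8);	/* little-endian on disk */
	int compressed = !(word & (1 << 15));	/* top bit set: stored raw */
	int block_len = word & ((1 << 15) - 1);
#endif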
| linux-master | fs/squashfs/block.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2010
* Phillip Lougher <[email protected]>
*
* xattr.c
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
static const struct xattr_handler *squashfs_xattr_handler(int);
ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
size_t buffer_size)
{
struct inode *inode = d_inode(d);
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
+ msblk->xattr_table;
int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
int count = squashfs_i(inode)->xattr_count;
size_t rest = buffer_size;
int err;
/* check that the file system has xattrs */
if (msblk->xattr_id_table == NULL)
return -EOPNOTSUPP;
/* loop reading each xattr name */
while (count--) {
struct squashfs_xattr_entry entry;
struct squashfs_xattr_val val;
const struct xattr_handler *handler;
int name_size;
err = squashfs_read_metadata(sb, &entry, &start, &offset,
sizeof(entry));
if (err < 0)
goto failed;
name_size = le16_to_cpu(entry.size);
handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
if (handler && (!handler->list || handler->list(d))) {
const char *prefix = handler->prefix ?: handler->name;
size_t prefix_size = strlen(prefix);
if (buffer) {
if (prefix_size + name_size + 1 > rest) {
err = -ERANGE;
goto failed;
}
memcpy(buffer, prefix, prefix_size);
buffer += prefix_size;
}
err = squashfs_read_metadata(sb, buffer, &start,
&offset, name_size);
if (err < 0)
goto failed;
if (buffer) {
buffer[name_size] = '\0';
buffer += name_size + 1;
}
rest -= prefix_size + name_size + 1;
} else {
			/* no handler or insufficient privileges, so skip */
err = squashfs_read_metadata(sb, NULL, &start,
&offset, name_size);
if (err < 0)
goto failed;
}
/* skip remaining xattr entry */
err = squashfs_read_metadata(sb, &val, &start, &offset,
sizeof(val));
if (err < 0)
goto failed;
err = squashfs_read_metadata(sb, NULL, &start, &offset,
le32_to_cpu(val.vsize));
if (err < 0)
goto failed;
}
err = buffer_size - rest;
failed:
return err;
}
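/*
 * For reference, each inode's xattr list is a packed metadata stream of
 * the form
 *
 *   { struct squashfs_xattr_entry, name[entry.size] }
 *   { struct squashfs_xattr_val, value[val.vsize] }
 *
 * repeated xattr_count times, which is why the loops here always read
 * (or skip, by passing a NULL buffer) an entry plus its name and then a
 * value header plus its value.
 */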
static int squashfs_xattr_get(struct inode *inode, int name_index,
const char *name, void *buffer, size_t buffer_size)
{
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
+ msblk->xattr_table;
int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
int count = squashfs_i(inode)->xattr_count;
int name_len = strlen(name);
int err, vsize;
char *target = kmalloc(name_len, GFP_KERNEL);
if (target == NULL)
return -ENOMEM;
/* loop reading each xattr name */
for (; count; count--) {
struct squashfs_xattr_entry entry;
struct squashfs_xattr_val val;
int type, prefix, name_size;
err = squashfs_read_metadata(sb, &entry, &start, &offset,
sizeof(entry));
if (err < 0)
goto failed;
name_size = le16_to_cpu(entry.size);
type = le16_to_cpu(entry.type);
prefix = type & SQUASHFS_XATTR_PREFIX_MASK;
if (prefix == name_index && name_size == name_len)
err = squashfs_read_metadata(sb, target, &start,
&offset, name_size);
else
err = squashfs_read_metadata(sb, NULL, &start,
&offset, name_size);
if (err < 0)
goto failed;
if (prefix == name_index && name_size == name_len &&
strncmp(target, name, name_size) == 0) {
/* found xattr */
if (type & SQUASHFS_XATTR_VALUE_OOL) {
__le64 xattr_val;
u64 xattr;
/* val is a reference to the real location */
err = squashfs_read_metadata(sb, &val, &start,
&offset, sizeof(val));
if (err < 0)
goto failed;
err = squashfs_read_metadata(sb, &xattr_val,
&start, &offset, sizeof(xattr_val));
if (err < 0)
goto failed;
xattr = le64_to_cpu(xattr_val);
start = SQUASHFS_XATTR_BLK(xattr) +
msblk->xattr_table;
offset = SQUASHFS_XATTR_OFFSET(xattr);
}
/* read xattr value */
err = squashfs_read_metadata(sb, &val, &start, &offset,
sizeof(val));
if (err < 0)
goto failed;
vsize = le32_to_cpu(val.vsize);
if (buffer) {
if (vsize > buffer_size) {
err = -ERANGE;
goto failed;
}
err = squashfs_read_metadata(sb, buffer, &start,
&offset, vsize);
if (err < 0)
goto failed;
}
break;
}
/* no match, skip remaining xattr entry */
err = squashfs_read_metadata(sb, &val, &start, &offset,
sizeof(val));
if (err < 0)
goto failed;
err = squashfs_read_metadata(sb, NULL, &start, &offset,
le32_to_cpu(val.vsize));
if (err < 0)
goto failed;
}
err = count ? vsize : -ENODATA;
failed:
kfree(target);
return err;
}
static int squashfs_xattr_handler_get(const struct xattr_handler *handler,
struct dentry *unused,
struct inode *inode,
const char *name,
void *buffer, size_t size)
{
return squashfs_xattr_get(inode, handler->flags, name,
buffer, size);
}
/*
* User namespace support
*/
static const struct xattr_handler squashfs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.flags = SQUASHFS_XATTR_USER,
.get = squashfs_xattr_handler_get
};
/*
* Trusted namespace support
*/
static bool squashfs_trusted_xattr_handler_list(struct dentry *d)
{
return capable(CAP_SYS_ADMIN);
}
static const struct xattr_handler squashfs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.flags = SQUASHFS_XATTR_TRUSTED,
.list = squashfs_trusted_xattr_handler_list,
.get = squashfs_xattr_handler_get
};
/*
* Security namespace support
*/
static const struct xattr_handler squashfs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.flags = SQUASHFS_XATTR_SECURITY,
.get = squashfs_xattr_handler_get
};
static const struct xattr_handler *squashfs_xattr_handler(int type)
{
if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
/* ignore unrecognised type */
return NULL;
switch (type & SQUASHFS_XATTR_PREFIX_MASK) {
case SQUASHFS_XATTR_USER:
return &squashfs_xattr_user_handler;
case SQUASHFS_XATTR_TRUSTED:
return &squashfs_xattr_trusted_handler;
case SQUASHFS_XATTR_SECURITY:
return &squashfs_xattr_security_handler;
default:
/* ignore unrecognised type */
return NULL;
}
}
const struct xattr_handler *squashfs_xattr_handlers[] = {
&squashfs_xattr_user_handler,
&squashfs_xattr_trusted_handler,
&squashfs_xattr_security_handler,
NULL
};
| linux-master | fs/squashfs/xattr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* zstd_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
struct workspace {
void *mem;
size_t mem_size;
size_t window_size;
};
static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
{
struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL);
if (wksp == NULL)
goto failed;
wksp->window_size = max_t(size_t,
msblk->block_size, SQUASHFS_METADATA_SIZE);
wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
wksp->mem = vmalloc(wksp->mem_size);
if (wksp->mem == NULL)
goto failed;
return wksp;
failed:
ERROR("Failed to allocate zstd workspace\n");
kfree(wksp);
return ERR_PTR(-ENOMEM);
}
static void zstd_free(void *strm)
{
struct workspace *wksp = strm;
if (wksp)
vfree(wksp->mem);
kfree(wksp);
}
static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
struct workspace *wksp = strm;
zstd_dstream *stream;
size_t total_out = 0;
int error = 0;
zstd_in_buffer in_buf = { NULL, 0, 0 };
zstd_out_buffer out_buf = { NULL, 0, 0 };
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
if (!stream) {
ERROR("Failed to initialize zstd decompressor\n");
return -EIO;
}
out_buf.size = PAGE_SIZE;
out_buf.dst = squashfs_first_page(output);
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
goto finish;
}
for (;;) {
size_t zstd_err;
if (in_buf.pos == in_buf.size) {
const void *data;
int avail;
if (!bio_next_segment(bio, &iter_all)) {
error = -EIO;
break;
}
avail = min(length, ((int)bvec->bv_len) - offset);
data = bvec_virt(bvec);
length -= avail;
in_buf.src = data + offset;
in_buf.size = avail;
in_buf.pos = 0;
offset = 0;
}
if (out_buf.pos == out_buf.size) {
out_buf.dst = squashfs_next_page(output);
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
break;
} else if (out_buf.dst == NULL) {
				/*
				 * Shouldn't run out of pages before the
				 * stream is done.
				 */
error = -EIO;
break;
}
out_buf.pos = 0;
out_buf.size = PAGE_SIZE;
}
total_out -= out_buf.pos;
zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
total_out += out_buf.pos; /* add the additional data produced */
if (zstd_err == 0)
break;
if (zstd_is_error(zstd_err)) {
ERROR("zstd decompression error: %d\n",
(int)zstd_get_error_code(zstd_err));
error = -EIO;
break;
}
}
finish:
squashfs_finish_page(output);
return error ? error : total_out;
}
const struct squashfs_decompressor squashfs_zstd_comp_ops = {
.init = zstd_init,
.free = zstd_free,
.decompress = zstd_uncompress,
.id = ZSTD_COMPRESSION,
.name = "zstd",
.alloc_buffer = 1,
.supported = 1
};
| linux-master | fs/squashfs/zstd_wrapper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Phillip Lougher <[email protected]>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/local_lock.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
/*
* This file implements multi-threaded decompression using percpu
 * variables, one decompressor stream per cpu core.
*/
struct squashfs_stream {
void *stream;
local_lock_t lock;
};
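/*
 * The local_lock serializes access to this CPU's stream: on a
 * non-PREEMPT_RT kernel it amounts to disabling preemption, while on
 * PREEMPT_RT it becomes a real per-CPU lock, so the decompressor can
 * still be preempted there.
 */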
static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
void *comp_opts)
{
struct squashfs_stream *stream;
struct squashfs_stream __percpu *percpu;
int err, cpu;
percpu = alloc_percpu(struct squashfs_stream);
if (percpu == NULL)
return ERR_PTR(-ENOMEM);
for_each_possible_cpu(cpu) {
stream = per_cpu_ptr(percpu, cpu);
stream->stream = msblk->decompressor->init(msblk, comp_opts);
if (IS_ERR(stream->stream)) {
err = PTR_ERR(stream->stream);
goto out;
}
local_lock_init(&stream->lock);
}
kfree(comp_opts);
return (__force void *) percpu;
out:
for_each_possible_cpu(cpu) {
stream = per_cpu_ptr(percpu, cpu);
if (!IS_ERR_OR_NULL(stream->stream))
msblk->decompressor->free(stream->stream);
}
free_percpu(percpu);
return ERR_PTR(err);
}
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream __percpu *percpu =
(struct squashfs_stream __percpu *) msblk->stream;
struct squashfs_stream *stream;
int cpu;
if (msblk->stream) {
for_each_possible_cpu(cpu) {
stream = per_cpu_ptr(percpu, cpu);
msblk->decompressor->free(stream->stream);
}
free_percpu(percpu);
}
}
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
int offset, int length, struct squashfs_page_actor *output)
{
struct squashfs_stream *stream;
struct squashfs_stream __percpu *percpu =
(struct squashfs_stream __percpu *) msblk->stream;
int res;
local_lock(&percpu->lock);
stream = this_cpu_ptr(percpu);
res = msblk->decompressor->decompress(msblk, stream->stream, bio,
offset, length, output);
local_unlock(&percpu->lock);
if (res < 0)
ERROR("%s decompression failed, data probably corrupt\n",
msblk->decompressor->name);
return res;
}
static int squashfs_max_decompressors(void)
{
return num_possible_cpus();
}
const struct squashfs_decompressor_thread_ops squashfs_decompressor_percpu = {
.create = squashfs_decompressor_create,
.destroy = squashfs_decompressor_destroy,
.decompress = squashfs_decompress,
.max_decompressors = squashfs_max_decompressors,
};
| linux-master | fs/squashfs/decompressor_multi_percpu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
* Phillip Lougher <[email protected]>
*
* xz_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/xz.h>
#include <linux/bitops.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
struct squashfs_xz {
struct xz_dec *state;
struct xz_buf buf;
};
struct disk_comp_opts {
__le32 dictionary_size;
__le32 flags;
};
struct comp_opts {
int dict_size;
};
static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
void *buff, int len)
{
struct disk_comp_opts *comp_opts = buff;
struct comp_opts *opts;
int err = 0, n;
opts = kmalloc(sizeof(*opts), GFP_KERNEL);
if (opts == NULL) {
err = -ENOMEM;
goto out2;
}
if (comp_opts) {
/* check compressor options are the expected length */
if (len < sizeof(*comp_opts)) {
err = -EIO;
goto out;
}
opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
/* the dictionary size should be 2^n or 2^n+2^(n+1) */
n = ffs(opts->dict_size) - 1;
if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
(1 << (n + 1))) {
err = -EIO;
goto out;
}
} else
/* use defaults */
opts->dict_size = max_t(int, msblk->block_size,
SQUASHFS_METADATA_SIZE);
return opts;
out:
kfree(opts);
out2:
return ERR_PTR(err);
}
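/*
 * For illustration, with n = ffs(dict_size) - 1 the check above accepts
 * e.g. 8192 (2^13) and 12288 (2^12 + 2^13) but rejects 10240.
 */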
static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
{
struct comp_opts *comp_opts = buff;
struct squashfs_xz *stream;
int err;
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL) {
err = -ENOMEM;
goto failed;
}
stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
if (stream->state == NULL) {
kfree(stream);
err = -ENOMEM;
goto failed;
}
return stream;
failed:
ERROR("Failed to initialise xz decompressor\n");
return ERR_PTR(err);
}
static void squashfs_xz_free(void *strm)
{
struct squashfs_xz *stream = strm;
if (stream) {
xz_dec_end(stream->state);
kfree(stream);
}
}
static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int total = 0, error = 0;
struct squashfs_xz *stream = strm;
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
stream->buf.in_size = 0;
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_SIZE;
stream->buf.out = squashfs_first_page(output);
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
goto finish;
}
for (;;) {
enum xz_ret xz_err;
if (stream->buf.in_pos == stream->buf.in_size) {
const void *data;
int avail;
if (!bio_next_segment(bio, &iter_all)) {
/* XZ_STREAM_END must be reached. */
error = -EIO;
break;
}
avail = min(length, ((int)bvec->bv_len) - offset);
data = bvec_virt(bvec);
length -= avail;
stream->buf.in = data + offset;
stream->buf.in_size = avail;
stream->buf.in_pos = 0;
offset = 0;
}
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
break;
} else if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
}
xz_err = xz_dec_run(stream->state, &stream->buf);
if (xz_err == XZ_STREAM_END)
break;
if (xz_err != XZ_OK) {
error = -EIO;
break;
}
}
finish:
squashfs_finish_page(output);
return error ? error : total + stream->buf.out_pos;
}
const struct squashfs_decompressor squashfs_xz_comp_ops = {
.init = squashfs_xz_init,
.comp_opts = squashfs_xz_comp_opts,
.free = squashfs_xz_free,
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
.name = "xz",
.alloc_buffer = 1,
.supported = 1
};
| linux-master | fs/squashfs/xz_wrapper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Minchan Kim <[email protected]>
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cpumask.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
/*
* This file implements multi-threaded decompression in the
* decompressor framework
*/
/*
 * The limit is twice the number of online CPUs because a CPU can issue
 * a new I/O request while it is still waiting on a previous one; e.g.
 * with 4 online CPUs up to 8 decompressor streams may be in use.
 */
#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
static int squashfs_max_decompressors(void)
{
return MAX_DECOMPRESSOR;
}
struct squashfs_stream {
void *comp_opts;
struct list_head strm_list;
struct mutex mutex;
int avail_decomp;
wait_queue_head_t wait;
};
struct decomp_stream {
void *stream;
struct list_head list;
};
static void put_decomp_stream(struct decomp_stream *decomp_strm,
struct squashfs_stream *stream)
{
mutex_lock(&stream->mutex);
list_add(&decomp_strm->list, &stream->strm_list);
mutex_unlock(&stream->mutex);
wake_up(&stream->wait);
}
static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
void *comp_opts)
{
struct squashfs_stream *stream;
struct decomp_stream *decomp_strm = NULL;
int err = -ENOMEM;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
goto out;
stream->comp_opts = comp_opts;
mutex_init(&stream->mutex);
INIT_LIST_HEAD(&stream->strm_list);
init_waitqueue_head(&stream->wait);
	/*
	 * Always keep at least one default decompressor, so that if
	 * allocating an additional one dynamically fails later we can
	 * fall back to it and the filesystem keeps working.
	 */
decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
if (!decomp_strm)
goto out;
decomp_strm->stream = msblk->decompressor->init(msblk,
stream->comp_opts);
if (IS_ERR(decomp_strm->stream)) {
err = PTR_ERR(decomp_strm->stream);
goto out;
}
list_add(&decomp_strm->list, &stream->strm_list);
stream->avail_decomp = 1;
return stream;
out:
kfree(decomp_strm);
kfree(stream);
return ERR_PTR(err);
}
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream *stream = msblk->stream;
if (stream) {
struct decomp_stream *decomp_strm;
while (!list_empty(&stream->strm_list)) {
decomp_strm = list_entry(stream->strm_list.prev,
struct decomp_stream, list);
list_del(&decomp_strm->list);
msblk->decompressor->free(decomp_strm->stream);
kfree(decomp_strm);
stream->avail_decomp--;
}
WARN_ON(stream->avail_decomp);
kfree(stream->comp_opts);
kfree(stream);
}
}
static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
struct squashfs_stream *stream)
{
struct decomp_stream *decomp_strm;
while (1) {
mutex_lock(&stream->mutex);
/* There is available decomp_stream */
if (!list_empty(&stream->strm_list)) {
decomp_strm = list_entry(stream->strm_list.prev,
struct decomp_stream, list);
list_del(&decomp_strm->list);
mutex_unlock(&stream->mutex);
break;
}
		/*
		 * If no stream is available and we are already at the
		 * limit, wait for another user to release one.
		 */
if (stream->avail_decomp >= msblk->max_thread_num)
goto wait;
/* Let's allocate new decomp */
decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
if (!decomp_strm)
goto wait;
decomp_strm->stream = msblk->decompressor->init(msblk,
stream->comp_opts);
if (IS_ERR(decomp_strm->stream)) {
kfree(decomp_strm);
goto wait;
}
stream->avail_decomp++;
WARN_ON(stream->avail_decomp > msblk->max_thread_num);
mutex_unlock(&stream->mutex);
break;
wait:
		/*
		 * If system memory is under pressure, wait for another
		 * user to release a stream instead of stressing the VM,
		 * which could cause page cache thrashing.
		 */
mutex_unlock(&stream->mutex);
wait_event(stream->wait,
!list_empty(&stream->strm_list));
}
return decomp_strm;
}
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
int offset, int length,
struct squashfs_page_actor *output)
{
int res;
struct squashfs_stream *stream = msblk->stream;
struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
bio, offset, length, output);
put_decomp_stream(decomp_stream, stream);
if (res < 0)
ERROR("%s decompression failed, data probably corrupt\n",
msblk->decompressor->name);
return res;
}
const struct squashfs_decompressor_thread_ops squashfs_decompressor_multi = {
.create = squashfs_decompressor_create,
.destroy = squashfs_decompressor_destroy,
.decompress = squashfs_decompress,
.max_decompressors = squashfs_max_decompressors,
};
| linux-master | fs/squashfs/decompressor_multi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Phillip Lougher <[email protected]>
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
/*
* This file implements single-threaded decompression in the
* decompressor framework
*/
struct squashfs_stream {
void *stream;
struct mutex mutex;
};
static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
void *comp_opts)
{
struct squashfs_stream *stream;
int err = -ENOMEM;
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL)
goto out;
stream->stream = msblk->decompressor->init(msblk, comp_opts);
if (IS_ERR(stream->stream)) {
err = PTR_ERR(stream->stream);
goto out;
}
kfree(comp_opts);
mutex_init(&stream->mutex);
return stream;
out:
kfree(stream);
return ERR_PTR(err);
}
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream *stream = msblk->stream;
if (stream) {
msblk->decompressor->free(stream->stream);
kfree(stream);
}
}
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
int offset, int length,
struct squashfs_page_actor *output)
{
int res;
struct squashfs_stream *stream = msblk->stream;
mutex_lock(&stream->mutex);
res = msblk->decompressor->decompress(msblk, stream->stream, bio,
offset, length, output);
mutex_unlock(&stream->mutex);
if (res < 0)
ERROR("%s decompression failed, data probably corrupt\n",
msblk->decompressor->name);
return res;
}
static int squashfs_max_decompressors(void)
{
return 1;
}
const struct squashfs_decompressor_thread_ops squashfs_decompressor_single = {
.create = squashfs_decompressor_create,
.destroy = squashfs_decompressor_destroy,
.decompress = squashfs_decompress,
.max_decompressors = squashfs_max_decompressors,
};
| linux-master | fs/squashfs/decompressor_single.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Phillip Lougher <[email protected]>
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected)
{
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
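	/*
	 * mask covers the page-cache pages backed by one squashfs block:
	 * e.g. a 128K block with 4K pages gives mask = 31, so start_index
	 * and end_index span the 32 pages of the enclosing block.
	 */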
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
int i, n, pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
void *pageaddr;
if (end_index > file_end)
end_index = file_end;
pages = end_index - start_index + 1;
page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
if (page == NULL)
return res;
/* Try to grab all the pages covered by the Squashfs block */
for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == target_page->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL)
continue;
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
continue;
}
i++;
}
pages = i;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
if (actor == NULL)
goto out;
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
squashfs_page_actor_free(actor);
if (res < 0)
goto mark_errored;
if (res != expected) {
res = -EIO;
goto mark_errored;
}
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (page[pages - 1]->index == end_index && bytes) {
pageaddr = kmap_local_page(page[pages - 1]);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_local(pageaddr);
}
/* Mark pages as uptodate, unlock and release */
for (i = 0; i < pages; i++) {
flush_dcache_page(page[i]);
SetPageUptodate(page[i]);
unlock_page(page[i]);
if (page[i] != target_page)
put_page(page[i]);
}
kfree(page);
return 0;
mark_errored:
	/* Decompression failed, mark pages as errored. target_page is
	 * dealt with by the caller.
	 */
for (i = 0; i < pages; i++) {
if (page[i] == NULL || page[i] == target_page)
continue;
flush_dcache_page(page[i]);
SetPageError(page[i]);
unlock_page(page[i]);
put_page(page[i]);
}
out:
kfree(page);
return res;
}
| linux-master | fs/squashfs/file_direct.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* dir.c
*/
/*
* This file implements code to read directories from disk.
*
* See namei.c for a description of directory organisation on disk.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
static const unsigned char squashfs_filetype_table[] = {
DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
};
/*
* Lookup offset (f_pos) in the directory index, returning the
* metadata block containing it.
*
* If we get an error reading the index then return the part of the index
* (if any) we have managed to read - the index isn't essential, just
* quicker.
*/
static int get_dir_index_using_offset(struct super_block *sb,
u64 *next_block, int *next_offset, u64 index_start, int index_offset,
int i_count, u64 f_pos)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int err, i, index, length = 0;
unsigned int size;
struct squashfs_dir_index dir_index;
TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
i_count, f_pos);
/*
* Translate from external f_pos to the internal f_pos. This
* is offset by 3 because we invent "." and ".." entries which are
* not actually stored in the directory.
*/
if (f_pos <= 3)
return f_pos;
f_pos -= 3;
for (i = 0; i < i_count; i++) {
err = squashfs_read_metadata(sb, &dir_index, &index_start,
&index_offset, sizeof(dir_index));
if (err < 0)
break;
index = le32_to_cpu(dir_index.index);
if (index > f_pos)
/*
* Found the index we're looking for.
*/
break;
size = le32_to_cpu(dir_index.size) + 1;
/* size should never be larger than SQUASHFS_NAME_LEN */
if (size > SQUASHFS_NAME_LEN)
break;
err = squashfs_read_metadata(sb, NULL, &index_start,
&index_offset, size);
if (err < 0)
break;
length = index;
*next_block = le32_to_cpu(dir_index.start_block) +
msblk->directory_table;
}
*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
* Translate back from internal f_pos to external f_pos.
*/
return length + 3;
}
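/*
 * For illustration: emitting "." and ".." in squashfs_readdir() below
 * advances ctx->pos by 1 and 2 (reaching 3), so external f_pos 3
 * corresponds to on-disk offset 0; hence the +/- 3 translation above.
 */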
static int squashfs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
u64 block = squashfs_i(inode)->start + msblk->directory_table;
int offset = squashfs_i(inode)->offset, length, err;
unsigned int inode_number, dir_count, size, type;
struct squashfs_dir_header dirh;
struct squashfs_dir_entry *dire;
TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset);
dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
if (dire == NULL) {
ERROR("Failed to allocate squashfs_dir_entry\n");
goto finish;
}
/*
* Return "." and ".." entries as the first two filenames in the
* directory. To maximise compression these two entries are not
* stored in the directory, and so we invent them here.
*
* It also means that the external f_pos is offset by 3 from the
* on-disk directory f_pos.
*/
while (ctx->pos < 3) {
char *name;
int i_ino;
if (ctx->pos == 0) {
name = ".";
size = 1;
i_ino = inode->i_ino;
} else {
name = "..";
size = 2;
i_ino = squashfs_i(inode)->parent;
}
if (!dir_emit(ctx, name, size, i_ino,
squashfs_filetype_table[1]))
goto finish;
ctx->pos += size;
}
length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
squashfs_i(inode)->dir_idx_start,
squashfs_i(inode)->dir_idx_offset,
squashfs_i(inode)->dir_idx_cnt,
ctx->pos);
while (length < i_size_read(inode)) {
/*
* Read directory header
*/
err = squashfs_read_metadata(inode->i_sb, &dirh, &block,
&offset, sizeof(dirh));
if (err < 0)
goto failed_read;
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
if (dir_count > SQUASHFS_DIR_COUNT)
goto failed_read;
while (dir_count--) {
/*
* Read directory entry.
*/
err = squashfs_read_metadata(inode->i_sb, dire, &block,
&offset, sizeof(*dire));
if (err < 0)
goto failed_read;
size = le16_to_cpu(dire->size) + 1;
/* size should never be larger than SQUASHFS_NAME_LEN */
if (size > SQUASHFS_NAME_LEN)
goto failed_read;
err = squashfs_read_metadata(inode->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
goto failed_read;
length += sizeof(*dire) + size;
if (ctx->pos >= length)
continue;
dire->name[size] = '\0';
inode_number = le32_to_cpu(dirh.inode_number) +
((short) le16_to_cpu(dire->inode_number));
type = le16_to_cpu(dire->type);
if (type > SQUASHFS_MAX_DIR_TYPE)
goto failed_read;
if (!dir_emit(ctx, dire->name, size,
inode_number,
squashfs_filetype_table[type]))
goto finish;
ctx->pos = length;
}
}
finish:
kfree(dire);
return 0;
failed_read:
ERROR("Unable to read directory block [%llx:%x]\n", block, offset);
kfree(dire);
return 0;
}
const struct file_operations squashfs_dir_ops = {
.read = generic_read_dir,
.iterate_shared = squashfs_readdir,
.llseek = generic_file_llseek,
};
| linux-master | fs/squashfs/dir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2010
* Phillip Lougher <[email protected]>
*
* xattr_id.c
*/
/*
* This file implements code to map the 32-bit xattr id stored in the inode
* into the on disk location of the xattr data.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "xattr.h"
/*
* Map xattr id using the xattr id look up table
*/
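/*
 * A hedged sketch of the mapping arithmetic (assuming the usual
 * definitions in squashfs_fs.h: 8 KiB metadata blocks and 16-byte
 * struct squashfs_xattr_id entries, i.e. 512 ids per block):
 *
 *	block  = SQUASHFS_XATTR_BLOCK(index);		byte index / 8192
 *	offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);	byte index % 8192
 *
 * e.g. xattr id 600 lives at <block 1, offset 1408>.  The uncompressed
 * table read at mount time (below) supplies the on-disk start of that
 * metadata block, and squashfs_read_metadata() does the rest.
 */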
int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
int *count, unsigned int *size, unsigned long long *xattr)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_XATTR_BLOCK(index);
int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
u64 start_block;
struct squashfs_xattr_id id;
int err;
if (index >= msblk->xattr_ids)
return -EINVAL;
start_block = le64_to_cpu(msblk->xattr_id_table[block]);
err = squashfs_read_metadata(sb, &id, &start_block, &offset,
sizeof(id));
if (err < 0)
return err;
*xattr = le64_to_cpu(id.xattr);
*size = le32_to_cpu(id.size);
*count = le32_to_cpu(id.count);
return 0;
}
/*
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
unsigned int len, indexes;
struct squashfs_xattr_id_table *id_table;
__le64 *table;
u64 start, end;
int n;
id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
if (IS_ERR(id_table))
return (__le64 *) id_table;
*xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
*xattr_ids = le32_to_cpu(id_table->xattr_ids);
kfree(id_table);
/* Sanity check values */
/* there is always at least one xattr id */
if (*xattr_ids == 0)
return ERR_PTR(-EINVAL);
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
/*
* The computed size of the index table (len bytes) should exactly
* match the table start and end points
*/
start = table_start + sizeof(*id_table);
end = msblk->bytes_used;
if (len != (end - start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, start, len);
if (IS_ERR(table))
return table;
/* table[0], table[1], ... table[indexes - 1] store the locations
* of the compressed xattr id blocks. Each entry should be less than
* the next (i.e. table[0] < table[1]), and the difference between them
* should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
* should be less than table_start, and again the difference
	 * should be SQUASHFS_METADATA_SIZE or less.
*
* Finally xattr_table_start should be less than table[0].
*/
for (n = 0; n < (indexes - 1); n++) {
start = le64_to_cpu(table[n]);
end = le64_to_cpu(table[n + 1]);
if (start >= end || (end - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
}
start = le64_to_cpu(table[indexes - 1]);
if (start >= table_start || (table_start - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
if (*xattr_table_start >= le64_to_cpu(table[0])) {
kfree(table);
return ERR_PTR(-EINVAL);
}
return table;
}
| linux-master | fs/squashfs/xattr_id.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* inode.c
*/
/*
* This file implements code to create and read inodes from disk.
*
 * Inodes in Squashfs are identified by a 48-bit inode reference encoding the
* location of the compressed metadata block containing the inode, and the byte
* offset into that block where the inode is placed (<block, offset>).
*
* To maximise compression there are different inodes for each file type
* (regular file, directory, device, etc.), the inode contents and length
* varying with the type.
*
* To further maximise compression, two types of regular file inode and
* directory inode are defined: inodes optimised for frequently occurring
* regular files and directories, and extended types where extra
* information has to be stored.
*/
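/*
 * A rough sketch of the 48-bit inode reference layout (hedged; the
 * authoritative definitions are the SQUASHFS_INODE_BLK(),
 * SQUASHFS_INODE_OFFSET() and SQUASHFS_MKINODE() macros in
 * squashfs_fs.h):
 *
 *	ino    = SQUASHFS_MKINODE(block, offset);	block << 16 | offset
 *	block  = SQUASHFS_INODE_BLK(ino);	start of the metadata block
 *						within the inode table
 *	offset = SQUASHFS_INODE_OFFSET(ino);	offset into the decompressed
 *						block (< 8 KiB, so 16 bits)
 */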
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/xattr.h>
#include <linux/pagemap.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
/*
* Initialise VFS inode with the base inode information common to all
 * Squashfs inode types. sqsh_ino contains the base inode as read off
 * disk, still in little-endian (unswapped) form.
*/
static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
struct squashfs_base_inode *sqsh_ino)
{
uid_t i_uid;
gid_t i_gid;
int err;
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
if (err)
return err;
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid);
if (err)
return err;
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
inode->i_mode = le16_to_cpu(sqsh_ino->mode);
inode->i_size = 0;
return err;
}
struct inode *squashfs_iget(struct super_block *sb, long long ino,
unsigned int ino_number)
{
struct inode *inode = iget_locked(sb, ino_number);
int err;
TRACE("Entered squashfs_iget\n");
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
err = squashfs_read_inode(inode, ino);
if (err) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
return inode;
}
/*
* Initialise VFS inode by reading inode from inode table (compressed
* metadata). The format and amount of data read depends on type.
*/
int squashfs_read_inode(struct inode *inode, long long ino)
{
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
union squashfs_inode squashfs_ino;
struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
int xattr_id = SQUASHFS_INVALID_XATTR;
TRACE("Entered squashfs_read_inode\n");
/*
* Read inode base common to all inode types.
*/
err = squashfs_read_metadata(sb, sqshb_ino, &block,
&offset, sizeof(*sqshb_ino));
if (err < 0)
goto failed_read;
err = squashfs_new_inode(sb, inode, sqshb_ino);
if (err)
goto failed_read;
block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
offset = SQUASHFS_INODE_OFFSET(ino);
type = le16_to_cpu(sqshb_ino->inode_type);
switch (type) {
case SQUASHFS_REG_TYPE: {
unsigned int frag_offset, frag;
int frag_size;
u64 frag_blk;
struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
frag = le32_to_cpu(sqsh_ino->fragment);
if (frag != SQUASHFS_INVALID_FRAG) {
frag_offset = le32_to_cpu(sqsh_ino->offset);
frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
if (frag_size < 0) {
err = frag_size;
goto failed_read;
}
} else {
frag_blk = SQUASHFS_INVALID_BLK;
frag_size = 0;
frag_offset = 0;
}
set_nlink(inode, 1);
inode->i_size = le32_to_cpu(sqsh_ino->file_size);
inode->i_fop = &generic_ro_fops;
inode->i_mode |= S_IFREG;
inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
squashfs_i(inode)->fragment_block = frag_blk;
squashfs_i(inode)->fragment_size = frag_size;
squashfs_i(inode)->fragment_offset = frag_offset;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
offset, squashfs_i(inode)->start, block, offset);
break;
}
case SQUASHFS_LREG_TYPE: {
unsigned int frag_offset, frag;
int frag_size;
u64 frag_blk;
struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
frag = le32_to_cpu(sqsh_ino->fragment);
if (frag != SQUASHFS_INVALID_FRAG) {
frag_offset = le32_to_cpu(sqsh_ino->offset);
frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
if (frag_size < 0) {
err = frag_size;
goto failed_read;
}
} else {
frag_blk = SQUASHFS_INVALID_BLK;
frag_size = 0;
frag_offset = 0;
}
xattr_id = le32_to_cpu(sqsh_ino->xattr);
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le64_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_inode_ops;
inode->i_fop = &generic_ro_fops;
inode->i_mode |= S_IFREG;
inode->i_blocks = (inode->i_size -
le64_to_cpu(sqsh_ino->sparse) + 511) >> 9;
squashfs_i(inode)->fragment_block = frag_blk;
squashfs_i(inode)->fragment_size = frag_size;
squashfs_i(inode)->fragment_offset = frag_offset;
squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
offset, squashfs_i(inode)->start, block, offset);
break;
}
case SQUASHFS_DIR_TYPE: {
struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le16_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_dir_inode_ops;
inode->i_fop = &squashfs_dir_ops;
inode->i_mode |= S_IFDIR;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
squashfs_i(inode)->dir_idx_cnt = 0;
squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
SQUASHFS_INODE_BLK(ino), offset,
squashfs_i(inode)->start,
le16_to_cpu(sqsh_ino->offset));
break;
}
case SQUASHFS_LDIR_TYPE: {
struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->file_size);
inode->i_op = &squashfs_dir_inode_ops;
inode->i_fop = &squashfs_dir_ops;
inode->i_mode |= S_IFDIR;
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
squashfs_i(inode)->dir_idx_start = block;
squashfs_i(inode)->dir_idx_offset = offset;
squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
TRACE("Long directory inode %x:%x, start_block %llx, offset "
"%x\n", SQUASHFS_INODE_BLK(ino), offset,
squashfs_i(inode)->start,
le16_to_cpu(sqsh_ino->offset));
break;
}
case SQUASHFS_SYMLINK_TYPE:
case SQUASHFS_LSYMLINK_TYPE: {
struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
inode->i_op = &squashfs_symlink_inode_ops;
inode_nohighmem(inode);
inode->i_data.a_ops = &squashfs_symlink_aops;
inode->i_mode |= S_IFLNK;
squashfs_i(inode)->start = block;
squashfs_i(inode)->offset = offset;
if (type == SQUASHFS_LSYMLINK_TYPE) {
__le32 xattr;
err = squashfs_read_metadata(sb, NULL, &block,
&offset, inode->i_size);
if (err < 0)
goto failed_read;
err = squashfs_read_metadata(sb, &xattr, &block,
&offset, sizeof(xattr));
if (err < 0)
goto failed_read;
xattr_id = le32_to_cpu(xattr);
}
TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
"%x\n", SQUASHFS_INODE_BLK(ino), offset,
block, offset);
break;
}
case SQUASHFS_BLKDEV_TYPE:
case SQUASHFS_CHRDEV_TYPE: {
struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
unsigned int rdev;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_CHRDEV_TYPE)
inode->i_mode |= S_IFCHR;
else
inode->i_mode |= S_IFBLK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
break;
}
case SQUASHFS_LBLKDEV_TYPE:
case SQUASHFS_LCHRDEV_TYPE: {
struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
unsigned int rdev;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_LCHRDEV_TYPE)
inode->i_mode |= S_IFCHR;
else
inode->i_mode |= S_IFBLK;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
break;
}
case SQUASHFS_FIFO_TYPE:
case SQUASHFS_SOCKET_TYPE: {
struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_FIFO_TYPE)
inode->i_mode |= S_IFIFO;
else
inode->i_mode |= S_IFSOCK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
break;
}
case SQUASHFS_LFIFO_TYPE:
case SQUASHFS_LSOCKET_TYPE: {
struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;
err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
sizeof(*sqsh_ino));
if (err < 0)
goto failed_read;
if (type == SQUASHFS_LFIFO_TYPE)
inode->i_mode |= S_IFIFO;
else
inode->i_mode |= S_IFSOCK;
xattr_id = le32_to_cpu(sqsh_ino->xattr);
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
break;
}
default:
ERROR("Unknown inode type %d in squashfs_iget!\n", type);
return -EINVAL;
}
if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
err = squashfs_xattr_lookup(sb, xattr_id,
&squashfs_i(inode)->xattr_count,
&squashfs_i(inode)->xattr_size,
&squashfs_i(inode)->xattr);
if (err < 0)
goto failed_read;
inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
+ 1;
} else
squashfs_i(inode)->xattr_count = 0;
return 0;
failed_read:
ERROR("Unable to read inode 0x%llx\n", ino);
return err;
}
const struct inode_operations squashfs_inode_ops = {
.listxattr = squashfs_listxattr
};
| linux-master | fs/squashfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* namei.c
*/
/*
* This file implements code to do filename lookup in directories.
*
* Like inodes, directories are packed into compressed metadata blocks, stored
* in a directory table. Directories are accessed using the start address of
* the metablock containing the directory and the offset into the
* decompressed block (<block, offset>).
*
* Directories are organised in a slightly complex way, and are not simply
* a list of file names. The organisation takes advantage of the
* fact that (in most cases) the inodes of the files will be in the same
* compressed metadata block, and therefore, can share the start block.
* Directories are therefore organised in a two level list, a directory
* header containing the shared start block value, and a sequence of directory
* entries, each of which share the shared start block. A new directory header
* is written once/if the inode start block changes. The directory
* header/directory entry list is repeated as many times as necessary.
*
* Directories are sorted, and can contain a directory index to speed up
* file lookup. Directory indexes store one entry per metablock, each entry
* storing the index/filename mapping to the first directory header
* in each metadata block. Directories are sorted in alphabetical order,
* and at lookup the index is scanned linearly looking for the first filename
* alphabetically larger than the filename being looked up. At this point the
* location of the metadata block the filename is in has been found.
 * The general idea of the index is to ensure only one metadata block needs to
 * be decompressed to do a lookup irrespective of the length of the directory.
 * This scheme has the advantage that it doesn't impose much extra memory
 * overhead and doesn't require much extra storage on disk.
*/
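/*
 * Schematic of the two-level layout described above (field names taken
 * from the structures used in this file; lengths elided):
 *
 *	squashfs_dir_header    { count, start_block, inode_number }
 *	    squashfs_dir_entry { offset, inode_number delta, type, size, name }
 *	    squashfs_dir_entry { ... }	<- shares the header's start_block
 *	squashfs_dir_header    { ... }	<- emitted when start_block changes
 *	    squashfs_dir_entry { ... }
 */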
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dcache.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
/*
* Lookup name in the directory index, returning the location of the metadata
* block containing it, and the directory index this represents.
*
* If we get an error reading the index then return the part of the index
* (if any) we have managed to read - the index isn't essential, just
* quicker.
*/
static int get_dir_index_using_name(struct super_block *sb,
u64 *next_block, int *next_offset, u64 index_start,
int index_offset, int i_count, const char *name,
int len)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int i, length = 0, err;
unsigned int size;
struct squashfs_dir_index *index;
char *str;
TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
if (index == NULL) {
ERROR("Failed to allocate squashfs_dir_index\n");
goto out;
}
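	/*
	 * The single allocation above is used for two strings: the on-disk
	 * index name read into index->name, and (starting
	 * SQUASHFS_NAME_LEN + 1 bytes in) a NUL-terminated copy of the
	 * name being looked up.
	 */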
str = &index->name[SQUASHFS_NAME_LEN + 1];
strncpy(str, name, len);
str[len] = '\0';
for (i = 0; i < i_count; i++) {
err = squashfs_read_metadata(sb, index, &index_start,
&index_offset, sizeof(*index));
if (err < 0)
break;
size = le32_to_cpu(index->size) + 1;
if (size > SQUASHFS_NAME_LEN)
break;
err = squashfs_read_metadata(sb, index->name, &index_start,
&index_offset, size);
if (err < 0)
break;
index->name[size] = '\0';
if (strcmp(index->name, str) > 0)
break;
length = le32_to_cpu(index->index);
*next_block = le32_to_cpu(index->start_block) +
msblk->directory_table;
}
*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
kfree(index);
out:
/*
* Return index (f_pos) of the looked up metadata block. Translate
* from internal f_pos to external f_pos which is offset by 3 because
* we invent "." and ".." entries which are not actually stored in the
* directory.
*/
return length + 3;
}
static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
const unsigned char *name = dentry->d_name.name;
int len = dentry->d_name.len;
struct inode *inode = NULL;
struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info;
struct squashfs_dir_header dirh;
struct squashfs_dir_entry *dire;
u64 block = squashfs_i(dir)->start + msblk->directory_table;
int offset = squashfs_i(dir)->offset;
int err, length;
unsigned int dir_count, size;
TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
if (dire == NULL) {
ERROR("Failed to allocate squashfs_dir_entry\n");
return ERR_PTR(-ENOMEM);
}
if (len > SQUASHFS_NAME_LEN) {
err = -ENAMETOOLONG;
goto failed;
}
length = get_dir_index_using_name(dir->i_sb, &block, &offset,
squashfs_i(dir)->dir_idx_start,
squashfs_i(dir)->dir_idx_offset,
squashfs_i(dir)->dir_idx_cnt, name, len);
while (length < i_size_read(dir)) {
/*
* Read directory header.
*/
err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
&offset, sizeof(dirh));
if (err < 0)
goto read_failure;
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
if (dir_count > SQUASHFS_DIR_COUNT)
goto data_error;
while (dir_count--) {
/*
* Read directory entry.
*/
err = squashfs_read_metadata(dir->i_sb, dire, &block,
&offset, sizeof(*dire));
if (err < 0)
goto read_failure;
size = le16_to_cpu(dire->size) + 1;
/* size should never be larger than SQUASHFS_NAME_LEN */
if (size > SQUASHFS_NAME_LEN)
goto data_error;
err = squashfs_read_metadata(dir->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
goto read_failure;
length += sizeof(*dire) + size;
if (name[0] < dire->name[0])
goto exit_lookup;
if (len == size && !strncmp(name, dire->name, len)) {
unsigned int blk, off, ino_num;
long long ino;
blk = le32_to_cpu(dirh.start_block);
off = le16_to_cpu(dire->offset);
ino_num = le32_to_cpu(dirh.inode_number) +
(short) le16_to_cpu(dire->inode_number);
ino = SQUASHFS_MKINODE(blk, off);
TRACE("calling squashfs_iget for directory "
"entry %s, inode %x:%x, %d\n", name,
blk, off, ino_num);
inode = squashfs_iget(dir->i_sb, ino, ino_num);
goto exit_lookup;
}
}
}
exit_lookup:
kfree(dire);
return d_splice_alias(inode, dentry);
data_error:
err = -EIO;
read_failure:
ERROR("Unable to read directory block [%llx:%x]\n",
squashfs_i(dir)->start + msblk->directory_table,
squashfs_i(dir)->offset);
failed:
kfree(dire);
return ERR_PTR(err);
}
const struct inode_operations squashfs_dir_inode_ops = {
.lookup = squashfs_lookup,
.listxattr = squashfs_listxattr
};
| linux-master | fs/squashfs/namei.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
* Phillip Lougher <[email protected]>
*
* decompressor.c
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
#include "page_actor.h"
/*
* This file (and decompressor.h) implements a decompressor framework for
* Squashfs, allowing multiple decompressors to be easily supported
*/
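/*
 * Roughly how a decompressor slots into this framework (a sketch modelled
 * on the lz4 wrapper elsewhere in this tree; "foo"/FOO_COMPRESSION are
 * placeholders):
 *
 *	static const struct squashfs_decompressor squashfs_foo_comp_ops = {
 *		.init = foo_init,
 *		.comp_opts = foo_comp_opts,
 *		.free = foo_free,
 *		.decompress = foo_uncompress,
 *		.id = FOO_COMPRESSION,
 *		.name = "foo",
 *		.supported = 1
 *	};
 *
 * Algorithms compiled out of the kernel are stubbed below with NULL ops,
 * so squashfs_lookup_decompressor() can still name them in error messages.
 */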
static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};
#ifndef CONFIG_SQUASHFS_LZ4
static const struct squashfs_decompressor squashfs_lz4_comp_ops = {
NULL, NULL, NULL, NULL, LZ4_COMPRESSION, "lz4", 0
};
#endif
#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
#endif
#ifndef CONFIG_SQUASHFS_XZ
static const struct squashfs_decompressor squashfs_xz_comp_ops = {
NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
};
#endif
#ifndef CONFIG_SQUASHFS_ZLIB
static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
};
#endif
#ifndef CONFIG_SQUASHFS_ZSTD
static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
};
#endif
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
NULL, NULL, NULL, NULL, 0, "unknown", 0
};
static const struct squashfs_decompressor *decompressor[] = {
&squashfs_zlib_comp_ops,
&squashfs_lz4_comp_ops,
&squashfs_lzo_comp_ops,
&squashfs_xz_comp_ops,
&squashfs_lzma_unsupported_comp_ops,
&squashfs_zstd_comp_ops,
&squashfs_unknown_comp_ops
};
const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
{
int i;
for (i = 0; decompressor[i]->id; i++)
if (id == decompressor[i]->id)
break;
return decompressor[i];
}
static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
void *buffer = NULL, *comp_opts;
struct squashfs_page_actor *actor = NULL;
int length = 0;
/*
* Read decompressor specific options from file system if present
*/
if (SQUASHFS_COMP_OPTS(flags)) {
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buffer == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto out;
}
actor = squashfs_page_actor_init(&buffer, 1, 0);
if (actor == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto out;
}
length = squashfs_read_data(sb,
sizeof(struct squashfs_super_block), 0, NULL, actor);
if (length < 0) {
comp_opts = ERR_PTR(length);
goto out;
}
}
comp_opts = squashfs_comp_opts(msblk, buffer, length);
out:
kfree(actor);
kfree(buffer);
return comp_opts;
}
void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
void *stream, *comp_opts = get_comp_opts(sb, flags);
if (IS_ERR(comp_opts))
return comp_opts;
stream = msblk->thread_ops->create(msblk, comp_opts);
if (IS_ERR(stream))
kfree(comp_opts);
return stream;
}
| linux-master | fs/squashfs/decompressor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013, 2014
* Phillip Lougher <[email protected]>
*/
#include <linux/bio.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
#define LZ4_LEGACY 1
struct lz4_comp_opts {
__le32 version;
__le32 flags;
};
struct squashfs_lz4 {
void *input;
void *output;
};
static void *lz4_comp_opts(struct squashfs_sb_info *msblk,
void *buff, int len)
{
struct lz4_comp_opts *comp_opts = buff;
/* LZ4 compressed filesystems always have compression options */
if (comp_opts == NULL || len < sizeof(*comp_opts))
return ERR_PTR(-EIO);
if (le32_to_cpu(comp_opts->version) != LZ4_LEGACY) {
/* LZ4 format currently used by the kernel is the 'legacy'
* format */
ERROR("Unknown LZ4 version\n");
return ERR_PTR(-EINVAL);
}
return NULL;
}
static void *lz4_init(struct squashfs_sb_info *msblk, void *buff)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
struct squashfs_lz4 *stream;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
stream->input = vmalloc(block_size);
if (stream->input == NULL)
goto failed2;
stream->output = vmalloc(block_size);
if (stream->output == NULL)
goto failed3;
return stream;
failed3:
vfree(stream->input);
failed2:
kfree(stream);
failed:
ERROR("Failed to initialise LZ4 decompressor\n");
return ERR_PTR(-ENOMEM);
}
static void lz4_free(void *strm)
{
struct squashfs_lz4 *stream = strm;
if (stream) {
vfree(stream->input);
vfree(stream->output);
}
kfree(stream);
}
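/*
 * Decompression is staged through the two bounce buffers set up in
 * lz4_init(): the compressed bio segments are first gathered into
 * stream->input, LZ4_decompress_safe() expands them into stream->output,
 * and the result is then copied page by page into the page actor's
 * destination pages.
 */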
static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
struct squashfs_lz4 *stream = strm;
void *buff = stream->input, *data;
int bytes = length, res;
while (bio_next_segment(bio, &iter_all)) {
int avail = min(bytes, ((int)bvec->bv_len) - offset);
data = bvec_virt(bvec);
memcpy(buff, data + offset, avail);
buff += avail;
bytes -= avail;
offset = 0;
}
res = LZ4_decompress_safe(stream->input, stream->output,
length, output->length);
if (res < 0)
return -EIO;
bytes = res;
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
}
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
}
squashfs_finish_page(output);
return res;
}
const struct squashfs_decompressor squashfs_lz4_comp_ops = {
.init = lz4_init,
.comp_opts = lz4_comp_opts,
.free = lz4_free,
.decompress = lz4_uncompress,
.id = LZ4_COMPRESSION,
.name = "lz4",
.alloc_buffer = 0,
.supported = 1
};
| linux-master | fs/squashfs/lz4_wrapper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* export.c
*/
/*
* This file implements code to make Squashfs filesystems exportable (NFS etc.)
*
* The export code uses an inode lookup table to map inode numbers passed in
* filehandles to an inode location on disk. This table is stored compressed
 * into metadata blocks. A second index table is used to locate these. For
 * speed of access (and because it is small) this second index table is read
 * at mount time and cached in memory.
*
* The inode lookup table is used only by the export code, inode disk
* locations are directly encoded in directories, enabling direct access
* without an intermediate lookup for all operations except the export ops.
*/
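/*
 * Hedged sketch of the lookup arithmetic (entries are __le64, so with
 * 8 KiB metadata blocks there are 1024 inode locations per block):
 *
 *	blk    = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);		entry index
 *	offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);	byte offset
 *	start  = le64_to_cpu(msblk->inode_lookup_table[blk]);
 *
 * after which squashfs_read_metadata() fetches the 8-byte inode location,
 * exactly as squashfs_inode_lookup() below does.
 */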
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/dcache.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
/*
* Look-up inode number (ino) in table, returning the inode location.
*/
static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
u64 start;
__le64 ino;
int err;
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
return -EINVAL;
start = le64_to_cpu(msblk->inode_lookup_table[blk]);
err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
if (err < 0)
return err;
TRACE("squashfs_inode_lookup, inode = 0x%llx\n",
(u64) le64_to_cpu(ino));
return le64_to_cpu(ino);
}
static struct dentry *squashfs_export_iget(struct super_block *sb,
unsigned int ino_num)
{
long long ino;
struct dentry *dentry = ERR_PTR(-ENOENT);
TRACE("Entered squashfs_export_iget\n");
ino = squashfs_inode_lookup(sb, ino_num);
if (ino >= 0)
dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num));
return dentry;
}
static struct dentry *squashfs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT)
|| fh_len < 2)
return NULL;
return squashfs_export_iget(sb, fid->i32.ino);
}
static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4)
return NULL;
return squashfs_export_iget(sb, fid->i32.parent_ino);
}
static struct dentry *squashfs_get_parent(struct dentry *child)
{
struct inode *inode = d_inode(child);
unsigned int parent_ino = squashfs_i(inode)->parent;
return squashfs_export_iget(inode->i_sb, parent_ino);
}
/*
* Read uncompressed inode lookup table indexes off disk into memory
*/
__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
u64 lookup_table_start, u64 next_table, unsigned int inodes)
{
unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
int n;
__le64 *table;
u64 start, end;
TRACE("In read_inode_lookup_table, length %d\n", length);
/* Sanity check values */
/* there should always be at least one inode */
if (inodes == 0)
return ERR_PTR(-EINVAL);
/*
* The computed size of the lookup table (length bytes) should exactly
* match the table start and end points
*/
if (length != (next_table - lookup_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, lookup_table_start, length);
if (IS_ERR(table))
return table;
/*
	 * table[0], table[1], ... table[indexes - 1] store the locations
* of the compressed inode lookup blocks. Each entry should be
* less than the next (i.e. table[0] < table[1]), and the difference
* between them should be SQUASHFS_METADATA_SIZE or less.
* table[indexes - 1] should be less than lookup_table_start, and
* again the difference should be SQUASHFS_METADATA_SIZE or less
*/
for (n = 0; n < (indexes - 1); n++) {
start = le64_to_cpu(table[n]);
end = le64_to_cpu(table[n + 1]);
if (start >= end
|| (end - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
}
start = le64_to_cpu(table[indexes - 1]);
if (start >= lookup_table_start ||
(lookup_table_start - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
return table;
}
const struct export_operations squashfs_export_ops = {
.fh_to_dentry = squashfs_fh_to_dentry,
.fh_to_parent = squashfs_fh_to_parent,
.get_parent = squashfs_get_parent
};
| linux-master | fs/squashfs/export.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* symlink.c
*/
/*
* This file implements code to handle symbolic links.
*
* The data contents of symbolic links are stored inside the symbolic
* link inode within the inode table. This allows the normally small symbolic
* link to be compressed as part of the inode table, achieving much greater
* compression than if the symbolic link was compressed individually.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
int index = page->index << PAGE_SHIFT;
u64 block = squashfs_i(inode)->start;
int offset = squashfs_i(inode)->offset;
int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
int bytes, copied;
void *pageaddr;
struct squashfs_cache_entry *entry;
TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
"%llx, offset %x\n", page->index, block, offset);
/*
* Skip index bytes into symlink metadata.
*/
if (index) {
bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
index);
if (bytes < 0) {
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
goto error_out;
}
}
/*
* Read length bytes from symlink metadata. Squashfs_read_metadata
* is not used here because it can sleep and we want to use
* kmap_atomic to map the page. Instead call the underlying
* squashfs_cache_get routine. As length bytes may overlap metadata
* blocks, we may need to call squashfs_cache_get multiple times.
*/
for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
if (entry->error) {
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
squashfs_cache_put(entry);
goto error_out;
}
pageaddr = kmap_atomic(page);
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
memset(pageaddr + length, 0, PAGE_SIZE - length);
else
block = entry->next_index;
kunmap_atomic(pageaddr);
squashfs_cache_put(entry);
}
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
return 0;
error_out:
SetPageError(page);
unlock_page(page);
return 0;
}
const struct address_space_operations squashfs_symlink_aops = {
.read_folio = squashfs_symlink_read_folio
};
const struct inode_operations squashfs_symlink_inode_ops = {
.get_link = page_get_link,
.listxattr = squashfs_listxattr
};
| linux-master | fs/squashfs/symlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* cache.c
*/
/*
* Blocks in Squashfs are compressed. To avoid repeatedly decompressing
* recently accessed data Squashfs uses two small metadata and fragment caches.
*
* This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
* access the metadata and fragment caches.
*
* To avoid out of memory and fragmentation issues with vmalloc the cache
* uses sequences of kmalloced PAGE_SIZE buffers.
*
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way. The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access. Because metadata and fragments are packed
 * together into blocks (to gain greater compression) the read of a particular
 * piece of metadata or fragment will retrieve other metadata/fragments which
 * have been packed with it, and which, because of locality of reference, may
 * be read in the near future. Temporarily caching them ensures they are
 * available for near-future access without requiring an additional read and
 * decompress.
*/
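/*
 * The usage pattern, mirroring the callers later in this file (not a
 * new interface):
 *
 *	entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
 *	if (entry->error)
 *		err = entry->error;
 *	else
 *		bytes = squashfs_copy_data(buffer, entry, offset, length);
 *	squashfs_cache_put(entry);
 */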
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagemap.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"
/*
* Look-up block in cache, and increment usage count. If not in cache, read
* and decompress it from disk.
*/
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
struct squashfs_cache *cache, u64 block, int length)
{
int i, n;
struct squashfs_cache_entry *entry;
spin_lock(&cache->lock);
while (1) {
for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
if (cache->entry[i].block == block) {
cache->curr_blk = i;
break;
}
i = (i + 1) % cache->entries;
}
if (n == cache->entries) {
/*
* Block not in cache, if all cache entries are used
* go to sleep waiting for one to become available.
*/
if (cache->unused == 0) {
cache->num_waiters++;
spin_unlock(&cache->lock);
wait_event(cache->wait_queue, cache->unused);
spin_lock(&cache->lock);
cache->num_waiters--;
continue;
}
/*
* At least one unused cache entry. A simple
* round-robin strategy is used to choose the entry to
* be evicted from the cache.
*/
i = cache->next_blk;
for (n = 0; n < cache->entries; n++) {
if (cache->entry[i].refcount == 0)
break;
i = (i + 1) % cache->entries;
}
cache->next_blk = (i + 1) % cache->entries;
entry = &cache->entry[i];
/*
* Initialise chosen cache entry, and fill it in from
* disk.
*/
cache->unused--;
entry->block = block;
entry->refcount = 1;
entry->pending = 1;
entry->num_waiters = 0;
entry->error = 0;
spin_unlock(&cache->lock);
entry->length = squashfs_read_data(sb, block, length,
&entry->next_index, entry->actor);
spin_lock(&cache->lock);
if (entry->length < 0)
entry->error = entry->length;
entry->pending = 0;
/*
			 * While we were filling this entry, one or more
			 * other processes may have looked it up in the
			 * cache and slept waiting for it to become
			 * available.
*/
if (entry->num_waiters) {
spin_unlock(&cache->lock);
wake_up_all(&entry->wait_queue);
} else
spin_unlock(&cache->lock);
goto out;
}
/*
* Block already in cache. Increment refcount so it doesn't
* get reused until we're finished with it, if it was
* previously unused there's one less cache entry available
* for reuse.
*/
entry = &cache->entry[i];
if (entry->refcount == 0)
cache->unused--;
entry->refcount++;
/*
* If the entry is currently being filled in by another process
* go to sleep waiting for it to become available.
*/
if (entry->pending) {
entry->num_waiters++;
spin_unlock(&cache->lock);
wait_event(entry->wait_queue, !entry->pending);
} else
spin_unlock(&cache->lock);
goto out;
}
out:
TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
cache->name, i, entry->block, entry->refcount, entry->error);
if (entry->error)
ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
block);
return entry;
}
/*
* Release cache entry, once usage count is zero it can be reused.
*/
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
struct squashfs_cache *cache = entry->cache;
spin_lock(&cache->lock);
entry->refcount--;
if (entry->refcount == 0) {
cache->unused++;
/*
* If there's any processes waiting for a block to become
* available, wake one up.
*/
if (cache->num_waiters) {
spin_unlock(&cache->lock);
wake_up(&cache->wait_queue);
return;
}
}
spin_unlock(&cache->lock);
}
/*
* Delete cache reclaiming all kmalloced buffers.
*/
void squashfs_cache_delete(struct squashfs_cache *cache)
{
int i, j;
if (cache == NULL)
return;
for (i = 0; i < cache->entries; i++) {
if (cache->entry[i].data) {
for (j = 0; j < cache->pages; j++)
kfree(cache->entry[i].data[j]);
kfree(cache->entry[i].data);
}
kfree(cache->entry[i].actor);
}
kfree(cache->entry);
kfree(cache);
}
/*
* Initialise cache allocating the specified number of entries, each of
* size block_size. To avoid vmalloc fragmentation issues each entry
* is allocated as a sequence of kmalloced PAGE_SIZE buffers.
*/
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
int i, j;
struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
ERROR("Failed to allocate %s cache\n", name);
return NULL;
}
cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
if (cache->entry == NULL) {
ERROR("Failed to allocate %s cache\n", name);
goto cleanup;
}
cache->curr_blk = 0;
cache->next_blk = 0;
cache->unused = entries;
cache->entries = entries;
cache->block_size = block_size;
cache->pages = block_size >> PAGE_SHIFT;
cache->pages = cache->pages ? cache->pages : 1;
cache->name = name;
cache->num_waiters = 0;
spin_lock_init(&cache->lock);
init_waitqueue_head(&cache->wait_queue);
for (i = 0; i < entries; i++) {
struct squashfs_cache_entry *entry = &cache->entry[i];
init_waitqueue_head(&cache->entry[i].wait_queue);
entry->cache = cache;
entry->block = SQUASHFS_INVALID_BLK;
entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
if (entry->data == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
}
for (j = 0; j < cache->pages; j++) {
entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (entry->data[j] == NULL) {
ERROR("Failed to allocate %s buffer\n", name);
goto cleanup;
}
}
entry->actor = squashfs_page_actor_init(entry->data,
cache->pages, 0);
if (entry->actor == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
}
}
return cache;
cleanup:
squashfs_cache_delete(cache);
return NULL;
}
/*
* Copy up to length bytes from cache entry to buffer starting at offset bytes
 * into the cache entry. If fewer than length bytes are available then copy
 * the number of bytes available. In all cases return the number of bytes
 * copied.
*/
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
int offset, int length)
{
int remaining = length;
if (length == 0)
return 0;
else if (buffer == NULL)
return min(length, entry->length - offset);
while (offset < entry->length) {
void *buff = entry->data[offset / PAGE_SIZE]
+ (offset % PAGE_SIZE);
int bytes = min_t(int, entry->length - offset,
PAGE_SIZE - (offset % PAGE_SIZE));
if (bytes >= remaining) {
memcpy(buffer, buff, remaining);
remaining = 0;
break;
}
memcpy(buffer, buff, bytes);
buffer += bytes;
remaining -= bytes;
offset += bytes;
}
return length - remaining;
}
/*
* Read length bytes from metadata position <block, offset> (block is the
* start of the compressed block on disk, and offset is the offset into
* the block once decompressed). Data is packed into consecutive blocks,
* and length bytes may require reading more than one block.
*/
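/*
 * For example (as the directory code does), reading a structure advances
 * <block, offset> past it, leaving them positioned for the next read:
 *
 *	err = squashfs_read_metadata(sb, &dirh, &block, &offset,
 *				     sizeof(dirh));
 */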
int squashfs_read_metadata(struct super_block *sb, void *buffer,
u64 *block, int *offset, int length)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int bytes, res = length;
struct squashfs_cache_entry *entry;
TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
if (unlikely(length < 0))
return -EIO;
while (length) {
entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
if (entry->error) {
res = entry->error;
goto error;
} else if (*offset >= entry->length) {
res = -EIO;
goto error;
}
bytes = squashfs_copy_data(buffer, entry, *offset, length);
if (buffer)
buffer += bytes;
length -= bytes;
*offset += bytes;
if (*offset == entry->length) {
*block = entry->next_index;
*offset = 0;
}
squashfs_cache_put(entry);
}
return res;
error:
squashfs_cache_put(entry);
return res;
}
/*
 * Look-up in the fragment cache the fragment located at <start_block> in the
* filesystem. If necessary read and decompress it from disk.
*/
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
u64 start_block, int length)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
length);
}
/*
* Read and decompress the datablock located at <start_block> in the
* filesystem. The cache is used here to avoid duplicating locking and
* read/decompress code.
*/
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
u64 start_block, int length)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}
/*
* Read a filesystem table (uncompressed sequence of bytes) from disk
*/
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
int i, res;
void *table, *buffer, **data;
struct squashfs_page_actor *actor;
table = buffer = kmalloc(length, GFP_KERNEL);
if (table == NULL)
return ERR_PTR(-ENOMEM);
data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
if (data == NULL) {
res = -ENOMEM;
goto failed;
}
actor = squashfs_page_actor_init(data, pages, length);
if (actor == NULL) {
res = -ENOMEM;
goto failed2;
}
for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
data[i] = buffer;
res = squashfs_read_data(sb, block, length |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
kfree(data);
kfree(actor);
if (res < 0)
goto failed;
return table;
failed2:
kfree(data);
failed:
kfree(table);
return ERR_PTR(res);
}
| linux-master | fs/squashfs/cache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* file.c
*/
/*
* This file contains code for handling regular files. A regular file
* consists of a sequence of contiguous compressed blocks, and/or a
* compressed fragment block (tail-end packed block). The compressed size
* of each datablock is stored in a block list contained within the
* file inode (itself stored in one or more compressed metadata blocks).
*
* To speed up access to datablocks when reading 'large' files (256 Mbytes or
* larger), the code implements an index cache that caches the mapping from
* block index to datablock location on disk.
*
* The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
* retaining a simple and space-efficient block list on disk. The cache
* is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
* Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
* The index cache is designed to be memory efficient, and by default uses
* 16 KiB.
*/
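/*
 * Sketch of the cache structures involved (hedged; the definitions live
 * in squashfs_fs_sb.h): each index cache slot is a struct meta_index
 * holding SQUASHFS_META_ENTRIES struct meta_entry records, and each
 * meta_entry caches one <index_block, offset, data_block> triple:
 * where the block list metadata for a given file position lives, and
 * where the corresponding datablock starts on disk.
 */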
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"
/*
* Locate cache slot in range [offset, index] for specified inode. If
 * there's more than one, return the slot closest to index.
*/
static struct meta_index *locate_meta_index(struct inode *inode, int offset,
int index)
{
struct meta_index *meta = NULL;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int i;
mutex_lock(&msblk->meta_index_mutex);
TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
if (msblk->meta_index == NULL)
goto not_allocated;
for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
if (msblk->meta_index[i].inode_number == inode->i_ino &&
msblk->meta_index[i].offset >= offset &&
msblk->meta_index[i].offset <= index &&
msblk->meta_index[i].locked == 0) {
TRACE("locate_meta_index: entry %d, offset %d\n", i,
msblk->meta_index[i].offset);
meta = &msblk->meta_index[i];
offset = meta->offset;
}
}
if (meta)
meta->locked = 1;
not_allocated:
mutex_unlock(&msblk->meta_index_mutex);
return meta;
}
/*
* Find and initialise an empty cache slot for index offset.
*/
static struct meta_index *empty_meta_index(struct inode *inode, int offset,
int skip)
{
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
struct meta_index *meta = NULL;
int i;
mutex_lock(&msblk->meta_index_mutex);
TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
if (msblk->meta_index == NULL) {
/*
* First time cache index has been used, allocate and
* initialise. The cache index could be allocated at
* mount time but doing it here means it is allocated only
* if a 'large' file is read.
*/
msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
sizeof(*(msblk->meta_index)), GFP_KERNEL);
if (msblk->meta_index == NULL) {
ERROR("Failed to allocate meta_index\n");
goto failed;
}
for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
msblk->meta_index[i].inode_number = 0;
msblk->meta_index[i].locked = 0;
}
msblk->next_meta_index = 0;
}
for (i = SQUASHFS_META_SLOTS; i &&
msblk->meta_index[msblk->next_meta_index].locked; i--)
msblk->next_meta_index = (msblk->next_meta_index + 1) %
SQUASHFS_META_SLOTS;
if (i == 0) {
TRACE("empty_meta_index: failed!\n");
goto failed;
}
TRACE("empty_meta_index: returned meta entry %d, %p\n",
msblk->next_meta_index,
&msblk->meta_index[msblk->next_meta_index]);
meta = &msblk->meta_index[msblk->next_meta_index];
msblk->next_meta_index = (msblk->next_meta_index + 1) %
SQUASHFS_META_SLOTS;
meta->inode_number = inode->i_ino;
meta->offset = offset;
meta->skip = skip;
meta->entries = 0;
meta->locked = 1;
failed:
mutex_unlock(&msblk->meta_index_mutex);
return meta;
}
static void release_meta_index(struct inode *inode, struct meta_index *meta)
{
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
mutex_lock(&msblk->meta_index_mutex);
meta->locked = 0;
mutex_unlock(&msblk->meta_index_mutex);
}
/*
* Read the next n blocks from the block list, starting from
* metadata block <start_block, offset>.
*/
static long long read_indexes(struct super_block *sb, int n,
u64 *start_block, int *offset)
{
int err, i;
long long block = 0;
__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (blist == NULL) {
ERROR("read_indexes: Failed to allocate block_list\n");
return -ENOMEM;
}
while (n) {
int blocks = min_t(int, n, PAGE_SIZE >> 2);
err = squashfs_read_metadata(sb, blist, start_block,
offset, blocks << 2);
if (err < 0) {
ERROR("read_indexes: reading block [%llx:%x]\n",
*start_block, *offset);
goto failure;
}
for (i = 0; i < blocks; i++) {
int size = squashfs_block_size(blist[i]);
if (size < 0) {
err = size;
goto failure;
}
block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
}
n -= blocks;
}
kfree(blist);
return block;
failure:
kfree(blist);
return err;
}
/*
* Each cache index slot has SQUASHFS_META_ENTRIES, each of which
* can cache one index -> datablock/blocklist-block mapping. We wish
 * to distribute these over the length of the file: entry[0] maps index x,
* entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
* The larger the file, the greater the skip factor. The skip factor is
* limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
* the number of metadata blocks that need to be read fits into the cache.
* If the skip factor is limited in this way then the file will use multiple
* slots.
*/
static inline int calculate_skip(u64 blocks)
{
u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
* SQUASHFS_META_INDEXES);
return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}
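/*
 * Hedged reading of the formula above: each index cache entry spans
 * skip * SQUASHFS_META_INDEXES blocks, so small files get skip == 1 and
 * very large files saturate at SQUASHFS_CACHED_BLKS - 1, beyond which
 * additional cache slots (rather than a larger skip) absorb the extra
 * file length.
 */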
/*
* Search and grow the index cache for the specified inode, returning the
* on-disk locations of the datablock and block list metadata block
* <index_block, index_offset> for index (scaled to nearest cache index).
*/
static int fill_meta_index(struct inode *inode, int index,
u64 *index_block, int *index_offset, u64 *data_block)
{
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
int offset = 0;
struct meta_index *meta;
struct meta_entry *meta_entry;
u64 cur_index_block = squashfs_i(inode)->block_list_start;
int cur_offset = squashfs_i(inode)->offset;
u64 cur_data_block = squashfs_i(inode)->start;
int err, i;
/*
* Scale index to cache index (cache slot entry)
*/
index /= SQUASHFS_META_INDEXES * skip;
while (offset < index) {
meta = locate_meta_index(inode, offset + 1, index);
if (meta == NULL) {
meta = empty_meta_index(inode, offset + 1, skip);
if (meta == NULL)
goto all_done;
} else {
offset = index < meta->offset + meta->entries ? index :
meta->offset + meta->entries - 1;
meta_entry = &meta->meta_entry[offset - meta->offset];
cur_index_block = meta_entry->index_block +
msblk->inode_table;
cur_offset = meta_entry->offset;
cur_data_block = meta_entry->data_block;
TRACE("get_meta_index: offset %d, meta->offset %d, "
"meta->entries %d\n", offset, meta->offset,
meta->entries);
TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
" data_block 0x%llx\n", cur_index_block,
cur_offset, cur_data_block);
}
/*
* If necessary grow cache slot by reading block list. Cache
* slot is extended up to index or to the end of the slot, in
* which case further slots will be used.
*/
for (i = meta->offset + meta->entries; i <= index &&
i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
int blocks = skip * SQUASHFS_META_INDEXES;
long long res = read_indexes(inode->i_sb, blocks,
&cur_index_block, &cur_offset);
if (res < 0) {
if (meta->entries == 0)
/*
* Don't leave an empty slot on read
* error allocated to this inode...
*/
meta->inode_number = 0;
err = res;
goto failed;
}
cur_data_block += res;
meta_entry = &meta->meta_entry[i - meta->offset];
meta_entry->index_block = cur_index_block -
msblk->inode_table;
meta_entry->offset = cur_offset;
meta_entry->data_block = cur_data_block;
meta->entries++;
offset++;
}
TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
meta->offset, meta->entries);
release_meta_index(inode, meta);
}
all_done:
*index_block = cur_index_block;
*index_offset = cur_offset;
*data_block = cur_data_block;
/*
* Scale cache index (cache slot entry) to index
*/
return offset * SQUASHFS_META_INDEXES * skip;
failed:
release_meta_index(inode, meta);
return err;
}
/*
* Get the on-disk location and compressed size of the datablock
* specified by index. Fill_meta_index() does most of the work.
*/
static int read_blocklist(struct inode *inode, int index, u64 *block)
{
u64 start;
long long blks;
int offset;
__le32 size;
int res = fill_meta_index(inode, index, &start, &offset, block);
TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
" 0x%x, block 0x%llx\n", res, index, start, offset,
*block);
if (res < 0)
return res;
/*
* res contains the index of the mapping returned by fill_meta_index(),
* this will likely be less than the desired index (because the
* meta_index cache works at a higher granularity). Read any
* extra block indexes needed.
*/
if (res < index) {
blks = read_indexes(inode->i_sb, index - res, &start, &offset);
if (blks < 0)
return (int) blks;
*block += blks;
}
/*
* Read length of block specified by index.
*/
res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
sizeof(size));
if (res < 0)
return res;
return squashfs_block_size(size);
}
void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
{
int copied;
void *pageaddr;
pageaddr = kmap_atomic(page);
copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
memset(pageaddr + copied, 0, PAGE_SIZE - copied);
kunmap_atomic(pageaddr);
flush_dcache_page(page);
if (copied == avail)
SetPageUptodate(page);
else
SetPageError(page);
}
/* Copy data into page cache */
void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
int bytes, int offset)
{
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = page->index & ~mask, end_index = start_index | mask;
/*
* Loop copying datablock into pages. As the datablock likely covers
* many PAGE_SIZE pages (default block size is 128 KiB) explicitly
* grab the pages from the page cache, except for the page that we've
* been called to fill.
*/
for (i = start_index; i <= end_index && bytes > 0; i++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
struct page *push_page;
int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
push_page = (i == page->index) ? page :
grab_cache_page_nowait(page->mapping, i);
if (!push_page)
continue;
if (PageUptodate(push_page))
goto skip_page;
squashfs_fill_page(push_page, buffer, offset, avail);
skip_page:
unlock_page(push_page);
if (i != page->index)
put_page(push_page);
}
}
/* Read datablock stored packed inside a fragment (tail-end packed block) */
static int squashfs_readpage_fragment(struct page *page, int expected)
{
struct inode *inode = page->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
int res = buffer->error;
if (res)
ERROR("Unable to read page, block %llx, size %x\n",
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
else
squashfs_copy_cache(page, buffer, expected,
squashfs_i(inode)->fragment_offset);
squashfs_cache_put(buffer);
return res;
}
static int squashfs_readpage_sparse(struct page *page, int expected)
{
squashfs_copy_cache(page, NULL, expected, 0);
return 0;
}
static int squashfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int index = page->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int expected = index == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
int res = 0;
void *pageaddr;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
page->index, squashfs_i(inode)->start);
if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
PAGE_SHIFT))
goto out;
if (index < file_end || squashfs_i(inode)->fragment_block ==
SQUASHFS_INVALID_BLK) {
u64 block = 0;
res = read_blocklist(inode, index, &block);
if (res < 0)
goto error_out;
if (res == 0)
res = squashfs_readpage_sparse(page, expected);
else
res = squashfs_readpage_block(page, block, res, expected);
} else
res = squashfs_readpage_fragment(page, expected);
if (!res)
return 0;
error_out:
SetPageError(page);
out:
pageaddr = kmap_atomic(page);
memset(pageaddr, 0, PAGE_SIZE);
kunmap_atomic(pageaddr);
flush_dcache_page(page);
if (res == 0)
SetPageUptodate(page);
unlock_page(page);
return res;
}
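/*
 * Editor's illustrative sketch, not part of this file: the page/datablock
 * index arithmetic used by squashfs_read_folio() and squashfs_copy_cache()
 * above, modelled as a standalone userspace program.  The constants below
 * are hypothetical (4 KiB pages, the default 128 KiB squashfs block); only
 * the shift/mask relations mirror the kernel code, assuming
 * block_log >= PAGE_SHIFT.
 */
#include <assert.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12	/* 4 KiB pages */
#define EX_BLOCK_LOG	17	/* 128 KiB datablocks */

int main(void)
{
	unsigned long page_index = 45;	/* arbitrary page within the file */
	unsigned long shift = EX_BLOCK_LOG - EX_PAGE_SHIFT;
	unsigned long mask = (1UL << shift) - 1;

	/* datablock covering this page, as in squashfs_read_folio() */
	unsigned long block_index = page_index >> shift;

	/* page range backed by that datablock, as in squashfs_copy_cache() */
	unsigned long start_index = page_index & ~mask;
	unsigned long end_index = start_index | mask;

	printf("page %lu -> datablock %lu, pages %lu..%lu\n",
	       page_index, block_index, start_index, end_index);
	assert(block_index == 1 && start_index == 32 && end_index == 63);
	return 0;
}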
static int squashfs_readahead_fragment(struct page **page,
unsigned int pages, unsigned int expected)
{
struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int error = buffer->error;
if (error)
goto out;
expected += squashfs_i(inode)->fragment_offset;
for (n = 0; n < pages; n++) {
unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
unsigned int offset = base + squashfs_i(inode)->fragment_offset;
if (expected > offset) {
unsigned int avail = min_t(unsigned int, expected -
offset, PAGE_SIZE);
squashfs_fill_page(page[n], buffer, offset, avail);
}
unlock_page(page[n]);
put_page(page[n]);
}
out:
squashfs_cache_put(buffer);
return error;
}
static void squashfs_readahead(struct readahead_control *ractl)
{
struct inode *inode = ractl->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
size_t mask = (1UL << msblk->block_log) - 1;
unsigned short shift = msblk->block_log - PAGE_SHIFT;
loff_t start = readahead_pos(ractl) & ~mask;
size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
struct squashfs_page_actor *actor;
unsigned int nr_pages = 0;
struct page **pages;
int i, file_end = i_size_read(inode) >> msblk->block_log;
unsigned int max_pages = 1UL << shift;
readahead_expand(ractl, start, (len | mask) + 1);
pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
return;
for (;;) {
pgoff_t index;
int res, bsize;
u64 block = 0;
unsigned int expected;
struct page *last_page;
expected = start >> msblk->block_log == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
nr_pages = __readahead_batch(ractl, pages, max_pages);
if (!nr_pages)
break;
if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages;
index = pages[0]->index >> shift;
if ((pages[nr_pages - 1]->index >> shift) != index)
goto skip_pages;
if (index == file_end && squashfs_i(inode)->fragment_block !=
SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages,
expected);
if (res)
goto skip_pages;
continue;
}
bsize = read_blocklist(inode, index, &block);
if (bsize == 0)
goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
expected);
if (!actor)
goto skip_pages;
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
last_page = squashfs_page_actor_free(actor);
if (res == expected) {
int bytes;
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (index == file_end && bytes && last_page)
memzero_page(last_page, bytes,
PAGE_SIZE - bytes);
for (i = 0; i < nr_pages; i++) {
flush_dcache_page(pages[i]);
SetPageUptodate(pages[i]);
}
}
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
}
kfree(pages);
return;
skip_pages:
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
kfree(pages);
}
const struct address_space_operations squashfs_aops = {
.read_folio = squashfs_read_folio,
.readahead = squashfs_readahead
};
| linux-master | fs/squashfs/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2010 LG Electronics
* Chan Jeong <[email protected]>
*
* lzo_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/lzo.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
struct squashfs_lzo {
void *input;
void *output;
};
static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
stream->input = vmalloc(block_size);
if (stream->input == NULL)
goto failed;
stream->output = vmalloc(block_size);
if (stream->output == NULL)
goto failed2;
return stream;
failed2:
vfree(stream->input);
failed:
ERROR("Failed to allocate lzo workspace\n");
kfree(stream);
return ERR_PTR(-ENOMEM);
}
static void lzo_free(void *strm)
{
struct squashfs_lzo *stream = strm;
if (stream) {
vfree(stream->input);
vfree(stream->output);
}
kfree(stream);
}
static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
struct squashfs_lzo *stream = strm;
void *buff = stream->input, *data;
int bytes = length, res;
size_t out_len = output->length;
while (bio_next_segment(bio, &iter_all)) {
int avail = min(bytes, ((int)bvec->bv_len) - offset);
data = bvec_virt(bvec);
memcpy(buff, data + offset, avail);
buff += avail;
bytes -= avail;
offset = 0;
}
res = lzo1x_decompress_safe(stream->input, (size_t)length,
stream->output, &out_len);
if (res != LZO_E_OK)
goto failed;
res = bytes = (int)out_len;
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
} else {
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
}
}
squashfs_finish_page(output);
return res;
failed:
return -EIO;
}
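/*
 * Editor's illustrative sketch, not part of this file: the copy-out loop
 * that lzo_uncompress() performs after decompression, reduced to a
 * standalone userspace function.  The real code walks a
 * squashfs_page_actor; this stand-in only shows the "full pages, then a
 * short tail" slicing.  All names and sizes here are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define EX_PAGE_SIZE 4096

static void copy_out(const unsigned char *src, size_t bytes,
		     unsigned char **pages, size_t npages)
{
	size_t i;

	for (i = 0; i < npages && bytes; i++) {
		/* the final page may receive a short copy */
		size_t chunk = bytes < EX_PAGE_SIZE ? bytes : EX_PAGE_SIZE;

		memcpy(pages[i], src, chunk);
		src += chunk;
		bytes -= chunk;
	}
}

int main(void)
{
	unsigned char out0[EX_PAGE_SIZE], out1[EX_PAGE_SIZE];
	unsigned char *pages[] = { out0, out1 };
	unsigned char decompressed[EX_PAGE_SIZE + 100] = { 0 };

	copy_out(decompressed, sizeof(decompressed), pages, 2);
	printf("copied %zu bytes into 2 pages\n", sizeof(decompressed));
	return 0;
}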
const struct squashfs_decompressor squashfs_lzo_comp_ops = {
.init = lzo_init,
.free = lzo_free,
.decompress = lzo_uncompress,
.id = LZO_COMPRESSION,
.name = "lzo",
.alloc_buffer = 0,
.supported = 1
};
| linux-master | fs/squashfs/lzo_wrapper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Phillip Lougher <[email protected]>
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
/* Read separately compressed datablock and memcopy into page cache */
int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
{
struct inode *i = page->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error;
if (res)
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
else
squashfs_copy_cache(page, buffer, expected, 0);
squashfs_cache_put(buffer);
return res;
}
| linux-master | fs/squashfs/file_cache.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013
* Phillip Lougher <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "page_actor.h"
/*
* This file contains implementations of page_actor for decompressing into
* an intermediate buffer, and for decompressing directly into the
* page cache.
*
* Calling code should avoid sleeping between calls to squashfs_first_page()
* and squashfs_finish_page().
*/
/* Implementation of page_actor for decompressing into intermediate buffer */
static void *cache_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
return actor->buffer[0];
}
static void *cache_next_page(struct squashfs_page_actor *actor)
{
if (actor->next_page == actor->pages)
return NULL;
return actor->buffer[actor->next_page++];
}
static void cache_finish_page(struct squashfs_page_actor *actor)
{
/* empty */
}
struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
actor->tmp_buffer = NULL;
actor->squashfs_first_page = cache_first_page;
actor->squashfs_next_page = cache_next_page;
actor->squashfs_finish_page = cache_finish_page;
return actor;
}
/* Implementation of page_actor for decompressing directly into page cache. */
static void *handle_next_page(struct squashfs_page_actor *actor)
{
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (actor->returned_pages == max_pages)
return NULL;
if ((actor->next_page == actor->pages) ||
(actor->next_index != actor->page[actor->next_page]->index)) {
actor->next_index++;
actor->returned_pages++;
actor->last_page = NULL;
return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
}
actor->next_index++;
actor->returned_pages++;
actor->last_page = actor->page[actor->next_page];
return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
}
static void *direct_first_page(struct squashfs_page_actor *actor)
{
return handle_next_page(actor);
}
static void *direct_next_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr) {
kunmap_local(actor->pageaddr);
actor->pageaddr = NULL;
}
return handle_next_page(actor);
}
static void direct_finish_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_local(actor->pageaddr);
}
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
struct page **page, int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
if (msblk->decompressor->alloc_buffer) {
actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (actor->tmp_buffer == NULL) {
kfree(actor);
return NULL;
}
} else
actor->tmp_buffer = NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
actor->pageaddr = NULL;
actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
actor->squashfs_first_page = direct_first_page;
actor->squashfs_next_page = direct_next_page;
actor->squashfs_finish_page = direct_finish_page;
return actor;
}
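/*
 * Editor's illustrative sketch, not part of this file: the page-actor
 * pattern above reduced to its essence as a standalone userspace program.
 * A consumer pulls destination buffers one at a time through
 * first/next/finish callbacks without knowing where they come from; the
 * struct and function names below are hypothetical.
 */
#include <stddef.h>

struct ex_actor {
	void **bufs;
	int pages;
	int next;
	void *(*first)(struct ex_actor *);
	void *(*next_buf)(struct ex_actor *);
	void (*finish)(struct ex_actor *);
};

static void *ex_first(struct ex_actor *a)
{
	a->next = 1;
	return a->bufs[0];
}

static void *ex_next(struct ex_actor *a)
{
	return a->next == a->pages ? NULL : a->bufs[a->next++];
}

static void ex_finish(struct ex_actor *a)
{
	/* nothing mapped, so nothing to undo */
	(void)a;
}

int main(void)
{
	char page0[4096], page1[4096];
	void *bufs[] = { page0, page1 };
	struct ex_actor a = {
		.bufs = bufs, .pages = 2, .next = 0,
		.first = ex_first, .next_buf = ex_next, .finish = ex_finish,
	};

	/* a decompressor-style consumer fills each buffer in turn */
	for (void *p = a.first(&a); p; p = a.next_buf(&a))
		;	/* write up to one page of output into p */
	a.finish(&a);
	return 0;
}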
| linux-master | fs/squashfs/page_actor.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
* Phillip Lougher <[email protected]>
*
* zlib_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
stream->workspace = vmalloc(zlib_inflate_workspacesize());
if (stream->workspace == NULL)
goto failed;
return stream;
failed:
ERROR("Failed to allocate zlib workspace\n");
kfree(stream);
return ERR_PTR(-ENOMEM);
}
static void zlib_free(void *strm)
{
z_stream *stream = strm;
if (stream)
vfree(stream->workspace);
kfree(stream);
}
static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int zlib_init = 0, error = 0;
z_stream *stream = strm;
stream->avail_out = PAGE_SIZE;
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
goto finish;
}
for (;;) {
int zlib_err;
if (stream->avail_in == 0) {
const void *data;
int avail;
if (!bio_next_segment(bio, &iter_all)) {
/* Z_STREAM_END must be reached. */
error = -EIO;
break;
}
avail = min(length, ((int)bvec->bv_len) - offset);
data = bvec_virt(bvec);
length -= avail;
stream->next_in = data + offset;
stream->avail_in = avail;
offset = 0;
}
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
break;
} else if (stream->next_out != NULL)
stream->avail_out = PAGE_SIZE;
}
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
error = -EIO;
break;
}
zlib_init = 1;
}
zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
if (zlib_err == Z_STREAM_END)
break;
if (zlib_err != Z_OK) {
error = -EIO;
break;
}
}
finish:
squashfs_finish_page(output);
if (!error)
if (zlib_inflateEnd(stream) != Z_OK)
error = -EIO;
return error ? error : stream->total_out;
}
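/*
 * Editor's illustrative sketch, not part of this file: the same
 * streaming-inflate shape as zlib_uncompress() above, written against
 * userspace zlib (build with -lz).  Output is drained one page-sized
 * window at a time, mirroring the avail_in/avail_out juggling in the
 * kernel code.  Assumes dst is large enough for the inflated data; the
 * function name and chunk size are hypothetical.
 */
#include <string.h>
#include <zlib.h>

#define EX_CHUNK 4096

static int ex_inflate(const unsigned char *src, size_t src_len,
		      unsigned char *dst, size_t *dst_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit(&strm) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = src_len;

	do {
		/* hand out one page-sized output window per iteration */
		strm.next_out = dst + strm.total_out;
		strm.avail_out = EX_CHUNK;
		ret = inflate(&strm, Z_SYNC_FLUSH);
	} while (ret == Z_OK);

	*dst_len = strm.total_out;
	inflateEnd(&strm);
	return ret == Z_STREAM_END ? 0 : -1;
}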
const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.init = zlib_init,
.free = zlib_free,
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
.alloc_buffer = 1,
.supported = 1
};
| linux-master | fs/squashfs/zlib_wrapper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <[email protected]>
*
* id.c
*/
/*
* This file implements code to handle uids and gids.
*
* For space efficiency regular files store uid and gid indexes, which are
* converted to 32-bit uids/gids using an id look up table. This table is
* stored compressed into metadata blocks. A second index table is used to
 * locate these. For speed of access (and because it is small), this
 * second index table is read at mount time and cached in memory.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
/*
* Map uid/gid index into real 32-bit uid/gid using the id look up table
*/
int squashfs_get_id(struct super_block *sb, unsigned int index,
unsigned int *id)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_ID_BLOCK(index);
int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
u64 start_block;
__le32 disk_id;
int err;
if (index >= msblk->ids)
return -EINVAL;
start_block = le64_to_cpu(msblk->id_table[block]);
err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
sizeof(disk_id));
if (err < 0)
return err;
*id = le32_to_cpu(disk_id);
return 0;
}
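/*
 * Editor's illustrative sketch, not part of this file: the two-level
 * lookup performed by squashfs_get_id() above, as a standalone userspace
 * program.  Assuming 8 KiB metadata blocks each holding 2048 little-endian
 * 32-bit ids, an id index splits into a block number (selecting an entry
 * in the cached index table) and a byte offset within that block.  The
 * constants are hypothetical stand-ins for the SQUASHFS_ID_* macros.
 */
#include <stdio.h>

#define EX_METADATA_SIZE	8192
#define EX_IDS_PER_BLOCK	(EX_METADATA_SIZE / 4)	/* 2048 */

int main(void)
{
	unsigned int index = 3000;	/* uid/gid index from an inode */
	unsigned int block = index / EX_IDS_PER_BLOCK;
	unsigned int offset = (index % EX_IDS_PER_BLOCK) * 4;

	/* block selects id_table[block]; offset locates the __le32 id */
	printf("index %u -> metadata block %u, byte offset %u\n",
	       index, block, offset);	/* prints: block 1, offset 3808 */
	return 0;
}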
/*
* Read uncompressed id lookup table indexes from disk into memory
*/
__le64 *squashfs_read_id_index_table(struct super_block *sb,
u64 id_table_start, u64 next_table, unsigned short no_ids)
{
unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
int n;
__le64 *table;
u64 start, end;
TRACE("In read_id_index_table, length %d\n", length);
/* Sanity check values */
/* there should always be at least one id */
if (no_ids == 0)
return ERR_PTR(-EINVAL);
/*
* The computed size of the index table (length bytes) should exactly
* match the table start and end points
*/
if (length != (next_table - id_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, id_table_start, length);
if (IS_ERR(table))
return table;
/*
* table[0], table[1], ... table[indexes - 1] store the locations
* of the compressed id blocks. Each entry should be less than
* the next (i.e. table[0] < table[1]), and the difference between them
* should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
* should be less than id_table_start, and again the difference
* should be SQUASHFS_METADATA_SIZE or less
*/
for (n = 0; n < (indexes - 1); n++) {
start = le64_to_cpu(table[n]);
end = le64_to_cpu(table[n + 1]);
if (start >= end || (end - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
}
start = le64_to_cpu(table[indexes - 1]);
if (start >= id_table_start || (id_table_start - start) >
(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
return table;
}
| linux-master | fs/squashfs/id.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"
#include "jfs_dinode.h"
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");
static struct kmem_cache *jfs_inode_cachep;
static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;
#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");
static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;
#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
static void jfs_handle_error(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
if (sb_rdonly(sb))
return;
updateSuper(sb, FM_DIRTY);
if (sbi->flag & JFS_ERR_PANIC)
panic("JFS (device %s): panic forced after error\n",
sb->s_id);
else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
jfs_err("ERROR: (device %s): remounting filesystem as read-only",
sb->s_id);
sb->s_flags |= SB_RDONLY;
}
/* nothing is done for continue beyond marking the superblock dirty */
}
void jfs_error(struct super_block *sb, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("ERROR: (device %s): %ps: %pV\n",
sb->s_id, __builtin_return_address(0), &vaf);
va_end(args);
jfs_handle_error(sb);
}
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
struct jfs_inode_info *jfs_inode;
jfs_inode = alloc_inode_sb(sb, jfs_inode_cachep, GFP_NOFS);
if (!jfs_inode)
return NULL;
#ifdef CONFIG_QUOTA
memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
return &jfs_inode->vfs_inode;
}
static void jfs_free_inode(struct inode *inode)
{
kmem_cache_free(jfs_inode_cachep, JFS_IP(inode));
}
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
s64 maxinodes;
struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
jfs_info("In jfs_statfs");
buf->f_type = JFS_SUPER_MAGIC;
buf->f_bsize = sbi->bsize;
buf->f_blocks = sbi->bmap->db_mapsize;
buf->f_bfree = sbi->bmap->db_nfree;
buf->f_bavail = sbi->bmap->db_nfree;
/*
	 * If we returned the true number of allocated & free inodes, some
	 * applications would fail because they wouldn't see enough free
	 * inodes.  Instead, estimate how many inodes we could actually
	 * allocate:
*
* buf->f_files = atomic_read(&imap->im_numinos);
* buf->f_ffree = atomic_read(&imap->im_numfree);
*/
maxinodes = min((s64) atomic_read(&imap->im_numinos) +
((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
<< L2INOSPEREXT), (s64) 0xffffffffLL);
buf->f_files = maxinodes;
buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
atomic_read(&imap->im_numfree));
buf->f_fsid.val[0] = crc32_le(0, (char *)&sbi->uuid,
sizeof(sbi->uuid)/2);
buf->f_fsid.val[1] = crc32_le(0,
(char *)&sbi->uuid + sizeof(sbi->uuid)/2,
sizeof(sbi->uuid)/2);
buf->f_namelen = JFS_NAME_MAX;
return 0;
}
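/*
 * Editor's illustrative sketch, not part of this file: the free-inode
 * estimate computed by jfs_statfs() above, as a standalone userspace
 * program.  JFS reports existing inodes plus one inode extent's worth of
 * new inodes per free extent, capped at 32 bits.  The sample figures and
 * the log2 values below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t numinos = 100000;	/* inodes currently backed by disk */
	int64_t numfree = 1500;		/* of those, currently free */
	int64_t db_nfree = 1 << 20;	/* free filesystem blocks */
	int l2nbperiext = 4;		/* log2(blocks per inode extent) */
	int l2inosperext = 5;		/* log2(inodes per inode extent) */

	int64_t maxinodes = numinos +
		((db_nfree >> l2nbperiext) << l2inosperext);
	if (maxinodes > 0xffffffffLL)
		maxinodes = 0xffffffffLL;	/* cap as in jfs_statfs() */

	printf("f_files=%lld f_ffree=%lld\n", (long long)maxinodes,
	       (long long)(maxinodes - (numinos - numfree)));
	return 0;
}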
#ifdef CONFIG_QUOTA
static int jfs_quota_off(struct super_block *sb, int type);
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
static void jfs_quota_off_umount(struct super_block *sb)
{
int type;
for (type = 0; type < MAXQUOTAS; type++)
jfs_quota_off(sb, type);
}
static const struct quotactl_ops jfs_quotactl_ops = {
.quota_on = jfs_quota_on,
.quota_off = jfs_quota_off,
.quota_sync = dquot_quota_sync,
.get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk,
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
static inline void jfs_quota_off_umount(struct super_block *sb)
{
}
#endif
static void jfs_put_super(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
int rc;
jfs_info("In jfs_put_super");
jfs_quota_off_umount(sb);
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
unload_nls(sbi->nls_tab);
truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
iput(sbi->direct_inode);
kfree(sbi);
}
enum {
Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
Opt_discard, Opt_nodiscard, Opt_discard_minblk
};
static const match_table_t tokens = {
{Opt_integrity, "integrity"},
{Opt_nointegrity, "nointegrity"},
{Opt_iocharset, "iocharset=%s"},
{Opt_resize, "resize=%u"},
{Opt_resize_nosize, "resize"},
{Opt_errors, "errors=%s"},
{Opt_ignore, "noquota"},
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_umask, "umask=%u"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_discard_minblk, "discard=%u"},
{Opt_err, NULL}
};
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
int *flag)
{
void *nls_map = (void *)-1; /* -1: no change; NULL: none */
char *p;
struct jfs_sb_info *sbi = JFS_SBI(sb);
*newLVSize = 0;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_integrity:
*flag &= ~JFS_NOINTEGRITY;
break;
case Opt_nointegrity:
*flag |= JFS_NOINTEGRITY;
break;
case Opt_ignore:
/* Silently ignore the quota options */
/* Don't do anything ;-) */
break;
case Opt_iocharset:
if (nls_map && nls_map != (void *) -1)
unload_nls(nls_map);
if (!strcmp(args[0].from, "none"))
nls_map = NULL;
else {
nls_map = load_nls(args[0].from);
if (!nls_map) {
pr_err("JFS: charset not found\n");
goto cleanup;
}
}
break;
case Opt_resize:
{
char *resize = args[0].from;
int rc = kstrtoll(resize, 0, newLVSize);
if (rc)
goto cleanup;
break;
}
case Opt_resize_nosize:
{
*newLVSize = sb_bdev_nr_blocks(sb);
if (*newLVSize == 0)
pr_err("JFS: Cannot determine volume size\n");
break;
}
case Opt_errors:
{
char *errors = args[0].from;
if (!errors || !*errors)
goto cleanup;
if (!strcmp(errors, "continue")) {
*flag &= ~JFS_ERR_REMOUNT_RO;
*flag &= ~JFS_ERR_PANIC;
*flag |= JFS_ERR_CONTINUE;
} else if (!strcmp(errors, "remount-ro")) {
*flag &= ~JFS_ERR_CONTINUE;
*flag &= ~JFS_ERR_PANIC;
*flag |= JFS_ERR_REMOUNT_RO;
} else if (!strcmp(errors, "panic")) {
*flag &= ~JFS_ERR_CONTINUE;
*flag &= ~JFS_ERR_REMOUNT_RO;
*flag |= JFS_ERR_PANIC;
} else {
pr_err("JFS: %s is an invalid error handler\n",
errors);
goto cleanup;
}
break;
}
#ifdef CONFIG_QUOTA
case Opt_quota:
case Opt_usrquota:
*flag |= JFS_USRQUOTA;
break;
case Opt_grpquota:
*flag |= JFS_GRPQUOTA;
break;
#else
case Opt_usrquota:
case Opt_grpquota:
case Opt_quota:
pr_err("JFS: quota operations not supported\n");
break;
#endif
case Opt_uid:
{
char *uid = args[0].from;
uid_t val;
int rc = kstrtouint(uid, 0, &val);
if (rc)
goto cleanup;
sbi->uid = make_kuid(current_user_ns(), val);
if (!uid_valid(sbi->uid))
goto cleanup;
break;
}
case Opt_gid:
{
char *gid = args[0].from;
gid_t val;
int rc = kstrtouint(gid, 0, &val);
if (rc)
goto cleanup;
sbi->gid = make_kgid(current_user_ns(), val);
if (!gid_valid(sbi->gid))
goto cleanup;
break;
}
case Opt_umask:
{
char *umask = args[0].from;
int rc = kstrtouint(umask, 8, &sbi->umask);
if (rc)
goto cleanup;
if (sbi->umask & ~0777) {
pr_err("JFS: Invalid value of umask\n");
goto cleanup;
}
break;
}
case Opt_discard:
			/*
			 * minblks_trim defaults to 64 blocks; if it were 1,
			 * even copying files would trigger trimming, so the
			 * user retains control over online trim granularity.
			 */
sbi->minblks_trim = 64;
if (bdev_max_discard_sectors(sb->s_bdev))
*flag |= JFS_DISCARD;
else
pr_err("JFS: discard option not supported on device\n");
break;
case Opt_nodiscard:
*flag &= ~JFS_DISCARD;
break;
case Opt_discard_minblk:
{
char *minblks_trim = args[0].from;
int rc;
if (bdev_max_discard_sectors(sb->s_bdev)) {
*flag |= JFS_DISCARD;
rc = kstrtouint(minblks_trim, 0,
&sbi->minblks_trim);
if (rc)
goto cleanup;
} else
pr_err("JFS: discard option not supported on device\n");
break;
}
default:
printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
p);
goto cleanup;
}
}
if (nls_map != (void *) -1) {
/* Discard old (if remount) */
unload_nls(sbi->nls_tab);
sbi->nls_tab = nls_map;
}
return 1;
cleanup:
if (nls_map && nls_map != (void *) -1)
unload_nls(nls_map);
return 0;
}
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
s64 newLVSize = 0;
int rc = 0;
int flag = JFS_SBI(sb)->flag;
int ret;
sync_filesystem(sb);
if (!parse_options(data, sb, &newLVSize, &flag))
return -EINVAL;
if (newLVSize) {
if (sb_rdonly(sb)) {
pr_err("JFS: resize requires volume to be mounted read-write\n");
return -EROFS;
}
rc = jfs_extendfs(sb, newLVSize, 0);
if (rc)
return rc;
}
if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
/*
* Invalidate any previously read metadata. fsck may have
* changed the on-disk data since we mounted r/o
*/
truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);
JFS_SBI(sb)->flag = flag;
ret = jfs_mount_rw(sb, 1);
/* mark the fs r/w for quota activity */
sb->s_flags &= ~SB_RDONLY;
dquot_resume(sb, -1);
return ret;
}
if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
rc = dquot_suspend(sb, -1);
if (rc < 0)
return rc;
rc = jfs_umount_rw(sb);
JFS_SBI(sb)->flag = flag;
return rc;
}
if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
if (!sb_rdonly(sb)) {
rc = jfs_umount_rw(sb);
if (rc)
return rc;
JFS_SBI(sb)->flag = flag;
ret = jfs_mount_rw(sb, 1);
return ret;
}
JFS_SBI(sb)->flag = flag;
return 0;
}
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct jfs_sb_info *sbi;
struct inode *inode;
int rc;
s64 newLVSize = 0;
int flag, ret = -EINVAL;
jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
sb->s_max_links = JFS_LINK_MAX;
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
sbi->sb = sb;
sbi->uid = INVALID_UID;
sbi->gid = INVALID_GID;
sbi->umask = -1;
/* initialize the mount flag and determine the default error handler */
flag = JFS_ERR_REMOUNT_RO;
if (!parse_options((char *) data, sb, &newLVSize, &flag))
goto out_kfree;
sbi->flag = flag;
#ifdef CONFIG_JFS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
#endif
if (newLVSize) {
pr_err("resize option for remount only\n");
goto out_kfree;
}
/*
* Initialize blocksize to 4K.
*/
sb_set_blocksize(sb, PSIZE);
/*
* Set method vectors.
*/
sb->s_op = &jfs_super_operations;
sb->s_export_op = &jfs_export_operations;
sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->dq_op = &dquot_operations;
sb->s_qcop = &jfs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
/*
* Initialize direct-mapping inode/address-space
*/
inode = new_inode(sb);
if (inode == NULL) {
ret = -ENOMEM;
goto out_unload;
}
inode->i_size = bdev_nr_bytes(sb->s_bdev);
inode->i_mapping->a_ops = &jfs_metapage_aops;
inode_fake_hash(inode);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->direct_inode = inode;
rc = jfs_mount(sb);
if (rc) {
if (!silent)
jfs_err("jfs_mount failed w/return code = %d", rc);
goto out_mount_failed;
}
if (sb_rdonly(sb))
sbi->log = NULL;
else {
rc = jfs_mount_rw(sb, 0);
if (rc) {
if (!silent) {
jfs_err("jfs_mount_rw failed, return code = %d",
rc);
}
goto out_no_rw;
}
}
sb->s_magic = JFS_SUPER_MAGIC;
if (sbi->mntflag & JFS_OS2)
sb->s_d_op = &jfs_ci_dentry_operations;
inode = jfs_iget(sb, ROOT_I);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out_no_rw;
}
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto out_no_root;
/* logical blocks are represented by 40 bits in pxd_t, etc.
* and page cache is indexed by long
*/
sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE);
sb->s_time_gran = 1;
return 0;
out_no_root:
jfs_err("jfs_read_super: get root dentry failed");
out_no_rw:
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
filemap_write_and_wait(sbi->direct_inode->i_mapping);
truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
make_bad_inode(sbi->direct_inode);
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
out_unload:
unload_nls(sbi->nls_tab);
out_kfree:
kfree(sbi);
return ret;
}
static int jfs_freeze(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
int rc = 0;
if (!sb_rdonly(sb)) {
txQuiesce(sb);
rc = lmLogShutdown(log);
if (rc) {
jfs_error(sb, "lmLogShutdown failed\n");
/* let operations fail rather than hang */
txResume(sb);
return rc;
}
rc = updateSuper(sb, FM_CLEAN);
if (rc) {
jfs_err("jfs_freeze: updateSuper failed");
/*
* Don't fail here. Everything succeeded except
* marking the superblock clean, so there's really
* no harm in leaving it frozen for now.
*/
}
}
return 0;
}
static int jfs_unfreeze(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
int rc = 0;
if (!sb_rdonly(sb)) {
rc = updateSuper(sb, FM_MOUNT);
if (rc) {
jfs_error(sb, "updateSuper failed\n");
goto out;
}
rc = lmLogInit(log);
if (rc)
jfs_error(sb, "lmLogInit failed\n");
out:
txResume(sb);
}
return rc;
}
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}
static int jfs_sync_fs(struct super_block *sb, int wait)
{
struct jfs_log *log = JFS_SBI(sb)->log;
/* log == NULL indicates read-only mount */
if (log) {
/*
* Write quota structures to quota file, sync_blockdev() will
* write them to disk later
*/
dquot_writeback_dquots(sb, -1);
jfs_flush_journal(log, wait);
jfs_syncpt(log, 0);
}
return 0;
}
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);
if (uid_valid(sbi->uid))
seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
if (gid_valid(sbi->gid))
seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
if (sbi->umask != -1)
seq_printf(seq, ",umask=%03o", sbi->umask);
if (sbi->flag & JFS_NOINTEGRITY)
seq_puts(seq, ",nointegrity");
if (sbi->flag & JFS_DISCARD)
seq_printf(seq, ",discard=%u", sbi->minblks_trim);
if (sbi->nls_tab)
seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
if (sbi->flag & JFS_ERR_CONTINUE)
seq_printf(seq, ",errors=continue");
if (sbi->flag & JFS_ERR_PANIC)
seq_printf(seq, ",errors=panic");
#ifdef CONFIG_QUOTA
if (sbi->flag & JFS_USRQUOTA)
seq_puts(seq, ",usrquota");
if (sbi->flag & JFS_GRPQUOTA)
seq_puts(seq, ",grpquota");
#endif
return 0;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
struct buffer_head tmp_bh;
struct buffer_head *bh;
loff_t i_size = i_size_read(inode);
if (off > i_size)
return 0;
if (off+len > i_size)
len = i_size-off;
toread = len;
while (toread > 0) {
tocopy = min_t(size_t, sb->s_blocksize - offset, toread);
tmp_bh.b_state = 0;
tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 0);
if (err)
return err;
if (!buffer_mapped(&tmp_bh)) /* A hole? */
memset(data, 0, tocopy);
else {
bh = sb_bread(sb, tmp_bh.b_blocknr);
if (!bh)
return -EIO;
memcpy(data, bh->b_data+offset, tocopy);
brelse(bh);
}
offset = 0;
toread -= tocopy;
data += tocopy;
blk++;
}
return len;
}
/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t towrite = len;
struct buffer_head tmp_bh;
struct buffer_head *bh;
inode_lock(inode);
while (towrite > 0) {
tocopy = min_t(size_t, sb->s_blocksize - offset, towrite);
tmp_bh.b_state = 0;
tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 1);
if (err)
goto out;
if (offset || tocopy != sb->s_blocksize)
bh = sb_bread(sb, tmp_bh.b_blocknr);
else
bh = sb_getblk(sb, tmp_bh.b_blocknr);
if (!bh) {
err = -EIO;
goto out;
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
flush_dcache_page(bh->b_page);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
brelse(bh);
offset = 0;
towrite -= tocopy;
data += tocopy;
blk++;
}
out:
if (len == towrite) {
inode_unlock(inode);
return err;
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
inode_unlock(inode);
return len - towrite;
}
static struct dquot **jfs_get_dquots(struct inode *inode)
{
return JFS_IP(inode)->i_dquot;
}
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path)
{
int err;
struct inode *inode;
err = dquot_quota_on(sb, type, format_id, path);
if (err)
return err;
inode = d_inode(path->dentry);
inode_lock(inode);
JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL;
inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
S_NOATIME | S_IMMUTABLE);
inode_unlock(inode);
mark_inode_dirty(inode);
return 0;
}
static int jfs_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
int err;
if (!inode || !igrab(inode))
goto out;
err = dquot_quota_off(sb, type);
if (err)
goto out_put;
inode_lock(inode);
JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
inode_unlock(inode);
mark_inode_dirty(inode);
out_put:
iput(inode);
return err;
out:
return dquot_quota_off(sb, type);
}
#endif
static const struct super_operations jfs_super_operations = {
.alloc_inode = jfs_alloc_inode,
.free_inode = jfs_free_inode,
.dirty_inode = jfs_dirty_inode,
.write_inode = jfs_write_inode,
.evict_inode = jfs_evict_inode,
.put_super = jfs_put_super,
.sync_fs = jfs_sync_fs,
.freeze_fs = jfs_freeze,
.unfreeze_fs = jfs_unfreeze,
.statfs = jfs_statfs,
.remount_fs = jfs_remount,
.show_options = jfs_show_options,
#ifdef CONFIG_QUOTA
.quota_read = jfs_quota_read,
.quota_write = jfs_quota_write,
.get_dquots = jfs_get_dquots,
#endif
};
static const struct export_operations jfs_export_operations = {
.fh_to_dentry = jfs_fh_to_dentry,
.fh_to_parent = jfs_fh_to_parent,
.get_parent = jfs_get_parent,
};
static struct file_system_type jfs_fs_type = {
.owner = THIS_MODULE,
.name = "jfs",
.mount = jfs_do_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
static void init_once(void *foo)
{
struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
init_rwsem(&jfs_ip->rdwrlock);
mutex_init(&jfs_ip->commit_mutex);
init_rwsem(&jfs_ip->xattr_sem);
spin_lock_init(&jfs_ip->ag_lock);
jfs_ip->active_ag = -1;
inode_init_once(&jfs_ip->vfs_inode);
}
static int __init init_jfs_fs(void)
{
int i;
int rc;
jfs_inode_cachep =
kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
offsetof(struct jfs_inode_info, i_inline_all),
sizeof_field(struct jfs_inode_info, i_inline_all),
init_once);
if (jfs_inode_cachep == NULL)
return -ENOMEM;
/*
* Metapage initialization
*/
rc = metapage_init();
if (rc) {
jfs_err("metapage_init failed w/rc = %d", rc);
goto free_slab;
}
/*
* Transaction Manager initialization
*/
rc = txInit();
if (rc) {
jfs_err("txInit failed w/rc = %d", rc);
goto free_metapage;
}
/*
* I/O completion thread (endio)
*/
jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
if (IS_ERR(jfsIOthread)) {
rc = PTR_ERR(jfsIOthread);
jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
goto end_txmngr;
}
if (commit_threads < 1)
commit_threads = num_online_cpus();
if (commit_threads > MAX_COMMIT_THREADS)
commit_threads = MAX_COMMIT_THREADS;
for (i = 0; i < commit_threads; i++) {
jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
"jfsCommit");
if (IS_ERR(jfsCommitThread[i])) {
rc = PTR_ERR(jfsCommitThread[i]);
jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
commit_threads = i;
goto kill_committask;
}
}
jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
if (IS_ERR(jfsSyncThread)) {
rc = PTR_ERR(jfsSyncThread);
jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
goto kill_committask;
}
#ifdef PROC_FS_JFS
jfs_proc_init();
#endif
rc = register_filesystem(&jfs_fs_type);
if (!rc)
return 0;
#ifdef PROC_FS_JFS
jfs_proc_clean();
#endif
kthread_stop(jfsSyncThread);
kill_committask:
for (i = 0; i < commit_threads; i++)
kthread_stop(jfsCommitThread[i]);
kthread_stop(jfsIOthread);
end_txmngr:
txExit();
free_metapage:
metapage_exit();
free_slab:
kmem_cache_destroy(jfs_inode_cachep);
return rc;
}
static void __exit exit_jfs_fs(void)
{
int i;
jfs_info("exit_jfs_fs called");
txExit();
metapage_exit();
kthread_stop(jfsIOthread);
for (i = 0; i < commit_threads; i++)
kthread_stop(jfsCommitThread[i]);
kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
jfs_proc_clean();
#endif
unregister_filesystem(&jfs_fs_type);
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(jfs_inode_cachep);
}
module_init(init_jfs_fs)
module_exit(exit_jfs_fs)
| linux-master | fs/jfs/super.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
/*
* jfs_umount.c
*
* note: file system in transition to aggregate/fileset:
* (ref. jfs_mount.c)
*
 * file system unmount is interpreted as unmount of the single/only
 * fileset in the aggregate and, if it is the last fileset,
 * as unmount of the aggregate;
*/
#include <linux/fs.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
/*
 * NAME:	jfs_umount(sb)
 *
 * FUNCTION:	unmount the file system mounted on the given superblock
 *
 * PARAMETER:	sb	- superblock of the file system to unmount
 *
 * RETURN:	-EBUSY	- device has open files
*/
int jfs_umount(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct inode *ipbmap = sbi->ipbmap;
struct inode *ipimap = sbi->ipimap;
struct inode *ipaimap = sbi->ipaimap;
struct inode *ipaimap2 = sbi->ipaimap2;
struct jfs_log *log;
int rc = 0;
jfs_info("UnMount JFS: sb:0x%p", sb);
/*
* update superblock and close log
*
* if mounted read-write and log based recovery was enabled
*/
if ((log = sbi->log))
/*
* Wait for outstanding transactions to be written to log:
*/
jfs_flush_journal(log, 2);
/*
* close fileset inode allocation map (aka fileset inode)
*/
diUnmount(ipimap, 0);
diFreeSpecial(ipimap);
sbi->ipimap = NULL;
/*
* close secondary aggregate inode allocation map
*/
if (ipaimap2) {
diUnmount(ipaimap2, 0);
diFreeSpecial(ipaimap2);
sbi->ipaimap2 = NULL;
}
/*
* close aggregate inode allocation map
*/
diUnmount(ipaimap, 0);
diFreeSpecial(ipaimap);
sbi->ipaimap = NULL;
/*
* close aggregate block allocation map
*/
dbUnmount(ipbmap, 0);
diFreeSpecial(ipbmap);
sbi->ipbmap = NULL;
/*
* Make sure all metadata makes it to disk before we mark
* the superblock as clean
*/
filemap_write_and_wait(sbi->direct_inode->i_mapping);
/*
* ensure all file system file pages are propagated to their
* home blocks on disk (and their in-memory buffer pages are
* invalidated) BEFORE updating file system superblock state
* (to signify file system is unmounted cleanly, and thus in
* consistent state) and log superblock active file system
* list (to signify skip logredo()).
*/
if (log) { /* log = NULL if read-only mount */
updateSuper(sb, FM_CLEAN);
/*
* close log:
*
* remove file system from log active file system list.
*/
rc = lmLogClose(sb);
}
jfs_info("UnMount JFS Complete: rc = %d", rc);
return rc;
}
int jfs_umount_rw(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
if (!log)
return 0;
/*
* close log:
*
* remove file system from log active file system list.
*/
jfs_flush_journal(log, 2);
/*
* Make sure all metadata makes it to disk
*/
dbSync(sbi->ipbmap);
diSync(sbi->ipimap);
/*
* Note that we have to do this even if sync_blockdev() will
* do exactly the same a few instructions later: We can't
* mark the superblock clean before everything is flushed to
* disk.
*/
filemap_write_and_wait(sbi->direct_inode->i_mapping);
updateSuper(sb, FM_CLEAN);
return lmLogClose(sb);
}
| linux-master | fs/jfs/jfs_umount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
#define BITSPERPAGE (PSIZE << 3)
#define L2MEGABYTE 20
#define MEGABYTE (1 << L2MEGABYTE)
#define MEGABYTE32 (MEGABYTE << 5)
/* convert block number to bmap file page number */
#define BLKTODMAPN(b)\
(((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
/*
* jfs_extendfs()
*
* function: extend file system;
*
* |-------------------------------|----------|----------|
* file system space fsck inline log
* workspace space
*
* input:
* new LVSize: in LV blocks (required)
* new LogSize: in LV blocks (optional)
* new FSSize: in LV blocks (optional)
*
* new configuration:
* 1. set new LogSize as specified or default from new LVSize;
* 2. compute new FSCKSize from new LVSize;
* 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
* assert(new FSSize >= old FSSize),
* i.e., file system must not be shrunk;
*/
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
int rc = 0;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct inode *ipbmap = sbi->ipbmap;
struct inode *ipbmap2;
struct inode *ipimap = sbi->ipimap;
struct jfs_log *log = sbi->log;
struct bmap *bmp = sbi->bmap;
s64 newLogAddress, newFSCKAddress;
int newFSCKSize;
s64 newMapSize = 0, mapSize;
s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
s64 oldLVSize;
s64 newFSSize;
s64 VolumeSize;
int newNpages = 0, nPages, newPage, xlen, t32;
int tid;
int log_formatted = 0;
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
s64 old_agsize;
int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
if (sbi->mntflag & JFS_INLINELOG)
oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
else
oldLVSize = addressPXD(&sbi->fsckpxd) +
lengthPXD(&sbi->fsckpxd);
if (oldLVSize >= newLVSize) {
printk(KERN_WARNING
"jfs_extendfs: volume hasn't grown, returning\n");
goto out;
}
VolumeSize = sb_bdev_nr_blocks(sb);
if (VolumeSize) {
if (newLVSize > VolumeSize) {
printk(KERN_WARNING "jfs_extendfs: invalid size\n");
rc = -EINVAL;
goto out;
}
} else {
/* check the device */
bh = sb_bread(sb, newLVSize - 1);
if (!bh) {
printk(KERN_WARNING "jfs_extendfs: invalid size\n");
rc = -EINVAL;
goto out;
}
bforget(bh);
}
/* Can't extend write-protected drive */
if (isReadOnly(ipbmap)) {
printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
rc = -EROFS;
goto out;
}
/*
* reconfigure LV spaces
* ---------------------
*
* validate new size, or, if not specified, determine new size
*/
/*
* reconfigure inline log space:
*/
if ((sbi->mntflag & JFS_INLINELOG)) {
if (newLogSize == 0) {
/*
* no size specified: default to 1/256 of aggregate
* size; rounded up to a megabyte boundary;
*/
newLogSize = newLVSize >> 8;
t32 = (1 << (20 - sbi->l2bsize)) - 1;
newLogSize = (newLogSize + t32) & ~t32;
newLogSize =
min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
} else {
/*
* convert the newLogSize to fs blocks.
*
* Since this is given in megabytes, it will always be
* an even number of pages.
*/
newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize;
}
} else
newLogSize = 0;
newLogAddress = newLVSize - newLogSize;
/*
* reconfigure fsck work space:
*
* configure it to the end of the logical volume regardless of
* whether file system extends to the end of the aggregate;
* Need enough 4k pages to cover:
* - 1 bit per block in aggregate rounded up to BPERDMAP boundary
* - 1 extra page to handle control page and intermediate level pages
* - 50 extra pages for the chkdsk service log
*/
t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP)
<< L2BPERDMAP;
t32 = DIV_ROUND_UP(t64, BITSPERPAGE) + 1 + 50;
newFSCKSize = t32 << sbi->l2nbperpage;
newFSCKAddress = newLogAddress - newFSCKSize;
/*
* compute new file system space;
*/
newFSSize = newLVSize - newLogSize - newFSCKSize;
/* file system cannot be shrunk */
if (newFSSize < bmp->db_mapsize) {
rc = -EINVAL;
goto out;
}
/*
* If we're expanding enough that the inline log does not overlap
* the old one, we can format the new log before we quiesce the
* filesystem.
*/
if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) {
if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
goto out;
log_formatted = 1;
}
/*
* quiesce file system
*
* (prepare to move the inline log and to prevent map update)
*
* block any new transactions and wait for completion of
* all wip transactions and flush modified pages s.t.
* on-disk file system is in consistent state and
* log is not required for recovery.
*/
txQuiesce(sb);
/* Reset size of direct inode */
sbi->direct_inode->i_size = bdev_nr_bytes(sb->s_bdev);
if (sbi->mntflag & JFS_INLINELOG) {
/*
* deactivate old inline log
*/
lmLogShutdown(log);
/*
* mark on-disk super block for fs in transition;
*
* update on-disk superblock for the new space configuration
* of inline log space and fsck work space descriptors:
* N.B. FS descriptor is NOT updated;
*
* crash recovery:
* logredo(): if FM_EXTENDFS, return to fsck() for cleanup;
* fsck(): if FM_EXTENDFS, reformat inline log and fsck
* workspace from superblock inline log descriptor and fsck
* workspace descriptor;
*/
/* read in superblock */
if ((rc = readSuper(sb, &bh)))
goto error_out;
j_sb = (struct jfs_superblock *)bh->b_data;
/* mark extendfs() in progress */
j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);
j_sb->s_xsize = cpu_to_le64(newFSSize);
PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress);
PXDlength(&j_sb->s_xfsckpxd, newFSCKSize);
PXDaddress(&j_sb->s_xlogpxd, newLogAddress);
PXDlength(&j_sb->s_xlogpxd, newLogSize);
/* synchronously update superblock */
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
/*
* format new inline log synchronously;
*
* crash recovery: if log move in progress,
* reformat log and exit success;
*/
if (!log_formatted)
if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
goto error_out;
/*
* activate new log
*/
log->base = newLogAddress;
log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits);
if ((rc = lmLogInit(log)))
goto error_out;
}
/*
* extend block allocation map
* ---------------------------
*
* extendfs() for new extension, retry after crash recovery;
*
* note: both logredo() and fsck() rebuild map from
* the bitmap and configuration parameter from superblock
* (disregarding all other control information in the map);
*
* superblock:
* s_size: aggregate size in physical blocks;
*/
/*
* compute the new block allocation map configuration
*
* map dinode:
* di_size: map file size in byte;
* di_nblocks: number of blocks allocated for map file;
* di_mapsize: number of blocks in aggregate (covered by map);
* map control page:
* db_mapsize: number of blocks in aggregate (covered by map);
*/
newMapSize = newFSSize;
/* number of data pages of new bmap file:
* roundup new size to full dmap page boundary and
* add 1 extra dmap page for next extendfs()
*/
t64 = (newMapSize - 1) + BPERDMAP;
newNpages = BLKTODMAPN(t64) + 1;
/*
* extend map from current map (WITHOUT growing mapfile)
*
* map new extension with unmapped part of the last partial
* dmap page, if applicable, and extra page(s) allocated
* at end of bmap by mkfs() or previous extendfs();
*/
extendBmap:
/* compute number of blocks requested to extend */
mapSize = bmp->db_mapsize;
XAddress = mapSize; /* eXtension Address */
XSize = newMapSize - mapSize; /* eXtension Size */
old_agsize = bmp->db_agsize; /* We need to know if this changes */
/* compute number of blocks that can be extended by current mapfile */
t64 = dbMapFileSizeToMapSize(ipbmap);
if (mapSize > t64) {
printk(KERN_ERR "jfs_extendfs: mapSize (0x%Lx) > t64 (0x%Lx)\n",
(long long) mapSize, (long long) t64);
rc = -EIO;
goto error_out;
}
nblocks = min(t64 - mapSize, XSize);
/*
* update map pages for new extension:
*
* update/init dmap and bubble up the control hierarchy
* incrementally fold up dmaps into upper levels;
* update bmap control page;
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
agsizechanged |= (bmp->db_agsize != old_agsize);
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
*/
/* ipbmap->i_mapsize += nblocks; */
XSize -= nblocks;
/*
* grow map file to cover remaining extension
* and/or one extra dmap page for next extendfs();
*
* allocate new map pages and its backing blocks, and
* update map file xtree
*/
/* compute number of data pages of current bmap file */
nPages = ipbmap->i_size >> L2PSIZE;
/* need to grow map file ? */
if (nPages == newNpages)
goto finalizeBmap;
/*
* grow bmap file for the new map pages required:
*
* allocate growth at the start of newly extended region;
* bmap file only grows sequentially, i.e., both data pages
* and possibly xtree index pages may grow in append mode,
* s.t. logredo() can reconstruct pre-extension state
* by washing away bmap file of pages outside s_size boundary;
*/
/*
* journal map file growth as if a regular file growth:
* (note: bmap is created with di_mode = IFJOURNAL|IFREG);
*
* journaling of bmap file growth is not required since
	 * logredo() does not (and can not) use log records of bmap file
	 * growth, but it provides careful write semantics, pmap update,
	 * etc.;
*/
/* synchronous write of data pages: bmap data pages are
* cached in meta-data cache, and not written out
* by txCommit();
*/
rc = filemap_fdatawait(ipbmap->i_mapping);
if (rc)
goto error_out;
rc = filemap_write_and_wait(ipbmap->i_mapping);
if (rc)
goto error_out;
diWriteSpecial(ipbmap, 0);
newPage = nPages; /* first new page number */
xoff = newPage << sbi->l2nbperpage;
xlen = (newNpages - nPages) << sbi->l2nbperpage;
xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);
xaddr = XAddress;
tid = txBegin(sb, COMMIT_FORCE);
if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {
txEnd(tid);
goto error_out;
}
/* update bmap file size */
ipbmap->i_size += xlen << sbi->l2bsize;
inode_add_bytes(ipbmap, xlen << sbi->l2bsize);
iplist[0] = ipbmap;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
if (rc)
goto error_out;
/*
* map file has been grown now to cover extension to further out;
* di_size = new map file size;
*
* if huge extension, the previous extension based on previous
* map file size may not have been sufficient to cover whole extension
* (it could have been used up for new map pages),
* but the newly grown map file now covers lot bigger new free space
* available for further extension of map;
*/
/* any more blocks to extend ? */
if (XSize)
goto extendBmap;
finalizeBmap:
/* finalize bmap */
dbFinalizeBmap(ipbmap);
/*
* update inode allocation map
* ---------------------------
*
* move iag lists from old to new iag;
* agstart field is not updated for logredo() to reconstruct
* iag lists if system crash occurs.
* (computation of ag number from agstart based on agsize
* will correctly identify the new ag);
*/
	/* if the new AG size is the same as the old AG size, we're done */
if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
/* finalize imap */
if ((rc = diSync(ipimap)))
goto error_out;
}
/*
* finalize
* --------
*
* extension is committed when on-disk super block is
* updated with new descriptors: logredo will recover
* crash before it to pre-extension state;
*/
/* sync log to skip log replay of bmap file growth transaction; */
/* lmLogSync(log, 1); */
/*
* synchronous write bmap global control page;
* for crash before completion of write
* logredo() will recover to pre-extendfs state;
* for crash after completion of write,
* logredo() will recover post-extendfs state;
*/
if ((rc = dbSync(ipbmap)))
goto error_out;
/*
* copy primary bmap inode to secondary bmap inode
*/
ipbmap2 = diReadSpecial(sb, BMAP_I, 1);
if (ipbmap2 == NULL) {
printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n");
goto error_out;
}
memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288);
ipbmap2->i_size = ipbmap->i_size;
ipbmap2->i_blocks = ipbmap->i_blocks;
diWriteSpecial(ipbmap2, 1);
diFreeSpecial(ipbmap2);
/*
* update superblock
*/
if ((rc = readSuper(sb, &bh)))
goto error_out;
j_sb = (struct jfs_superblock *)bh->b_data;
/* mark extendfs() completion */
j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);
j_sb->s_size = cpu_to_le64(bmp->db_mapsize <<
le16_to_cpu(j_sb->s_l2bfactor));
j_sb->s_agsize = cpu_to_le32(bmp->db_agsize);
/* update inline log space descriptor */
if (sbi->mntflag & JFS_INLINELOG) {
PXDaddress(&(j_sb->s_logpxd), newLogAddress);
PXDlength(&(j_sb->s_logpxd), newLogSize);
}
/* record log's mount serial number */
j_sb->s_logserial = cpu_to_le32(log->serial);
/* update fsck work space descriptor */
PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress);
PXDlength(&(j_sb->s_fsckpxd), newFSCKSize);
j_sb->s_fscklog = 1;
/* sb->s_fsckloglen remains the same */
/* Update secondary superblock */
bh2 = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
if (bh2) {
j_sb2 = (struct jfs_superblock *)bh2->b_data;
memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
mark_buffer_dirty(bh);
sync_dirty_buffer(bh2);
brelse(bh2);
}
/* write primary superblock */
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
goto resume;
error_out:
jfs_error(sb, "\n");
resume:
/*
* resume file system transactions
*/
txResume(sb);
out:
return rc;
}
| linux-master | fs/jfs/resize.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Copyright (C) Christoph Hellwig, 2002
*/
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/security.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
/*
* jfs_xattr.c: extended attribute service
*
* Overall design --
*
* Format:
*
* Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
* value) and a variable (0 or more) number of extended attribute
* entries. Each extended attribute entry (jfs_ea) is a <name,value> double
* where <name> is constructed from a null-terminated ascii string
* (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
* (1 ... 65535 bytes). The in-memory format is
*
* 0 1 2 4 4 + namelen + 1
* +-------+--------+--------+----------------+-------------------+
* | Flags | Name | Value | Name String \0 | Data . . . . |
* | | Length | Length | | |
* +-------+--------+--------+----------------+-------------------+
*
* A jfs_ea_list then is structured as
*
* 0 4 4 + EA_SIZE(ea1)
* +------------+-------------------+--------------------+-----
* | Overall EA | First FEA Element | Second FEA Element | .....
* | List Size | | |
* +------------+-------------------+--------------------+-----
*
* On-disk:
*
* FEALISTs are stored on disk using blocks allocated by dbAlloc() and
* written directly. An EA list may be in-lined in the inode if there is
* sufficient room available.
*/
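/*
 * Worked example (editor's note, not in the original source): storing
 * the name "user.foo" (8 bytes) with the value "bar" (3 bytes) gives,
 * per the layout above,
 *
 *	flag = 0, namelen = 8, valuelen = 3,
 *	"user.foo\0" at offset 4, "bar" at offset 13,
 *
 * so the entry occupies 4 + 8 + 1 + 3 = 16 bytes (assuming EA_SIZE()
 * simply sums the header, name, terminator, and value lengths).
 */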
struct ea_buffer {
int flag; /* Indicates what storage xattr points to */
int max_size; /* largest xattr that fits in current buffer */
dxd_t new_ea; /* dxd to replace ea when modifying xattr */
struct metapage *mp; /* metapage containing ea list */
struct jfs_ea_list *xattr; /* buffer containing ea list */
};
/*
* ea_buffer.flag values
*/
#define EA_INLINE 0x0001
#define EA_EXTENT 0x0002
#define EA_NEW 0x0004
#define EA_MALLOC 0x0008
/*
* Mapping of on-disk attribute names: for on-disk attribute names with an
* unknown prefix (not "system.", "user.", "security.", or "trusted."), the
* prefix "os2." is prepended. On the way back to disk, "os2." prefixes are
* stripped and we make sure that the remaining name does not start with one
 * of the known prefixes.
*/
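/*
 * e.g. (editor's note) an on-disk name "FOO" is presented to the VFS
 * as "os2.FOO", while a name set as "os2.bar" is stored on disk as
 * just "bar". A request for "os2.user.x" is rejected, since the
 * remainder would collide with a known prefix.
 */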
static int is_known_namespace(const char *name)
{
if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
return false;
return true;
}
static inline int name_size(struct jfs_ea *ea)
{
if (is_known_namespace(ea->name))
return ea->namelen;
else
return ea->namelen + XATTR_OS2_PREFIX_LEN;
}
static inline int copy_name(char *buffer, struct jfs_ea *ea)
{
int len = ea->namelen;
if (!is_known_namespace(ea->name)) {
memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
buffer += XATTR_OS2_PREFIX_LEN;
len += XATTR_OS2_PREFIX_LEN;
}
memcpy(buffer, ea->name, ea->namelen);
buffer[ea->namelen] = 0;
return len;
}
/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
/*
* NAME: ea_write_inline
*
* FUNCTION: Attempt to write an EA inline if area is available
*
* PRE CONDITIONS:
* Already verified that the specified EA is small enough to fit inline
*
* PARAMETERS:
* ip - Inode pointer
* ealist - EA list pointer
* size - size of ealist in bytes
* ea - dxd_t structure to be filled in with necessary EA information
* if we successfully copy the EA inline
*
* NOTES:
* Checks if the inode's inline area is available. If so, copies EA inline
* and sets <ea> fields appropriately. Otherwise, returns failure, EA will
* have to be put into an extent.
*
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
*/
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
int size, dxd_t * ea)
{
struct jfs_inode_info *ji = JFS_IP(ip);
/*
* Make sure we have an EA -- the NULL EA list is valid, but you
* can't copy it!
*/
if (ealist && size > sizeof (struct jfs_ea_list)) {
assert(size <= sizeof (ji->i_inline_ea));
/*
* See if the space is available or if it is already being
* used for an inline EA.
*/
if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
return -EPERM;
DXDsize(ea, size);
DXDlength(ea, 0);
DXDaddress(ea, 0);
memcpy(ji->i_inline_ea, ealist, size);
ea->flag = DXD_INLINE;
ji->mode2 &= ~INLINEEA;
} else {
ea->flag = 0;
DXDsize(ea, 0);
DXDlength(ea, 0);
DXDaddress(ea, 0);
/* Free up INLINE area */
if (ji->ea.flag & DXD_INLINE)
ji->mode2 |= INLINEEA;
}
return 0;
}
/*
* NAME: ea_write
*
* FUNCTION: Write an EA for an inode
*
* PRE CONDITIONS: EA has been verified
*
* PARAMETERS:
* ip - Inode pointer
* ealist - EA list pointer
* size - size of ealist in bytes
* ea - dxd_t structure to be filled in appropriately with where the
* EA was copied
*
* NOTES: Will write EA inline if able to, otherwise allocates blocks for an
* extent and synchronously writes it to those blocks.
*
* RETURNS: 0 for success; Anything else indicates failure
*/
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
dxd_t * ea)
{
struct super_block *sb = ip->i_sb;
struct jfs_inode_info *ji = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(sb);
int nblocks;
s64 blkno;
int rc = 0, i;
char *cp;
s32 nbytes, nb;
s32 bytes_to_write;
struct metapage *mp;
/*
* Quick check to see if this is an in-linable EA. Short EAs
* and empty EAs are all in-linable, provided the space exists.
*/
if (!ealist || size <= sizeof (ji->i_inline_ea)) {
if (!ea_write_inline(ip, ealist, size, ea))
return 0;
}
/* figure out how many blocks we need */
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
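	/* e.g. (editor's note) size = 5000 with a 4 KiB block size gives
	 * nblocks = (5000 + 4095) >> 12 = 2
	 */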
/* Allocate new blocks to quota. */
rc = dquot_alloc_block(ip, nblocks);
if (rc)
return rc;
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
		/* Rollback quota allocation. */
dquot_free_block(ip, nblocks);
return rc;
}
/*
* Now have nblocks worth of storage to stuff into the FEALIST.
* loop over the FEALIST copying data into the buffer one page at
* a time.
*/
cp = (char *) ealist;
nbytes = size;
for (i = 0; i < nblocks; i += sbi->nbperpage) {
/*
* Determine how many bytes for this request, and round up to
* the nearest aggregate block size
*/
nb = min(PSIZE, nbytes);
		bytes_to_write = ((nb + sb->s_blocksize - 1) >>
				  sb->s_blocksize_bits) << sb->s_blocksize_bits;
if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
rc = -EIO;
goto failed;
}
memcpy(mp->data, cp, nb);
/*
* We really need a way to propagate errors for
* forced writes like this one. --hch
*
* (__write_metapage => release_metapage => flush_metapage)
*/
#ifdef _JFS_FIXME
if ((rc = flush_metapage(mp))) {
/*
* the write failed -- this means that the buffer
* is still assigned and the blocks are not being
* used. this seems like the best error recovery
* we can get ...
*/
goto failed;
}
#else
flush_metapage(mp);
#endif
cp += PSIZE;
nbytes -= nb;
}
ea->flag = DXD_EXTENT;
DXDsize(ea, le32_to_cpu(ealist->size));
DXDlength(ea, nblocks);
DXDaddress(ea, blkno);
/* Free up INLINE area */
if (ji->ea.flag & DXD_INLINE)
ji->mode2 |= INLINEEA;
return 0;
failed:
/* Rollback quota allocation. */
dquot_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
}
/*
* NAME: ea_read_inline
*
* FUNCTION: Read an inlined EA into user's buffer
*
* PARAMETERS:
* ip - Inode pointer
* ealist - Pointer to buffer to fill in with EA
*
* RETURNS: 0
*/
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
struct jfs_inode_info *ji = JFS_IP(ip);
int ea_size = sizeDXD(&ji->ea);
if (ea_size == 0) {
ealist->size = 0;
return 0;
}
/* Sanity Check */
if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
return -EIO;
if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
!= ea_size)
return -EIO;
memcpy(ealist, ji->i_inline_ea, ea_size);
return 0;
}
/*
* NAME: ea_read
*
* FUNCTION: copy EA data into user's buffer
*
* PARAMETERS:
* ip - Inode pointer
* ealist - Pointer to buffer to fill in with EA
*
* NOTES: If EA is inline calls ea_read_inline() to copy EA.
*
* RETURNS: 0 for success; other indicates failure
*/
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
struct super_block *sb = ip->i_sb;
struct jfs_inode_info *ji = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(sb);
int nblocks;
s64 blkno;
char *cp = (char *) ealist;
int i;
int nbytes, nb;
s32 bytes_to_read;
struct metapage *mp;
/* quick check for in-line EA */
if (ji->ea.flag & DXD_INLINE)
return ea_read_inline(ip, ealist);
nbytes = sizeDXD(&ji->ea);
if (!nbytes) {
jfs_error(sb, "nbytes is 0\n");
return -EIO;
}
/*
* Figure out how many blocks were allocated when this EA list was
* originally written to disk.
*/
nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;
/*
	 * We have found the disk blocks which were originally used to store
	 * the FEALIST. Now loop over each contiguous block, copying the
	 * data into the buffer.
*/
for (i = 0; i < nblocks; i += sbi->nbperpage) {
/*
* Determine how many bytes for this request, and round up to
* the nearest aggregate block size
*/
nb = min(PSIZE, nbytes);
		bytes_to_read = ((nb + sb->s_blocksize - 1) >>
				 sb->s_blocksize_bits) << sb->s_blocksize_bits;
if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
return -EIO;
memcpy(cp, mp->data, nb);
release_metapage(mp);
cp += PSIZE;
nbytes -= nb;
}
return 0;
}
/*
* NAME: ea_get
*
* FUNCTION: Returns buffer containing existing extended attributes.
* The size of the buffer will be the larger of the existing
* attributes size, or min_size.
*
 *	The buffer, which may be inlined in the inode or in the
 *	page cache, must be released by calling ea_release() or ea_put().
*
* PARAMETERS:
* inode - Inode pointer
* ea_buf - Structure to be populated with ealist and its metadata
* min_size- minimum size of buffer to be returned
*
* RETURNS: 0 for success; Other indicates failure
*/
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
struct jfs_inode_info *ji = JFS_IP(inode);
struct super_block *sb = inode->i_sb;
int size;
int ea_size = sizeDXD(&ji->ea);
int blocks_needed, current_blocks;
s64 blkno;
int rc;
int quota_allocation = 0;
/* When fsck.jfs clears a bad ea, it doesn't clear the size */
if (ji->ea.flag == 0)
ea_size = 0;
if (ea_size == 0) {
if (min_size == 0) {
ea_buf->flag = 0;
ea_buf->max_size = 0;
ea_buf->xattr = NULL;
return 0;
}
if ((min_size <= sizeof (ji->i_inline_ea)) &&
(ji->mode2 & INLINEEA)) {
ea_buf->flag = EA_INLINE | EA_NEW;
ea_buf->max_size = sizeof (ji->i_inline_ea);
ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
DXDlength(&ea_buf->new_ea, 0);
DXDaddress(&ea_buf->new_ea, 0);
ea_buf->new_ea.flag = DXD_INLINE;
DXDsize(&ea_buf->new_ea, min_size);
return 0;
}
current_blocks = 0;
} else if (ji->ea.flag & DXD_INLINE) {
if (min_size <= sizeof (ji->i_inline_ea)) {
ea_buf->flag = EA_INLINE;
ea_buf->max_size = sizeof (ji->i_inline_ea);
ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
goto size_check;
}
current_blocks = 0;
} else {
if (!(ji->ea.flag & DXD_EXTENT)) {
jfs_error(sb, "invalid ea.flag\n");
return -EIO;
}
current_blocks = (ea_size + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
}
size = max(min_size, ea_size);
if (size > PSIZE) {
/*
* To keep the rest of the code simple. Allocate a
* contiguous buffer to work with. Make the buffer large
* enough to make use of the whole extent.
*/
ea_buf->max_size = (size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
if (ea_buf->xattr == NULL)
return -ENOMEM;
ea_buf->flag = EA_MALLOC;
if (ea_size == 0)
return 0;
if ((rc = ea_read(inode, ea_buf->xattr))) {
kfree(ea_buf->xattr);
ea_buf->xattr = NULL;
return rc;
}
goto size_check;
}
blocks_needed = (min_size + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
rc = dquot_alloc_block(inode, blocks_needed);
if (rc)
return -EDQUOT;
quota_allocation = blocks_needed;
rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
&blkno);
if (rc)
goto clean_up;
DXDlength(&ea_buf->new_ea, blocks_needed);
DXDaddress(&ea_buf->new_ea, blkno);
ea_buf->new_ea.flag = DXD_EXTENT;
DXDsize(&ea_buf->new_ea, min_size);
ea_buf->flag = EA_EXTENT | EA_NEW;
ea_buf->mp = get_metapage(inode, blkno,
blocks_needed << sb->s_blocksize_bits,
1);
if (ea_buf->mp == NULL) {
dbFree(inode, blkno, (s64) blocks_needed);
rc = -EIO;
goto clean_up;
}
ea_buf->xattr = ea_buf->mp->data;
ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
if (ea_size == 0)
return 0;
if ((rc = ea_read(inode, ea_buf->xattr))) {
discard_metapage(ea_buf->mp);
dbFree(inode, blkno, (s64) blocks_needed);
goto clean_up;
}
goto size_check;
}
ea_buf->flag = EA_EXTENT;
ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
lengthDXD(&ji->ea) << sb->s_blocksize_bits,
1);
if (ea_buf->mp == NULL) {
rc = -EIO;
goto clean_up;
}
ea_buf->xattr = ea_buf->mp->data;
ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
printk(KERN_ERR "ea_get: invalid extended attribute\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
ea_buf->xattr, ea_size, 1);
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
}
return ea_size;
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
dquot_free_block(inode, quota_allocation);
return (rc);
}
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
if (ea_buf->flag & EA_MALLOC)
kfree(ea_buf->xattr);
else if (ea_buf->flag & EA_EXTENT) {
assert(ea_buf->mp);
release_metapage(ea_buf->mp);
if (ea_buf->flag & EA_NEW)
dbFree(inode, addressDXD(&ea_buf->new_ea),
lengthDXD(&ea_buf->new_ea));
}
}
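/*
 * Hedged usage sketch (editor's note): the read-side callers below
 * follow the pattern
 *
 *	down_read(&JFS_IP(inode)->xattr_sem);
 *	xattr_size = ea_get(inode, &ea_buf, 0);
 *	if (xattr_size >= 0) {
 *		... walk ea_buf.xattr ...
 *		ea_release(inode, &ea_buf);
 *	}
 *	up_read(&JFS_IP(inode)->xattr_sem);
 *
 * while a writer hands the buffer to ea_put() instead of releasing it.
 */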
static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
int new_size)
{
struct jfs_inode_info *ji = JFS_IP(inode);
unsigned long old_blocks, new_blocks;
int rc = 0;
if (new_size == 0) {
ea_release(inode, ea_buf);
ea_buf = NULL;
} else if (ea_buf->flag & EA_INLINE) {
assert(new_size <= sizeof (ji->i_inline_ea));
ji->mode2 &= ~INLINEEA;
ea_buf->new_ea.flag = DXD_INLINE;
DXDsize(&ea_buf->new_ea, new_size);
DXDaddress(&ea_buf->new_ea, 0);
DXDlength(&ea_buf->new_ea, 0);
} else if (ea_buf->flag & EA_MALLOC) {
rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
kfree(ea_buf->xattr);
} else if (ea_buf->flag & EA_NEW) {
/* We have already allocated a new dxd */
flush_metapage(ea_buf->mp);
} else {
/* ->xattr must point to original ea's metapage */
rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
discard_metapage(ea_buf->mp);
}
if (rc)
return rc;
old_blocks = new_blocks = 0;
if (ji->ea.flag & DXD_EXTENT) {
invalidate_dxd_metapages(inode, ji->ea);
old_blocks = lengthDXD(&ji->ea);
}
if (ea_buf) {
txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
if (ea_buf->new_ea.flag & DXD_EXTENT) {
new_blocks = lengthDXD(&ea_buf->new_ea);
if (ji->ea.flag & DXD_INLINE)
ji->mode2 |= INLINEEA;
}
ji->ea = ea_buf->new_ea;
} else {
txEA(tid, inode, &ji->ea, NULL);
if (ji->ea.flag & DXD_INLINE)
ji->mode2 |= INLINEEA;
ji->ea.flag = 0;
ji->ea.size = 0;
}
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
dquot_free_block(inode, old_blocks);
inode_set_ctime_current(inode);
return 0;
}
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
const void *value, size_t value_len, int flags)
{
struct jfs_ea_list *ealist;
struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
struct ea_buffer ea_buf;
int old_ea_size = 0;
int xattr_size;
int new_size;
int namelen = strlen(name);
int found = 0;
int rc;
int length;
down_write(&JFS_IP(inode)->xattr_sem);
xattr_size = ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
rc = xattr_size;
goto out;
}
again:
ealist = (struct jfs_ea_list *) ea_buf.xattr;
new_size = sizeof (struct jfs_ea_list);
if (xattr_size) {
for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
ea = NEXT_EA(ea)) {
if ((namelen == ea->namelen) &&
(memcmp(name, ea->name, namelen) == 0)) {
found = 1;
if (flags & XATTR_CREATE) {
rc = -EEXIST;
goto release;
}
old_ea = ea;
old_ea_size = EA_SIZE(ea);
next_ea = NEXT_EA(ea);
} else
new_size += EA_SIZE(ea);
}
}
if (!found) {
if (flags & XATTR_REPLACE) {
rc = -ENODATA;
goto release;
}
if (value == NULL) {
rc = 0;
goto release;
}
}
if (value)
new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;
if (new_size > ea_buf.max_size) {
/*
		 * We need to allocate more space for the merged ea list.
		 * We should only loop back to again: once.
*/
ea_release(inode, &ea_buf);
xattr_size = ea_get(inode, &ea_buf, new_size);
if (xattr_size < 0) {
rc = xattr_size;
goto out;
}
goto again;
}
/* Remove old ea of the same name */
if (found) {
/* number of bytes following target EA */
length = (char *) END_EALIST(ealist) - (char *) next_ea;
if (length > 0)
memmove(old_ea, next_ea, length);
xattr_size -= old_ea_size;
}
/* Add new entry to the end */
if (value) {
if (xattr_size == 0)
/* Completely new ea list */
xattr_size = sizeof (struct jfs_ea_list);
		/*
		 * The size of an EA value is limited by the on-disk format to
		 * a __le16, so there would be an overflow if the size were
		 * equal to XATTR_SIZE_MAX (65536). To avoid this, pre-check
		 * the value size against USHRT_MAX and return -E2BIG in that
		 * case, which is consistent with the VFS setxattr interface.
		 */
if (value_len >= USHRT_MAX) {
rc = -E2BIG;
goto release;
}
ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
ea->flag = 0;
ea->namelen = namelen;
		ea->valuelen = cpu_to_le16(value_len);
memcpy(ea->name, name, namelen);
ea->name[namelen] = 0;
if (value_len)
memcpy(&ea->name[namelen + 1], value, value_len);
xattr_size += EA_SIZE(ea);
}
	/* DEBUG - If we did this right, these numbers match */
if (xattr_size != new_size) {
printk(KERN_ERR
"__jfs_setxattr: xattr_size = %d, new_size = %d\n",
xattr_size, new_size);
rc = -EINVAL;
goto release;
}
/*
* If we're left with an empty list, there's no ea
*/
if (new_size == sizeof (struct jfs_ea_list))
new_size = 0;
ealist->size = cpu_to_le32(new_size);
rc = ea_put(tid, inode, &ea_buf, new_size);
goto out;
release:
ea_release(inode, &ea_buf);
out:
up_write(&JFS_IP(inode)->xattr_sem);
return rc;
}
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
size_t buf_size)
{
struct jfs_ea_list *ealist;
struct jfs_ea *ea;
struct ea_buffer ea_buf;
int xattr_size;
ssize_t size;
int namelen = strlen(name);
char *value;
down_read(&JFS_IP(inode)->xattr_sem);
xattr_size = ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
size = xattr_size;
goto out;
}
if (xattr_size == 0)
goto not_found;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
/* Find the named attribute */
for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
if ((namelen == ea->namelen) &&
memcmp(name, ea->name, namelen) == 0) {
/* Found it */
size = le16_to_cpu(ea->valuelen);
if (!data)
goto release;
else if (size > buf_size) {
size = -ERANGE;
goto release;
}
value = ((char *) &ea->name) + ea->namelen + 1;
memcpy(data, value, size);
goto release;
}
not_found:
size = -ENODATA;
release:
ea_release(inode, &ea_buf);
out:
up_read(&JFS_IP(inode)->xattr_sem);
return size;
}
/*
* No special permissions are needed to list attributes except for trusted.*
*/
static inline int can_list(struct jfs_ea *ea)
{
return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
XATTR_TRUSTED_PREFIX_LEN) ||
capable(CAP_SYS_ADMIN));
}
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
struct inode *inode = d_inode(dentry);
char *buffer;
ssize_t size = 0;
int xattr_size;
struct jfs_ea_list *ealist;
struct jfs_ea *ea;
struct ea_buffer ea_buf;
down_read(&JFS_IP(inode)->xattr_sem);
xattr_size = ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
size = xattr_size;
goto out;
}
if (xattr_size == 0)
goto release;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
/* compute required size of list */
for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
if (can_list(ea))
size += name_size(ea) + 1;
}
if (!data)
goto release;
if (size > buf_size) {
size = -ERANGE;
goto release;
}
/* Copy attribute names to buffer */
buffer = data;
for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
if (can_list(ea)) {
int namelen = copy_name(buffer, ea);
buffer += namelen + 1;
}
}
release:
ea_release(inode, &ea_buf);
out:
up_read(&JFS_IP(inode)->xattr_sem);
return size;
}
static int __jfs_xattr_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct jfs_inode_info *ji = JFS_IP(inode);
tid_t tid;
int rc;
tid = txBegin(inode->i_sb, 0);
mutex_lock(&ji->commit_mutex);
rc = __jfs_setxattr(tid, inode, name, value, size, flags);
if (!rc)
rc = txCommit(tid, 1, &inode, 0);
txEnd(tid);
mutex_unlock(&ji->commit_mutex);
return rc;
}
static int jfs_xattr_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *value, size_t size)
{
name = xattr_full_name(handler, name);
return __jfs_getxattr(inode, name, value, size);
}
static int jfs_xattr_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
name = xattr_full_name(handler, name);
return __jfs_xattr_set(inode, name, value, size, flags);
}
static int jfs_xattr_get_os2(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *value, size_t size)
{
if (is_known_namespace(name))
return -EOPNOTSUPP;
return __jfs_getxattr(inode, name, value, size);
}
static int jfs_xattr_set_os2(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
if (is_known_namespace(name))
return -EOPNOTSUPP;
return __jfs_xattr_set(inode, name, value, size, flags);
}
static const struct xattr_handler jfs_user_xattr_handler = {
.prefix = XATTR_USER_PREFIX,
.get = jfs_xattr_get,
.set = jfs_xattr_set,
};
static const struct xattr_handler jfs_os2_xattr_handler = {
.prefix = XATTR_OS2_PREFIX,
.get = jfs_xattr_get_os2,
.set = jfs_xattr_set_os2,
};
static const struct xattr_handler jfs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = jfs_xattr_get,
.set = jfs_xattr_set,
};
static const struct xattr_handler jfs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = jfs_xattr_get,
.set = jfs_xattr_set,
};
const struct xattr_handler *jfs_xattr_handlers[] = {
&jfs_os2_xattr_handler,
&jfs_user_xattr_handler,
&jfs_security_xattr_handler,
&jfs_trusted_xattr_handler,
NULL,
};
#ifdef CONFIG_JFS_SECURITY
static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
tid_t *tid = fs_info;
char *name;
int err = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
strlen(xattr->name) + 1, GFP_NOFS);
if (!name) {
err = -ENOMEM;
break;
}
strcpy(name, XATTR_SECURITY_PREFIX);
strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
err = __jfs_setxattr(*tid, inode, name,
xattr->value, xattr->value_len, 0);
kfree(name);
if (err < 0)
break;
}
return err;
}
int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
return security_inode_init_security(inode, dir, qstr,
&jfs_initxattrs, &tid);
}
#endif
| linux-master | fs/jfs/xattr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_debug.h"
#ifdef PROC_FS_JFS /* see jfs_debug.h */
#ifdef CONFIG_JFS_DEBUG
static int jfs_loglevel_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", jfsloglevel);
return 0;
}
static int jfs_loglevel_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, jfs_loglevel_proc_show, NULL);
}
static ssize_t jfs_loglevel_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
char c;
if (get_user(c, buffer))
return -EFAULT;
/* yes, I know this is an ASCIIism. --hch */
if (c < '0' || c > '9')
return -EINVAL;
jfsloglevel = c - '0';
return count;
}
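/*
 * Example (editor's note): with CONFIG_JFS_DEBUG enabled, the log level
 * can be inspected and changed from userspace, e.g.
 *
 *	cat /proc/fs/jfs/loglevel
 *	echo 3 > /proc/fs/jfs/loglevel
 *
 * Only the first character of the write is parsed, so any single digit
 * 0-9 is accepted.
 */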
static const struct proc_ops jfs_loglevel_proc_ops = {
.proc_open = jfs_loglevel_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = jfs_loglevel_proc_write,
};
#endif
void jfs_proc_init(void)
{
struct proc_dir_entry *base;
base = proc_mkdir("fs/jfs", NULL);
if (!base)
return;
#ifdef CONFIG_JFS_STATISTICS
proc_create_single("lmstats", 0, base, jfs_lmstats_proc_show);
proc_create_single("txstats", 0, base, jfs_txstats_proc_show);
proc_create_single("xtstat", 0, base, jfs_xtstat_proc_show);
proc_create_single("mpstat", 0, base, jfs_mpstat_proc_show);
#endif
#ifdef CONFIG_JFS_DEBUG
proc_create_single("TxAnchor", 0, base, jfs_txanchor_proc_show);
proc_create("loglevel", 0, base, &jfs_loglevel_proc_ops);
#endif
}
void jfs_proc_clean(void)
{
remove_proc_subtree("fs/jfs", NULL);
}
#endif /* PROC_FS_JFS */
| linux-master | fs/jfs/jfs_debug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_dinode.h"
#include "jfs_debug.h"
void jfs_set_inode_flags(struct inode *inode)
{
unsigned int flags = JFS_IP(inode)->mode2;
unsigned int new_fl = 0;
if (flags & JFS_IMMUTABLE_FL)
new_fl |= S_IMMUTABLE;
if (flags & JFS_APPEND_FL)
new_fl |= S_APPEND;
if (flags & JFS_NOATIME_FL)
new_fl |= S_NOATIME;
if (flags & JFS_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
if (flags & JFS_SYNC_FL)
new_fl |= S_SYNC;
inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND | S_NOATIME |
S_DIRSYNC | S_SYNC);
}
/*
* NAME: ialloc()
*
* FUNCTION: Allocate a new inode
*
*/
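/*
 * Hedged usage sketch (editor's note): a typical caller such as
 * jfs_create() is assumed to do roughly
 *
 *	inode = ialloc(dip, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * i.e. the result is either a locked, quota-charged inode or an
 * ERR_PTR() value that must be checked with IS_ERR().
 */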
struct inode *ialloc(struct inode *parent, umode_t mode)
{
struct super_block *sb = parent->i_sb;
struct inode *inode;
struct jfs_inode_info *jfs_inode;
int rc;
inode = new_inode(sb);
if (!inode) {
jfs_warn("ialloc: new_inode returned NULL!");
return ERR_PTR(-ENOMEM);
}
jfs_inode = JFS_IP(inode);
rc = diAlloc(parent, S_ISDIR(mode), inode);
if (rc) {
jfs_warn("ialloc: diAlloc returned %d!", rc);
goto fail_put;
}
if (insert_inode_locked(inode) < 0) {
rc = -EINVAL;
goto fail_put;
}
inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
/*
* New inodes need to save sane values on disk when
* uid & gid mount options are used
*/
jfs_inode->saved_uid = inode->i_uid;
jfs_inode->saved_gid = inode->i_gid;
/*
* Allocate inode to quota.
*/
rc = dquot_initialize(inode);
if (rc)
goto fail_drop;
rc = dquot_alloc_inode(inode);
if (rc)
goto fail_drop;
/* inherit flags from parent */
jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;
if (S_ISDIR(mode)) {
jfs_inode->mode2 |= IDIRECTORY;
jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
}
else {
jfs_inode->mode2 |= INLINEEA | ISPARSE;
if (S_ISLNK(mode))
jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
}
jfs_inode->mode2 |= inode->i_mode;
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
jfs_inode->otime = inode_get_ctime(inode).tv_sec;
inode->i_generation = JFS_SBI(sb)->gengen++;
jfs_inode->cflag = 0;
/* Zero remaining fields */
memset(&jfs_inode->acl, 0, sizeof(dxd_t));
memset(&jfs_inode->ea, 0, sizeof(dxd_t));
jfs_inode->next_index = 0;
jfs_inode->acltype = 0;
jfs_inode->btorder = 0;
jfs_inode->btindex = 0;
jfs_inode->bxflag = 0;
jfs_inode->blid = 0;
jfs_inode->atlhead = 0;
jfs_inode->atltail = 0;
jfs_inode->xtlid = 0;
jfs_set_inode_flags(inode);
jfs_info("ialloc returns inode = 0x%p", inode);
return inode;
fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
discard_new_inode(inode);
return ERR_PTR(rc);
fail_put:
iput(inode);
return ERR_PTR(rc);
}
| linux-master | fs/jfs/jfs_inode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Tino Reichardt, 2012
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_lock.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
#include "jfs_discard.h"
/*
* SERIALIZATION of the Block Allocation Map.
*
* the working state of the block allocation map is accessed in
* two directions:
*
* 1) allocation and free requests that start at the dmap
* level and move up through the dmap control pages (i.e.
* the vast majority of requests).
*
* 2) allocation requests that start at dmap control page
* level and work down towards the dmaps.
*
* the serialization scheme used here is as follows.
*
* requests which start at the bottom are serialized against each
 * other through buffers and each request holds onto its buffers
 * as it works its way up from a single dmap to the required level
 * of dmap control page.
 * requests that start at the top are serialized against each other
 * and against requests that start from the bottom by the multiple
 * read/single write inode lock of the bmap inode. requests starting
 * at the top take this lock in write mode while requests starting
 * at the bottom take the lock in read mode. a single top-down
 * request may proceed exclusively while multiple bottom-up requests
 * may proceed simultaneously (under the protection of busy buffers).
*
* in addition to information found in dmaps and dmap control pages,
* the working state of the block allocation map also includes read/
* write information maintained in the bmap descriptor (i.e. total
* free block count, allocation group level free block counts).
* a single exclusive lock (BMAP_LOCK) is used to guard this information
 * in the face of multiple bottom-up requests.
* (lock ordering: IREAD_LOCK, BMAP_LOCK);
*
* accesses to the persistent state of the block allocation map (limited
* to the persistent bitmaps in dmaps) is guarded by (busy) buffers.
*/
#define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock)
#define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock)
#define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock)
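/*
 * Illustrative sketch (editor's note): the serialization rules above
 * mean a bottom-up path looks roughly like
 *
 *	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 *	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);	(busy buffer)
 *	BMAP_LOCK(bmp);		(only while touching bmp->db_* counters)
 *	...
 *	BMAP_UNLOCK(bmp);
 *	release_metapage(mp);
 *	IREAD_UNLOCK(ipbmap);
 *
 * while a top-down path takes IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP)
 * instead and therefore runs exclusively.
 */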
/*
* forward references
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static int dbBackSplit(dmtree_t * tp, int leafno);
static int dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks,
int l2nb, s64 * results);
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
int l2nb,
s64 * results);
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
s64 * results);
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
s64 * results);
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbMaxBud(u8 * cp);
static int blkstol2(s64 nb);
static int cntlz(u32 value);
static int cnttz(u32 word);
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);
/*
* buddy table
*
* table used for determining buddy sizes within characters of
* dmap bitmap words. the characters themselves serve as indexes
* into the table, with the table elements yielding the maximum
* binary buddy of free bits within the character.
*/
static const s8 budtab[256] = {
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
};
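/*
 * Worked examples (editor's note): a 0 bit in the character means the
 * corresponding block is free, and the table gives the log2 size of the
 * largest aligned run of free bits:
 *
 *	budtab[0x00] = 3	all eight bits free -> buddy of 2^3 blocks
 *	budtab[0x0f] = 2	high nibble free -> aligned buddy of 2^2
 *	budtab[0xff] = -1	no free bits at all
 */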
/*
* NAME: dbMount()
*
 * FUNCTION: initialize the block allocation map.
*
* memory is allocated for the in-core bmap descriptor and
* the in-core descriptor is initialized from disk.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
* -EIO - i/o error
* -EINVAL - wrong bmap data
*/
int dbMount(struct inode *ipbmap)
{
struct bmap *bmp;
struct dbmap_disk *dbmp_le;
struct metapage *mp;
int i, err;
/*
* allocate/initialize the in-memory bmap descriptor
*/
/* allocate memory for the in-memory bmap descriptor */
bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
if (bmp == NULL)
return -ENOMEM;
/* read the on-disk bmap descriptor. */
mp = read_metapage(ipbmap,
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
err = -EIO;
goto err_kfree_bmp;
}
/* copy the on-disk bmap descriptor to its in-memory version. */
dbmp_le = (struct dbmap_disk *) mp->data;
bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
err = -EINVAL;
goto err_release_metapage;
}
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
if (!bmp->db_numag) {
err = -EINVAL;
goto err_release_metapage;
}
bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
bmp->db_agl2size < 0) {
err = -EINVAL;
goto err_release_metapage;
}
if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
err = -EINVAL;
goto err_release_metapage;
}
for (i = 0; i < MAXAG; i++)
bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
/* release the buffer. */
release_metapage(mp);
/* bind the bmap inode and the bmap descriptor to each other. */
bmp->db_ipbmap = ipbmap;
JFS_SBI(ipbmap->i_sb)->bmap = bmp;
memset(bmp->db_active, 0, sizeof(bmp->db_active));
/*
* allocate/initialize the bmap lock
*/
BMAP_LOCK_INIT(bmp);
return (0);
err_release_metapage:
release_metapage(mp);
err_kfree_bmp:
kfree(bmp);
return err;
}
/*
* NAME: dbUnmount()
*
* FUNCTION: terminate the block allocation map in preparation for
* file system unmount.
*
* the in-core bmap descriptor is written to disk and
* the memory for this descriptor is freed.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbUnmount(struct inode *ipbmap, int mounterror)
{
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
if (!(mounterror || isReadOnly(ipbmap)))
dbSync(ipbmap);
/*
* Invalidate the page cache buffers
*/
truncate_inode_pages(ipbmap->i_mapping, 0);
/* free the memory for the in-memory bmap. */
kfree(bmp);
JFS_SBI(ipbmap->i_sb)->bmap = NULL;
return (0);
}
/*
* dbSync()
*/
int dbSync(struct inode *ipbmap)
{
struct dbmap_disk *dbmp_le;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *mp;
int i;
/*
* write bmap global control page
*/
/* get the buffer for the on-disk bmap descriptor. */
mp = read_metapage(ipbmap,
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
jfs_err("dbSync: read_metapage failed!");
return -EIO;
}
/* copy the in-memory version of the bmap to the on-disk version */
dbmp_le = (struct dbmap_disk *) mp->data;
dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
for (i = 0; i < MAXAG; i++)
dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
/* write the buffer */
write_metapage(mp);
/*
* write out dirty pages of bmap
*/
filemap_write_and_wait(ipbmap->i_mapping);
diWriteSpecial(ipbmap, 0);
return (0);
}
/*
* NAME: dbFree()
*
* FUNCTION: free the specified block range from the working block
* allocation map.
*
* the blocks will be free from the working map one dmap
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct super_block *sb = ipbmap->i_sb;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* block to be freed better be within the mapsize. */
if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
IREAD_UNLOCK(ipbmap);
printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(ip->i_sb, "block to be freed is outside the map\n");
return -EIO;
}
	/*
	 * TRIM the blocks when mounted with the discard option
	 */
if (JFS_SBI(sb)->flag & JFS_DISCARD)
if (JFS_SBI(sb)->minblks_trim <= nblocks)
jfs_issue_discard(ipbmap, blkno, nblocks);
/*
* free the blocks a dmap at a time.
*/
mp = NULL;
for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
/* release previous dmap if any */
if (mp) {
write_metapage(mp);
}
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be freed from
* this dmap.
*/
nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
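		/* e.g. (editor's note) with BPERDMAP = 8192, freeing
		 * blkno = 8000, nblocks = 400 gives nb = 192 for the first
		 * dmap and nb = 208 for the next, splitting the range
		 * cleanly at the dmap boundary.
		 */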
/* free the blocks. */
if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
jfs_error(ip->i_sb, "error in block map\n");
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (rc);
}
}
/* write the last buffer. */
if (mp)
write_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (0);
}
/*
* NAME: dbUpdatePMap()
*
* FUNCTION: update the allocation state (free or allocate) of the
* specified block range in the persistent block allocation map.
*
* the blocks will be updated in the persistent map one
* dmap at a time.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* free - 'true' if block range is to be freed from the persistent
* map; 'false' if it is to be allocated.
* blkno - starting block number of the range.
* nblocks - number of contiguous blocks in the range.
* tblk - transaction block;
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int
dbUpdatePMap(struct inode *ipbmap,
int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
int nblks, dbitno, wbitno, rbits;
int word, nbits, nwords;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
s64 lblkno, rem, lastlblkno;
u32 mask;
struct dmap *dp;
struct metapage *mp;
struct jfs_log *log;
int lsn, difft, diffp;
unsigned long flags;
/* the blocks better be within the mapsize. */
if (blkno + nblocks > bmp->db_mapsize) {
printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
return -EIO;
}
/* compute delta of transaction lsn from log syncpt */
lsn = tblk->lsn;
log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
logdiff(difft, lsn, log);
/*
* update the block state a dmap at a time.
*/
mp = NULL;
lastlblkno = 0;
for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
if (lblkno != lastlblkno) {
if (mp) {
write_metapage(mp);
}
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
0);
if (mp == NULL)
return -EIO;
metapage_wait_for_io(mp);
}
dp = (struct dmap *) mp->data;
/* determine the bit number and word within the dmap of
* the starting block. also determine how many blocks
* are to be updated within this dmap.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
nblks = min(rem, (s64)BPERDMAP - dbitno);
/* update the bits of the dmap words. the first and last
* words may only have a subset of their bits updated. if
* this is the case, we'll work against that word (i.e.
* partial first and/or last) only in a single pass. a
* single pass will also be used to update all words that
* are to have all their bits updated.
*/
for (rbits = nblks; rbits > 0;
rbits -= nbits, dbitno += nbits) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nbits = min(rbits, DBWORD - wbitno);
/* check if only part of the word is to be updated. */
if (nbits < DBWORD) {
/* update (free or allocate) the bits
* in this word.
*/
mask =
(ONES << (DBWORD - nbits) >> wbitno);
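				/* e.g. (editor's note) DBWORD = 32,
				 * wbitno = 4, nbits = 8:
				 * mask = 0xffffffff << 24 >> 4 = 0x0ff00000,
				 * i.e. bits 4..11 counting from the MSB.
				 */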
if (free)
dp->pmap[word] &=
cpu_to_le32(~mask);
else
dp->pmap[word] |=
cpu_to_le32(mask);
word += 1;
} else {
/* one or more words are to have all
* their bits updated. determine how
* many words and how many bits.
*/
nwords = rbits >> L2DBWORD;
nbits = nwords << L2DBWORD;
/* update (free or allocate) the bits
* in these words.
*/
if (free)
memset(&dp->pmap[word], 0,
nwords * 4);
else
memset(&dp->pmap[word], (int) ONES,
nwords * 4);
word += nwords;
}
}
/*
* update dmap lsn
*/
if (lblkno == lastlblkno)
continue;
lastlblkno = lblkno;
LOGSYNC_LOCK(log, flags);
if (mp->lsn != 0) {
/* inherit older/smaller lsn */
logdiff(diffp, mp->lsn, log);
if (difft < diffp) {
mp->lsn = lsn;
/* move bp after tblock in logsync list */
list_move(&mp->synclist, &tblk->synclist);
}
/* inherit younger/larger clsn */
logdiff(difft, tblk->clsn, log);
logdiff(diffp, mp->clsn, log);
if (difft > diffp)
mp->clsn = tblk->clsn;
} else {
mp->log = log;
mp->lsn = lsn;
/* insert bp after tblock in logsync list */
log->count++;
list_add(&mp->synclist, &tblk->synclist);
mp->clsn = tblk->clsn;
}
LOGSYNC_UNLOCK(log, flags);
}
/* write the last buffer. */
if (mp) {
write_metapage(mp);
}
return (0);
}
/*
* NAME: dbNextAG()
*
* FUNCTION: find the preferred allocation group for new allocations.
*
* Within the allocation groups, we maintain a preferred
* allocation group which consists of a group with at least
* average free space. It is the preferred group that we target
* new inode allocation towards. The tie-in between inode
* allocation and block allocation occurs as we allocate the
* first (data) block of an inode and specify the inode (block)
* as the allocation hint for this block.
*
* We try to avoid having more than one open file growing in
* an allocation group, as this will lead to fragmentation.
* This differs from the old OS/2 method of trying to keep
* empty ags around for large allocations.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* the preferred allocation group number.
*/
int dbNextAG(struct inode *ipbmap)
{
s64 avgfree;
int agpref;
s64 hwm = 0;
int i;
int next_best = -1;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
BMAP_LOCK(bmp);
/* determine the average number of free blocks within the ags. */
avgfree = (u32)bmp->db_nfree / bmp->db_numag;
/*
* if the current preferred ag does not have an active allocator
* and has at least average freespace, return it
*/
agpref = bmp->db_agpref;
if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
(bmp->db_agfree[agpref] >= avgfree))
goto unlock;
/* From the last preferred ag, find the next one with at least
* average free space.
*/
	for (i = 0; i < bmp->db_numag; i++, agpref++) {
if (agpref == bmp->db_numag)
agpref = 0;
if (atomic_read(&bmp->db_active[agpref]))
/* open file is currently growing in this ag */
continue;
if (bmp->db_agfree[agpref] >= avgfree) {
/* Return this one */
bmp->db_agpref = agpref;
goto unlock;
} else if (bmp->db_agfree[agpref] > hwm) {
/* Less than avg. freespace, but best so far */
hwm = bmp->db_agfree[agpref];
next_best = agpref;
}
}
/*
* If no inactive ag was found with average freespace, use the
* next best
*/
if (next_best != -1)
bmp->db_agpref = next_best;
/* else leave db_agpref unchanged */
unlock:
BMAP_UNLOCK(bmp);
/* return the preferred group.
*/
return (bmp->db_agpref);
}
/*
* NAME: dbAlloc()
*
* FUNCTION: attempt to allocate a specified number of contiguous free
* blocks from the working allocation block map.
*
* the block allocation policy uses hints and a multi-step
* approach.
*
* for allocation requests smaller than the number of blocks
* per dmap, we first try to allocate the new blocks
* immediately following the hint. if these blocks are not
* available, we try to allocate blocks near the hint. if
* no blocks near the hint are available, we next try to
* allocate within the same dmap as contains the hint.
*
* if no blocks are available in the dmap or the allocation
* request is larger than the dmap size, we try to allocate
* within the same allocation group as contains the hint. if
* this does not succeed, we finally try to allocate anywhere
* within the aggregate.
*
* we also try to allocate anywhere within the aggregate
* for allocation requests larger than the allocation group
* size or requests that specify no hint value.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* hint - allocation hint.
* nblocks - number of contiguous blocks in the range.
* results - on successful return, set to the starting block number
* of the newly allocated contiguous range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
int rc, agno;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp;
struct metapage *mp;
s64 lblkno, blkno;
struct dmap *dp;
int l2nb;
s64 mapSize;
int writers;
/* assert that nblocks is valid */
assert(nblocks > 0);
/* get the log2 number of blocks to be allocated.
* if the number of blocks is not a log2 multiple,
* it will be rounded up to the next log2 multiple.
*/
l2nb = BLKSTOL2(nblocks);
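	/* e.g. (editor's note) BLKSTOL2(6) = 3, since 6 rounds up to 8 = 2^3 */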
bmp = JFS_SBI(ip->i_sb)->bmap;
mapSize = bmp->db_mapsize;
/* the hint should be within the map */
if (hint >= mapSize) {
jfs_error(ip->i_sb, "the hint is outside the map\n");
return -EIO;
}
/* if the number of blocks to be allocated is greater than the
* allocation group size, try to allocate anywhere.
*/
if (l2nb > bmp->db_agl2size) {
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
rc = dbAllocAny(bmp, nblocks, l2nb, results);
goto write_unlock;
}
/*
* If no hint, let dbNextAG recommend an allocation group
*/
if (hint == 0)
goto pref_ag;
/* we would like to allocate close to the hint. adjust the
* hint to the block following the hint since the allocators
* will start looking for free space starting at this point.
*/
blkno = hint + 1;
if (blkno >= bmp->db_mapsize)
goto pref_ag;
agno = blkno >> bmp->db_agl2size;
/* check if blkno crosses over into a new allocation group.
* if so, check if we should allow allocations within this
* allocation group.
*/
if ((blkno & (bmp->db_agsize - 1)) == 0)
/* check if the AG is currently being written to.
* if so, call dbNextAG() to find a non-busy
* AG with sufficient free space.
*/
if (atomic_read(&bmp->db_active[agno]))
goto pref_ag;
/* check if the allocation request size can be satisfied from a
* single dmap. if so, try to allocate from the dmap containing
* the hint using a tiered strategy.
*/
if (nblocks <= BPERDMAP) {
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* get the buffer for the dmap containing the hint.
*/
rc = -EIO;
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
goto read_unlock;
dp = (struct dmap *) mp->data;
/* first, try to satisfy the allocation request with the
* blocks beginning at the hint.
*/
if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
!= -ENOSPC) {
if (rc == 0) {
*results = blkno;
mark_metapage_dirty(mp);
}
release_metapage(mp);
goto read_unlock;
}
writers = atomic_read(&bmp->db_active[agno]);
if ((writers > 1) ||
((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
/*
* Someone else is writing in this allocation
* group. To avoid fragmenting, try another ag
*/
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
goto pref_ag;
}
/* next, try to satisfy the allocation request with blocks
* near the hint.
*/
if ((rc =
dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
!= -ENOSPC) {
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
goto read_unlock;
}
/* try to satisfy the allocation request with blocks within
* the same dmap as the hint.
*/
if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
!= -ENOSPC) {
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
goto read_unlock;
}
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
}
/* try to satisfy the allocation request with blocks within
* the same allocation group as the hint.
*/
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
goto write_unlock;
IWRITE_UNLOCK(ipbmap);
pref_ag:
/*
* Let dbNextAG recommend a preferred allocation group
*/
agno = dbNextAG(ipbmap);
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
/* Try to allocate within this allocation group. if that fails, try to
* allocate anywhere in the map.
*/
if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
rc = dbAllocAny(bmp, nblocks, l2nb, results);
write_unlock:
IWRITE_UNLOCK(ipbmap);
return (rc);
read_unlock:
IREAD_UNLOCK(ipbmap);
return (rc);
}
/*
* NAME: dbReAlloc()
*
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
* by first trying to extend the existing allocation in
* place by allocating the additional blocks as the blocks
* immediately following the current allocation. if these
* blocks are not available, this routine will attempt to
* allocate a new set of contiguous blocks large enough
* to cover the existing allocation plus the additional
* number of blocks required.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
* results - on successful return, set to the starting block number
* of the existing allocation if the existing allocation
* was extended in place or to a newly allocated contiguous
* range if the existing allocation could not be extended
* in place.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int
dbReAlloc(struct inode *ip,
s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
{
int rc;
/* try to extend the allocation in place.
*/
if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
*results = blkno;
return (0);
} else {
if (rc != -ENOSPC)
return (rc);
}
/* could not extend the allocation in place, so allocate a
* new set of blocks for the entire request (i.e. try to get
* a range of contiguous blocks large enough to cover the
* existing allocation plus the additional blocks.)
*/
return (dbAlloc
(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
}
/*
* NAME: dbExtend()
*
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
* by first trying to extend the existing allocation in
* place by allocating the additional blocks as the blocks
* immediately following the current allocation.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
s64 lblkno, lastblkno, extblkno;
uint rel_block;
struct metapage *mp;
struct dmap *dp;
int rc;
struct inode *ipbmap = sbi->ipbmap;
struct bmap *bmp;
/*
* We don't want a non-aligned extent to cross a page boundary
*/
if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
(rel_block + nblocks + addnblocks > sbi->nbperpage))
return -ENOSPC;
/* get the last block of the current allocation */
lastblkno = blkno + nblocks - 1;
/* determine the block number of the block following
* the existing allocation.
*/
extblkno = lastblkno + 1;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* better be within the file system */
bmp = sbi->bmap;
if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
IREAD_UNLOCK(ipbmap);
jfs_error(ip->i_sb, "the block is outside the filesystem\n");
return -EIO;
}
/* we'll attempt to extend the current allocation in place by
* allocating the additional blocks as the blocks immediately
* following the current allocation. we only try to extend the
* current allocation in place if the number of additional blocks
* can fit into a dmap, the last block of the current allocation
* is not the last block of the file system, and the start of the
* inplace extension is not on an allocation group boundary.
*/
if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
(extblkno & (bmp->db_agsize - 1)) == 0) {
IREAD_UNLOCK(ipbmap);
return -ENOSPC;
}
/* get the buffer for the dmap containing the first block
* of the extension.
*/
lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* try to allocate the blocks immediately following the
* current allocation.
*/
rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
IREAD_UNLOCK(ipbmap);
/* were we successful ? */
if (rc == 0)
write_metapage(mp);
else
/* we were not successful */
release_metapage(mp);
return (rc);
}
/*
* NAME: dbAllocNext()
*
* FUNCTION: attempt to allocate the blocks of the specified block
* range within a dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - starting block number of the range.
* nblocks - number of contiguous free blocks of the range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw;
int l2size;
s8 *leaf;
u32 mask;
if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
return -EIO;
}
/* pick up a pointer to the leaves of the dmap tree.
*/
leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* check if the specified block range is contained within
* this dmap.
*/
if (dbitno + nblocks > BPERDMAP)
return -ENOSPC;
/* check if the starting leaf indicates that anything
* is free.
*/
if (leaf[word] == NOFREE)
return -ENOSPC;
/* check the dmaps words corresponding to block range to see
* if the block range is free. not all bits of the first and
* last words may be contained within the block range. if this
* is the case, we'll work against those words (i.e. partial first
* and/or last) on an individual basis (a single pass) and examine
* the actual bits to determine if they are free. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the leaves of the dmap
* tree will be examined to determine if the blocks are free. a
* single leaf may describe the free space of multiple dmap
* words, so we may visit only a subset of the actual leaves
* corresponding to the dmap words of the block range.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of the word is to be examined.
*/
if (nb < DBWORD) {
/* check if the bits are free.
*/
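/* e.g. (a sketch, not in the original comments): with nb = 4 and
* wbitno = 8, ONES << 28 >> 8 == 0x00f00000, selecting bits 8..11
* of the word, where bit 0 is the most significant bit.
*/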
mask = (ONES << (DBWORD - nb) >> wbitno);
if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
return -ENOSPC;
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and how many bits.
*/
nwords = rembits >> L2DBWORD;
nb = nwords << L2DBWORD;
/* now examine the appropriate leaves to determine
* if the blocks are free.
*/
while (nwords > 0) {
/* does the leaf describe any free space ?
*/
if (leaf[word] < BUDMIN)
return -ENOSPC;
/* determine the l2 number of bits provided
* by this leaf.
*/
l2size =
min_t(int, leaf[word], NLSTOL2BSZ(nwords));
/* determine how many words were handled.
*/
nw = BUDSIZE(l2size, BUDMIN);
nwords -= nw;
word += nw;
}
}
}
/* allocate the blocks.
*/
return (dbAllocDmap(bmp, dp, blkno, nblocks));
}
/*
* NAME: dbAllocNear()
*
* FUNCTION: attempt to allocate a number of contiguous free blocks near
* a specified block (hint) within a dmap.
*
* starting with the dmap leaf that covers the hint, we'll
* check the next four contiguous leaves for sufficient free
* space. if sufficient free space is found, we'll allocate
* the desired free space.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - block number to allocate near.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocNear(struct bmap * bmp,
struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
{
int word, lword, rc;
s8 *leaf;
if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
return -EIO;
}
leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
/* determine the word within the dmap that holds the hint
* (i.e. blkno). also, determine the last word in the dmap
* that we'll include in our examination.
*/
word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
lword = min(word + 4, LPERDMAP);
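/* worked example (assuming the usual JFS geometry of 32-bit dmap
* words): a hint at dmap-relative block 100 lands in word
* 100 >> 5 = 3, so words (leaves) 3..6 are examined.
*/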
/* examine the leaves for sufficient free space.
*/
for (; word < lword; word++) {
/* does the leaf describe sufficient free space ?
*/
if (leaf[word] < l2nb)
continue;
/* determine the block number within the file system
* of the first block described by this dmap word.
*/
blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
/* if not all bits of the dmap word are free, get the
* starting bit number within the dmap word of the required
* string of free bits and adjust the block number with the
* value.
*/
if (leaf[word] < BUDMIN)
blkno +=
dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
/* allocate the blocks.
*/
if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
*results = blkno;
return (rc);
}
return -ENOSPC;
}
/*
* NAME: dbAllocAG()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks within the specified allocation group.
*
* unless the allocation group size is equal to the number
* of blocks per dmap, the dmap control pages will be used to
* find the required free space, if available. we start the
* search at the highest dmap control page level which
* distinctly describes the allocation group's free space
* (i.e. the highest level at which the allocation group's
* free space is not mixed in with that of any other group).
* in addition, we start the search within this level at a
* height of the dmapctl dmtree at which the nodes distinctly
* describe the allocation group's free space. at this height,
* the allocation group's free space may be represented by 1
* or 2 subtrees, depending on the allocation group size.
* we search the top nodes of these subtrees left to right for
* sufficient free space. if sufficient free space is found,
* the subtree is searched to find the leftmost leaf that
* has free space. once we have made it to the leaf, we
* move the search to the next lower level dmap control page
* corresponding to this leaf. we continue down the dmap control
* pages until we find the dmap that contains or starts the
* sufficient free space and we allocate at this dmap.
*
* if the allocation group size is equal to the dmap size,
* we'll start at the dmap corresponding to the allocation
* group and attempt the allocation at this level.
*
* the dmap control page search is also not performed if the
* allocation group is completely free; instead, we go straight to
* the first dmap of the allocation group to do the allocation. this is
* done because the allocation group may be part (not the first
* part) of a larger binary buddy system, causing the dmap
* control pages to indicate no free space (NOFREE) within
* the allocation group.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* agno - allocation group number.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* note: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
struct metapage *mp;
struct dmapctl *dcp;
int rc, ti, i, k, m, n, agperlev;
s64 blkno, lblkno;
int budmin;
/* allocation request should not be for more than the
* allocation group size.
*/
if (l2nb > bmp->db_agl2size) {
jfs_error(bmp->db_ipbmap->i_sb,
"allocation request is larger than the allocation group size\n");
return -EIO;
}
/* determine the starting block number of the allocation
* group.
*/
blkno = (s64) agno << bmp->db_agl2size;
/* check if the allocation group size is the minimum allocation
* group size or if the allocation group is completely free. if
* the allocation group size is the minimum size of BPERDMAP (i.e.
* 1 dmap), there is no need to search the dmap control page (below)
* that fully describes the allocation group since the allocation
* group is already fully described by a dmap. in this case, we
* just call dbAllocCtl() to search the dmap tree and allocate the
* required space if available.
*
* if the allocation group is completely free, dbAllocCtl() is
* also called to allocate the required space. this is done for
* two reasons. first, it makes no sense searching the dmap control
* pages for free space when we know that free space exists. second,
* the dmap control pages may indicate that the allocation group
* has no free space if the allocation group is part (not the first
* part) of a larger binary buddy system.
*/
if (bmp->db_agsize == BPERDMAP
|| bmp->db_agfree[agno] == bmp->db_agsize) {
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if ((rc == -ENOSPC) &&
(bmp->db_agfree[agno] == bmp->db_agsize)) {
printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocCtl failed in free AG\n");
}
return (rc);
}
/* the buffer for the dmap control page that fully describes the
* allocation group.
*/
lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
release_metapage(mp);
return -EIO;
}
/* search the subtree(s) of the dmap control page that describes
* the allocation group, looking for sufficient free space. to begin,
* determine how many allocation groups are represented in a dmap
* control page at the control page level (i.e. L0, L1, L2) that
* fully describes an allocation group. next, determine the starting
* tree index of this allocation group within the control page.
*/
agperlev =
(1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
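/* illustrative numbers (an assumption, not from the source): with
* L2LPERCTL = 10 (1024 leaves per dmapctl), db_agheight = 1 and
* db_agwidth = 1, agperlev = 1 << (10 - 2) = 256, so this control
* page describes 256 allocation groups and ti selects the subtree
* for agno modulo 256.
*/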
/* dmap control page trees fan-out by 4 and a single allocation
* group may be described by 1 or 2 subtrees within the ag level
* dmap control page, depending upon the ag size. examine the ag's
* subtrees for sufficient free space, starting with the leftmost
* subtree.
*/
for (i = 0; i < bmp->db_agwidth; i++, ti++) {
/* is there sufficient free space ?
*/
if (l2nb > dcp->stree[ti])
continue;
/* sufficient free space found in a subtree. now search down
* the subtree to find the leftmost leaf that describes this
* free space.
*/
for (k = bmp->db_agheight; k > 0; k--) {
for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
if (l2nb <= dcp->stree[m + n]) {
ti = m + n;
break;
}
}
if (n == 4) {
jfs_error(bmp->db_ipbmap->i_sb,
"failed descending stree\n");
release_metapage(mp);
return -EIO;
}
}
/* determine the block number within the file system
* that corresponds to this leaf.
*/
if (bmp->db_aglevel == 2)
blkno = 0;
else if (bmp->db_aglevel == 1)
blkno &= ~(MAXL1SIZE - 1);
else /* bmp->db_aglevel == 0 */
blkno &= ~(MAXL0SIZE - 1);
blkno +=
((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
/* release the buffer in preparation for going down
* the next level of dmap control pages.
*/
release_metapage(mp);
/* check if we need to continue to search down the lower
* level dmap control pages. we need to if the number of
* blocks required is less than maximum number of blocks
* described at the next lower level.
*/
if (l2nb < budmin) {
/* search the lower level dmap control pages to get
* the starting block number of the dmap that
* contains or starts off the free space.
*/
if ((rc =
dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
&blkno))) {
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb,
"control page inconsistent\n");
return -EIO;
}
return (rc);
}
}
/* allocate the blocks.
*/
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb,
"unable to allocate blocks\n");
rc = -EIO;
}
return (rc);
}
/* no space in the allocation group. release the buffer and
* return -ENOSPC.
*/
release_metapage(mp);
return -ENOSPC;
}
/*
* NAME: dbAllocAny()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks anywhere in the file system.
*
* dbAllocAny() attempts to find the sufficient free space by
* searching down the dmap control pages, starting with the
* highest level (i.e. L0, L1, L2) control page. if free space
* large enough to satisfy the desired free space is found, the
* desired free space is allocated.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
{
int rc;
s64 blkno = 0;
/* starting with the top level dmap control page, search
* down the dmap control levels for sufficient free space.
* if free space is found, dbFindCtl() returns the starting
* block number of the dmap that contains or starts off the
* range of free space.
*/
if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
return (rc);
/* allocate the blocks.
*/
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");
return -EIO;
}
return (rc);
}
/*
* NAME: dbDiscardAG()
*
* FUNCTION: attempt to discard (TRIM) all free blocks of specific AG
*
* algorithm:
* 1) allocate blocks, as large as possible and save them
* while holding IWRITE_LOCK on ipbmap
* 2) trim all these saved block/length values
* 3) mark the blocks free again
*
* benefit:
* - we work on only one ag at a time, minimizing how long we
* need to hold the ipbmap lock
* - reading / writing the fs is possible most of the time, even
* while trimming
*
* downside:
* - we write twice to the dmapctl and dmap pages
* - but this seems the best way; better ideas?
* /TR 2012
*
* PARAMETERS:
* ip - pointer to in-core inode
* agno - ag to trim
* minlen - minimum value of contiguous blocks
*
* RETURN VALUES:
* s64 - actual number of blocks trimmed
*/
s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
s64 nblocks, blkno;
u64 trimmed = 0;
int rc, l2nb;
struct super_block *sb = ipbmap->i_sb;
struct range2trim {
u64 blkno;
u64 nblocks;
} *totrim, *tt;
/* max blkno / nblocks pairs to trim */
int count = 0, range_cnt;
u64 max_ranges;
/* prevent others from writing new stuff here, while trimming */
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
nblocks = bmp->db_agfree[agno];
max_ranges = nblocks;
do_div(max_ranges, minlen);
range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
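/* for example (illustrative values): with 1000 free blocks in the
* ag and minlen = 64, max_ranges = 1000 / 64 = 15 and
* range_cnt = 16: room for 15 ranges plus the terminating zero
* entry, capped at 32K entries.
*/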
totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
if (totrim == NULL) {
jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
IWRITE_UNLOCK(ipbmap);
return 0;
}
tt = totrim;
while (nblocks >= minlen) {
l2nb = BLKSTOL2(nblocks);
/* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
if (rc == 0) {
tt->blkno = blkno;
tt->nblocks = nblocks;
tt++; count++;
/* the whole ag is free, trim now */
if (bmp->db_agfree[agno] == 0)
break;
/* give a hint for the next while */
nblocks = bmp->db_agfree[agno];
continue;
} else if (rc == -ENOSPC) {
/* search for next smaller log2 block */
l2nb = BLKSTOL2(nblocks) - 1;
nblocks = 1LL << l2nb;
} else {
/* Trim any already allocated blocks */
jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
break;
}
/* check, if our trim array is full */
if (unlikely(count >= range_cnt - 1))
break;
}
IWRITE_UNLOCK(ipbmap);
tt->nblocks = 0; /* mark the current end */
for (tt = totrim; tt->nblocks != 0; tt++) {
/* when mounted with online discard, dbFree() will
* call jfs_issue_discard() itself */
if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
jfs_issue_discard(ip, tt->blkno, tt->nblocks);
dbFree(ip, tt->blkno, tt->nblocks);
trimmed += tt->nblocks;
}
kfree(totrim);
return trimmed;
}
/*
* NAME: dbFindCtl()
*
* FUNCTION: starting at a specified dmap control page level and block
* number, search down the dmap control levels for a range of
* contiguous free blocks large enough to satisfy an allocation
* request for the specified number of free blocks.
*
* if sufficient contiguous free blocks are found, this routine
* returns the starting block number within a dmap page that
* contains or starts a range of contiguous free blocks that
* is sufficient in size.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* l2nb - log2 number of contiguous free blocks desired.
* level - starting dmap control page level.
* *blkno - on entry, starting block number for conducting the search.
* on successful return, the first block within a dmap page
* that contains or starts a range of contiguous free blocks.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
int rc, leafidx, lev;
s64 b, lblkno;
struct dmapctl *dcp;
int budmin;
struct metapage *mp;
/* starting at the specified dmap control page level and block
* number, search down the dmap control levels for the starting
* block number of a dmap page that contains or starts off
* sufficient free blocks.
*/
for (lev = level, b = *blkno; lev >= 0; lev--) {
/* get the buffer of the dmap control page for the block
* number and level (i.e. L0, L1, L2).
*/
lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"Corrupt dmapctl page\n");
release_metapage(mp);
return -EIO;
}
/* search the tree within the dmap control page for
* sufficient free space. if sufficient free space is found,
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
/* release the buffer.
*/
release_metapage(mp);
/* space found ?
*/
if (rc) {
if (lev != level) {
jfs_error(bmp->db_ipbmap->i_sb,
"dmap inconsistent\n");
return -EIO;
}
return -ENOSPC;
}
/* adjust the block number to reflect the location within
* the dmap control page (i.e. the leaf) at which free
* space was found.
*/
b += (((s64) leafidx) << budmin);
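/* e.g. at the L0 level budmin is L2BPERDMAP (13, assuming the
* standard JFS constants), so leaf i advances b by i * 8192
* blocks, one dmap per leaf.
*/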
/* we stop the search at this dmap control page level if
* the number of blocks required is greater than or equal
* to the maximum number of blocks described at the next
* (lower) level.
*/
if (l2nb >= budmin)
break;
}
*blkno = b;
return (0);
}
/*
* NAME: dbAllocCtl()
*
* FUNCTION: attempt to allocate a specified number of contiguous
* blocks starting within a specific dmap.
*
* this routine is called by higher level routines that search
* the dmap control pages above the actual dmaps for contiguous
* free space. the result of successful searches by these
* routines are the starting block numbers within dmaps, with
* the dmaps themselves containing the desired contiguous free
* space or starting a contiguous free space of desired size
* that is made up of the blocks of one or more dmaps. these
* calls should not fail due to insufficient resources.
*
* this routine is called in some cases where it is not known
* whether it will fail due to insufficient resources. more
* specifically, this occurs when allocating from an allocation
* group whose size is equal to the number of blocks per dmap.
* in this case, the dmap control pages are not examined prior
* to calling this routine (to save pathlength) and the call
* might fail.
*
* for a request size that fits within a dmap, this routine relies
* upon the dmap's dmtree to find the requested contiguous free
* space. for request sizes that are larger than a dmap, the
* requested free space will start at the first block of the
* first dmap (i.e. blkno).
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks to allocate.
* l2nb - log2 number of contiguous free blocks to allocate.
* blkno - starting block number of the dmap to start the allocation
* from.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
int rc, nb;
s64 b, lblkno, n;
struct metapage *mp;
struct dmap *dp;
/* check if the allocation request is confined to a single dmap.
*/
if (l2nb <= L2BPERDMAP) {
/* get the buffer for the dmap.
*/
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dp = (struct dmap *) mp->data;
/* try to allocate the blocks.
*/
rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
return (rc);
}
/* allocation request involving multiple dmaps. it must start on
* a dmap boundary.
*/
assert((blkno & (BPERDMAP - 1)) == 0);
/* allocate the blocks dmap by dmap.
*/
for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
/* get the buffer for the dmap.
*/
lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
rc = -EIO;
goto backout;
}
dp = (struct dmap *) mp->data;
/* the dmap better be all free.
*/
if (dp->tree.stree[ROOT] != L2BPERDMAP) {
release_metapage(mp);
jfs_error(bmp->db_ipbmap->i_sb,
"the dmap is not all free\n");
rc = -EIO;
goto backout;
}
/* determine how many blocks to allocate from this dmap.
*/
nb = min_t(s64, n, BPERDMAP);
/* allocate the blocks from the dmap.
*/
if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
release_metapage(mp);
goto backout;
}
/* write the buffer.
*/
write_metapage(mp);
}
/* set the results (starting block number) and return.
*/
*results = blkno;
return (0);
/* something failed in handling an allocation request involving
* multiple dmaps. we'll try to clean up by backing out any
* allocation that has already happened for this request. if
* we fail in backing out the allocation, we'll mark the file
* system to indicate that blocks have been leaked.
*/
backout:
/* try to backout the allocations dmap by dmap.
*/
for (n = nblocks - n, b = blkno; n > 0;
n -= BPERDMAP, b += BPERDMAP) {
/* get the buffer for this dmap.
*/
lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
/* could not back out. mark the file system
* to indicate that we have leaked blocks.
*/
jfs_error(bmp->db_ipbmap->i_sb,
"I/O Error: Block Leakage\n");
continue;
}
dp = (struct dmap *) mp->data;
/* free the blocks in this dmap.
*/
if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
/* could not back out. mark the file system
* to indicate that we have leaked blocks.
*/
release_metapage(mp);
jfs_error(bmp->db_ipbmap->i_sb, "Block Leakage\n");
continue;
}
/* write the buffer.
*/
write_metapage(mp);
}
return (rc);
}
/*
* NAME: dbAllocDmapLev()
*
* FUNCTION: attempt to allocate a specified number of contiguous blocks
* from a specified dmap.
*
* this routine checks if the contiguous blocks are available.
* if so, nblocks blocks are allocated; otherwise, -ENOSPC is
* returned.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to attempt to allocate blocks from.
* nblocks - actual number of contiguous blocks desired.
* l2nb - log2 number of contiguous blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
* IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
*/
static int
dbAllocDmapLev(struct bmap * bmp,
struct dmap * dp, int nblocks, int l2nb, s64 * results)
{
s64 blkno;
int leafidx, rc;
/* can't be more than a dmaps worth of blocks */
assert(l2nb <= L2BPERDMAP);
/* search the tree within the dmap page for sufficient
* free space. if sufficient free space is found, dbFindLeaf()
* returns the index of the leaf at which free space was found.
*/
if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
return -ENOSPC;
if (leafidx < 0)
return -EIO;
/* determine the block number within the file system corresponding
* to the leaf at which free space was found.
*/
blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
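/* each leaf corresponds to one 32-bit wmap word (L2DBWORD = 5), so
* leaf i starts at block dp->start + 32 * i; e.g. leafidx = 3
* yields dp->start + 96 (illustrative arithmetic).
*/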
/* if not all bits of the dmap word are free, get the starting
* bit number within the dmap word of the required string of free
* bits and adjust the block number with this value.
*/
if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
/* allocate the blocks */
if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
*results = blkno;
return (rc);
}
/*
* NAME: dbAllocDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the allocation
* of a specified block range within a dmap.
*
* this routine allocates the specified blocks from the dmap
* through a call to dbAllocBits(). if the allocation of the
* block range causes the maximum string of free blocks within
* the dmap to change (i.e. the value of the root of the dmap's
* dmtree), this routine will cause this change to be reflected
* up through the appropriate levels of the dmap control pages
* by a call to dbAdjCtl() for the L0 dmap control page that
* covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate the block range from.
* blkno - starting block number of the block to be allocated.
* nblocks - number of blocks to be allocated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = dp->tree.stree[ROOT];
/* allocate the specified (blocks) bits */
dbAllocBits(bmp, dp, blkno, nblocks);
/* if the root has not changed, done. */
if (dp->tree.stree[ROOT] == oldroot)
return (0);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* backout the bit allocation (thus making everything consistent).
*/
if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
dbFreeBits(bmp, dp, blkno, nblocks);
return (rc);
}
/*
* NAME: dbFreeDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the deallocation
* of a specified block range within a dmap.
*
* this routine frees the specified blocks from the dmap through
* a call to dbFreeBits(). if the deallocation of the block range
* causes the maximum string of free blocks within the dmap to
* change (i.e. the value of the root of the dmap's dmtree), this
* routine will cause this change to be reflected up through the
* appropriate levels of the dmap control pages by a call to
* dbAdjCtl() for the L0 dmap control page that covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free the block range from.
* blkno - starting block number of the block to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc = 0, word;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = dp->tree.stree[ROOT];
/* free the specified (blocks) bits */
rc = dbFreeBits(bmp, dp, blkno, nblocks);
/* if error or the root has not changed, done. */
if (rc || (dp->tree.stree[ROOT] == oldroot))
return (rc);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* backout the deallocation.
*/
if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
/* as part of backing out the deallocation, we will have
* to back split the dmap tree if the deallocation caused
* the freed blocks to become part of a larger binary buddy
* system.
*/
if (dp->tree.stree[word] == NOFREE)
dbBackSplit((dmtree_t *) & dp->tree, word);
dbAllocBits(bmp, dp, blkno, nblocks);
}
return (rc);
}
/*
* NAME: dbAllocBits()
*
* FUNCTION: allocate a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
* updates the bits of the working map and causes the adjustment
* of the binary buddy system described by the dmap's dmtree
* leaves to reflect the bits allocated. it also causes the
* dmap's dmtree, as a whole, to reflect the allocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate bits from.
* blkno - starting block number of the bits to be allocated.
* nblocks - number of bits to be allocated.
*
* RETURN VALUES: none
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
int size;
s8 *leaf;
/* pick up a pointer to the leaves of the dmap tree */
leaf = dp->tree.stree + LEAFIND;
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap */
assert(dbitno + nblocks <= BPERDMAP);
/* allocate the bits of the dmap's words corresponding to the block
* range. not all bits of the first and last words may be contained
* within the block range. if this is the case, we'll work against
* those words (i.e. partial first and/or last) on an individual basis
* (a single pass), allocating the bits of interest by hand and
* updating the leaf corresponding to the dmap word. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the bits of all fully contained
* dmap words will be marked as allocated in a single shot and the leaves
* will be updated. a single leaf may describe the free space of
* multiple dmap words, so we may update only a subset of the actual
* leaves corresponding to the dmap words of the block range.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be allocated.
*/
if (nb < DBWORD) {
/* allocate (set to 1) the appropriate bits within
* this dmap word.
*/
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
/* update the leaf for this dmap word. in addition
* to setting the leaf value to the binary buddy max
* of the updated dmap word, dbSplit() will split
* the binary system of the leaves if need be.
*/
dbSplit(tp, word, BUDMIN,
dbMaxBud((u8 *) & dp->wmap[word]));
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and allocate (set to 1) the bits of these
* words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], (int) ONES, nwords * 4);
/* determine how many bits.
*/
nb = nwords << L2DBWORD;
/* now update the appropriate leaves to reflect
* the allocated words.
*/
for (; nwords > 0; nwords -= nw) {
if (leaf[word] < BUDMIN) {
jfs_error(bmp->db_ipbmap->i_sb,
"leaf page corrupt\n");
break;
}
/* determine what the leaf value should be
* updated to as the minimum of the l2 number
* of bits being allocated and the l2 number
* of bits currently described by this leaf.
*/
size = min_t(int, leaf[word],
NLSTOL2BSZ(nwords));
/* update the leaf to reflect the allocation.
* in addition to setting the leaf value to
* NOFREE, dbSplit() will split the binary
* system of the leaves to reflect the current
* allocation (size).
*/
dbSplit(tp, word, size, NOFREE);
/* get the number of dmap words handled */
nw = BUDSIZE(size, BUDMIN);
word += nw;
}
}
}
/* update the free count for this dmap */
le32_add_cpu(&dp->nfree, -nblocks);
BMAP_LOCK(bmp);
/* update the maximum allocation group number if this
* allocation group is the new max.
*/
agno = blkno >> bmp->db_agl2size;
if (agno > bmp->db_maxag)
bmp->db_maxag = agno;
/* update the free count for the allocation group and map */
bmp->db_agfree[agno] -= nblocks;
bmp->db_nfree -= nblocks;
BMAP_UNLOCK(bmp);
}
/*
* NAME: dbFreeBits()
*
* FUNCTION: free a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
* updates the bits of the working map and causes the adjustment
* of the binary buddy system described by the dmap's dmtree
* leaves to reflect the bits freed. it also causes the dmap's
* dmtree, as a whole, to reflect the deallocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free bits from.
* blkno - starting block number of the bits to be freed.
* nblocks - number of bits to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
int rc = 0;
int size;
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap.
*/
assert(dbitno + nblocks <= BPERDMAP);
/* free the bits of the dmaps words corresponding to the block range.
* not all bits of the first and last words may be contained within
* the block range. if this is the case, we'll work against those
* words (i.e. partial first and/or last) on an individual basis
* (a single pass), freeing the bits of interest by hand and updating
* the leaf corresponding to the dmap word. a single pass will be used
* for all dmap words fully contained within the specified range.
* within this pass, the bits of all fully contained dmap words will
* be marked as free in a single shot and the leaves will be updated. a
* single leaf may describe the free space of multiple dmap words,
* so we may update only a subset of the actual leaves corresponding
* to the dmap words of the block range.
*
* dbJoin() is used to update leaf values and will join the binary
* buddy system of the leaves if the new leaf values indicate this
* should be done.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be freed.
*/
if (nb < DBWORD) {
/* free (zero) the appropriate bits within this
* dmap word.
*/
dp->wmap[word] &=
cpu_to_le32(~(ONES << (DBWORD - nb)
>> wbitno));
/* update the leaf for this dmap word.
*/
rc = dbJoin(tp, word,
dbMaxBud((u8 *) & dp->wmap[word]));
if (rc)
return rc;
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and free (zero) the bits of these words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], 0, nwords * 4);
/* determine how many bits.
*/
nb = nwords << L2DBWORD;
/* now update the appropriate leaves to reflect
* the freed words.
*/
for (; nwords > 0; nwords -= nw) {
/* determine what the leaf value should be
* updated to as the minimum of the l2 number
* of bits being freed and the l2 (max) number
* of bits that can be described by this leaf.
*/
size =
min(LITOL2BSZ
(word, L2LPERDMAP, BUDMIN),
NLSTOL2BSZ(nwords));
/* update the leaf.
*/
rc = dbJoin(tp, word, size);
if (rc)
return rc;
/* get the number of dmap words handled.
*/
nw = BUDSIZE(size, BUDMIN);
word += nw;
}
}
}
/* update the free count for this dmap.
*/
le32_add_cpu(&dp->nfree, nblocks);
BMAP_LOCK(bmp);
/* update the free count for the allocation group and
* map.
*/
agno = blkno >> bmp->db_agl2size;
bmp->db_nfree += nblocks;
bmp->db_agfree[agno] += nblocks;
/* check if this allocation group is not completely free and
* if it is currently the maximum (rightmost) allocation group.
* if so, establish the new maximum allocation group number by
* searching left for the first allocation group with allocation.
*/
if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
(agno == bmp->db_numag - 1 &&
bmp->db_agfree[agno] == (bmp->db_mapsize & (BPERDMAP - 1)))) {
while (bmp->db_maxag > 0) {
bmp->db_maxag -= 1;
if (bmp->db_agfree[bmp->db_maxag] !=
bmp->db_agsize)
break;
}
/* re-establish the allocation group preference if the
* current preference is right of the maximum allocation
* group.
*/
if (bmp->db_agpref > bmp->db_maxag)
bmp->db_agpref = bmp->db_maxag;
}
BMAP_UNLOCK(bmp);
return 0;
}
/*
* NAME: dbAdjCtl()
*
* FUNCTION: adjust a dmap control page at a specified level to reflect
* the change in a lower level dmap or dmap control page's
* maximum string of free blocks (i.e. a change in the root
* of the lower level object's dmtree) due to the allocation
* or deallocation of a range of blocks with a single dmap.
*
* on entry, this routine is provided with the new value of
* the lower level dmap or dmap control page root and the
* starting block number of the block range whose allocation
* or deallocation resulted in the root change. this range
* is represented by a single leaf of the current dmapctl
* and the leaf will be updated with this value, possibly
* causing a binary buddy system within the leaves to be
* split or joined. the update may also cause the dmapctl's
* dmtree to be updated.
*
* if the adjustment of the dmap control page, itself, causes its
* root to change, this change will be bubbled up to the next dmap
* control level by a recursive call to this routine, specifying
* the new root value and the next dmap control page level to
* be adjusted.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* blkno - the first block of a block range within a dmap. it is
* the allocation or deallocation of this block range that
* requires the dmap control page to be adjusted.
* newval - the new value of the lower level dmap or dmap control
* page root.
* alloc - 'true' if adjustment is due to an allocation.
* level - current level of dmap control page (i.e. L0, L1, L2) to
* be adjusted.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
{
struct metapage *mp;
s8 oldroot;
int oldval;
s64 lblkno;
struct dmapctl *dcp;
int rc, leafno, ti;
/* get the buffer for the dmap control page for the specified
* block number and control page level.
*/
lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
release_metapage(mp);
return -EIO;
}
/* determine the leaf number corresponding to the block and
* the index within the dmap control tree.
*/
leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
ti = leafno + le32_to_cpu(dcp->leafidx);
/* save the current leaf value and the current root level (i.e.
* maximum l2 free string described by this dmapctl).
*/
oldval = dcp->stree[ti];
oldroot = dcp->stree[ROOT];
/* check if this is a control page update for an allocation.
* if so, update the leaf to reflect the new leaf value using
* dbSplit(); otherwise (deallocation), use dbJoin() to update
* the leaf with the new value. in addition to updating the
* leaf, dbSplit() will also split the binary buddy system of
* the leaves, if required, and bubble new values within the
* dmapctl tree, if required. similarly, dbJoin() will join
* the binary buddy system of leaves and bubble new values up
* the dmapctl tree as required by the new leaf value.
*/
if (alloc) {
/* check if we are in the middle of a binary buddy
* system. this happens when we are performing the
* first allocation out of an allocation group that
* is part (not the first part) of a larger binary
* buddy system. if we are in the middle, back split
* the system prior to calling dbSplit() which assumes
* that it is at the front of a binary buddy system.
*/
if (oldval == NOFREE) {
rc = dbBackSplit((dmtree_t *) dcp, leafno);
if (rc) {
release_metapage(mp);
return rc;
}
oldval = dcp->stree[ti];
}
dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
} else {
rc = dbJoin((dmtree_t *) dcp, leafno, newval);
if (rc) {
release_metapage(mp);
return rc;
}
}
/* check if the root of the current dmap control page changed due
* to the update and if the current dmap control page is not at
* the current top level (i.e. L0, L1, L2) of the map. if so (i.e.
* root changed and this is not the top level), call this routine
* again (recursion) for the next higher level of the mapping to
* reflect the change in root for the current dmap control page.
*/
if (dcp->stree[ROOT] != oldroot) {
/* are we below the top level of the map. if so,
* bubble the root up to the next higher level.
*/
if (level < bmp->db_maxlevel) {
/* bubble up the new root of this dmap control page to
* the next level.
*/
if ((rc =
dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
level + 1))) {
/* something went wrong in bubbling up the new
* root value, so backout the changes to the
* current dmap control page.
*/
if (alloc) {
dbJoin((dmtree_t *) dcp, leafno,
oldval);
} else {
/* the dbJoin() above might have
* caused a larger binary buddy system
* to form and we may now be in the
* middle of it. if this is the case,
* back split the buddies.
*/
if (dcp->stree[ti] == NOFREE)
dbBackSplit((dmtree_t *)
dcp, leafno);
dbSplit((dmtree_t *) dcp, leafno,
dcp->budmin, oldval);
}
/* release the buffer and return the error.
*/
release_metapage(mp);
return (rc);
}
} else {
/* we're at the top level of the map. update
* the bmap control page to reflect the size
* of the maximum free buddy system.
*/
assert(level == bmp->db_maxlevel);
if (bmp->db_maxfreebud != oldroot) {
jfs_error(bmp->db_ipbmap->i_sb,
"the maximum free buddy is not the old root\n");
}
bmp->db_maxfreebud = dcp->stree[ROOT];
}
}
/* write the buffer.
*/
write_metapage(mp);
return (0);
}
/*
* NAME: dbSplit()
*
* FUNCTION: update the leaf of a dmtree with a new value, splitting
* the leaf from the binary buddy system of the dmtree's
* leaves, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* splitsz - the size the binary buddy system starting at the leaf
* must be split to, specified as the log2 number of blocks.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
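/* a worked example (illustrative, assuming dmt_budmin = BUDMIN = 5):
* splitting a leaf of value 7 (a 128-block buddy) down to
* splitsz = 5 first gives the buddy at leafno ^ 2 the value 6, then
* the buddy at leafno ^ 1 the value 5, and finally sets leafno
* itself to newval.
*/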
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
{
int budsz;
int cursz;
s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* check if the leaf needs to be split.
*/
if (leaf[leafno] > tp->dmt_budmin) {
/* the split occurs by cutting the buddy system in half
* at the specified leaf until we reach the specified
* size. pick up the starting split size (current size
* - 1 in l2) and the corresponding buddy size.
*/
cursz = leaf[leafno] - 1;
budsz = BUDSIZE(cursz, tp->dmt_budmin);
/* split until we reach the specified size.
*/
while (cursz >= splitsz) {
/* update the buddy's leaf with its new value.
*/
dbAdjTree(tp, leafno ^ budsz, cursz);
/* on to the next size and buddy.
*/
cursz -= 1;
budsz >>= 1;
}
}
/* adjust the dmap tree to reflect the specified leaf's new
* value.
*/
dbAdjTree(tp, leafno, newval);
}
/*
* NAME: dbBackSplit()
*
* FUNCTION: back split the binary buddy system of dmtree leaves
* that hold a specified leaf until the specified leaf
* starts its own binary buddy system.
*
* the allocators typically perform allocations at the start
* of binary buddy systems and dbSplit() is used to accomplish
* any required splits. in some cases, however, allocation
* may occur in the middle of a binary system and requires a
* back split, with the split proceeding out from the middle of
* the system (less efficient) rather than the start of the
* system (more efficient). the cases in which a back split
* is required are rare and are limited to the first allocation
* within an allocation group which is a part (not first part)
* of a larger binary buddy system and a few exception cases
* in which a previous join operation must be backed out.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbBackSplit(dmtree_t * tp, int leafno)
{
int budsz, bud, w, bsz, size;
int cursz;
s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* leaf should be part (not first part) of a binary
* buddy system.
*/
assert(leaf[leafno] == NOFREE);
/* the back split is accomplished by iteratively finding the leaf
* that starts the buddy system that contains the specified leaf and
* splitting that system in two. this iteration continues until
* the specified leaf becomes the start of a buddy system.
*
* determine maximum possible l2 size for the specified leaf.
*/
size =
LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
tp->dmt_budmin);
/* determine the number of leaves covered by this size. this
* is the buddy size that we will start with as we search for
* the buddy system that contains the specified leaf.
*/
budsz = BUDSIZE(size, tp->dmt_budmin);
/* back split.
*/
while (leaf[leafno] == NOFREE) {
/* find the leftmost buddy leaf.
*/
for (w = leafno, bsz = budsz;; bsz <<= 1,
w = (w < bud) ? w : bud) {
if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
jfs_err("JFS: block map error in dbBackSplit");
return -EIO;
}
/* determine the buddy.
*/
bud = w ^ bsz;
/* check if this buddy is the start of the system.
*/
if (leaf[bud] != NOFREE) {
/* split the leaf at the start of the
* system in two.
*/
cursz = leaf[bud] - 1;
dbSplit(tp, bud, cursz, cursz);
break;
}
}
}
if (leaf[leafno] != size) {
jfs_err("JFS: wrong leaf value in dbBackSplit");
return -EIO;
}
return 0;
}
/*
* NAME: dbJoin()
*
* FUNCTION: update the leaf of a dmtree with a new value, joining
* the leaf with other leaves of the dmtree into a multi-leaf
* binary buddy system, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
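/* a worked example (illustrative, assuming dmt_budmin = 5): joining
* leafno = 4 with newval = 5 when leaf[5] == 5 marks leaf 5 NOFREE
* and continues with newval = 6 and budsz = 2 against leaf 6; the
* chain stops at the first buddy whose value differs from newval.
*/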
static int dbJoin(dmtree_t * tp, int leafno, int newval)
{
int budsz, buddy;
s8 *leaf;
/* can the new leaf value require a join with other leaves ?
*/
if (newval >= tp->dmt_budmin) {
/* pickup a pointer to the leaves of the tree.
*/
leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* try to join the specified leaf into a large binary
* buddy system. the join proceeds by attempting to join
* the specified leafno with its buddy (leaf) at new value.
* if the join occurs, we attempt to join the left leaf
* of the joined buddies with its buddy at new value + 1.
* we continue to join until we find a buddy that cannot be
* joined (does not have a value equal to the size of the
* last join) or until all leaves have been joined into a
* single system.
*
* get the buddy size (number of words covered) of
* the new value.
*/
budsz = BUDSIZE(newval, tp->dmt_budmin);
/* try to join.
*/
while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
/* get the buddy leaf.
*/
buddy = leafno ^ budsz;
/* if the leaf's new value is greater than its
* buddy's value, we join no more.
*/
if (newval > leaf[buddy])
break;
/* It shouldn't be less */
if (newval < leaf[buddy])
return -EIO;
/* check which (leafno or buddy) is the left buddy.
* the left buddy gets to claim the blocks resulting
* from the join while the right gets to claim none.
* the left buddy is also eligible to participate in
* a join at the next higher level while the right
* is not.
*
*/
if (leafno < buddy) {
/* leafno is the left buddy.
*/
dbAdjTree(tp, buddy, NOFREE);
} else {
/* buddy is the left buddy and becomes
* leafno.
*/
dbAdjTree(tp, leafno, NOFREE);
leafno = buddy;
}
/* on to try the next join.
*/
newval += 1;
budsz <<= 1;
}
}
/* update the leaf value.
*/
dbAdjTree(tp, leafno, newval);
return 0;
}
/*
* NAME: dbAdjTree()
*
* FUNCTION: update a leaf of a dmtree with a new value, adjusting
* the dmtree, as required, to reflect the new leaf value.
* the combination of any buddies must already be done before
* this is called.
*
* PARAMETERS:
* tp - pointer to the tree to be adjusted.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*/
static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
{
int lp, pp, k;
int max;
/* pick up the index of the leaf for this leafno.
*/
lp = leafno + le32_to_cpu(tp->dmt_leafidx);
/* is the current value the same as the old value ? if so,
* there is nothing to do.
*/
if (tp->dmt_stree[lp] == newval)
return;
/* set the new value.
*/
tp->dmt_stree[lp] = newval;
/* bubble the new value up the tree as required.
*/
for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
/* get the index of the first leaf of the 4 leaf
* group containing the specified leaf (leafno).
*/
lp = ((lp - 1) & ~0x03) + 1;
/* get the index of the parent of this 4 leaf group.
*/
pp = (lp - 1) >> 2;
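/* index arithmetic example (not from the original comments): for
* lp = 7, the 4-leaf group starts at ((7 - 1) & ~3) + 1 = 5 and
* its parent is (5 - 1) >> 2 = 1.
*/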
/* determine the maximum of the 4 leaves.
*/
max = TREEMAX(&tp->dmt_stree[lp]);
/* if the maximum of the 4 is the same as the
* parent's value, we're done.
*/
if (tp->dmt_stree[pp] == max)
break;
/* parent gets new value.
*/
tp->dmt_stree[pp] = max;
/* parent becomes leaf for next go-round.
*/
lp = pp;
}
}
/*
* NAME: dbFindLeaf()
*
* FUNCTION: search a dmtree_t for sufficient free blocks, returning
* the index of a leaf describing the free blocks if
* sufficient free blocks are found.
*
* the search starts at the top of the dmtree_t tree and
* proceeds down the tree to the leftmost leaf with sufficient
* free space.
*
* PARAMETERS:
* tp - pointer to the tree to be searched.
* l2nb - log2 number of free blocks to search for.
* leafidx - return pointer to be set to the index of the leaf
* describing at least l2nb free blocks if sufficient
* free blocks are found.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient free blocks.
*/
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
{
int ti, n = 0, k, x = 0;
/* first check the root of the tree to see if there is
* sufficient free space.
*/
if (l2nb > tp->dmt_stree[ROOT])
return -ENOSPC;
/* sufficient free space available. now search down the tree
* starting at the next level for the leftmost leaf that
* describes sufficient free space.
*/
for (k = le32_to_cpu(tp->dmt_height), ti = 1;
k > 0; k--, ti = ((ti + n) << 2) + 1) {
/* search the four nodes at this level, starting from
* the left.
*/
for (x = ti, n = 0; n < 4; n++) {
/* sufficient free space found. move to the next
* level (or quit if this is the last level).
*/
if (l2nb <= tp->dmt_stree[x + n])
break;
}
/* better have found something since the higher
* levels of the tree said it was here.
*/
assert(n < 4);
}
/* set the return to the leftmost leaf describing sufficient
* free space.
*/
*leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
return (0);
}
/*
* NAME: dbFindBits()
*
* FUNCTION: find a specified number of binary buddy free bits within a
* dmap bitmap word value.
*
* this routine searches the bitmap value for (1 << l2nb) free
* bits at (1 << l2nb) alignments within the value.
*
* PARAMETERS:
* word - dmap bitmap word value.
* l2nb - number of free bits specified as a log2 number.
*
* RETURN VALUES:
* starting bit number of free bits.
*/
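/* a worked example (illustrative): for l2nb = 2 (nb = 4) and an
* allocation word of 0xf0ffffff, the complement is 0x0f000000 and
* the first 4-bit-aligned run of free bits starts at bitno = 4,
* which is returned.
*/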
static int dbFindBits(u32 word, int l2nb)
{
int bitno, nb;
u32 mask;
/* get the number of bits.
*/
nb = 1 << l2nb;
assert(nb <= DBWORD);
/* complement the word so we can use a mask (i.e. 0s represent
* free bits) and compute the mask.
*/
word = ~word;
mask = ONES << (DBWORD - nb);
/* scan the word for nb free bits at nb alignments.
*/
for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
if ((mask & word) == mask)
break;
}
ASSERT(bitno < 32);
/* return the bit number.
*/
return (bitno);
}
/*
* NAME: dbMaxBud(u8 *cp)
*
* FUNCTION: determine the largest binary buddy string of free
* bits within 32-bits of the map.
*
* PARAMETERS:
* cp - pointer to the 32-bit value.
*
* RETURN VALUES:
* largest binary buddy of free bits within a dmap word.
*/
static int dbMaxBud(u8 * cp)
{
signed char tmp1, tmp2;
/* check if the wmap word is all free. if so, the
* free buddy size is BUDMIN.
*/
if (*((uint *) cp) == 0)
return (BUDMIN);
/* check if the wmap word is half free. if so, the
* free buddy size is BUDMIN-1.
*/
if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
return (BUDMIN - 1);
/* not all free or half free. determine the free buddy
* size through table lookup using quarters of the wmap word.
*/
tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
return (max(tmp1, tmp2));
}
/*
* NAME: cnttz(uint word)
*
* FUNCTION: determine the number of trailing zeros within a 32-bit
* value.
*
* PARAMETERS:
* word - 32-bit value to be examined.
*
* RETURN VALUES:
* count of trailing zeros
*/
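/* e.g. cnttz(0x28) == 3, since 0x28 is binary 101000 (an
* illustrative note; a zero word makes the loop return 32).
*/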
static int cnttz(u32 word)
{
int n;
for (n = 0; n < 32; n++, word >>= 1) {
if (word & 0x01)
break;
}
return (n);
}
/*
* NAME: cntlz(u32 value)
*
* FUNCTION: determine the number of leading zeros within a 32-bit
* value.
*
* PARAMETERS:
* value - 32-bit value to be examined.
*
* RETURN VALUES:
* count of leading zeros
*/
static int cntlz(u32 value)
{
int n;
for (n = 0; n < 32; n++, value <<= 1) {
if (value & HIGHORDER)
break;
}
return (n);
}
/*
* NAME: blkstol2(s64 nb)
*
* FUNCTION: convert a block count to its log2 value. if the block
* count is not an l2 multiple, it is rounded up to the next
* larger l2 multiple.
*
* PARAMETERS:
* nb - number of blocks
*
* RETURN VALUES:
* log2 number of blocks
*/
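/* e.g. blkstol2(6) == 3: the leading bit of 6 gives an l2 value of
* 2, and the remaining low bits force a round up to the next power
* of two, 8 (an illustrative trace, not from the original comments).
*/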
static int blkstol2(s64 nb)
{
int l2nb;
s64 mask; /* meant to be signed */
mask = (s64) 1 << (64 - 1);
/* count the leading bits.
*/
for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
/* leading bit found.
*/
if (nb & mask) {
/* determine the l2 value.
*/
l2nb = (64 - 1) - l2nb;
/* check if we need to round up.
*/
if (~mask & nb)
l2nb++;
return (l2nb);
}
}
assert(0);
return 0; /* fix compiler warning */
}
/*
* NAME: dbAllocBottomUp()
*
* FUNCTION: alloc the specified block range from the working block
* allocation map.
*
* the blocks will be alloc from the working map one dmap
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be allocated.
* nblocks - number of blocks to be allocated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
{
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* block to be allocated better be within the mapsize. */
ASSERT(nblocks <= bmp->db_mapsize - blkno);
/*
* allocate the blocks a dmap at a time.
*/
mp = NULL;
for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
/* release previous dmap if any */
if (mp) {
write_metapage(mp);
}
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be allocated from
* this dmap.
*/
nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
/* allocate the blocks. */
if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (rc);
}
}
/* write the last buffer. */
write_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (0);
}
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int rc;
int dbitno, word, rembits, nb, nwords, wbitno, agno;
s8 oldroot;
struct dmaptree *tp = (struct dmaptree *) & dp->tree;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = tp->stree[ROOT];
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap */
assert(dbitno + nblocks <= BPERDMAP);
/* allocate the bits of the dmap's words corresponding to the block
* range. not all bits of the first and last words may be contained
* within the block range. if this is the case, we'll work against
* those words (i.e. partial first and/or last) on an individual basis
* (a single pass), allocating the bits of interest by hand and
* updating the leaf corresponding to the dmap word. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the bits of all fully contained
* dmap words will be marked as allocated in a single shot and the leaves
* will be updated. a single leaf may describe the free space of
* multiple dmap words, so we may update only a subset of the actual
* leaves corresponding to the dmap words of the block range.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be allocated.
*/
if (nb < DBWORD) {
/* allocate (set to 1) the appropriate bits within
* this dmap word.
*/
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
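/* Example of the mask arithmetic above (illustrative): with
* DBWORD = 32, nb = 4 and wbitno = 8, ONES << 28 keeps the top
* four bits and the shift right by 8 places them over dmap bits
* 8..11 of the word (bit 0 being the most-significant bit).
*/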
word++;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and allocate (set to 1) the bits of these
* words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], (int) ONES, nwords * 4);
/* determine how many bits */
nb = nwords << L2DBWORD;
word += nwords;
}
}
/* update the free count for this dmap */
le32_add_cpu(&dp->nfree, -nblocks);
/* reconstruct summary tree */
dbInitDmapTree(dp);
BMAP_LOCK(bmp);
/* if this allocation group is completely free,
* update the highest active allocation group number
* if this allocation group is the new max.
*/
agno = blkno >> bmp->db_agl2size;
if (agno > bmp->db_maxag)
bmp->db_maxag = agno;
/* update the free count for the allocation group and map */
bmp->db_agfree[agno] -= nblocks;
bmp->db_nfree -= nblocks;
BMAP_UNLOCK(bmp);
/* if the root has not changed, done. */
if (tp->stree[ROOT] == oldroot)
return (0);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* back out the bit allocation (thus making everything consistent).
*/
if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
dbFreeBits(bmp, dp, blkno, nblocks);
return (rc);
}
/*
* NAME: dbExtendFS()
*
* FUNCTION: extend bmap from blkno for nblocks;
* dbExtendFS() updates the bmap so that it is ready for dbAllocBottomUp();
*
* L2
* |
* L1---------------------------------L1
* | |
* L0---------L0---------L0 L0---------L0---------L0
* | | | | | |
* d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
* L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
*
* <---old---><----------------------------extend----------------------->
*/
int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
{
struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
int nbperpage = sbi->nbperpage;
int i, i0 = true, j, j0 = true, k, n;
s64 newsize;
s64 p;
struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
struct dmapctl *l2dcp, *l1dcp, *l0dcp;
struct dmap *dp;
s8 *l0leaf, *l1leaf, *l2leaf;
struct bmap *bmp = sbi->bmap;
int agno, l2agsize, oldl2agsize;
s64 ag_rem;
newsize = blkno + nblocks;
jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
(long long) blkno, (long long) nblocks, (long long) newsize);
/*
* initialize bmap control page.
*
* all the data in bmap control page should exclude
* the mkfs hidden dmap page.
*/
/* update mapsize */
bmp->db_mapsize = newsize;
bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
/* compute new AG size */
l2agsize = dbGetL2AGSize(newsize);
oldl2agsize = bmp->db_agl2size;
bmp->db_agl2size = l2agsize;
bmp->db_agsize = 1 << l2agsize;
/* compute new number of AG */
agno = bmp->db_numag;
bmp->db_numag = newsize >> l2agsize;
bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
/*
* reconfigure db_agfree[]
* from old AG configuration to new AG configuration;
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
* i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
* note: new AG size = old AG size * (2**x).
*/
if (l2agsize == oldl2agsize)
goto extend;
k = 1 << (l2agsize - oldl2agsize);
ag_rem = bmp->db_agfree[0]; /* save agfree[0] */
for (i = 0, n = 0; i < agno; n++) {
bmp->db_agfree[n] = 0; /* init collection point */
/* coalesce contiguous k AGs; */
for (j = 0; j < k && i < agno; j++, i++) {
/* merge AGi to AGn */
bmp->db_agfree[n] += bmp->db_agfree[i];
}
}
bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */
for (; n < MAXAG; n++)
bmp->db_agfree[n] = 0;
/*
* update highest active ag number
*/
bmp->db_maxag = bmp->db_maxag / k;
/*
* extend bmap
*
* update bit maps and corresponding level control pages;
* global control page db_nfree, db_agfree[agno], db_maxfreebud;
*/
extend:
/* get L2 page */
p = BMAPBLKNO + nbperpage; /* L2 page */
l2mp = read_metapage(ipbmap, p, PSIZE, 0);
if (!l2mp) {
jfs_error(ipbmap->i_sb, "L2 page could not be read\n");
return -EIO;
}
l2dcp = (struct dmapctl *) l2mp->data;
/* compute start L1 */
k = blkno >> L2MAXL1SIZE;
l2leaf = l2dcp->stree + CTLLEAFIND + k;
p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */
/*
* extend each L1 in L2
*/
for (; k < LPERCTL; k++, p += nbperpage) {
/* get L1 page */
if (j0) {
/* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
l1mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l1mp == NULL)
goto errout;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
l1leaf = l1dcp->stree + CTLLEAFIND + j;
p = BLKTOL0(blkno, sbi->l2nbperpage);
j0 = false;
} else {
/* assign/init L1 page */
l1mp = get_metapage(ipbmap, p, PSIZE, 0);
if (l1mp == NULL)
goto errout;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = 0;
l1leaf = l1dcp->stree + CTLLEAFIND;
p += nbperpage; /* 1st L0 of L1.k */
}
/*
* extend each L0 in L1
*/
for (; j < LPERCTL; j++) {
/* get L0 page */
if (i0) {
/* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
l0mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l0mp == NULL)
goto errout;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = (blkno & (MAXL0SIZE - 1)) >>
L2BPERDMAP;
l0leaf = l0dcp->stree + CTLLEAFIND + i;
p = BLKTODMAP(blkno,
sbi->l2nbperpage);
i0 = false;
} else {
/* assign/init L0 page */
l0mp = get_metapage(ipbmap, p, PSIZE, 0);
if (l0mp == NULL)
goto errout;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = 0;
l0leaf = l0dcp->stree + CTLLEAFIND;
p += nbperpage; /* 1st dmap of L0.j */
}
/*
* extend each dmap in L0
*/
for (; i < LPERCTL; i++) {
/*
* reconstruct the dmap page, and
* initialize corresponding parent L0 leaf
*/
if ((n = blkno & (BPERDMAP - 1))) {
/* read in dmap page: */
mp = read_metapage(ipbmap, p,
PSIZE, 0);
if (mp == NULL)
goto errout;
n = min(nblocks, (s64)BPERDMAP - n);
} else {
/* assign/init dmap page */
mp = read_metapage(ipbmap, p,
PSIZE, 0);
if (mp == NULL)
goto errout;
n = min_t(s64, nblocks, BPERDMAP);
}
dp = (struct dmap *) mp->data;
*l0leaf = dbInitDmap(dp, blkno, n);
bmp->db_nfree += n;
agno = le64_to_cpu(dp->start) >> l2agsize;
bmp->db_agfree[agno] += n;
write_metapage(mp);
l0leaf++;
p += nbperpage;
blkno += n;
nblocks -= n;
if (nblocks == 0)
break;
} /* for each dmap in a L0 */
/*
* build current L0 page from its leaves, and
* initialize corresponding parent L1 leaf
*/
*l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
write_metapage(l0mp);
l0mp = NULL;
if (nblocks)
l1leaf++; /* continue for next L0 */
else {
/* more than 1 L0 ? */
if (j > 0)
break; /* build L1 page */
else {
/* summarize in global bmap page */
bmp->db_maxfreebud = *l1leaf;
release_metapage(l1mp);
release_metapage(l2mp);
goto finalize;
}
}
} /* for each L0 in a L1 */
/*
* build current L1 page from its leaves, and
* initialize corresponding parent L2 leaf
*/
*l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
write_metapage(l1mp);
l1mp = NULL;
if (nblocks)
l2leaf++; /* continue for next L1 */
else {
/* more than 1 L1 ? */
if (k > 0)
break; /* build L2 page */
else {
/* summarize in global bmap page */
bmp->db_maxfreebud = *l2leaf;
release_metapage(l2mp);
goto finalize;
}
}
} /* for each L1 in a L2 */
jfs_error(ipbmap->i_sb, "function has not returned as expected\n");
errout:
if (l0mp)
release_metapage(l0mp);
if (l1mp)
release_metapage(l1mp);
release_metapage(l2mp);
return -EIO;
/*
* finalize bmap control page
*/
finalize:
return 0;
}
/*
* dbFinalizeBmap()
*/
void dbFinalizeBmap(struct inode *ipbmap)
{
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
int actags, inactags, l2nl;
s64 ag_rem, actfree, inactfree, avgfree;
int i, n;
/*
* finalize bmap control page
*/
//finalize:
/*
* compute db_agpref: preferred ag to allocate from
* (the leftmost ag with average free space in it);
*/
//agpref:
/* get the number of active ags and inactive ags */
actags = bmp->db_maxag + 1;
inactags = bmp->db_numag - actags;
ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */
/* determine how many blocks are in the inactive allocation
* groups. in doing this, we must account for the fact that
* the rightmost group might be a partial group (i.e. file
* system size is not a multiple of the group size).
*/
inactfree = (inactags && ag_rem) ?
((inactags - 1) << bmp->db_agl2size) + ag_rem
: inactags << bmp->db_agl2size;
/* determine how many free blocks are in the active
* allocation groups plus the average number of free blocks
* within the active ags.
*/
actfree = bmp->db_nfree - inactfree;
avgfree = (u32) actfree / (u32) actags;
/* if the preferred allocation group does not have average free space,
* re-establish the preferred group as the leftmost
* group with average free space.
*/
if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
for (bmp->db_agpref = 0; bmp->db_agpref < actags;
bmp->db_agpref++) {
if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
break;
}
if (bmp->db_agpref >= bmp->db_numag) {
jfs_error(ipbmap->i_sb,
"cannot find ag with average freespace\n");
}
}
/*
* compute db_aglevel, db_agheight, db_agwidth, db_agstart:
* an ag is covered in aglevel dmapctl summary tree,
* at agheight level height (from leaf) with agwidth number of nodes
* each, which starts at agstart index node of the summary tree node
* array;
*/
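/*
* Illustrative numbers (not from the original source): with
* L2BPERDMAP = 13 and L2LPERCTL = 10, an AG of 2^15 blocks sits at
* aglevel 0, so l2nl = 2, agheight = 1 and agwidth = 1: each AG is
* described by a single node one level above the dmapctl leaves.
*/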
bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
l2nl =
bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
bmp->db_agheight = l2nl >> 1;
bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
i--) {
bmp->db_agstart += n;
n <<= 2;
}
}
/*
* NAME: dbInitDmap()/ujfs_idmap_page()
*
* FUNCTION: initialize working/persistent bitmap of the dmap page
* for the specified number of blocks:
*
* at entry, the bitmaps have been initialized as free (ZEROS);
* the number of blocks only accounts for the actually
* existing blocks. blocks which don't actually exist in
* the aggregate will be marked as allocated (ONES);
*
* PARAMETERS:
* dp - pointer to page of map
* blkno - starting block number covered by this dmap
* nblocks - number of blocks covered by this page
*
* RETURNS: max free string at the root of the dmap's summary tree
*/
static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
{
int blkno, w, b, r, nw, nb, i;
/* starting block number within the dmap */
blkno = Blkno & (BPERDMAP - 1);
if (blkno == 0) {
dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
dp->start = cpu_to_le64(Blkno);
if (nblocks == BPERDMAP) {
memset(&dp->wmap[0], 0, LPERDMAP * 4);
memset(&dp->pmap[0], 0, LPERDMAP * 4);
goto initTree;
}
} else {
le32_add_cpu(&dp->nblocks, nblocks);
le32_add_cpu(&dp->nfree, nblocks);
}
/* word number containing start block number */
w = blkno >> L2DBWORD;
/*
* free the bits corresponding to the block range (ZEROS):
* note: not all bits of the first and last words may be contained
* within the block range.
*/
for (r = nblocks; r > 0; r -= nb, blkno += nb) {
/* number of bits preceding range to be freed in the word */
b = blkno & (DBWORD - 1);
/* number of bits to free in the word */
nb = min(r, DBWORD - b);
/* is partial word to be freed ? */
if (nb < DBWORD) {
/* free (set to 0) from the bitmap word */
dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
>> b));
dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
>> b));
/* skip the word freed */
w++;
} else {
/* free (set to 0) contiguous bitmap words */
nw = r >> L2DBWORD;
memset(&dp->wmap[w], 0, nw * 4);
memset(&dp->pmap[w], 0, nw * 4);
/* skip the words freed */
nb = nw << L2DBWORD;
w += nw;
}
}
/*
* mark bits following the range to be freed (non-existing
* blocks) as allocated (ONES)
*/
if (blkno == BPERDMAP)
goto initTree;
/* the first word beyond the end of existing blocks */
w = blkno >> L2DBWORD;
/* does nblocks fall on a 32-bit boundary ? */
b = blkno & (DBWORD - 1);
if (b) {
/* mark a partial word allocated */
dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
w++;
}
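/* Illustrative example: if the dmap's blocks end 8 bits into a
* word (b = 8), ONES >> 8 = 0x00ffffff clears the first 8 bits
* (the existing, free blocks) and sets the remaining 24 bits --
* the non-existent blocks -- to allocated.
*/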
/* set the rest of the words in the page to allocated (ONES) */
for (i = w; i < LPERDMAP; i++)
dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
/*
* init tree
*/
initTree:
return (dbInitDmapTree(dp));
}
/*
* NAME: dbInitDmapTree()/ujfs_complete_dmap()
*
* FUNCTION: initialize summary tree of the specified dmap:
*
* at entry, bitmap of the dmap has been initialized;
*
* PARAMETERS:
* dp - dmap whose summary tree is to be initialized
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitDmapTree(struct dmap * dp)
{
struct dmaptree *tp;
s8 *cp;
int i;
/* init fixed info of tree */
tp = &dp->tree;
tp->nleafs = cpu_to_le32(LPERDMAP);
tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
tp->leafidx = cpu_to_le32(LEAFIND);
tp->height = cpu_to_le32(4);
tp->budmin = BUDMIN;
/* init each leaf from corresponding wmap word:
* note: leaf is set to NOFREE(-1) if all blocks of corresponding
* bitmap word are allocated.
*/
cp = tp->stree + le32_to_cpu(tp->leafidx);
for (i = 0; i < LPERDMAP; i++)
*cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
/* build the dmap's binary buddy summary tree */
return (dbInitTree(tp));
}
/*
* NAME: dbInitTree()/ujfs_adjtree()
*
* FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl.
*
* at entry, the leaves of the tree have been initialized
* from the corresponding bitmap word or the root of the summary
* tree of the child control page;
* configure the binary buddy system at the leaf level, then
* bubble the values of the leaf nodes up the tree.
*
* PARAMETERS:
* dtp - pointer to the dmap or dmapctl tree to be initialized
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitTree(struct dmaptree * dtp)
{
int l2max, l2free, bsize, nextb, i;
int child, parent, nparent;
s8 *tp, *cp, *cp1;
tp = dtp->stree;
/* Determine the maximum free string possible for the leaves */
l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
/*
* configure the leaf level into binary buddy system
*
* Try to combine buddies starting with a buddy size of 1
* (i.e. two leaves). At a buddy size of 1 two buddy leaves
* can be combined if both buddies have a maximum free of l2min;
* the combination will result in the left-most buddy leaf having
* a maximum free of l2min+1.
* After processing all buddies for a given size, process buddies
* at the next higher buddy size (i.e. current size * 2) and
* the next maximum free (current free + 1).
* This continues until the maximum possible buddy combination
* yields maximum free.
*/
for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
l2free++, bsize = nextb) {
/* get next buddy size == current buddy pair size */
nextb = bsize << 1;
/* scan each adjacent buddy pair at current buddy size */
for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
i < le32_to_cpu(dtp->nleafs);
i += nextb, cp += nextb) {
/* coalesce if both adjacent buddies are max free */
if (*cp == l2free && *(cp + bsize) == l2free) {
*cp = l2free + 1; /* left take right */
*(cp + bsize) = -1; /* right give left */
}
}
}
/*
* bubble summary information of leaves up the tree.
*
* Starting at the leaf node level, the four nodes described by
* the higher level parent node are compared for a maximum free and
* this maximum becomes the value of the parent node.
* when all lower level nodes are processed in this fashion then
* move up to the next level (parent becomes a lower level node) and
* continue the process for that level.
*/
for (child = le32_to_cpu(dtp->leafidx),
nparent = le32_to_cpu(dtp->nleafs) >> 2;
nparent > 0; nparent >>= 2, child = parent) {
/* get index of 1st node of parent level */
parent = (child - 1) >> 2;
/* set the value of the parent node as the maximum
* of the four nodes of the current level.
*/
for (i = 0, cp = tp + child, cp1 = tp + parent;
i < nparent; i++, cp += 4, cp1++)
*cp1 = TREEMAX(cp);
}
return (*tp);
}
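/*
* Worked example of the coalescing above (illustrative, assuming
* budmin = 0 and four leaves all holding 0): the first pass turns
* each buddy pair into (1, -1), giving (1, -1, 1, -1); the second
* pass combines the two size-2 buddies into (2, -1, -1, -1), and
* the bubbled-up root reports a maximum free string of 2^2 = 4 blocks.
*/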
/*
* dbInitDmapCtl()
*
* function: initialize dmapctl page
*/
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
{ /* start leaf index not covered by range */
s8 *cp;
dcp->nleafs = cpu_to_le32(LPERCTL);
dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
dcp->leafidx = cpu_to_le32(CTLLEAFIND);
dcp->height = cpu_to_le32(5);
dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
/*
* initialize the leaves of current level that were not covered
* by the specified input block range (i.e. the leaves have no
* low level dmapctl or dmap).
*/
cp = &dcp->stree[CTLLEAFIND + i];
for (; i < LPERCTL; i++)
*cp++ = NOFREE;
/* build the dmap's binary buddy summary tree */
return (dbInitTree((struct dmaptree *) dcp));
}
/*
* NAME: dbGetL2AGSize()/ujfs_getagl2size()
*
* FUNCTION: Determine log2(allocation group size) from aggregate size
*
* PARAMETERS:
* nblocks - Number of blocks in aggregate
*
* RETURNS: log2(allocation group size) in aggregate blocks
*/
static int dbGetL2AGSize(s64 nblocks)
{
s64 sz;
s64 m;
int l2sz;
if (nblocks < BPERDMAP * MAXAG)
return (L2BPERDMAP);
/* round up aggregate size to power of 2 */
m = ((u64) 1 << (64 - 1));
for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
if (m & nblocks)
break;
}
sz = (s64) 1 << l2sz;
if (sz < nblocks)
l2sz += 1;
/* agsize = roundupSize/max_number_of_ag */
return (l2sz - L2MAXAG);
}
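/*
* Illustrative example: an aggregate of 3,000,000 blocks yields
* l2sz = 22 (2^22 is the enclosing power of 2), and with
* L2MAXAG = 7 (128 allocation groups) dbGetL2AGSize() returns
* 22 - 7 = 15, i.e. an AG size of 32768 blocks.
*/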
/*
* NAME: dbMapFileSizeToMapSize()
*
* FUNCTION: compute number of blocks the block allocation map file
* can cover from the map file size;
*
* RETURNS: Number of blocks which can be covered by this block map file;
*/
/*
* maximum number of map pages at each level including control pages
*/
#define MAXL0PAGES (1 + LPERCTL)
#define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES)
/*
* convert number of map pages to the zero origin top dmapctl level
*/
#define BMAPPGTOLEV(npages) \
(((npages) <= 3 + MAXL0PAGES) ? 0 : \
((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
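/*
* For instance (illustrative): with LPERCTL = 1024, MAXL0PAGES is 1025,
* so a map file of up to 1028 pages -- global control page, L2, L1,
* plus one full L0 subtree -- is treated as having top level 0.
*/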
s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
{
struct super_block *sb = ipbmap->i_sb;
s64 nblocks;
s64 npages, ndmaps;
int level, i;
int complete, factor;
nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
level = BMAPPGTOLEV(npages);
/* At each level, accumulate the number of dmap pages covered by
* the number of full child levels below it;
* repeat for the last incomplete child level.
*/
ndmaps = 0;
npages--; /* skip the first global control page */
/* skip higher level control pages above top level covered by map */
npages -= (2 - level);
npages--; /* skip top level's control page */
for (i = level; i >= 0; i--) {
factor =
(i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
complete = (u32) npages / factor;
ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
((i == 1) ? LPERCTL : 1));
/* pages in last/incomplete child */
npages = (u32) npages % factor;
/* skip incomplete child's level control page */
npages--;
}
/* convert the number of dmaps into the number of blocks
* which can be covered by the dmaps;
*/
nblocks = ndmaps << L2BPERDMAP;
return (nblocks);
}
| linux-master | fs/jfs/jfs_dmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2005
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
#ifdef CONFIG_JFS_STATISTICS
static struct {
uint pagealloc; /* # of page allocations */
uint pagefree; /* # of page frees */
uint lockwait; /* # of sleeping lock_metapage() calls */
} mpStat;
#endif
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
static inline void unlock_metapage(struct metapage *mp)
{
clear_bit_unlock(META_locked, &mp->flag);
wake_up(&mp->wait);
}
static inline void __lock_metapage(struct metapage *mp)
{
DECLARE_WAITQUEUE(wait, current);
INCREMENT(mpStat.lockwait);
add_wait_queue_exclusive(&mp->wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (metapage_locked(mp)) {
unlock_page(mp->page);
io_schedule();
lock_page(mp->page);
}
} while (trylock_metapage(mp));
__set_current_state(TASK_RUNNING);
remove_wait_queue(&mp->wait, &wait);
}
/*
* Must have mp->page locked
*/
static inline void lock_metapage(struct metapage *mp)
{
if (trylock_metapage(mp))
__lock_metapage(mp);
}
#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;
#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
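/*
* Example (illustrative): with 4K metapages (L2PSIZE = 12) and 4K
* PAGE_SIZE, MPS_PER_PAGE is 1 and the simpler #else variants below
* are used; on a 16K-page configuration MPS_PER_PAGE is 4 and each
* page anchors up to four metapages via struct meta_anchor.
*/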
#if MPS_PER_PAGE > 1
struct meta_anchor {
int mp_count;
atomic_t io_count;
struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
if (!PagePrivate(page))
return NULL;
return mp_anchor(page)->mp[offset >> L2PSIZE];
}
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
struct meta_anchor *a;
int index;
int l2mp_blocks; /* log2 blocks per metapage */
if (PagePrivate(page))
a = mp_anchor(page);
else {
a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
if (!a)
return -ENOMEM;
set_page_private(page, (unsigned long)a);
SetPagePrivate(page);
kmap(page);
}
if (mp) {
l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
a->mp_count++;
a->mp[index] = mp;
}
return 0;
}
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
struct meta_anchor *a = mp_anchor(page);
int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
int index;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
BUG_ON(a->mp[index] != mp);
a->mp[index] = NULL;
if (--a->mp_count == 0) {
kfree(a);
set_page_private(page, 0);
ClearPagePrivate(page);
kunmap(page);
}
}
static inline void inc_io(struct page *page)
{
atomic_inc(&mp_anchor(page)->io_count);
}
static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
if (atomic_dec_and_test(&mp_anchor(page)->io_count))
handler(page);
}
#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
if (mp) {
set_page_private(page, (unsigned long)mp);
SetPagePrivate(page);
kmap(page);
}
return 0;
}
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
set_page_private(page, 0);
ClearPagePrivate(page);
kunmap(page);
}
#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)
#endif
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
if (mp) {
mp->lid = 0;
mp->lsn = 0;
mp->data = NULL;
mp->clsn = 0;
mp->log = NULL;
init_waitqueue_head(&mp->wait);
}
return mp;
}
static inline void free_metapage(struct metapage *mp)
{
mempool_free(mp, metapage_mempool);
}
int __init metapage_init(void)
{
/*
* Allocate the metapage structures
*/
metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
0, 0, NULL);
if (metapage_cache == NULL)
return -ENOMEM;
metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
metapage_cache);
if (metapage_mempool == NULL) {
kmem_cache_destroy(metapage_cache);
return -ENOMEM;
}
return 0;
}
void metapage_exit(void)
{
mempool_destroy(metapage_mempool);
kmem_cache_destroy(metapage_cache);
}
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
test_bit(META_io, &mp->flag))
return;
remove_metapage(page, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
/*
* Metapage address space operations
*/
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
int *len)
{
int rc = 0;
int xflag;
s64 xaddr;
sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_blkbits;
if (lblock >= file_blocks)
return 0;
if (lblock + *len > file_blocks)
*len = file_blocks - lblock;
if (inode->i_ino) {
rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
if ((rc == 0) && *len)
lblock = (sector_t)xaddr;
else
lblock = 0;
} /* else no mapping */
return lblock;
}
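/*
* Usage note (illustrative): callers pass in *len as the number of
* blocks they want mapped; on return *len is clamped to the length of
* the contiguous extent found, and a return value of 0 means the range
* lies beyond i_size or has no backing extent.
*/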
static void last_read_complete(struct page *page)
{
if (!PageError(page))
SetPageUptodate(page);
unlock_page(page);
}
static void metapage_read_end_io(struct bio *bio)
{
struct page *page = bio->bi_private;
if (bio->bi_status) {
printk(KERN_ERR "metapage_read_end_io: I/O error\n");
SetPageError(page);
}
dec_io(page, last_read_complete);
bio_put(bio);
}
static void remove_from_logsync(struct metapage *mp)
{
struct jfs_log *log = mp->log;
unsigned long flags;
/*
* This can race. Recheck that log hasn't been set to null, and after
* acquiring logsync lock, recheck lsn
*/
if (!log)
return;
LOGSYNC_LOCK(log, flags);
if (mp->lsn) {
mp->log = NULL;
mp->lsn = 0;
mp->clsn = 0;
log->count--;
list_del(&mp->synclist);
}
LOGSYNC_UNLOCK(log, flags);
}
static void last_write_complete(struct page *page)
{
struct metapage *mp;
unsigned int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (mp && test_bit(META_io, &mp->flag)) {
if (mp->lsn)
remove_from_logsync(mp);
clear_bit(META_io, &mp->flag);
}
/*
* I'd like to call drop_metapage here, but I don't think it's
* safe unless I have the page locked
*/
}
end_page_writeback(page);
}
static void metapage_write_end_io(struct bio *bio)
{
struct page *page = bio->bi_private;
BUG_ON(!PagePrivate(page));
if (bio->bi_status) {
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
SetPageError(page);
}
dec_io(page, last_write_complete);
bio_put(bio);
}
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
struct bio *bio = NULL;
int block_offset; /* block offset of mp within page */
struct inode *inode = page->mapping->host;
int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
int len;
int xlen;
struct metapage *mp;
int redirty = 0;
sector_t lblock;
int nr_underway = 0;
sector_t pblock;
sector_t next_block = 0;
sector_t page_start;
unsigned long bio_bytes = 0;
unsigned long bio_offset = 0;
int offset;
int bad_blocks = 0;
page_start = (sector_t)page->index <<
(PAGE_SHIFT - inode->i_blkbits);
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
set_page_writeback(page);
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (!mp || !test_bit(META_dirty, &mp->flag))
continue;
if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
redirty = 1;
/*
* Make sure this page isn't blocked indefinitely.
* If the journal isn't undergoing I/O, push it
*/
if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
jfs_flush_journal(mp->log, 0);
continue;
}
clear_bit(META_dirty, &mp->flag);
set_bit(META_io, &mp->flag);
block_offset = offset >> inode->i_blkbits;
lblock = page_start + block_offset;
if (bio) {
if (xlen && lblock == next_block) {
/* Contiguous, in memory & on disk */
len = min(xlen, blocks_per_mp);
xlen -= len;
bio_bytes += len << inode->i_blkbits;
continue;
}
/* Not contiguous */
if (bio_add_page(bio, page, bio_bytes, bio_offset) <
bio_bytes)
goto add_failed;
/*
* Increment counter before submitting i/o to keep
* count from hitting zero before we're through
*/
inc_io(page);
if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(bio);
nr_underway++;
bio = NULL;
} else
inc_io(page);
xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
pblock = metapage_get_blocks(inode, lblock, &xlen);
if (!pblock) {
printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
/*
* We already called inc_io(), but can't cancel it
* with dec_io() until we're done with the page
*/
bad_blocks++;
continue;
}
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
bio_bytes = len << inode->i_blkbits;
xlen -= len;
next_block = lblock + len;
}
if (bio) {
if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
goto add_failed;
if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(bio);
nr_underway++;
}
if (redirty)
redirty_page_for_writepage(wbc, page);
unlock_page(page);
if (bad_blocks)
goto err_out;
if (nr_underway == 0)
end_page_writeback(page);
return 0;
add_failed:
/* We should never reach here, since we're only adding one vec */
printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
goto skip;
dump_bio:
print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
4, bio, sizeof(*bio), 0);
skip:
bio_put(bio);
unlock_page(page);
dec_io(page, last_write_complete);
err_out:
while (bad_blocks--)
dec_io(page, last_write_complete);
return -EIO;
}
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct bio *bio = NULL;
int block_offset;
int blocks_per_page = i_blocks_per_page(inode, page);
sector_t page_start; /* address of page in fs blocks */
sector_t pblock;
int xlen;
unsigned int len;
int offset;
BUG_ON(!PageLocked(page));
page_start = (sector_t)page->index <<
(PAGE_SHIFT - inode->i_blkbits);
block_offset = 0;
while (block_offset < blocks_per_page) {
xlen = blocks_per_page - block_offset;
pblock = metapage_get_blocks(inode, page_start + block_offset,
&xlen);
if (pblock) {
if (!PagePrivate(page))
insert_metapage(page, NULL);
inc_io(page);
if (bio)
submit_bio(bio);
bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
GFP_NOFS);
bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
bio->bi_private = page;
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
if (bio_add_page(bio, page, len, offset) < len)
goto add_failed;
block_offset += xlen;
} else
block_offset++;
}
if (bio)
submit_bio(bio);
else
unlock_page(page);
return 0;
add_failed:
printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
bio_put(bio);
dec_io(page, last_read_complete);
return -EIO;
}
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct metapage *mp;
bool ret = true;
int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(&folio->page, offset);
if (!mp)
continue;
jfs_info("metapage_release_folio: mp = 0x%p", mp);
if (mp->count || mp->nohomeok ||
test_bit(META_dirty, &mp->flag)) {
jfs_info("count = %ld, nohomeok = %d", mp->count,
mp->nohomeok);
ret = false;
continue;
}
if (mp->lsn)
remove_from_logsync(mp);
remove_metapage(&folio->page, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
return ret;
}
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
BUG_ON(offset || length < folio_size(folio));
BUG_ON(folio_test_writeback(folio));
metapage_release_folio(folio, 0);
}
const struct address_space_operations jfs_metapage_aops = {
.read_folio = metapage_read_folio,
.writepage = metapage_writepage,
.release_folio = metapage_release_folio,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
};
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
unsigned int size, int absolute,
unsigned long new)
{
int l2BlocksPerPage;
int l2bsize;
struct address_space *mapping;
struct metapage *mp = NULL;
struct page *page;
unsigned long page_index;
unsigned long page_offset;
jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
inode->i_ino, lblock, absolute);
l2bsize = inode->i_blkbits;
l2BlocksPerPage = PAGE_SHIFT - l2bsize;
page_index = lblock >> l2BlocksPerPage;
page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
if ((page_offset + size) > PAGE_SIZE) {
jfs_err("MetaData crosses page boundary!!");
jfs_err("lblock = %lx, size = %d", lblock, size);
dump_stack();
return NULL;
}
if (absolute)
mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
else {
/*
* If an nfs client tries to read an inode number larger
* than that of any existing inode, we may try to read past
* the end of the inode map
*/
if ((lblock << inode->i_blkbits) >= inode->i_size)
return NULL;
mapping = inode->i_mapping;
}
if (new && (PSIZE == PAGE_SIZE)) {
page = grab_cache_page(mapping, page_index);
if (!page) {
jfs_err("grab_cache_page failed!");
return NULL;
}
SetPageUptodate(page);
} else {
page = read_mapping_page(mapping, page_index, NULL);
if (IS_ERR(page)) {
jfs_err("read_mapping_page failed!");
return NULL;
}
lock_page(page);
}
mp = page_to_mp(page, page_offset);
if (mp) {
if (mp->logical_size != size) {
jfs_error(inode->i_sb,
"get_mp->logical_size != size\n");
jfs_err("logical_size = %d, size = %d",
mp->logical_size, size);
dump_stack();
goto unlock;
}
mp->count++;
lock_metapage(mp);
if (test_bit(META_discard, &mp->flag)) {
if (!new) {
jfs_error(inode->i_sb,
"using a discarded metapage\n");
discard_metapage(mp);
goto unlock;
}
clear_bit(META_discard, &mp->flag);
}
} else {
INCREMENT(mpStat.pagealloc);
mp = alloc_metapage(GFP_NOFS);
if (!mp)
goto unlock;
mp->page = page;
mp->sb = inode->i_sb;
mp->flag = 0;
mp->xflag = COMMIT_PAGE;
mp->count = 1;
mp->nohomeok = 0;
mp->logical_size = size;
mp->data = page_address(page) + page_offset;
mp->index = lblock;
if (unlikely(insert_metapage(page, mp))) {
free_metapage(mp);
goto unlock;
}
lock_metapage(mp);
}
if (new) {
jfs_info("zeroing mp = 0x%p", mp);
memset(mp->data, 0, PSIZE);
}
unlock_page(page);
jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
return mp;
unlock:
unlock_page(page);
return NULL;
}
void grab_metapage(struct metapage * mp)
{
jfs_info("grab_metapage: mp = 0x%p", mp);
get_page(mp->page);
lock_page(mp->page);
mp->count++;
lock_metapage(mp);
unlock_page(mp->page);
}
static int metapage_write_one(struct page *page)
{
struct folio *folio = page_folio(page);
struct address_space *mapping = folio->mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = folio_nr_pages(folio),
};
int ret = 0;
BUG_ON(!folio_test_locked(folio));
folio_wait_writeback(folio);
if (folio_clear_dirty_for_io(folio)) {
folio_get(folio);
ret = metapage_writepage(page, &wbc);
if (ret == 0)
folio_wait_writeback(folio);
folio_put(folio);
} else {
folio_unlock(folio);
}
if (!ret)
ret = filemap_check_errors(mapping);
return ret;
}
void force_metapage(struct metapage *mp)
{
struct page *page = mp->page;
jfs_info("force_metapage: mp = 0x%p", mp);
set_bit(META_forcewrite, &mp->flag);
clear_bit(META_sync, &mp->flag);
get_page(page);
lock_page(page);
set_page_dirty(page);
if (metapage_write_one(page))
jfs_error(mp->sb, "metapage_write_one() failed\n");
clear_bit(META_forcewrite, &mp->flag);
put_page(page);
}
void hold_metapage(struct metapage *mp)
{
lock_page(mp->page);
}
void put_metapage(struct metapage *mp)
{
if (mp->count || mp->nohomeok) {
/* Someone else will release this */
unlock_page(mp->page);
return;
}
get_page(mp->page);
mp->count++;
lock_metapage(mp);
unlock_page(mp->page);
release_metapage(mp);
}
void release_metapage(struct metapage * mp)
{
struct page *page = mp->page;
jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
BUG_ON(!page);
lock_page(page);
unlock_metapage(mp);
assert(mp->count);
if (--mp->count || mp->nohomeok) {
unlock_page(page);
put_page(page);
return;
}
if (test_bit(META_dirty, &mp->flag)) {
set_page_dirty(page);
if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag);
if (metapage_write_one(page))
jfs_error(mp->sb, "metapage_write_one() failed\n");
lock_page(page);
}
} else if (mp->lsn) /* discard_metapage doesn't remove it */
remove_from_logsync(mp);
/* Try to keep metapages from using up too much memory */
drop_metapage(page, mp);
unlock_page(page);
put_page(page);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
sector_t lblock;
int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
int BlocksPerPage = 1 << l2BlocksPerPage;
/* All callers are interested in block device's mapping */
struct address_space *mapping =
JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
struct metapage *mp;
struct page *page;
unsigned int offset;
/*
* Mark metapages to discard. They will eventually be
* released, but should not be written.
*/
for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
lblock += BlocksPerPage) {
page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
if (!page)
continue;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (!mp)
continue;
if (mp->index < addr)
continue;
if (mp->index >= addr + len)
break;
clear_bit(META_dirty, &mp->flag);
set_bit(META_discard, &mp->flag);
if (mp->lsn)
remove_from_logsync(mp);
}
unlock_page(page);
put_page(page);
}
}
#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
seq_printf(m,
"JFS Metapage statistics\n"
"=======================\n"
"page allocations = %d\n"
"page frees = %d\n"
"lock waits = %d\n",
mpStat.pagealloc,
mpStat.pagefree,
mpStat.lockwait);
return 0;
}
#endif
| linux-master | fs/jfs/jfs_metapage.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
/*
* jfs_imap.c: inode allocation map manager
*
* Serialization:
* Each AG has a simple lock which is used to control the serialization of
* the AG level lists. This lock should be taken first whenever an AG
* level list will be modified or accessed.
*
* Each IAG is locked by obtaining the buffer for the IAG page.
*
* There is also an inode lock for the inode map inode. A read lock needs to
* be taken whenever an IAG is read from the map or the global level
* information is read. A write lock needs to be taken whenever the global
* level information is modified or an atomic operation needs to be used.
*
* If more than one IAG is read at one time, the read lock may not
* be given up until all of the IAGs are read. Otherwise, a deadlock
* may occur when trying to obtain the read lock while another thread
* holding the read lock is waiting on the IAG already being held.
*
* The control page of the inode map is read into memory by diMount().
* Thereafter it should only be modified in memory and then it will be
* written out when the filesystem is unmounted by diUnmount().
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_dinode.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
* imap locks
*/
/* iag free list lock */
#define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock)
#define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock)
#define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock)
/* per ag iag list locks */
#define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index]))
#define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno])
#define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno])
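/*
* Hypothetical usage sketch (comment only, not compiled): the AG lock
* brackets any access to the per-AG iag lists, honoring the ordering
* rules described at the top of this file:
*
* AG_LOCK(imap, agno);
* ... examine or modify the AG's iag free lists ...
* AG_UNLOCK(imap, agno);
*/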
/*
* forward references
*/
static int diAllocAG(struct inomap *, int, bool, struct inode *);
static int diAllocAny(struct inomap *, int, bool, struct inode *);
static int diAllocBit(struct inomap *, struct iag *, int);
static int diAllocExt(struct inomap *, int, struct inode *);
static int diAllocIno(struct inomap *, int, struct inode *);
static int diFindFree(u32, int);
static int diNewExt(struct inomap *, struct iag *, int);
static int diNewIAG(struct inomap *, int *, int, struct metapage **);
static void duplicateIXtree(struct super_block *, s64, int, s64 *);
static int diIAGRead(struct inomap * imap, int, struct metapage **);
static int copy_from_dinode(struct dinode *, struct inode *);
static void copy_to_dinode(struct dinode *, struct inode *);
/*
* NAME: diMount()
*
* FUNCTION: initialize the incore inode map control structures for
* a fileset or aggregate at mount time.
*
* the inode map's control structure (dinomap) is
* brought in from disk and placed in virtual memory.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
*/
int diMount(struct inode *ipimap)
{
struct inomap *imap;
struct metapage *mp;
int index;
struct dinomap_disk *dinom_le;
/*
* allocate/initialize the in-memory inode map control structure
*/
/* allocate the in-memory inode map control structure. */
imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
if (imap == NULL)
return -ENOMEM;
/* read the on-disk inode map control structure. */
mp = read_metapage(ipimap,
IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
kfree(imap);
return -EIO;
}
/* copy the on-disk version to the in-memory version. */
dinom_le = (struct dinomap_disk *) mp->data;
imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree));
imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext);
imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext);
for (index = 0; index < MAXAG; index++) {
imap->im_agctl[index].inofree =
le32_to_cpu(dinom_le->in_agctl[index].inofree);
imap->im_agctl[index].extfree =
le32_to_cpu(dinom_le->in_agctl[index].extfree);
imap->im_agctl[index].numinos =
le32_to_cpu(dinom_le->in_agctl[index].numinos);
imap->im_agctl[index].numfree =
le32_to_cpu(dinom_le->in_agctl[index].numfree);
}
/* release the buffer. */
release_metapage(mp);
/*
* allocate/initialize inode allocation map locks
*/
/* allocate and init iag free list lock */
IAGFREE_LOCK_INIT(imap);
/* allocate and init ag list locks */
for (index = 0; index < MAXAG; index++) {
AG_LOCK_INIT(imap, index);
}
/* bind the inode map inode and inode map control structure
* to each other.
*/
imap->im_ipimap = ipimap;
JFS_IP(ipimap)->i_imap = imap;
return (0);
}
/*
* NAME: diUnmount()
*
* FUNCTION: write to disk the incore inode map control structures for
* a fileset or aggregate at unmount time.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
*/
int diUnmount(struct inode *ipimap, int mounterror)
{
struct inomap *imap = JFS_IP(ipimap)->i_imap;
/*
* update the on-disk inode map control structure
*/
if (!(mounterror || isReadOnly(ipimap)))
diSync(ipimap);
/*
* Invalidate the page cache buffers
*/
truncate_inode_pages(ipimap->i_mapping, 0);
/*
* free in-memory control structure
*/
kfree(imap);
JFS_IP(ipimap)->i_imap = NULL;
return (0);
}
/*
* diSync()
*/
int diSync(struct inode *ipimap)
{
struct dinomap_disk *dinom_le;
struct inomap *imp = JFS_IP(ipimap)->i_imap;
struct metapage *mp;
int index;
/*
* write imap global control page
*/
/* read the on-disk inode map control structure */
mp = get_metapage(ipimap,
IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
jfs_err("diSync: get_metapage failed!");
return -EIO;
}
/* copy the in-memory version to the on-disk version */
dinom_le = (struct dinomap_disk *) mp->data;
dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext);
dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext);
for (index = 0; index < MAXAG; index++) {
dinom_le->in_agctl[index].inofree =
cpu_to_le32(imp->im_agctl[index].inofree);
dinom_le->in_agctl[index].extfree =
cpu_to_le32(imp->im_agctl[index].extfree);
dinom_le->in_agctl[index].numinos =
cpu_to_le32(imp->im_agctl[index].numinos);
dinom_le->in_agctl[index].numfree =
cpu_to_le32(imp->im_agctl[index].numfree);
}
/* write out the control structure */
write_metapage(mp);
/*
* write out dirty pages of imap
*/
filemap_write_and_wait(ipimap->i_mapping);
diWriteSpecial(ipimap, 0);
return (0);
}
/*
* NAME: diRead()
*
* FUNCTION: initialize an incore inode from disk.
*
* on entry, the specified incore inode should itself
* specify the disk inode number corresponding to the
* incore inode (i.e. i_number should be initialized).
*
* this routine handles incore inode initialization for
* both "special" and "regular" inodes. special inodes
* are those required early in the mount process and
* require special handling since much of the file system
* is not yet initialized. these "special" inodes are
* identified by a NULL inode map inode pointer and are
* actually initialized by a call to diReadSpecial().
*
* for regular inodes, the iag describing the disk inode
* is read from disk to determine the inode extent address
* for the disk inode. with the inode extent address in
* hand, the page of the extent that contains the disk
* inode is read and the disk inode is copied to the
* incore inode.
*
* PARAMETERS:
* ip - pointer to incore inode to be initialized from disk.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOMEM - insufficient memory
*
*/
int diRead(struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int iagno, ino, extno, rc;
struct inode *ipimap;
struct dinode *dp;
struct iag *iagp;
struct metapage *mp;
s64 blkno, agstart;
struct inomap *imap;
int block_offset;
int inodes_left;
unsigned long pageno;
int rel_inode;
jfs_info("diRead: ino = %ld", ip->i_ino);
ipimap = sbi->ipimap;
JFS_IP(ip)->ipimap = ipimap;
/* determine the iag number for this inode (number) */
iagno = INOTOIAG(ip->i_ino);
/* read the iag */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
imap = JFS_IP(ipimap)->i_imap;
rc = diIAGRead(imap, iagno, &mp);
IREAD_UNLOCK(ipimap);
if (rc) {
jfs_err("diRead: diIAGRead returned %d", rc);
return (rc);
}
iagp = (struct iag *) mp->data;
/* determine inode extent that holds the disk inode */
ino = ip->i_ino & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
(addressPXD(&iagp->inoext[extno]) == 0)) {
release_metapage(mp);
return -ESTALE;
}
/* get disk block number of the page within the inode extent
* that holds the disk inode.
*/
blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage);
/* get the ag for the iag */
agstart = le64_to_cpu(iagp->agstart);
release_metapage(mp);
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
/*
* OS/2 didn't always align inode extents on page boundaries
*/
inodes_left =
(sbi->nbperpage - block_offset) << sbi->l2niperblk;
if (rel_inode < inodes_left)
rel_inode += block_offset << sbi->l2niperblk;
else {
pageno += 1;
rel_inode -= inodes_left;
}
}
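/* Illustrative example (1K filesystem blocks: nbperpage = 4, two
* 512-byte inodes per block, l2niperblk = 1): an extent starting one
* block into a page gives block_offset = 1 and inodes_left = 6, so
* relative inode 5 becomes 7 within the same page, while relative
* inode 6 lands at the start of the following page.
*/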
/* read the page of disk inode */
mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
if (!mp) {
jfs_err("diRead: read_metapage failed");
return -EIO;
}
/* locate the disk inode requested */
dp = (struct dinode *) mp->data;
dp += rel_inode;
if (ip->i_ino != le32_to_cpu(dp->di_number)) {
jfs_error(ip->i_sb, "i_ino != di_number\n");
rc = -EIO;
} else if (le32_to_cpu(dp->di_nlink) == 0)
rc = -ESTALE;
else
/* copy the disk inode to the in-memory inode */
rc = copy_from_dinode(dp, ip);
release_metapage(mp);
/* set the ag for the inode */
JFS_IP(ip)->agstart = agstart;
JFS_IP(ip)->active_ag = -1;
return (rc);
}
/*
* NAME: diReadSpecial()
*
* FUNCTION: initialize a 'special' inode from disk.
*
* this routine handles aggregate level inodes. The
* inode cache cannot differentiate between the
* aggregate inodes and the filesystem inodes, so we
* handle these here. We don't actually use the aggregate
* inode map, since these inodes are at a fixed location
* and in some cases the aggregate inode map isn't initialized
* yet.
*
* PARAMETERS:
* sb - filesystem superblock
* inum - aggregate inode number
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES:
* new inode - success
* NULL - i/o error.
*/
struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
uint address;
struct dinode *dp;
struct inode *ip;
struct metapage *mp;
ip = new_inode(sb);
if (ip == NULL) {
jfs_err("diReadSpecial: new_inode returned NULL!");
return ip;
}
if (secondary) {
address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
JFS_IP(ip)->ipimap = sbi->ipaimap2;
} else {
address = AITBL_OFF >> L2PSIZE;
JFS_IP(ip)->ipimap = sbi->ipaimap;
}
ASSERT(inum < INOSPEREXT);
ip->i_ino = inum;
address += inum >> 3; /* 8 inodes per 4K page */
/* read the page of fixed disk inode (AIT) in raw mode */
mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
if (mp == NULL) {
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
return (NULL);
}
/* get the pointer to the disk inode of interest */
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
if ((copy_from_dinode(dp, ip)) != 0) {
/* handle bad return by returning NULL for ip */
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
/* release the page */
release_metapage(mp);
return (NULL);
}
ip->i_mapping->a_ops = &jfs_metapage_aops;
mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
/* Allocations to metadata inodes should not affect quotas */
ip->i_flags |= S_NOQUOTA;
if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
sbi->gengen = le32_to_cpu(dp->di_gengen);
sbi->inostamp = le32_to_cpu(dp->di_inostamp);
}
/* release the page */
release_metapage(mp);
inode_fake_hash(ip);
return (ip);
}
/*
* NAME: diWriteSpecial()
*
* FUNCTION: Write the special inode to disk
*
* PARAMETERS:
* ip - special inode
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES: none
*/
void diWriteSpecial(struct inode *ip, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
uint address;
struct dinode *dp;
ino_t inum = ip->i_ino;
struct metapage *mp;
if (secondary)
address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
else
address = AITBL_OFF >> L2PSIZE;
ASSERT(inum < INOSPEREXT);
address += inum >> 3; /* 8 inodes per 4K page */
/* read the page of fixed disk inode (AIT) in raw mode */
mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
if (mp == NULL) {
jfs_err("diWriteSpecial: failed to read aggregate inode extent!");
return;
}
/* get the pointer to the disk inode of interest */
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
copy_to_dinode(dp, ip);
memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);
if (inum == FILESYSTEM_I)
dp->di_gengen = cpu_to_le32(sbi->gengen);
/* write the page */
write_metapage(mp);
}
/*
* NAME: diFreeSpecial()
*
* FUNCTION: Free allocated space for special inode
*/
void diFreeSpecial(struct inode *ip)
{
if (ip == NULL) {
jfs_err("diFreeSpecial called with NULL ip!");
return;
}
filemap_write_and_wait(ip->i_mapping);
truncate_inode_pages(ip->i_mapping, 0);
iput(ip);
}
/*
* NAME: diWrite()
*
* FUNCTION: write the on-disk inode portion of the in-memory inode
* to its corresponding on-disk inode.
*
* on entry, the specified incore inode should itself
* specify the disk inode number corresponding to the
* incore inode (i.e. i_number should be initialized).
*
* the inode contains the inode extent address for the disk
* inode. with the inode extent address in hand, the
* page of the extent that contains the disk inode is
* read and the disk inode portion of the incore inode
* is copied to the disk inode.
*
* PARAMETERS:
* tid - transaction id
* ip - pointer to incore inode to be written to the inode extent.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
*/
int diWrite(tid_t tid, struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
s32 ino;
struct dinode *dp;
s64 blkno;
int block_offset;
int inodes_left;
struct metapage *mp;
unsigned long pageno;
int rel_inode;
int dioffset;
struct inode *ipimap;
uint type;
lid_t lid;
struct tlock *ditlck, *tlck;
struct linelock *dilinelock, *ilinelock;
struct lv *lv;
int n;
ipimap = jfs_ip->ipimap;
ino = ip->i_ino & (INOSPERIAG - 1);
if (!addressPXD(&(jfs_ip->ixpxd)) ||
(lengthPXD(&(jfs_ip->ixpxd)) !=
JFS_IP(ipimap)->i_imap->im_nbperiext)) {
jfs_error(ip->i_sb, "ixpxd invalid\n");
return -EIO;
}
/*
* read the page of disk inode containing the specified inode:
*/
/* compute the block address of the page */
blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
/*
* OS/2 didn't always align inode extents on page boundaries
*/
inodes_left =
(sbi->nbperpage - block_offset) << sbi->l2niperblk;
if (rel_inode < inodes_left)
rel_inode += block_offset << sbi->l2niperblk;
else {
pageno += 1;
rel_inode -= inodes_left;
}
}
/* read the page of disk inode */
retry:
mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
if (!mp)
return -EIO;
/* get the pointer to the disk inode */
dp = (struct dinode *) mp->data;
dp += rel_inode;
dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;
/*
* acquire transaction lock on the on-disk inode;
* N.B. tlock is acquired on ipimap not ip;
*/
if ((ditlck =
txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
goto retry;
dilinelock = (struct linelock *) & ditlck->lock;
/*
* copy btree root from in-memory inode to on-disk inode
*
* (tlock is taken from inline B+-tree root in in-memory
* inode when the B+-tree root is updated, which is pointed
* by jfs_ip->blid as well as being on tx tlock list)
*
* further processing of btree root is based on the copy
* in in-memory inode, where txLog() will log from, and,
* for xtree root, txUpdateMap() will update map and reset
* XAD_NEW bit;
*/
if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
/*
* This is the special xtree inside the directory for storing
* the directory table
*/
xtpage_t *p, *xp;
xad_t *xad;
jfs_ip->xtlid = 0;
tlck = lid_to_tlock(lid);
assert(tlck->type & tlckXTREE);
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (struct linelock *) & tlck->lock;
/*
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
xp = (xtpage_t *) &dp->di_dirtable;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
}
/* reset on-disk (metadata page) xtree XAD_NEW bit */
xad = &xp->xad[XTENTRYSTART];
for (n = XTENTRYSTART;
n < le16_to_cpu(xp->header.nextindex); n++, xad++)
if (xad->flag & (XAD_NEW | XAD_EXTENDED))
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
}
if ((lid = jfs_ip->blid) == 0)
goto inlineData;
jfs_ip->blid = 0;
tlck = lid_to_tlock(lid);
type = tlck->type;
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (struct linelock *) & tlck->lock;
/*
* regular file: 16 byte (XAD slot) granularity
*/
if (type & tlckXTREE) {
xtpage_t *p, *xp;
xad_t *xad;
/*
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
xp = &dp->di_xtroot;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
}
/* reset on-disk (metadata page) xtree XAD_NEW bit */
xad = &xp->xad[XTENTRYSTART];
for (n = XTENTRYSTART;
n < le16_to_cpu(xp->header.nextindex); n++, xad++)
if (xad->flag & (XAD_NEW | XAD_EXTENDED))
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
}
/*
* directory: 32 byte (directory entry slot) granularity
*/
else if (type & tlckDTREE) {
dtpage_t *p, *xp;
/*
* copy dtree root from inode to dinode:
*/
p = (dtpage_t *) &jfs_ip->i_dtroot;
xp = (dtpage_t *) & dp->di_dtroot;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
lv->length << L2DTSLOTSIZE);
}
} else {
jfs_err("diWrite: UFO tlock");
}
inlineData:
/*
* copy inline symlink from in-memory inode to on-disk inode
*/
if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
lv->length = 2;
memcpy(&dp->di_inline_all, jfs_ip->i_inline_all, IDATASIZE);
dilinelock->index++;
}
/*
* copy inline data from in-memory inode to on-disk inode:
* 128 byte slot granularity
*/
if (test_cflag(COMMIT_Inlineea, ip)) {
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
lv->length = 1;
memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
dilinelock->index++;
clear_cflag(COMMIT_Inlineea, ip);
}
/*
* lock/copy inode base: 128 byte slot granularity
*/
lv = & dilinelock->lv[dilinelock->index];
lv->offset = dioffset >> L2INODESLOTSIZE;
copy_to_dinode(dp, ip);
if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
lv->length = 2;
memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
} else
lv->length = 1;
dilinelock->index++;
/* release the buffer holding the updated on-disk inode.
 * the buffer will be written later by commit processing.
*/
write_metapage(mp);
return (rc);
}
/*
* NAME: diFree(ip)
*
* FUNCTION: free a specified inode from the inode working map
* for a fileset or aggregate.
*
* if the inode to be freed represents the first (only)
* free inode within the iag, the iag will be placed on
* the ag free inode list.
*
* freeing the inode will cause the inode extent to be
* freed if the inode is the only allocated inode within
* the extent. in this case all the disk resource backing
* up the inode extent will be freed. in addition, the iag
* will be placed on the ag extent free list if the extent
* is the first free extent in the iag. if freeing the
* extent also means that no free inodes will exist for
* the iag, the iag will also be removed from the ag free
* inode list.
*
* the iag describing the inode will be freed if the extent
* is to be freed and it is the only backed extent within
* the iag. in this case, the iag will be removed from the
* ag free extent list and ag free inode list and placed on
* the inode map's free iag list.
*
* a careful update approach is used to provide consistency
* in the face of updates to multiple buffers. under this
* approach, all required buffers are obtained before making
* any updates and are held until all updates are complete.
*
* PARAMETERS:
* ip - inode to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
*/
int diFree(struct inode *ip)
{
int rc;
ino_t inum = ip->i_ino;
struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
struct metapage *mp, *amp, *bmp, *cmp, *dmp;
int iagno, ino, extno, bitno, sword, agno;
int back, fwd;
u32 bitmap, mask;
struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
pxd_t freepxd;
tid_t tid;
struct inode *iplist[3];
struct tlock *tlck;
struct pxd_lock *pxdlock;
/*
* This is just to suppress compiler warnings. The same logic that
* references these variables is used to initialize them.
*/
aiagp = biagp = ciagp = diagp = NULL;
/* get the iag number containing the inode.
*/
iagno = INOTOIAG(inum);
/* make sure that the iag is contained within
* the map.
*/
if (iagno >= imap->im_nextiag) {
print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
imap, 32, 0);
jfs_error(ip->i_sb, "inum = %d, iagno = %d, nextiag = %d\n",
(uint) inum, iagno, imap->im_nextiag);
return -EIO;
}
/* get the allocation group for this ino.
*/
agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
/* Lock the AG specific inode map information
*/
AG_LOCK(imap, agno);
/* Obtain read lock in imap inode. Don't release it until we have
* read all of the IAG's that we are going to.
*/
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* read the iag.
*/
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
*/
ino = inum & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
mask = HIGHORDER >> bitno;
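/*
 * Worked example of the decomposition above (values hypothetical,
 * assuming INOSPERIAG == 4096 and INOSPEREXT == 32): inum == 4100
 * gives ino == 4100 & 4095 == 4, extno == 4 >> 5 == 0 and
 * bitno == 4 & 31 == 4, so mask == 0x80000000 >> 4 == 0x08000000
 * selects the inode's bit in wmap[0].
 */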
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
jfs_error(ip->i_sb, "wmap shows inode already free\n");
}
if (!addressPXD(&iagp->inoext[extno])) {
release_metapage(mp);
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb, "invalid inoext\n");
return -EIO;
}
/* compute the bitmap for the extent reflecting the freed inode.
*/
bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;
if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
release_metapage(mp);
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb, "numfree > numinos\n");
return -EIO;
}
/*
* inode extent still has some inodes or below low water mark:
* keep the inode extent;
*/
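/*
 * Reading of the test below with sample numbers (hypothetical): the
 * extent is kept if it still backs allocated inodes (bitmap != 0),
 * if the ag is short on free inodes (numfree < 96), or if
 * numfree < 288 while free inodes are at most 25% of backed inodes;
 * e.g. numfree == 200, numinos == 1000 gives 200 * 100 / 1000 == 20,
 * which is <= 25, so the extent is retained as a reserve.
 */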
if (bitmap ||
imap->im_agctl[agno].numfree < 96 ||
(imap->im_agctl[agno].numfree < 288 &&
(((imap->im_agctl[agno].numfree * 100) /
imap->im_agctl[agno].numinos) <= 25))) {
/* if the iag currently has no free inodes (i.e.,
 * the inode being freed is the first free inode of the iag),
 * insert the iag at the head of the inode free list for the ag.
*/
if (iagp->nfreeinos == 0) {
/* check if there are any iags on the ag inode
* free list. if so, read the first one so that
* we can link the current iag onto the list at
* the head.
*/
if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
/* read the iag that currently is the head
* of the list.
*/
if ((rc = diIAGRead(imap, fwd, &amp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
release_metapage(mp);
return (rc);
}
aiagp = (struct iag *) amp->data;
/* make current head point back to the iag.
*/
aiagp->inofreeback = cpu_to_le32(iagno);
write_metapage(amp);
}
/* iag points forward to current head and iag
* becomes the new head of the list.
*/
iagp->inofreefwd =
cpu_to_le32(imap->im_agctl[agno].inofree);
iagp->inofreeback = cpu_to_le32(-1);
imap->im_agctl[agno].inofree = iagno;
}
IREAD_UNLOCK(ipimap);
/* update the free inode summary map for the extent if
* freeing the inode means the extent will now have free
* inodes (i.e., the inode being freed is the first free
 * inode of the extent).
*/
if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
iagp->inosmap[sword] &=
cpu_to_le32(~(HIGHORDER >> bitno));
}
/* update the bitmap.
*/
iagp->wmap[extno] = cpu_to_le32(bitmap);
/* update the free inode counts at the iag, ag and
* map level.
*/
le32_add_cpu(&iagp->nfreeinos, 1);
imap->im_agctl[agno].numfree += 1;
atomic_inc(&imap->im_numfree);
/* release the AG inode map lock
*/
AG_UNLOCK(imap, agno);
/* write the iag */
write_metapage(mp);
return (0);
}
/*
* inode extent has become free and above low water mark:
* free the inode extent;
*/
/*
* prepare to update iag list(s) (careful update step 1)
*/
amp = bmp = cmp = dmp = NULL;
fwd = back = -1;
/* check if the iag currently has no free extents. if so,
* it will be placed on the head of the ag extent free list.
*/
if (iagp->nfreeexts == 0) {
/* check if the ag extent free list has any iags.
* if so, read the iag at the head of the list now.
* this (head) iag will be updated later to reflect
* the addition of the current iag at the head of
* the list.
*/
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
} else {
/* iag has free extents. check if the addition of a free
* extent will cause all extents to be free within this
* iag. if so, the iag will be removed from the ag extent
* free list and placed on the inode map's free iag list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
/* in preparation for removing the iag from the
* ag extent free list, read the iags preceding
* and following the iag on the ag extent free
* list.
*/
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (struct iag *) bmp->data;
}
}
}
/* remove the iag from the ag inode free list if freeing
 * this extent causes the iag to have no free inodes.
*/
if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
int inofreeback = le32_to_cpu(iagp->inofreeback);
int inofreefwd = le32_to_cpu(iagp->inofreefwd);
/* in preparation for removing the iag from the
* ag inode free list, read the iags preceding
* and following the iag on the ag inode free
* list. before reading these iags, we must make
* sure that we already don't have them in hand
* from up above, since re-reading an iag (buffer)
* we are currently holding would cause a deadlock.
*/
if (inofreefwd >= 0) {
if (inofreefwd == fwd)
ciagp = (struct iag *) amp->data;
else if (inofreefwd == back)
ciagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreefwd, &cmp)))
goto error_out;
ciagp = (struct iag *) cmp->data;
}
assert(ciagp != NULL);
}
if (inofreeback >= 0) {
if (inofreeback == fwd)
diagp = (struct iag *) amp->data;
else if (inofreeback == back)
diagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreeback, &dmp)))
goto error_out;
diagp = (struct iag *) dmp->data;
}
assert(diagp != NULL);
}
}
IREAD_UNLOCK(ipimap);
/*
* invalidate any page of the inode extent freed from buffer cache;
*/
freepxd = iagp->inoext[extno];
invalidate_pxd_metapages(ip, freepxd);
/*
* update iag list(s) (careful update step 2)
*/
/* add the iag to the ag extent free list if this is the
* first free extent for the iag.
*/
if (iagp->nfreeexts == 0) {
if (fwd >= 0)
aiagp->extfreeback = cpu_to_le32(iagno);
iagp->extfreefwd =
cpu_to_le32(imap->im_agctl[agno].extfree);
iagp->extfreeback = cpu_to_le32(-1);
imap->im_agctl[agno].extfree = iagno;
} else {
/* remove the iag from the ag extent list if all extents
* are now free and place it on the inode map iag free list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
if (fwd >= 0)
aiagp->extfreeback = iagp->extfreeback;
if (back >= 0)
biagp->extfreefwd = iagp->extfreefwd;
else
imap->im_agctl[agno].extfree =
le32_to_cpu(iagp->extfreefwd);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
IAGFREE_LOCK(imap);
iagp->iagfree = cpu_to_le32(imap->im_freeiag);
imap->im_freeiag = iagno;
IAGFREE_UNLOCK(imap);
}
}
/* remove the iag from the ag inode free list if freeing
* this extent causes the iag to have no free inodes.
*/
if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
ciagp->inofreeback = iagp->inofreeback;
if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
diagp->inofreefwd = iagp->inofreefwd;
else
imap->im_agctl[agno].inofree =
le32_to_cpu(iagp->inofreefwd);
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
}
/* update the inode extent address and working map
* to reflect the free extent.
* the permanent map should have been updated already
* for the inode being freed.
*/
if (iagp->pmap[extno] != 0) {
jfs_error(ip->i_sb, "the pmap does not show inode free\n");
}
iagp->wmap[extno] = 0;
PXDlength(&iagp->inoext[extno], 0);
PXDaddress(&iagp->inoext[extno], 0);
/* update the free extent and free inode summary maps
* to reflect the freed extent.
* the inode summary map is marked to indicate no inodes
* available for the freed extent.
*/
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
mask = HIGHORDER >> bitno;
iagp->inosmap[sword] |= cpu_to_le32(mask);
iagp->extsmap[sword] &= cpu_to_le32(~mask);
/* update the number of free inodes and number of free extents
* for the iag.
*/
le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
le32_add_cpu(&iagp->nfreeexts, 1);
/* update the number of free inodes and backed inodes
* at the ag and inode map level.
*/
imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
imap->im_agctl[agno].numinos -= INOSPEREXT;
atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
atomic_sub(INOSPEREXT, &imap->im_numinos);
if (amp)
write_metapage(amp);
if (bmp)
write_metapage(bmp);
if (cmp)
write_metapage(cmp);
if (dmp)
write_metapage(dmp);
/*
* start transaction to update block allocation map
* for the inode extent freed;
*
 * N.B. AG_LOCK is released and the iag will be released below;
 * another thread may then allocate an inode from / reuse the freed
 * ixad, BUT with a new/different backing inode extent than the
 * extent being freed by this transaction;
*/
tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
mutex_lock(&JFS_IP(ipimap)->commit_mutex);
/* acquire tlock of the iag page of the freed ixad
* to force the page NOHOMEOK (even though no data is
* logged from the iag page) until NOREDOPAGE|FREEXTENT log
* for the free of the extent is committed;
* write FREEXTENT|NOREDOPAGE log record
* N.B. linelock is overlaid as freed extent descriptor;
*/
tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = freepxd;
pxdlock->index = 1;
write_metapage(mp);
iplist[0] = ipimap;
/*
* logredo needs the IAG number and IAG extent index in order
* to ensure that the IMap is consistent. The least disruptive
* way to pass these values through to the transaction manager
* is in the iplist array.
*
* It's not pretty, but it works.
*/
iplist[1] = (struct inode *) (size_t)iagno;
iplist[2] = (struct inode *) (size_t)extno;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* unlock the AG inode map information */
AG_UNLOCK(imap, agno);
return (0);
error_out:
IREAD_UNLOCK(ipimap);
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
if (cmp)
release_metapage(cmp);
if (dmp)
release_metapage(dmp);
AG_UNLOCK(imap, agno);
release_metapage(mp);
return (rc);
}
/*
* There are several places in the diAlloc* routines where we initialize
* the inode.
*/
static inline void
diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
ip->i_ino = (iagno << L2INOSPERIAG) + ino;
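/*
 * e.g. (hypothetical, L2INOSPERIAG == 12): iagno == 3 and
 * ino == 70 yield i_ino == (3 << 12) + 70 == 12358.
 */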
jfs_ip->ixpxd = iagp->inoext[extno];
jfs_ip->agstart = le64_to_cpu(iagp->agstart);
jfs_ip->active_ag = -1;
}
/*
* NAME: diAlloc(pip,dir,ip)
*
* FUNCTION: allocate a disk inode from the inode working map
* for a fileset or aggregate.
*
* PARAMETERS:
* pip - pointer to incore inode for the parent inode.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
int rc, ino, iagno, addext, extno, bitno, sword;
int nwords, rem, i, agno;
u32 mask, inosmap, extsmap;
struct inode *ipimap;
struct metapage *mp;
ino_t inum;
struct iag *iagp;
struct inomap *imap;
/* get the pointers to the inode map inode and the
* corresponding imap control structure.
*/
ipimap = JFS_SBI(pip->i_sb)->ipimap;
imap = JFS_IP(ipimap)->i_imap;
JFS_IP(ip)->ipimap = ipimap;
JFS_IP(ip)->fileset = FILESYSTEM_I;
/* for a directory, the allocation policy is to start
* at the ag level using the preferred ag.
*/
if (dir) {
agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
AG_LOCK(imap, agno);
goto tryag;
}
/* for files, the policy starts off by trying to allocate from
* the same iag containing the parent disk inode:
* try to allocate the new disk inode close to the parent disk
* inode, using parent disk inode number + 1 as the allocation
* hint. (we use a left-to-right policy to attempt to avoid
* moving backward on the disk.) compute the hint within the
* file system and the iag.
*/
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
/*
* There is an open file actively growing. We want to
* allocate new inodes from a different ag to avoid
* fragmentation problems.
*/
agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
AG_LOCK(imap, agno);
goto tryag;
}
inum = pip->i_ino + 1;
ino = inum & (INOSPERIAG - 1);
/* back off the hint if it is outside of the iag */
if (ino == 0)
inum = pip->i_ino;
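/*
 * Hint sketch (hypothetical, INOSPERIAG == 4096): for a parent with
 * i_ino == 4095 the hint 4096 would be ino 0 of the next iag, so the
 * code above backs off to the parent's own number and the hint stays
 * inside the parent's iag.
 */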
/* lock the AG inode map information */
AG_LOCK(imap, agno);
/* Get read lock on imap inode */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* get the iag number and read the iag */
iagno = INOTOIAG(inum);
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
iagp = (struct iag *) mp->data;
/* determine if new inode extent is allowed to be added to the iag.
* new inode extent can be added to the iag if the ag
* has less than 32 free disk inodes and the iag has free extents.
*/
addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
/*
* try to allocate from the IAG
*/
/* check if the inode may be allocated from the iag
* (i.e. the inode has free inodes or new extent can be added).
*/
if (iagp->nfreeinos || addext) {
/* determine the extent number of the hint.
*/
extno = ino >> L2INOSPEREXT;
/* check if the extent containing the hint has backed
* inodes. if so, try to allocate within this extent.
*/
if (addressPXD(&iagp->inoext[extno])) {
bitno = ino & (INOSPEREXT - 1);
if ((bitno =
diFindFree(le32_to_cpu(iagp->wmap[extno]),
bitno))
< INOSPEREXT) {
ino = (extno << L2INOSPEREXT) + bitno;
/* a free inode (bit) was found within this
* extent, so allocate it.
*/
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(ipimap);
if (rc) {
assert(rc == -EIO);
} else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno, ino, extno,
iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the AG lock and return.
*/
AG_UNLOCK(imap, agno);
return (rc);
}
if (!addext)
extno =
(extno ==
EXTSPERIAG - 1) ? 0 : extno + 1;
}
/*
* no free inodes within the extent containing the hint.
*
* try to allocate from the backed extents following
* hint or, if appropriate (i.e. addext is true), allocate
* an extent of free inodes at or following the extent
* containing the hint.
*
* the free inode and free extent summary maps are used
* here, so determine the starting summary map position
* and the number of words we'll have to examine. again,
* the approach is to allocate following the hint, so we
* might have to initially ignore prior bits of the summary
* map that represent extents prior to the extent containing
* the hint and later revisit these bits.
*/
bitno = extno & (EXTSPERSUM - 1);
nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
sword = extno >> L2EXTSPERSUM;
/* mask any prior bits for the starting words of the
* summary map.
*/
mask = (bitno == 0) ? 0 : (ONES << (EXTSPERSUM - bitno));
inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;
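/*
 * Masking sketch (values hypothetical, EXTSPERSUM == 32): for a hint
 * in extno == 70, sword == 70 >> 5 == 2 and bitno == 70 & 31 == 6, so
 * mask == ONES << 26 sets the bits for extents 64..69; those extents
 * precede the hint and are ignored on the first pass of the scan.
 */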
/* scan the free inode and free extent summary maps for
* free resources.
*/
for (i = 0; i < nwords; i++) {
/* check if this word of the free inode summary
* map describes an extent with free inodes.
*/
if (~inosmap) {
/* an extent with free inodes has been
* found. determine the extent number
* and the inode number within the extent.
*/
rem = diFindFree(inosmap, 0);
extno = (sword << L2EXTSPERSUM) + rem;
rem = diFindFree(le32_to_cpu(iagp->wmap[extno]),
0);
if (rem >= INOSPEREXT) {
IREAD_UNLOCK(ipimap);
release_metapage(mp);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb,
"can't find free bit in wmap\n");
return -EIO;
}
/* determine the inode number within the
* iag and allocate the inode from the
* map.
*/
ino = (extno << L2INOSPEREXT) + rem;
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(ipimap);
if (rc)
assert(rc == -EIO);
else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno, ino, extno,
iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the AG lock and return.
*/
AG_UNLOCK(imap, agno);
return (rc);
}
/* check if we may allocate an extent of free
* inodes and whether this word of the free
* extents summary map describes a free extent.
*/
if (addext && ~extsmap) {
/* a free extent has been found. determine
* the extent number.
*/
rem = diFindFree(extsmap, 0);
extno = (sword << L2EXTSPERSUM) + rem;
/* allocate an extent of free inodes.
*/
if ((rc = diNewExt(imap, iagp, extno))) {
/* if there is no disk space for a
* new extent, try to allocate the
* disk inode from somewhere else.
*/
if (rc == -ENOSPC)
break;
assert(rc == -EIO);
} else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno,
extno << L2INOSPEREXT,
extno, iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the imap inode & the AG lock & return.
*/
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
/* move on to the next set of summary map words.
*/
sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
inosmap = le32_to_cpu(iagp->inosmap[sword]);
extsmap = le32_to_cpu(iagp->extsmap[sword]);
}
}
/* unlock imap inode */
IREAD_UNLOCK(ipimap);
/* nothing doing in this iag, so release it. */
release_metapage(mp);
tryag:
/*
* try to allocate anywhere within the same AG as the parent inode.
*/
rc = diAllocAG(imap, agno, dir, ip);
AG_UNLOCK(imap, agno);
if (rc != -ENOSPC)
return (rc);
/*
* try to allocate in any AG.
*/
return (diAllocAny(imap, agno, dir, ip));
}
/*
* NAME: diAllocAG(imap,agno,dir,ip)
*
* FUNCTION: allocate a disk inode from the allocation group.
*
* this routine first determines if a new extent of free
* inodes should be added for the allocation group, with
* the current request satisfied from this extent. if this
* is the case, an attempt will be made to do just that. if
* this attempt fails or it has been determined that a new
* extent should not be added, an attempt is made to satisfy
* the request by allocating an existing (backed) free inode
* from the allocation group.
*
* PRE CONDITION: Already have the AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group to allocate from.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to the new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
int rc, addext, numfree, numinos;
/* get the number of free and the number of backed disk
* inodes currently within the ag.
*/
numfree = imap->im_agctl[agno].numfree;
numinos = imap->im_agctl[agno].numinos;
if (numfree > numinos) {
jfs_error(ip->i_sb, "numfree > numinos\n");
return -EIO;
}
/* determine if we should allocate a new extent of free inodes
* within the ag: for directory inodes, add a new extent
* if there are a small number of free inodes or number of free
* inodes is a small percentage of the number of backed inodes.
*/
if (dir)
addext = (numfree < 64 ||
(numfree < 256
&& ((numfree * 100) / numinos) <= 20));
else
addext = (numfree == 0);
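/*
 * Example of the heuristic above (counts hypothetical): for a
 * directory, numfree == 40 satisfies the first clause (40 < 64);
 * numfree == 100 with numinos == 1000 satisfies the second, since
 * 100 * 100 / 1000 == 10 is <= 20.  For regular files an extent is
 * added only when the ag has no free inodes at all.
 */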
/*
* try to allocate a new extent of free inodes.
*/
if (addext) {
/* if free space is not available for this new extent, try
* below to allocate a free and existing (already backed)
* inode from the ag.
*/
if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
return (rc);
}
/*
* try to allocate an existing free inode from the ag.
*/
return (diAllocIno(imap, agno, ip));
}
/*
 * NAME: diAllocAny(imap,agno,dir,ip)
*
* FUNCTION: allocate a disk inode from any other allocation group.
*
* this routine is called when an allocation attempt within
 * the primary allocation group has failed. it attempts to
* allocate an inode from any allocation group other than the
* specified primary group.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - primary allocation group (to avoid).
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
int ag, rc;
int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;
/* try to allocate from the ags following agno up to
* the maximum ag number.
*/
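/*
 * e.g. (hypothetical): with agno == 3 and maxag == 7, the two loops
 * below probe ags 4, 5, 6, 7 and then 0, 1, 2, so every ag except
 * the failed primary one is tried exactly once.
 */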
for (ag = agno + 1; ag <= maxag; ag++) {
AG_LOCK(imap, ag);
rc = diAllocAG(imap, ag, dir, ip);
AG_UNLOCK(imap, ag);
if (rc != -ENOSPC)
return (rc);
}
/* try to allocate from the ags in front of agno.
*/
for (ag = 0; ag < agno; ag++) {
AG_LOCK(imap, ag);
rc = diAllocAG(imap, ag, dir, ip);
AG_UNLOCK(imap, ag);
if (rc != -ENOSPC)
return (rc);
}
/* no free disk inodes.
*/
return -ENOSPC;
}
/*
* NAME: diAllocIno(imap,agno,ip)
*
* FUNCTION: allocate a disk inode from the allocation group's free
* inode list, returning an error if this free list is
* empty (i.e. no iags on the list).
*
* allocation occurs from the first iag on the list using
* the iag's free inode summary map to find the leftmost
* free inode in the iag.
*
* PRE CONDITION: Already have AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
int iagno, ino, rc, rem, extno, sword;
struct metapage *mp;
struct iag *iagp;
/* check if there are iags on the ag's free inode list.
*/
if ((iagno = imap->im_agctl[agno].inofree) < 0)
return -ENOSPC;
/* obtain read lock on imap inode */
IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
/* read the iag at the head of the list.
*/
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(imap->im_ipimap);
return (rc);
}
iagp = (struct iag *) mp->data;
/* better be free inodes in this iag if it is on the
* list.
*/
if (!iagp->nfreeinos) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "nfreeinos = 0, but iag on freelist\n");
return -EIO;
}
/* scan the free inode summary map to find an extent
* with free inodes.
*/
for (sword = 0;; sword++) {
if (sword >= SMAPSZ) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb,
"free inode not found in summary map\n");
return -EIO;
}
if (~iagp->inosmap[sword])
break;
}
/* found an extent with free inodes. determine
* the extent number.
*/
rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
if (rem >= EXTSPERSUM) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "no free extent found\n");
return -EIO;
}
extno = (sword << L2EXTSPERSUM) + rem;
/* find the first free inode in the extent.
*/
rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
if (rem >= INOSPEREXT) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "free inode not found\n");
return -EIO;
}
/* compute the inode number within the iag.
*/
ino = (extno << L2INOSPEREXT) + rem;
/* allocate the inode.
*/
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(imap->im_ipimap);
if (rc) {
release_metapage(mp);
return (rc);
}
/* set the results of the allocation and write the iag.
*/
diInitInode(ip, iagno, ino, extno, iagp);
write_metapage(mp);
return (0);
}
/*
* NAME: diAllocExt(imap,agno,ip)
*
* FUNCTION: add a new extent of free inodes to an iag, allocating
* an inode from this extent to satisfy the current allocation
* request.
*
* this routine first tries to find an existing iag with free
* extents through the ag free extent list. if list is not
* empty, the head of the list will be selected as the home
* of the new extent of free inodes. otherwise (the list is
* empty), a new iag will be allocated for the ag to contain
* the extent.
*
* once an iag has been selected, the free extent summary map
* is used to locate a free extent within the iag and diNewExt()
* is called to initialize the extent, with initialization
* including the allocation of the first inode of the extent
* for the purpose of satisfying this request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group number.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
{
int rem, iagno, sword, extno, rc;
struct metapage *mp;
struct iag *iagp;
/* check if the ag has any iags with free extents. if not,
* allocate a new iag for the ag.
*/
if ((iagno = imap->im_agctl[agno].extfree) < 0) {
/* If successful, diNewIAG will obtain the read lock on the
* imap inode.
*/
if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
return (rc);
}
iagp = (struct iag *) mp->data;
/* set the ag number if this is a brand new iag
*/
iagp->agstart =
cpu_to_le64(AGTOBLK(agno, imap->im_ipimap));
} else {
/* read the iag.
*/
IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "error reading iag\n");
return rc;
}
iagp = (struct iag *) mp->data;
}
/* using the free extent summary map, find a free extent.
*/
for (sword = 0;; sword++) {
if (sword >= SMAPSZ) {
release_metapage(mp);
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "free ext summary map not found\n");
return -EIO;
}
if (~iagp->extsmap[sword])
break;
}
/* determine the extent number of the free extent.
*/
rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0);
if (rem >= EXTSPERSUM) {
release_metapage(mp);
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "free extent not found\n");
return -EIO;
}
extno = (sword << L2EXTSPERSUM) + rem;
/* initialize the new extent.
*/
rc = diNewExt(imap, iagp, extno);
IREAD_UNLOCK(imap->im_ipimap);
if (rc) {
/* something bad happened. if a new iag was allocated,
* place it back on the inode map's iag free list, and
* clear the ag number information.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
IAGFREE_LOCK(imap);
iagp->iagfree = cpu_to_le32(imap->im_freeiag);
imap->im_freeiag = iagno;
IAGFREE_UNLOCK(imap);
}
write_metapage(mp);
return (rc);
}
/* set the results of the allocation and write the iag.
*/
diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp);
write_metapage(mp);
return (0);
}
/*
* NAME: diAllocBit(imap,iagp,ino)
*
* FUNCTION: allocate a backed inode from an iag.
*
* this routine performs the mechanics of allocating a
* specified inode from a backed extent.
*
* if the inode to be allocated represents the last free
* inode within the iag, the iag will be removed from the
* ag free inode list.
*
* a careful update approach is used to provide consistency
* in the face of updates to multiple buffers. under this
* approach, all required buffers are obtained before making
 * any updates and are held until all updates are complete.
*
* PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* ino - inode number to be allocated within the iag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
{
int extno, bitno, agno, sword, rc;
struct metapage *amp = NULL, *bmp = NULL;
struct iag *aiagp = NULL, *biagp = NULL;
u32 mask;
/* check if this is the last free inode within the iag.
* if so, it will have to be removed from the ag free
* inode list, so get the iags preceding and following
* it on the list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) {
if ((rc =
diIAGRead(imap, le32_to_cpu(iagp->inofreefwd),
&amp)))
return (rc);
aiagp = (struct iag *) amp->data;
}
if ((int) le32_to_cpu(iagp->inofreeback) >= 0) {
if ((rc =
diIAGRead(imap,
le32_to_cpu(iagp->inofreeback),
&bmp))) {
if (amp)
release_metapage(amp);
return (rc);
}
biagp = (struct iag *) bmp->data;
}
}
/* get the ag number, extent number, inode number within
* the extent.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb));
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
/* compute the mask for setting the map.
*/
mask = HIGHORDER >> bitno;
/* the inode should be free and backed.
*/
if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) ||
((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) ||
(addressPXD(&iagp->inoext[extno]) == 0)) {
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
jfs_error(imap->im_ipimap->i_sb, "iag inconsistent\n");
return -EIO;
}
/* mark the inode as allocated in the working map.
*/
iagp->wmap[extno] |= cpu_to_le32(mask);
/* check if all inodes within the extent are now
* allocated. if so, update the free inode summary
* map to reflect this.
*/
if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno);
}
/* if this was the last free inode in the iag, remove the
* iag from the ag free inode list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
if (amp) {
aiagp->inofreeback = iagp->inofreeback;
write_metapage(amp);
}
if (bmp) {
biagp->inofreefwd = iagp->inofreefwd;
write_metapage(bmp);
} else {
imap->im_agctl[agno].inofree =
le32_to_cpu(iagp->inofreefwd);
}
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
}
/* update the free inode count at the iag, ag, inode
* map levels.
*/
le32_add_cpu(&iagp->nfreeinos, -1);
imap->im_agctl[agno].numfree -= 1;
atomic_dec(&imap->im_numfree);
return (0);
}
/*
* NAME: diNewExt(imap,iagp,extno)
*
* FUNCTION: initialize a new extent of inodes for an iag, allocating
* the first inode of the extent for use for the current
* allocation request.
*
* disk resources are allocated for the new extent of inodes
* and the inodes themselves are initialized to reflect their
* existence within the extent (i.e. their inode numbers and
* inode extent addresses are set) and their initial state
* (mode and link count are set to zero).
*
* if the iag is new, it is not yet on an ag extent free list
* but will now be placed on this list.
*
* if the allocation of the new extent causes the iag to
* have no free extent, the iag will be removed from the
* ag extent free list.
*
* if the iag has no free backed inodes, it will be placed
* on the ag free inode list, since the addition of the new
* extent will now cause it to have free inodes.
*
* a careful update approach is used to provide consistency
* (i.e. list consistency) in the face of updates to multiple
* buffers. under this approach, all required buffers are
* obtained before making any updates and are held until all
* updates are complete.
*
* PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* extno - extent number.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
{
int agno, iagno, fwd, back, freei = 0, sword, rc;
struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL;
struct metapage *amp, *bmp, *cmp, *dmp;
struct inode *ipimap;
s64 blkno, hint;
int i, j;
u32 mask;
ino_t ino;
struct dinode *dp;
struct jfs_sb_info *sbi;
/* better have free extents.
*/
if (!iagp->nfreeexts) {
jfs_error(imap->im_ipimap->i_sb, "no free extents\n");
return -EIO;
}
/* get the inode map inode.
*/
ipimap = imap->im_ipimap;
sbi = JFS_SBI(ipimap->i_sb);
amp = bmp = cmp = NULL;
/* get the ag and iag numbers for this iag.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
iagno = le32_to_cpu(iagp->iagnum);
/* check if this is the last free extent within the
* iag. if so, the iag must be removed from the ag
* free extent list, so get the iags preceding and
* following the iag on this list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
return (rc);
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (struct iag *) bmp->data;
}
} else {
/* the iag has free extents. if all extents are free
* (as is the case for a newly allocated iag), the iag
* must be added to the ag free extent list, so get
* the iag at the head of the list in preparation for
* adding this iag to this list.
*/
fwd = back = -1;
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
}
}
/* check if the iag has no free inodes. if so, the iag
* will have to be added to the ag free inode list, so get
* the iag at the head of the list in preparation for
* adding this iag to this list. in doing this, we must
* check if we already have the iag at the head of
* the list in hand.
*/
if (iagp->nfreeinos == 0) {
freei = imap->im_agctl[agno].inofree;
if (freei >= 0) {
if (freei == fwd) {
ciagp = aiagp;
} else if (freei == back) {
ciagp = biagp;
} else {
if ((rc = diIAGRead(imap, freei, &cmp)))
goto error_out;
ciagp = (struct iag *) cmp->data;
}
if (ciagp == NULL) {
jfs_error(imap->im_ipimap->i_sb,
"ciagp == NULL\n");
rc = -EIO;
goto error_out;
}
}
}
/* allocate disk space for the inode extent.
*/
if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0))
hint = ((s64) agno << sbi->bmap->db_agl2size) - 1;
else
hint = addressPXD(&iagp->inoext[extno - 1]) +
lengthPXD(&iagp->inoext[extno - 1]) - 1;
if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno)))
goto error_out;
/* compute the inode number of the first inode within the
* extent.
*/
ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT);
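/*
 * e.g. (hypothetical, L2INOSPERIAG == 12, L2INOSPEREXT == 5):
 * iagno == 2 and extno == 3 make the first inode of the extent
 * (2 << 12) + (3 << 5) == 8288; the loop below then stamps inodes
 * 8288..8319 into the freshly allocated pages.
 */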
/* initialize the inodes within the newly allocated extent a
* page at a time.
*/
for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) {
/* get a buffer for this page of disk inodes.
*/
dmp = get_metapage(ipimap, blkno + i, PSIZE, 1);
if (dmp == NULL) {
rc = -EIO;
goto error_out;
}
dp = (struct dinode *) dmp->data;
/* initialize the inode number, mode, link count and
* inode extent address.
*/
for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) {
dp->di_inostamp = cpu_to_le32(sbi->inostamp);
dp->di_number = cpu_to_le32(ino);
dp->di_fileset = cpu_to_le32(FILESYSTEM_I);
dp->di_mode = 0;
dp->di_nlink = 0;
PXDaddress(&(dp->di_ixpxd), blkno);
PXDlength(&(dp->di_ixpxd), imap->im_nbperiext);
}
write_metapage(dmp);
}
/* if this is the last free extent within the iag, remove the
* iag from the ag free extent list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
if (fwd >= 0)
aiagp->extfreeback = iagp->extfreeback;
if (back >= 0)
biagp->extfreefwd = iagp->extfreefwd;
else
imap->im_agctl[agno].extfree =
le32_to_cpu(iagp->extfreefwd);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
} else {
/* if the iag has all free extents (newly allocated iag),
* add the iag to the ag free extent list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
if (fwd >= 0)
aiagp->extfreeback = cpu_to_le32(iagno);
iagp->extfreefwd = cpu_to_le32(fwd);
iagp->extfreeback = cpu_to_le32(-1);
imap->im_agctl[agno].extfree = iagno;
}
}
/* if the iag has no free inodes, add the iag to the
* ag free inode list.
*/
if (iagp->nfreeinos == 0) {
if (freei >= 0)
ciagp->inofreeback = cpu_to_le32(iagno);
iagp->inofreefwd =
cpu_to_le32(imap->im_agctl[agno].inofree);
iagp->inofreeback = cpu_to_le32(-1);
imap->im_agctl[agno].inofree = iagno;
}
/* initialize the extent descriptor of the extent. */
PXDlength(&iagp->inoext[extno], imap->im_nbperiext);
PXDaddress(&iagp->inoext[extno], blkno);
/* initialize the working and persistent map of the extent.
* the working map will be initialized such that
* it indicates the first inode of the extent is allocated.
*/
iagp->wmap[extno] = cpu_to_le32(HIGHORDER);
iagp->pmap[extno] = 0;
/* update the free inode and free extent summary maps
* for the extent to indicate the extent has free inodes
* and no longer represents a free extent.
*/
sword = extno >> L2EXTSPERSUM;
mask = HIGHORDER >> (extno & (EXTSPERSUM - 1));
iagp->extsmap[sword] |= cpu_to_le32(mask);
iagp->inosmap[sword] &= cpu_to_le32(~mask);
/* update the free inode and free extent counts for the
* iag.
*/
le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1));
le32_add_cpu(&iagp->nfreeexts, -1);
/* update the free and backed inode counts for the ag.
*/
imap->im_agctl[agno].numfree += (INOSPEREXT - 1);
imap->im_agctl[agno].numinos += INOSPEREXT;
/* update the free and backed inode counts for the inode map.
*/
atomic_add(INOSPEREXT - 1, &imap->im_numfree);
atomic_add(INOSPEREXT, &imap->im_numinos);
/* write the iags.
*/
if (amp)
write_metapage(amp);
if (bmp)
write_metapage(bmp);
if (cmp)
write_metapage(cmp);
return (0);
error_out:
/* release the iags.
*/
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
if (cmp)
release_metapage(cmp);
return (rc);
}
/*
 * NAME: diNewIAG(imap,iagnop,agno,mpp)
*
* FUNCTION: allocate a new iag for an allocation group.
*
* first tries to allocate the iag from the inode map
* iagfree list:
* if the list has free iags, the head of the list is removed
* and returned to satisfy the request.
* if the inode map's iag free list is empty, the inode map
* is extended to hold a new iag. this new iag is initialized
* and returned to satisfy the request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagnop - pointer to an iag number set with the number of the
* newly allocated iag upon successful return.
* agno - allocation group number.
 * mpp - buffer pointer to be filled in with the new iag's buffer
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*
* serialization:
* AG lock held on entry/exit;
* write lock on the map is held inside;
* read lock on the map is held on successful completion;
*
* note: new iag transaction:
* . synchronously write iag;
* . write log of xtree and inode of imap;
* . commit;
* . synchronous write of xtree (right to left, bottom to top);
* . at start of logredo(): init in-memory imap with one additional iag page;
* . at end of logredo(): re-read imap inode to determine
* new imap size;
*/
static int
diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
{
int rc;
int iagno, i, xlen;
struct inode *ipimap;
struct super_block *sb;
struct jfs_sb_info *sbi;
struct metapage *mp;
struct iag *iagp;
s64 xaddr = 0;
s64 blkno;
tid_t tid;
struct inode *iplist[1];
/* pick up pointers to the inode map and mount inodes */
ipimap = imap->im_ipimap;
sb = ipimap->i_sb;
sbi = JFS_SBI(sb);
/* acquire the free iag lock */
IAGFREE_LOCK(imap);
/* if there are any iags on the inode map free iag list,
* allocate the iag from the head of the list.
*/
if (imap->im_freeiag >= 0) {
/* pick up the iag number at the head of the list */
iagno = imap->im_freeiag;
/* determine the logical block number of the iag */
blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
} else {
/* no free iags. the inode map will have to be extended
* to include a new iag.
*/
/* acquire inode map lock */
IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
IWRITE_UNLOCK(ipimap);
IAGFREE_UNLOCK(imap);
jfs_error(imap->im_ipimap->i_sb,
"ipimap->i_size is wrong\n");
return -EIO;
}
/* get the next available iag number */
iagno = imap->im_nextiag;
/* make sure that we have not exceeded the maximum inode
* number limit.
*/
if (iagno > (MAXIAGS - 1)) {
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
rc = -ENOSPC;
goto out;
}
/*
* synchronously append new iag page.
*/
/* determine the logical address of iag page to append */
blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
/* Allocate extent for new iag page */
xlen = sbi->nbperpage;
if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) {
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
goto out;
}
/*
* start transaction of update of the inode map
* addressing structure pointing to the new iag page;
*/
tid = txBegin(sb, COMMIT_FORCE);
mutex_lock(&JFS_IP(ipimap)->commit_mutex);
/* update the inode map addressing structure to point to it */
if ((rc =
xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* Free the blocks allocated for the iag since it was
* not successfully added to the inode map
*/
dbFree(ipimap, xaddr, (s64) xlen);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
goto out;
}
/* update the inode map's inode to reflect the extension */
ipimap->i_size += PSIZE;
inode_add_bytes(ipimap, PSIZE);
/* assign a buffer for the page */
mp = get_metapage(ipimap, blkno, PSIZE, 0);
if (!mp) {
/*
* This is very unlikely since we just created the
* extent, but let's try to handle it correctly
*/
xtTruncate(tid, ipimap, ipimap->i_size - PSIZE,
COMMIT_PWMAP);
txAbort(tid, 0);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
rc = -EIO;
goto out;
}
iagp = (struct iag *) mp->data;
/* init the iag */
memset(iagp, 0, sizeof(struct iag));
iagp->iagnum = cpu_to_le32(iagno);
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
iagp->iagfree = cpu_to_le32(-1);
iagp->nfreeinos = 0;
iagp->nfreeexts = cpu_to_le32(EXTSPERIAG);
/* initialize the free inode summary map (free extent
 * summary map initialization handled by the memset above).
*/
for (i = 0; i < SMAPSZ; i++)
iagp->inosmap[i] = cpu_to_le32(ONES);
/*
* Write and sync the metapage
*/
flush_metapage(mp);
/*
* txCommit(COMMIT_FORCE) will synchronously write address
* index pages and inode after commit in careful update order
* of address index pages (right to left, bottom up);
*/
iplist[0] = ipimap;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
duplicateIXtree(sb, blkno, xlen, &xaddr);
/* update the next available iag number */
imap->im_nextiag += 1;
/* Add the iag to the iag free list so we don't lose the iag
* if a failure happens now.
*/
imap->im_freeiag = iagno;
/* Until we have logredo working, we want the imap inode &
* control page to be up to date.
*/
diSync(ipimap);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
}
/* obtain read lock on map */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* read the iag */
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
rc = -EIO;
goto out;
}
iagp = (struct iag *) mp->data;
/* remove the iag from the iag free list */
imap->im_freeiag = le32_to_cpu(iagp->iagfree);
iagp->iagfree = cpu_to_le32(-1);
/* set the return iag number and buffer pointer */
*iagnop = iagno;
*mpp = mp;
out:
/* release the iag free lock */
IAGFREE_UNLOCK(imap);
return (rc);
}
/*
* NAME: diIAGRead()
*
* FUNCTION: get the buffer for the specified iag within a fileset
* or aggregate inode map.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagno - iag number.
 * mpp - pointer to buffer pointer to be filled in on successful
* exit.
*
* SERIALIZATION:
* must have read lock on imap inode
* (When called by diExtendFS, the filesystem is quiesced, therefore
* the read lock is unnecessary.)
*
* RETURN VALUES:
* 0 - success.
* -EIO - i/o error.
*/
static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
{
struct inode *ipimap = imap->im_ipimap;
s64 blkno;
/* compute the logical block number of the iag. */
blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage);
/* read the iag. */
*mpp = read_metapage(ipimap, blkno, PSIZE, 0);
if (*mpp == NULL) {
return -EIO;
}
return (0);
}
/*
* NAME: diFindFree()
*
* FUNCTION: find the first free bit in a word starting at
* the specified bit position.
*
* PARAMETERS:
* word - word to be examined.
* start - starting bit position.
*
* RETURN VALUES:
* bit position of first free bit in the word or 32 if
* no free bits were found.
*/
static int diFindFree(u32 word, int start)
{
int bitno;
assert(start < 32);
/* scan the word for the first free bit. */
for (word <<= start, bitno = start; bitno < 32;
bitno++, word <<= 1) {
if ((word & HIGHORDER) == 0)
break;
}
return (bitno);
}
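/*
 * Worked example for diFindFree() (word value hypothetical): for
 * word == 0xf7ffffff and start == 0, the four set high-order bits are
 * shifted past and the loop stops at bitno == 4, the first zero bit;
 * for word == 0xffffffff no zero bit exists and 32 is returned.
 */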
/*
* NAME: diUpdatePMap()
*
* FUNCTION: Update the persistent map in an IAG for the allocation or
* freeing of the specified inode.
*
* PRE CONDITIONS: Working map has already been updated for allocate.
*
* PARAMETERS:
* ipimap - Incore inode map inode
* inum - Number of inode to mark in permanent map
 * is_free - 'true' if the inode should be marked freed; otherwise
 *	   it should be marked allocated.
*
* RETURN VALUES:
* 0 for success
*/
int
diUpdatePMap(struct inode *ipimap,
unsigned long inum, bool is_free, struct tblock * tblk)
{
int rc;
struct iag *iagp;
struct metapage *mp;
int iagno, ino, extno, bitno;
struct inomap *imap;
u32 mask;
struct jfs_log *log;
int lsn, difft, diffp;
unsigned long flags;
imap = JFS_IP(ipimap)->i_imap;
/* get the iag number containing the inode */
iagno = INOTOIAG(inum);
/* make sure that the iag is contained within the map */
if (iagno >= imap->im_nextiag) {
jfs_error(ipimap->i_sb, "the iag is outside the map\n");
return -EIO;
}
/* read the iag */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
rc = diIAGRead(imap, iagno, &mp);
IREAD_UNLOCK(ipimap);
if (rc)
return (rc);
metapage_wait_for_io(mp);
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
*/
ino = inum & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
mask = HIGHORDER >> bitno;
/*
* mark the inode free in persistent map:
*/
if (is_free) {
/* The inode should have been allocated both in working
* map and in persistent map;
 * the inode will be freed from the working map at the
 * release of the last reference;
*/
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
jfs_error(ipimap->i_sb,
"inode %ld not marked as allocated in wmap!\n",
inum);
}
if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) {
jfs_error(ipimap->i_sb,
"inode %ld not marked as allocated in pmap!\n",
inum);
}
/* update the bitmap for the extent of the freed inode */
iagp->pmap[extno] &= cpu_to_le32(~mask);
}
/*
* mark the inode allocated in persistent map:
*/
else {
/* The inode should be already allocated in the working map
* and should be free in persistent map;
*/
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
release_metapage(mp);
jfs_error(ipimap->i_sb,
"the inode is not allocated in the working map\n");
return -EIO;
}
if ((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) {
release_metapage(mp);
jfs_error(ipimap->i_sb,
"the inode is not free in the persistent map\n");
return -EIO;
}
/* update the bitmap for the extent of the allocated inode */
iagp->pmap[extno] |= cpu_to_le32(mask);
}
/*
* update iag lsn
*/
lsn = tblk->lsn;
log = JFS_SBI(tblk->sb)->log;
LOGSYNC_LOCK(log, flags);
if (mp->lsn != 0) {
/* inherit older/smaller lsn */
logdiff(difft, lsn, log);
logdiff(diffp, mp->lsn, log);
if (difft < diffp) {
mp->lsn = lsn;
/* move mp after tblock in logsync list */
list_move(&mp->synclist, &tblk->synclist);
}
/* inherit younger/larger clsn */
assert(mp->clsn);
logdiff(difft, tblk->clsn, log);
logdiff(diffp, mp->clsn, log);
if (difft > diffp)
mp->clsn = tblk->clsn;
} else {
mp->log = log;
mp->lsn = lsn;
/* insert mp after tblock in logsync list */
log->count++;
list_add(&mp->synclist, &tblk->synclist);
mp->clsn = tblk->clsn;
}
LOGSYNC_UNLOCK(log, flags);
write_metapage(mp);
return (0);
}
/*
* diExtendFS()
*
* function: update imap for extendfs();
*
 * note: AG size has been increased such that each group of k old
 * contiguous AGs is coalesced into a new AG;
*/
int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
{
int rc, rcx = 0;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
struct iag *iagp = NULL, *hiagp = NULL;
struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *bp, *hbp;
int i, n, head;
int numinos, xnuminos = 0, xnumfree = 0;
s64 agstart;
jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d",
imap->im_nextiag, atomic_read(&imap->im_numinos),
atomic_read(&imap->im_numfree));
/*
* reconstruct imap
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
* i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
* note: new AG size = old AG size * (2**x).
*/
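/*
 * Coalescing example (hypothetical): if the AG size doubled (x == 1,
 * so k == 2), old AG0 and AG1 become the new AG0, old AG2 and AG3 the
 * new AG1, and each iag's agstart is rounded down to the start of its
 * new, larger AG by the "n << db_agl2size" computation below.
 */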
/* init per AG control information im_agctl[] */
for (i = 0; i < MAXAG; i++) {
imap->im_agctl[i].inofree = -1;
imap->im_agctl[i].extfree = -1;
imap->im_agctl[i].numinos = 0; /* number of backed inodes */
imap->im_agctl[i].numfree = 0; /* number of free backed inodes */
}
/*
* process each iag page of the map.
*
* rebuild AG Free Inode List, AG Free Inode Extent List;
*/
for (i = 0; i < imap->im_nextiag; i++) {
if ((rc = diIAGRead(imap, i, &bp))) {
rcx = rc;
continue;
}
iagp = (struct iag *) bp->data;
if (le32_to_cpu(iagp->iagnum) != i) {
release_metapage(bp);
jfs_error(ipimap->i_sb, "unexpected value of iagnum\n");
return -EIO;
}
/* leave free iag in the free iag list */
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
release_metapage(bp);
continue;
}
agstart = le64_to_cpu(iagp->agstart);
n = agstart >> mp->db_agl2size;
iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
/* compute backed inodes */
numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
<< L2INOSPEREXT;
if (numinos > 0) {
/* merge AG backed inodes */
imap->im_agctl[n].numinos += numinos;
xnuminos += numinos;
}
/* if any backed free inodes, insert at AG free inode list */
if ((int) le32_to_cpu(iagp->nfreeinos) > 0) {
if ((head = imap->im_agctl[n].inofree) == -1) {
iagp->inofreefwd = cpu_to_le32(-1);
iagp->inofreeback = cpu_to_le32(-1);
} else {
if ((rc = diIAGRead(imap, head, &hbp))) {
rcx = rc;
goto nextiag;
}
hiagp = (struct iag *) hbp->data;
hiagp->inofreeback = iagp->iagnum;
iagp->inofreefwd = cpu_to_le32(head);
iagp->inofreeback = cpu_to_le32(-1);
write_metapage(hbp);
}
imap->im_agctl[n].inofree =
le32_to_cpu(iagp->iagnum);
/* merge AG backed free inodes */
imap->im_agctl[n].numfree +=
le32_to_cpu(iagp->nfreeinos);
xnumfree += le32_to_cpu(iagp->nfreeinos);
}
/* if any free extents, insert at AG free extent list */
if (le32_to_cpu(iagp->nfreeexts) > 0) {
if ((head = imap->im_agctl[n].extfree) == -1) {
iagp->extfreefwd = cpu_to_le32(-1);
iagp->extfreeback = cpu_to_le32(-1);
} else {
if ((rc = diIAGRead(imap, head, &hbp))) {
rcx = rc;
goto nextiag;
}
hiagp = (struct iag *) hbp->data;
hiagp->extfreeback = iagp->iagnum;
iagp->extfreefwd = cpu_to_le32(head);
iagp->extfreeback = cpu_to_le32(-1);
write_metapage(hbp);
}
imap->im_agctl[n].extfree =
le32_to_cpu(iagp->iagnum);
}
nextiag:
write_metapage(bp);
}
if (xnuminos != atomic_read(&imap->im_numinos) ||
xnumfree != atomic_read(&imap->im_numfree)) {
jfs_error(ipimap->i_sb, "numinos or numfree incorrect\n");
return -EIO;
}
return rcx;
}
/*
* duplicateIXtree()
*
* serialization: IWRITE_LOCK held on entry/exit
*
* note: shadow page with regular inode (rel.2);
*/
static void duplicateIXtree(struct super_block *sb, s64 blkno,
int xlen, s64 *xaddr)
{
struct jfs_superblock *j_sb;
struct buffer_head *bh;
struct inode *ip;
tid_t tid;
/* if AIT2 ipmap2 is bad, do not try to update it */
if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT) /* s_flag */
return;
ip = diReadSpecial(sb, FILESYSTEM_I, 1);
if (ip == NULL) {
JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
if (readSuper(sb, &bh))
return;
j_sb = (struct jfs_superblock *)bh->b_data;
j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
return;
}
/* start transaction */
tid = txBegin(sb, COMMIT_FORCE);
/* update the inode map addressing structure to point to it */
if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) {
JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
txAbort(tid, 1);
goto cleanup;
}
/* update the inode map's inode to reflect the extension */
ip->i_size += PSIZE;
inode_add_bytes(ip, PSIZE);
txCommit(tid, 1, &ip, COMMIT_FORCE);
cleanup:
txEnd(tid);
diFreeSpecial(ip);
}
/*
* NAME: copy_from_dinode()
*
* FUNCTION: Copies inode info from disk inode to in-memory inode
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
*/
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
jfs_set_inode_flags(ip);
ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
if (sbi->umask != -1) {
ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask);
/* For directories, add x permission if r is allowed by umask */
if (S_ISDIR(ip->i_mode)) {
if (ip->i_mode & 0400)
ip->i_mode |= 0100;
if (ip->i_mode & 0040)
ip->i_mode |= 0010;
if (ip->i_mode & 0004)
ip->i_mode |= 0001;
}
}
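/*
 * e.g. (hypothetical): with umask 022 a directory ends up with
 * (i_mode & ~0777) | 0755 and the checks above change nothing; with
 * umask 0333 the base permissions are 0444, and each readable class
 * gains the matching x bit, giving 0555.
 */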
set_nlink(ip, le32_to_cpu(dip->di_nlink));
jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
if (!uid_valid(sbi->uid))
ip->i_uid = jfs_ip->saved_uid;
else {
ip->i_uid = sbi->uid;
}
jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
if (!gid_valid(sbi->gid))
ip->i_gid = jfs_ip->saved_gid;
else {
ip->i_gid = sbi->gid;
}
ip->i_size = le64_to_cpu(dip->di_size);
ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
inode_set_ctime(ip, le32_to_cpu(dip->di_ctime.tv_sec),
le32_to_cpu(dip->di_ctime.tv_nsec));
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
ip->i_generation = le32_to_cpu(dip->di_gen);
jfs_ip->ixpxd = dip->di_ixpxd; /* in-memory pxd's are little-endian */
jfs_ip->acl = dip->di_acl; /* as are dxd's */
jfs_ip->ea = dip->di_ea;
jfs_ip->next_index = le32_to_cpu(dip->di_next_index);
jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec);
jfs_ip->acltype = le32_to_cpu(dip->di_acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) {
jfs_ip->dev = le32_to_cpu(dip->di_rdev);
ip->i_rdev = new_decode_dev(jfs_ip->dev);
}
if (S_ISDIR(ip->i_mode)) {
memcpy(&jfs_ip->u.dir, &dip->u._dir, 384);
} else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
} else
memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128);
/* Zero the in-memory-only stuff */
jfs_ip->cflag = 0;
jfs_ip->btindex = 0;
jfs_ip->btorder = 0;
jfs_ip->bxflag = 0;
jfs_ip->blid = 0;
jfs_ip->atlhead = 0;
jfs_ip->atltail = 0;
jfs_ip->xtlid = 0;
return (0);
}
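/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how the umask override in copy_from_dinode() rewrites a
 * directory's mode. The permission bits are replaced wholesale by
 * 0777 & ~umask, then each granted read bit is mirrored into the
 * matching execute bit so a readable directory stays traversable.
 */
#if 0	/* example only; never compiled */
static umode_t example_override_mode(umode_t disk_mode, int umask, bool is_dir)
{
	umode_t mode = (disk_mode & ~0777) | (0777 & ~umask);

	if (is_dir) {
		if (mode & 0400)
			mode |= 0100;
		if (mode & 0040)
			mode |= 0010;
		if (mode & 0004)
			mode |= 0001;
	}
	/* e.g. umask 0333 on a directory: 0444 -> 0555 */
	return mode;
}
#endif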
/*
* NAME: copy_to_dinode()
*
* FUNCTION: Copies inode info from in-memory inode to disk inode
*/
static void copy_to_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
dip->di_inostamp = cpu_to_le32(sbi->inostamp);
dip->di_number = cpu_to_le32(ip->i_ino);
dip->di_gen = cpu_to_le32(ip->i_generation);
dip->di_size = cpu_to_le64(ip->i_size);
dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
dip->di_nlink = cpu_to_le32(ip->i_nlink);
if (!uid_valid(sbi->uid))
dip->di_uid = cpu_to_le32(i_uid_read(ip));
else
		dip->di_uid = cpu_to_le32(from_kuid(&init_user_ns,
						    jfs_ip->saved_uid));
if (!gid_valid(sbi->gid))
dip->di_gid = cpu_to_le32(i_gid_read(ip));
else
dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
jfs_ip->saved_gid));
/*
* mode2 is only needed for storing the higher order bits.
* Trust i_mode for the lower order ones
*/
if (sbi->umask == -1)
dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) |
ip->i_mode);
else /* Leave the original permissions alone */
dip->di_mode = cpu_to_le32(jfs_ip->mode2);
dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime(ip).tv_sec);
dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime(ip).tv_nsec);
dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
dip->di_acl = jfs_ip->acl; /* as are dxd's */
dip->di_ea = jfs_ip->ea;
dip->di_next_index = cpu_to_le32(jfs_ip->next_index);
dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime);
dip->di_otime.tv_nsec = 0;
dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
dip->di_rdev = cpu_to_le32(jfs_ip->dev);
}
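/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the di_mode composition used in copy_to_dinode() when no
 * umask override is active. The high 16 bits (JFS attribute flags such
 * as JFS_IMMUTABLE_FL) come from mode2; the low 16 bits are the live
 * VFS i_mode.
 */
#if 0	/* example only; never compiled */
static u32 example_compose_di_mode(u32 mode2, umode_t i_mode)
{
	return (mode2 & 0xffff0000) | i_mode;
}
#endif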
| linux-master | fs/jfs/jfs_imap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_extent.h"
#include "jfs_debug.h"
/*
* forward references
*/
static int extBalloc(struct inode *, s64, s64 *, s64 *);
static s64 extRoundDown(s64 nb);
#define DPD(a) (printk("(a): %d\n",(a)))
#define DPC(a) (printk("(a): %c\n",(a)))
#define DPL1(a) \
{ \
	if ((a) >> 32) \
		printk("(a): %x%08x ", (u32)((a) >> 32), (u32)(a)); \
	else \
		printk("(a): %x ", (u32)(a)); \
}
#define DPL(a) \
{ \
	if ((a) >> 32) \
		printk("(a): %x%08x\n", (u32)((a) >> 32), (u32)(a)); \
	else \
		printk("(a): %x\n", (u32)(a)); \
}
#define DPD1(a) (printk("(a): %d ",(a)))
#define DPX(a) (printk("(a): %08x\n",(a)))
#define DPX1(a) (printk("(a): %08x ",(a)))
#define DPS(a) (printk("%s\n",(a)))
#define DPE(a) (printk("\nENTERING: %s\n",(a)))
#define DPE1(a) (printk("\nENTERING: %s",(a)))
#define DPS1(a) (printk(" %s ",(a)))
/*
* NAME: extAlloc()
*
* FUNCTION: allocate an extent for a specified page range within a
* file.
*
* PARAMETERS:
* ip - the inode of the file.
* xlen - requested extent length.
 * pno - the starting page number within the file.
* xp - pointer to an xad. on entry, xad describes an
* extent that is used as an allocation hint if the
* xaddr of the xad is non-zero. on successful exit,
* the xad describes the newly allocated extent.
* abnr - bool indicating whether the newly allocated extent
* should be marked as allocated but not recorded.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int
extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
s64 nxlen, nxaddr, xoff, hint, xaddr = 0;
int rc;
int xflag;
/* This blocks if we are low on resources */
txBeginAnon(ip->i_sb);
/* Avoid race with jfs_commit_inode() */
mutex_lock(&JFS_IP(ip)->commit_mutex);
/* validate extent length */
if (xlen > MAXXLEN)
xlen = MAXXLEN;
/* get the page's starting extent offset */
xoff = pno << sbi->l2nbperpage;
/* check if an allocation hint was provided */
if ((hint = addressXAD(xp))) {
/* get the size of the extent described by the hint */
nxlen = lengthXAD(xp);
/* check if the hint is for the portion of the file
* immediately previous to the current allocation
* request and if hint extent has the same abnr
* value as the current request. if so, we can
* extend the hint extent to include the current
* extent if we can allocate the blocks immediately
* following the hint extent.
*/
if (offsetXAD(xp) + nxlen == xoff &&
abnr == ((xp->flag & XAD_NOTRECORDED) ? true : false))
xaddr = hint + nxlen;
/* adjust the hint to the last block of the extent */
hint += (nxlen - 1);
}
/* allocate the disk blocks for the extent. initially, extBalloc()
* will try to allocate disk blocks for the requested size (xlen).
* if this fails (xlen contiguous free blocks not available), it'll
* try to allocate a smaller number of blocks (producing a smaller
* extent), with this smaller number of blocks consisting of the
* requested number of blocks rounded down to the next smaller
* power of 2 number (i.e. 16 -> 8). it'll continue to round down
* and retry the allocation until the number of blocks to allocate
* is smaller than the number of blocks per page.
*/
nxlen = xlen;
if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) {
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, nxlen);
if (rc) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return rc;
}
/* determine the value of the extent flag */
xflag = abnr ? XAD_NOTRECORDED : 0;
/* if we can extend the hint extent to cover the current request,
* extend it. otherwise, insert a new extent to
* cover the current request.
*/
if (xaddr && xaddr == nxaddr)
rc = xtExtend(0, ip, xoff, (int) nxlen, 0);
else
rc = xtInsert(0, ip, xflag, xoff, (int) nxlen, &nxaddr, 0);
/* if the extend or insert failed,
* free the newly allocated blocks and return the error.
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
dquot_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
/* set the results of the extent allocation */
XADaddress(xp, nxaddr);
XADlength(xp, nxlen);
XADoffset(xp, xoff);
xp->flag = xflag;
mark_inode_dirty(ip);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
/*
 * COMMIT_Synclist flags an anonymous tlock on a page that is on
 * the sync list.
 * We need to commit the inode to get the page written to the disk.
 */
	if (test_and_clear_cflag(COMMIT_Synclist, ip))
jfs_commit_inode(ip, 0);
return (0);
}
/*
* NAME: extHint()
*
* FUNCTION: produce an extent allocation hint for a file offset.
*
* PARAMETERS:
* ip - the inode of the file.
* offset - file offset for which the hint is needed.
* xp - pointer to the xad that is to be filled in with
* the hint.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
*/
int extHint(struct inode *ip, s64 offset, xad_t * xp)
{
struct super_block *sb = ip->i_sb;
int nbperpage = JFS_SBI(sb)->nbperpage;
s64 prev;
int rc = 0;
s64 xaddr;
int xlen;
int xflag;
/* init the hint as "no hint provided" */
XADaddress(xp, 0);
/* determine the starting extent offset of the page previous
* to the page containing the offset.
*/
prev = ((offset & ~POFFSET) >> JFS_SBI(sb)->l2bsize) - nbperpage;
/* if the offset is in the first page of the file, no hint provided.
*/
if (prev < 0)
goto out;
rc = xtLookup(ip, prev, nbperpage, &xflag, &xaddr, &xlen, 0);
if ((rc == 0) && xlen) {
if (xlen != nbperpage) {
jfs_error(ip->i_sb, "corrupt xtree\n");
rc = -EIO;
}
XADaddress(xp, xaddr);
XADlength(xp, xlen);
XADoffset(xp, prev);
/*
* only preserve the abnr flag within the xad flags
* of the returned hint.
*/
xp->flag = xflag & XAD_NOTRECORDED;
} else
rc = 0;
out:
return (rc);
}
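/*
 * Worked example (editor's addition, not part of the original source):
 * how extHint() derives 'prev'. Assuming 4 KiB pages and 1 KiB blocks
 * (l2bsize == 10, nbperpage == 4), an offset of 0x3500 masks down to
 * the page start 0x3000, converts to block 12, and backing up one page
 * gives prev == 8, i.e. the first block of the preceding page.
 */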
/*
* NAME: extRecord()
*
 * FUNCTION: change a page within a file from not recorded to recorded.
*
* PARAMETERS:
* ip - inode of the file.
 * xp - xad of the file page to be recorded.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
int extRecord(struct inode *ip, xad_t * xp)
{
int rc;
txBeginAnon(ip->i_sb);
mutex_lock(&JFS_IP(ip)->commit_mutex);
/* update the extent */
rc = xtUpdate(0, ip, xp);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return rc;
}
/*
* NAME: extBalloc()
*
* FUNCTION: allocate disk blocks to form an extent.
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
* contiguous free blocks not available), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
* number (i.e. 16 -> 8). we'll continue to round down and
* retry the allocation until the number of blocks to allocate
* is smaller than the number of blocks per page.
*
* PARAMETERS:
* ip - the inode of the file.
* hint - disk block number to be used as an allocation hint.
* *nblocks - pointer to an s64 value. on entry, this value specifies
* the desired number of block to be allocated. on successful
* exit, this value is set to the number of blocks actually
* allocated.
* blkno - pointer to a block address that is filled in on successful
* return with the starting block number of the newly
* allocated block range.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOSPC - insufficient disk resources.
*/
static int
extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
{
struct jfs_inode_info *ji = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
s64 nb, nblks, daddr, max;
int rc, nbperpage = sbi->nbperpage;
struct bmap *bmp = sbi->bmap;
int ag;
/* get the number of blocks to initially attempt to allocate.
* we'll first try the number of blocks requested unless this
* number is greater than the maximum number of contiguous free
* blocks in the map. in that case, we'll start off with the
* maximum free.
*/
/* give up if no space left */
if (bmp->db_maxfreebud == -1)
return -ENOSPC;
max = (s64) 1 << bmp->db_maxfreebud;
if (*nblocks >= max && *nblocks > nbperpage)
nb = nblks = (max > nbperpage) ? max : nbperpage;
else
nb = nblks = *nblocks;
/* try to allocate blocks */
while ((rc = dbAlloc(ip, hint, nb, &daddr)) != 0) {
/* if something other than an out of space error,
* stop and return this error.
*/
if (rc != -ENOSPC)
return (rc);
/* decrease the allocation request size */
nb = min(nblks, extRoundDown(nb));
/* give up if we cannot cover a page */
if (nb < nbperpage)
return (rc);
}
*nblocks = nb;
*blkno = daddr;
if (S_ISREG(ip->i_mode) && (ji->fileset == FILESYSTEM_I)) {
ag = BLKTOAG(daddr, sbi);
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag == -1) {
atomic_inc(&bmp->db_active[ag]);
ji->active_ag = ag;
} else if (ji->active_ag != ag) {
atomic_dec(&bmp->db_active[ji->active_ag]);
atomic_inc(&bmp->db_active[ag]);
ji->active_ag = ag;
}
spin_unlock_irq(&ji->ag_lock);
}
return (0);
}
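/*
 * Worked example (editor's addition, not part of the original source):
 * with nbperpage == 8 and a request for *nblocks == 24, the retry loop
 * in extBalloc() attempts allocations of
 *
 *	24 -> min(24, extRoundDown(24)) == 16 -> min(24, extRoundDown(16)) == 8
 *
 * and gives up with -ENOSPC once the next size would drop below
 * nbperpage, since an extent smaller than a page is of no use here.
 */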
/*
* NAME: extRoundDown()
*
* FUNCTION: round down a specified number of blocks to the next
* smallest power of 2 number.
*
* PARAMETERS:
 * nb - number of blocks to round down.
*
* RETURN VALUES:
* next smallest power of 2 number.
*/
static s64 extRoundDown(s64 nb)
{
int i;
u64 m, k;
for (i = 0, m = (u64) 1 << 63; i < 64; i++, m >>= 1) {
if (m & nb)
break;
}
i = 63 - i;
k = (u64) 1 << i;
k = ((k - 1) & nb) ? k : k >> 1;
return (k);
}
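/*
 * Worked examples (editor's addition, not part of the original source)
 * for extRoundDown(): a power of two steps down one level, anything
 * else rounds to its highest set bit:
 *
 *	extRoundDown(16) == 8
 *	extRoundDown(24) == 16
 *	extRoundDown(7)  == 4
 */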
| linux-master | fs/jfs/jfs_extent.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
struct inode *inode;
int ret;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ret = diRead(inode);
if (ret < 0) {
iget_failed(inode);
return ERR_PTR(ret);
}
if (S_ISREG(inode->i_mode)) {
inode->i_op = &jfs_file_inode_operations;
inode->i_fop = &jfs_file_operations;
inode->i_mapping->a_ops = &jfs_aops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &jfs_dir_inode_operations;
inode->i_fop = &jfs_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
if (inode->i_size >= IDATASIZE) {
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &jfs_aops;
} else {
inode->i_op = &jfs_fast_symlink_inode_operations;
inode->i_link = JFS_IP(inode)->i_inline;
/*
* The inline data should be null-terminated, but
* don't let on-disk corruption crash the kernel
*/
inode->i_link[inode->i_size] = '\0';
}
} else {
inode->i_op = &jfs_file_inode_operations;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
unlock_new_inode(inode);
return inode;
}
/*
* Workhorse of both fsync & write_inode
*/
int jfs_commit_inode(struct inode *inode, int wait)
{
int rc = 0;
tid_t tid;
static int noisy = 5;
jfs_info("In jfs_commit_inode, inode = 0x%p", inode);
/*
* Don't commit if inode has been committed since last being
* marked dirty, or if it has been deleted.
*/
if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
return 0;
if (isReadOnly(inode)) {
/* kernel allows writes to devices on read-only
* partitions and may think inode is dirty
*/
if (!special_file(inode->i_mode) && noisy) {
jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
inode);
jfs_err("Is remount racy?");
noisy--;
}
return 0;
}
tid = txBegin(inode->i_sb, COMMIT_INODE);
mutex_lock(&JFS_IP(inode)->commit_mutex);
/*
* Retest inode state after taking commit_mutex
*/
if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
txEnd(tid);
mutex_unlock(&JFS_IP(inode)->commit_mutex);
return rc;
}
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int wait = wbc->sync_mode == WB_SYNC_ALL;
if (inode->i_nlink == 0)
return 0;
/*
* If COMMIT_DIRTY is not set, the inode isn't really dirty.
* It has been committed since the last change, but was still
* on the dirty inode list.
*/
if (!test_cflag(COMMIT_Dirty, inode)) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
return 0;
}
if (jfs_commit_inode(inode, wait)) {
jfs_err("jfs_write_inode: jfs_commit_inode failed!");
return -EIO;
} else
return 0;
}
void jfs_evict_inode(struct inode *inode)
{
struct jfs_inode_info *ji = JFS_IP(inode);
jfs_info("In jfs_evict_inode, inode = 0x%p", inode);
if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
truncate_inode_pages_final(&inode->i_data);
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
if (ipimap && JFS_IP(ipimap)->i_imap)
diFree(inode);
/*
* Free the inode from the quota allocation.
*/
dquot_free_inode(inode);
}
} else {
truncate_inode_pages_final(&inode->i_data);
}
clear_inode(inode);
dquot_drop(inode);
BUG_ON(!list_empty(&ji->anon_inode_list));
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag != -1) {
struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
atomic_dec(&bmap->db_active[ji->active_ag]);
ji->active_ag = -1;
}
spin_unlock_irq(&ji->ag_lock);
}
void jfs_dirty_inode(struct inode *inode, int flags)
{
static int noisy = 5;
if (isReadOnly(inode)) {
if (!special_file(inode->i_mode) && noisy) {
/* kernel allows writes to devices on read-only
* partitions and may try to mark inode dirty
*/
jfs_err("jfs_dirty_inode called on read-only volume");
jfs_err("Is remount racy?");
noisy--;
}
return;
}
set_cflag(COMMIT_Dirty, inode);
}
int jfs_get_block(struct inode *ip, sector_t lblock,
struct buffer_head *bh_result, int create)
{
s64 lblock64 = lblock;
int rc = 0;
xad_t xad;
s64 xaddr;
int xflag;
s32 xlen = bh_result->b_size >> ip->i_blkbits;
/*
* Take appropriate lock on inode
*/
if (create)
IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
else
IREAD_LOCK(ip, RDWRLOCK_NORMAL);
if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
(!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
xaddr) {
if (xflag & XAD_NOTRECORDED) {
if (!create)
/*
* Allocated but not recorded, read treats
* this as a hole
*/
goto unlock;
XADoffset(&xad, lblock64);
XADlength(&xad, xlen);
XADaddress(&xad, xaddr);
rc = extRecord(ip, &xad);
if (rc)
goto unlock;
set_buffer_new(bh_result);
}
map_bh(bh_result, ip->i_sb, xaddr);
bh_result->b_size = xlen << ip->i_blkbits;
goto unlock;
}
if (!create)
goto unlock;
/*
* Allocate a new block
*/
if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
goto unlock;
rc = extAlloc(ip, xlen, lblock64, &xad, false);
if (rc)
goto unlock;
set_buffer_new(bh_result);
map_bh(bh_result, ip->i_sb, addressXAD(&xad));
bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
unlock:
/*
* Release lock on inode
*/
if (create)
IWRITE_UNLOCK(ip);
else
IREAD_UNLOCK(ip);
return rc;
}
static int jfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, jfs_get_block);
}
static int jfs_read_folio(struct file *file, struct folio *folio)
{
return mpage_read_folio(folio, jfs_get_block);
}
static void jfs_readahead(struct readahead_control *rac)
{
mpage_readahead(rac, jfs_get_block);
}
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
if (to > inode->i_size) {
truncate_pagecache(inode, inode->i_size);
jfs_truncate(inode);
}
}
static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);
return ret;
}
static int jfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, struct page *page,
void *fsdata)
{
int ret;
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (ret < len)
jfs_write_failed(mapping, pos + len);
return ret;
}
static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, jfs_get_block);
}
static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
if (end > isize)
jfs_write_failed(mapping, end);
}
return ret;
}
const struct address_space_operations jfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = jfs_read_folio,
.readahead = jfs_readahead,
.writepages = jfs_writepages,
.write_begin = jfs_write_begin,
.write_end = jfs_write_end,
.bmap = jfs_bmap,
.direct_IO = jfs_direct_IO,
.migrate_folio = buffer_migrate_folio,
};
/*
* Guts of jfs_truncate. Called with locks already held. Can be called
* with directory for truncating directory index table.
*/
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
loff_t newsize;
tid_t tid;
ASSERT(length >= 0);
if (test_cflag(COMMIT_Nolink, ip)) {
xtTruncate(0, ip, length, COMMIT_WMAP);
return;
}
do {
tid = txBegin(ip->i_sb, 0);
/*
* The commit_mutex cannot be taken before txBegin.
* txBegin may block and there is a chance the inode
* could be marked dirty and need to be committed
* before txBegin unblocks
*/
mutex_lock(&JFS_IP(ip)->commit_mutex);
newsize = xtTruncate(tid, ip, length,
COMMIT_TRUNCATE | COMMIT_PWMAP);
if (newsize < 0) {
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
break;
}
ip->i_mtime = inode_set_ctime_current(ip);
mark_inode_dirty(ip);
txCommit(tid, 1, &ip, 0);
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
} while (newsize > length); /* Truncate isn't always atomic */
}
void jfs_truncate(struct inode *ip)
{
jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
jfs_truncate_nolock(ip, ip->i_size);
IWRITE_UNLOCK(ip);
}
| linux-master | fs/jfs/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/jfs/ioctl.c
*
* Copyright (C) 2006 Herbert Poetzl
* adapted from Remy Card's ext2/ioctl.c
*/
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/fileattr.h>
#include "jfs_filsys.h"
#include "jfs_debug.h"
#include "jfs_incore.h"
#include "jfs_dinode.h"
#include "jfs_inode.h"
#include "jfs_dmap.h"
#include "jfs_discard.h"
static struct {
long jfs_flag;
long ext2_flag;
} jfs_map[] = {
{JFS_NOATIME_FL, FS_NOATIME_FL},
{JFS_DIRSYNC_FL, FS_DIRSYNC_FL},
{JFS_SYNC_FL, FS_SYNC_FL},
{JFS_SECRM_FL, FS_SECRM_FL},
{JFS_UNRM_FL, FS_UNRM_FL},
{JFS_APPEND_FL, FS_APPEND_FL},
{JFS_IMMUTABLE_FL, FS_IMMUTABLE_FL},
{0, 0},
};
static long jfs_map_ext2(unsigned long flags, int from)
{
	int index = 0;
	long mapped = 0;
while (jfs_map[index].jfs_flag) {
if (from) {
if (jfs_map[index].ext2_flag & flags)
mapped |= jfs_map[index].jfs_flag;
} else {
if (jfs_map[index].jfs_flag & flags)
mapped |= jfs_map[index].ext2_flag;
}
index++;
}
return mapped;
}
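/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the same table maps in both directions, selected by 'from'.
 */
#if 0	/* example only; never compiled */
	/* ext2-style flags from user space -> JFS on-disk flags */
	long jflags = jfs_map_ext2(FS_NOATIME_FL | FS_SYNC_FL, 1);
	/* JFS on-disk flags -> ext2-style flags for fileattr_get */
	long eflags = jfs_map_ext2(JFS_IMMUTABLE_FL | JFS_APPEND_FL, 0);
#endif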
int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
struct jfs_inode_info *jfs_inode = JFS_IP(d_inode(dentry));
unsigned int flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
if (d_is_special(dentry))
return -ENOTTY;
fileattr_fill_flags(fa, jfs_map_ext2(flags, 0));
return 0;
}
int jfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
unsigned int flags;
if (d_is_special(dentry))
return -ENOTTY;
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
flags = jfs_map_ext2(fa->flags, 1);
if (!S_ISDIR(inode->i_mode))
flags &= ~JFS_DIRSYNC_FL;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
flags = flags & JFS_FL_USER_MODIFIABLE;
flags |= jfs_inode->mode2 & ~JFS_FL_USER_MODIFIABLE;
jfs_inode->mode2 = flags;
jfs_set_inode_flags(inode);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
}
long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
switch (cmd) {
case FITRIM:
{
struct super_block *sb = inode->i_sb;
struct fstrim_range range;
s64 ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!bdev_max_discard_sectors(sb->s_bdev)) {
jfs_warn("FITRIM not supported on device");
return -EOPNOTSUPP;
}
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
sizeof(range)))
return -EFAULT;
range.minlen = max_t(unsigned int, range.minlen,
bdev_discard_granularity(sb->s_bdev));
ret = jfs_ioc_trim(inode, &range);
if (ret < 0)
return ret;
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
return 0;
}
default:
return -ENOTTY;
}
}
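/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a minimal user-space caller for the FITRIM path handled
 * above; struct fstrim_range and FITRIM come from <linux/fs.h>.
 */
#if 0	/* user-space example only; never compiled here */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int trim_whole_fs(const char *mountpoint)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* trim the whole filesystem */
		.minlen = 0,	/* kernel raises this to the device's
				 * discard granularity */
	};
	int fd = open(mountpoint, O_RDONLY);
	int rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, FITRIM, &range);	/* fails without discard support */
	close(fd);
	/* on success, range.len holds the number of bytes trimmed */
	return rc;
}
#endif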
| linux-master | fs/jfs/ioctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2005
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
/*
* jfs_txnmgr.c: transaction manager
*
* notes:
* transaction starts with txBegin() and ends with txCommit()
* or txAbort().
*
* tlock is acquired at the time of update;
* (obviate scan at commit time for xtree and dtree)
* tlock and mp points to each other;
* (no hashlist for mp -> tlock).
*
* special cases:
* tlock on in-memory inode:
* in-place tlock in the in-memory inode itself;
* converted to page lock by iWrite() at commit time.
*
* tlock during write()/mmap() under anonymous transaction (tid = 0):
* transferred (?) to transaction at commit time.
*
* use the page itself to update allocation maps
* (obviate intermediate replication of allocation/deallocation data)
* hold on to mp+lock thru update of maps
*/
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
* transaction management structures
*/
static struct {
int freetid; /* index of a free tid structure */
int freelock; /* index first free lock word */
wait_queue_head_t freewait; /* eventlist of free tblock */
wait_queue_head_t freelockwait; /* eventlist of free tlock */
wait_queue_head_t lowlockwait; /* eventlist of ample tlocks */
int tlocksInUse; /* Number of tlocks in use */
spinlock_t LazyLock; /* synchronize sync_queue & unlock_queue */
/* struct tblock *sync_queue; * Transactions waiting for data sync */
struct list_head unlock_queue; /* Txns waiting to be released */
struct list_head anon_list; /* inodes having anonymous txns */
struct list_head anon_list2; /* inodes having anonymous txns
that couldn't be sync'ed */
} TxAnchor;
int jfs_tlocks_low; /* Indicates low number of available tlocks */
#ifdef CONFIG_JFS_STATISTICS
static struct {
uint txBegin;
uint txBegin_barrier;
uint txBegin_lockslow;
uint txBegin_freetid;
uint txBeginAnon;
uint txBeginAnon_barrier;
uint txBeginAnon_lockslow;
uint txLockAlloc;
uint txLockAlloc_freelock;
} TxStat;
#endif
static int nTxBlock = -1; /* number of transaction blocks */
module_param(nTxBlock, int, 0);
MODULE_PARM_DESC(nTxBlock,
"Number of transaction blocks (max:65536)");
static int nTxLock = -1; /* number of transaction locks */
module_param(nTxLock, int, 0);
MODULE_PARM_DESC(nTxLock,
"Number of transaction locks (max:65536)");
struct tblock *TxBlock; /* transaction block table */
static int TxLockLWM; /* Low water mark for number of txLocks used */
static int TxLockHWM; /* High water mark for number of txLocks used */
static int TxLockVHWM; /* Very High water mark */
struct tlock *TxLock; /* transaction lock table */
/*
* transaction management lock
*/
static DEFINE_SPINLOCK(jfsTxnLock);
#define TXN_LOCK() spin_lock(&jfsTxnLock)
#define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
#define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock)
#define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
static int jfs_commit_thread_waking;
/*
 * Retry logic exists outside these macros to protect against spurious wakeups.
*/
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(event, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
TXN_UNLOCK();
io_schedule();
remove_wait_queue(event, &wait);
}
#define TXN_SLEEP(event)\
{\
TXN_SLEEP_DROP_LOCK(event);\
TXN_LOCK();\
}
#define TXN_WAKEUP(event) wake_up_all(event)
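/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the caller-side retry pattern these macros assume.
 * TXN_SLEEP() drops and re-takes jfsTxnLock, so the condition must be
 * re-tested after every wakeup; a spurious wakeup simply loops again.
 */
#if 0	/* example only; never compiled */
	lid_t lid;

	TXN_LOCK();
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);	/* re-test on wake */
	/* ... use lid with jfsTxnLock still held ... */
	TXN_UNLOCK();
#endif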
/*
* statistics
*/
static struct {
tid_t maxtid; /* 4: biggest tid ever used */
lid_t maxlid; /* 4: biggest lid ever used */
int ntid; /* 4: # of transactions performed */
int nlid; /* 4: # of tlocks acquired */
int waitlock; /* 4: # of tlock wait */
} stattx;
/*
* forward references
*/
static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
struct tlock *tlck, struct commit *cd);
static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
struct tlock *tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk);
static void txForce(struct tblock * tblk);
static void txLog(struct jfs_log *log, struct tblock *tblk,
struct commit *cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
* transaction block/lock management
* ---------------------------------
*/
/*
* Get a transaction lock from the free list. If the number in use is
* greater than the high water mark, wake up the sync daemon. This should
* free some anonymous transaction locks. (TXN_LOCK must be held.)
*/
static lid_t txLockAlloc(void)
{
lid_t lid;
INCREMENT(TxStat.txLockAlloc);
if (!TxAnchor.freelock) {
INCREMENT(TxStat.txLockAlloc_freelock);
}
while (!(lid = TxAnchor.freelock))
TXN_SLEEP(&TxAnchor.freelockwait);
TxAnchor.freelock = TxLock[lid].next;
HIGHWATERMARK(stattx.maxlid, lid);
if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
jfs_info("txLockAlloc tlocks low");
jfs_tlocks_low = 1;
wake_up_process(jfsSyncThread);
}
return lid;
}
static void txLockFree(lid_t lid)
{
TxLock[lid].tid = 0;
TxLock[lid].next = TxAnchor.freelock;
TxAnchor.freelock = lid;
TxAnchor.tlocksInUse--;
if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
jfs_info("txLockFree jfs_tlocks_low no more");
jfs_tlocks_low = 0;
TXN_WAKEUP(&TxAnchor.lowlockwait);
}
TXN_WAKEUP(&TxAnchor.freelockwait);
}
/*
* NAME: txInit()
*
* FUNCTION: initialize transaction management structures
*
* RETURN:
*
* serialization: single thread at jfs_init()
*/
int txInit(void)
{
int k, size;
struct sysinfo si;
/* Set defaults for nTxLock and nTxBlock if unset */
if (nTxLock == -1) {
if (nTxBlock == -1) {
/* Base default on memory size */
si_meminfo(&si);
if (si.totalram > (256 * 1024)) /* 1 GB */
nTxLock = 64 * 1024;
else
nTxLock = si.totalram >> 2;
} else if (nTxBlock > (8 * 1024))
nTxLock = 64 * 1024;
else
nTxLock = nTxBlock << 3;
}
if (nTxBlock == -1)
nTxBlock = nTxLock >> 3;
/* Verify tunable parameters */
if (nTxBlock < 16)
nTxBlock = 16; /* No one should set it this low */
if (nTxBlock > 65536)
nTxBlock = 65536;
if (nTxLock < 256)
nTxLock = 256; /* No one should set it this low */
if (nTxLock > 65536)
nTxLock = 65536;
printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
nTxBlock, nTxLock);
/*
* initialize transaction block (tblock) table
*
* transaction id (tid) = tblock index
* tid = 0 is reserved.
*/
TxLockLWM = (nTxLock * 4) / 10;
TxLockHWM = (nTxLock * 7) / 10;
TxLockVHWM = (nTxLock * 8) / 10;
size = sizeof(struct tblock) * nTxBlock;
TxBlock = vmalloc(size);
if (TxBlock == NULL)
return -ENOMEM;
for (k = 1; k < nTxBlock - 1; k++) {
TxBlock[k].next = k + 1;
init_waitqueue_head(&TxBlock[k].gcwait);
init_waitqueue_head(&TxBlock[k].waitor);
}
TxBlock[k].next = 0;
init_waitqueue_head(&TxBlock[k].gcwait);
init_waitqueue_head(&TxBlock[k].waitor);
TxAnchor.freetid = 1;
init_waitqueue_head(&TxAnchor.freewait);
stattx.maxtid = 1; /* statistics */
/*
* initialize transaction lock (tlock) table
*
* transaction lock id = tlock index
* tlock id = 0 is reserved.
*/
size = sizeof(struct tlock) * nTxLock;
TxLock = vmalloc(size);
if (TxLock == NULL) {
vfree(TxBlock);
return -ENOMEM;
}
/* initialize tlock table */
for (k = 1; k < nTxLock - 1; k++)
TxLock[k].next = k + 1;
TxLock[k].next = 0;
init_waitqueue_head(&TxAnchor.freelockwait);
init_waitqueue_head(&TxAnchor.lowlockwait);
TxAnchor.freelock = 1;
TxAnchor.tlocksInUse = 0;
INIT_LIST_HEAD(&TxAnchor.anon_list);
INIT_LIST_HEAD(&TxAnchor.anon_list2);
LAZY_LOCK_INIT();
INIT_LIST_HEAD(&TxAnchor.unlock_queue);
stattx.maxlid = 1; /* statistics */
return 0;
}
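/*
 * Worked example (editor's addition, not part of the original source):
 * with neither module parameter set on a 512 MiB machine (131072
 * 4 KiB pages), the defaults above resolve to
 *
 *	nTxLock  = totalram >> 2 = 32768
 *	nTxBlock = nTxLock >> 3  = 4096
 *	TxLockLWM / HWM / VHWM   = 13107 / 22937 / 26214
 */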
/*
* NAME: txExit()
*
* FUNCTION: clean up when module is unloaded
*/
void txExit(void)
{
vfree(TxLock);
TxLock = NULL;
vfree(TxBlock);
TxBlock = NULL;
}
/*
* NAME: txBegin()
*
* FUNCTION: start a transaction.
*
* PARAMETER: sb - superblock
* flag - force for nested tx;
*
* RETURN: tid - transaction id
*
 * note: the force flag allows starting a tx for a nested tx
* to prevent deadlock on logsync barrier;
*/
tid_t txBegin(struct super_block *sb, int flag)
{
tid_t t;
struct tblock *tblk;
struct jfs_log *log;
jfs_info("txBegin: flag = 0x%x", flag);
log = JFS_SBI(sb)->log;
if (!log) {
jfs_error(sb, "read-only filesystem\n");
return 0;
}
TXN_LOCK();
INCREMENT(TxStat.txBegin);
retry:
if (!(flag & COMMIT_FORCE)) {
/*
* synchronize with logsync barrier
*/
if (test_bit(log_SYNCBARRIER, &log->flag) ||
test_bit(log_QUIESCE, &log->flag)) {
INCREMENT(TxStat.txBegin_barrier);
TXN_SLEEP(&log->syncwait);
goto retry;
}
}
if (flag == 0) {
/*
* Don't begin transaction if we're getting starved for tlocks
* unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
* free tlocks)
*/
if (TxAnchor.tlocksInUse > TxLockVHWM) {
INCREMENT(TxStat.txBegin_lockslow);
TXN_SLEEP(&TxAnchor.lowlockwait);
goto retry;
}
}
/*
* allocate transaction id/block
*/
if ((t = TxAnchor.freetid) == 0) {
jfs_info("txBegin: waiting for free tid");
INCREMENT(TxStat.txBegin_freetid);
TXN_SLEEP(&TxAnchor.freewait);
goto retry;
}
tblk = tid_to_tblock(t);
if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
/* Don't let a non-forced transaction take the last tblk */
jfs_info("txBegin: waiting for free tid");
INCREMENT(TxStat.txBegin_freetid);
TXN_SLEEP(&TxAnchor.freewait);
goto retry;
}
TxAnchor.freetid = tblk->next;
/*
* initialize transaction
*/
/*
* We can't zero the whole thing or we screw up another thread being
* awakened after sleeping on tblk->waitor
*
* memset(tblk, 0, sizeof(struct tblock));
*/
tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
tblk->sb = sb;
++log->logtid;
tblk->logtid = log->logtid;
++log->active;
HIGHWATERMARK(stattx.maxtid, t); /* statistics */
INCREMENT(stattx.ntid); /* statistics */
TXN_UNLOCK();
jfs_info("txBegin: returning tid = %d", t);
return t;
}
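/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the typical transaction lifecycle around txBegin(),
 * mirroring callers such as duplicateIXtree() in jfs_imap.c;
 * do_update() stands in for any tlock-generating modification.
 */
#if 0	/* example only; never compiled */
	tid_t tid = txBegin(sb, COMMIT_FORCE);

	if (do_update(tid))		/* hypothetical update step */
		txAbort(tid, 1);	/* back out on failure */
	else
		txCommit(tid, 1, &ip, COMMIT_FORCE);
	txEnd(tid);			/* always release the tblock */
#endif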
/*
* NAME: txBeginAnon()
*
* FUNCTION: start an anonymous transaction.
* Blocks if logsync or available tlocks are low to prevent
* anonymous tlocks from depleting supply.
*
* PARAMETER: sb - superblock
*
* RETURN: none
*/
void txBeginAnon(struct super_block *sb)
{
struct jfs_log *log;
log = JFS_SBI(sb)->log;
TXN_LOCK();
INCREMENT(TxStat.txBeginAnon);
retry:
/*
* synchronize with logsync barrier
*/
if (test_bit(log_SYNCBARRIER, &log->flag) ||
test_bit(log_QUIESCE, &log->flag)) {
INCREMENT(TxStat.txBeginAnon_barrier);
TXN_SLEEP(&log->syncwait);
goto retry;
}
/*
* Don't begin transaction if we're getting starved for tlocks
*/
if (TxAnchor.tlocksInUse > TxLockVHWM) {
INCREMENT(TxStat.txBeginAnon_lockslow);
TXN_SLEEP(&TxAnchor.lowlockwait);
goto retry;
}
TXN_UNLOCK();
}
/*
* txEnd()
*
* function: free specified transaction block.
*
* logsync barrier processing:
*
* serialization:
*/
void txEnd(tid_t tid)
{
struct tblock *tblk = tid_to_tblock(tid);
struct jfs_log *log;
jfs_info("txEnd: tid = %d", tid);
TXN_LOCK();
/*
* wakeup transactions waiting on the page locked
* by the current transaction
*/
TXN_WAKEUP(&tblk->waitor);
log = JFS_SBI(tblk->sb)->log;
/*
* Lazy commit thread can't free this guy until we mark it UNLOCKED,
* otherwise, we would be left with a transaction that may have been
* reused.
*
* Lazy commit thread will turn off tblkGC_LAZY before calling this
* routine.
*/
if (tblk->flag & tblkGC_LAZY) {
jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
TXN_UNLOCK();
spin_lock_irq(&log->gclock); // LOGGC_LOCK
tblk->flag |= tblkGC_UNLOCKED;
spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
return;
}
jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
assert(tblk->next == 0);
/*
* insert tblock back on freelist
*/
tblk->next = TxAnchor.freetid;
TxAnchor.freetid = tid;
/*
* mark the tblock not active
*/
if (--log->active == 0) {
clear_bit(log_FLUSH, &log->flag);
/*
* synchronize with logsync barrier
*/
if (test_bit(log_SYNCBARRIER, &log->flag)) {
TXN_UNLOCK();
/* write dirty metadata & forward log syncpt */
jfs_syncpt(log, 1);
jfs_info("log barrier off: 0x%x", log->lsn);
/* enable new transactions start */
clear_bit(log_SYNCBARRIER, &log->flag);
/* wakeup all waitors for logsync barrier */
TXN_WAKEUP(&log->syncwait);
goto wakeup;
}
}
TXN_UNLOCK();
wakeup:
/*
* wakeup all waitors for a free tblock
*/
TXN_WAKEUP(&TxAnchor.freewait);
}
/*
* txLock()
*
* function: acquire a transaction lock on the specified <mp>
*
* parameter:
*
* return: transaction lock id
*
* serialization:
*/
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
int type)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int dir_xtree = 0;
lid_t lid;
tid_t xtid;
struct tlock *tlck;
struct xtlock *xtlck;
struct linelock *linelock;
xtpage_t *p;
struct tblock *tblk;
TXN_LOCK();
if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
!(mp->xflag & COMMIT_PAGE)) {
/*
* Directory inode is special. It can have both an xtree tlock
* and a dtree tlock associated with it.
*/
dir_xtree = 1;
lid = jfs_ip->xtlid;
} else
lid = mp->lid;
/* is page not locked by a transaction ? */
if (lid == 0)
goto allocateLock;
jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
/* is page locked by the requester transaction ? */
tlck = lid_to_tlock(lid);
if ((xtid = tlck->tid) == tid) {
TXN_UNLOCK();
goto grantLock;
}
/*
* is page locked by anonymous transaction/lock ?
*
* (page update without transaction (i.e., file write) is
* locked under anonymous transaction tid = 0:
* anonymous tlocks maintained on anonymous tlock list of
* the inode of the page and available to all anonymous
* transactions until txCommit() time at which point
* they are transferred to the transaction tlock list of
* the committing transaction of the inode)
*/
if (xtid == 0) {
tlck->tid = tid;
TXN_UNLOCK();
tblk = tid_to_tblock(tid);
/*
* The order of the tlocks in the transaction is important
* (during truncate, child xtree pages must be freed before
* parent's tlocks change the working map).
* Take tlock off anonymous list and add to tail of
* transaction list
*
* Note: We really need to get rid of the tid & lid and
* use list_head's. This code is getting UGLY!
*/
if (jfs_ip->atlhead == lid) {
if (jfs_ip->atltail == lid) {
/* only anonymous txn.
* Remove from anon_list
*/
TXN_LOCK();
list_del_init(&jfs_ip->anon_inode_list);
TXN_UNLOCK();
}
jfs_ip->atlhead = tlck->next;
} else {
lid_t last;
for (last = jfs_ip->atlhead;
lid_to_tlock(last)->next != lid;
last = lid_to_tlock(last)->next) {
assert(last);
}
lid_to_tlock(last)->next = tlck->next;
if (jfs_ip->atltail == lid)
jfs_ip->atltail = last;
}
/* insert the tlock at tail of transaction tlock list */
if (tblk->next)
lid_to_tlock(tblk->last)->next = lid;
else
tblk->next = lid;
tlck->next = 0;
tblk->last = lid;
goto grantLock;
}
goto waitLock;
/*
* allocate a tlock
*/
allocateLock:
lid = txLockAlloc();
tlck = lid_to_tlock(lid);
/*
* initialize tlock
*/
tlck->tid = tid;
TXN_UNLOCK();
/* mark tlock for meta-data page */
if (mp->xflag & COMMIT_PAGE) {
tlck->flag = tlckPAGELOCK;
/* mark the page dirty and nohomeok */
metapage_nohomeok(mp);
jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
mp, mp->nohomeok, tid, tlck);
/* if anonymous transaction, and buffer is on the group
* commit synclist, mark inode to show this. This will
* prevent the buffer from being marked nohomeok for too
* long a time.
*/
if ((tid == 0) && mp->lsn)
set_cflag(COMMIT_Synclist, ip);
}
/* mark tlock for in-memory inode */
else
tlck->flag = tlckINODELOCK;
if (S_ISDIR(ip->i_mode))
tlck->flag |= tlckDIRECTORY;
tlck->type = 0;
/* bind the tlock and the page */
tlck->ip = ip;
tlck->mp = mp;
if (dir_xtree)
jfs_ip->xtlid = lid;
else
mp->lid = lid;
/*
* enqueue transaction lock to transaction/inode
*/
/* insert the tlock at tail of transaction tlock list */
if (tid) {
tblk = tid_to_tblock(tid);
if (tblk->next)
lid_to_tlock(tblk->last)->next = lid;
else
tblk->next = lid;
tlck->next = 0;
tblk->last = lid;
}
/* anonymous transaction:
* insert the tlock at head of inode anonymous tlock list
*/
else {
tlck->next = jfs_ip->atlhead;
jfs_ip->atlhead = lid;
if (tlck->next == 0) {
/* This inode's first anonymous transaction */
jfs_ip->atltail = lid;
TXN_LOCK();
list_add_tail(&jfs_ip->anon_inode_list,
&TxAnchor.anon_list);
TXN_UNLOCK();
}
}
/* initialize type dependent area for linelock */
linelock = (struct linelock *) & tlck->lock;
linelock->next = 0;
linelock->flag = tlckLINELOCK;
linelock->maxcnt = TLOCKSHORT;
linelock->index = 0;
switch (type & tlckTYPE) {
case tlckDTREE:
linelock->l2linesize = L2DTSLOTSIZE;
break;
case tlckXTREE:
linelock->l2linesize = L2XTSLOTSIZE;
xtlck = (struct xtlock *) linelock;
xtlck->header.offset = 0;
xtlck->header.length = 2;
if (type & tlckNEW) {
xtlck->lwm.offset = XTENTRYSTART;
} else {
if (mp->xflag & COMMIT_PAGE)
p = (xtpage_t *) mp->data;
else
p = &jfs_ip->i_xtroot;
xtlck->lwm.offset =
le16_to_cpu(p->header.nextindex);
}
xtlck->lwm.length = 0; /* ! */
xtlck->twm.offset = 0;
xtlck->hwm.offset = 0;
xtlck->index = 2;
break;
case tlckINODE:
linelock->l2linesize = L2INODESLOTSIZE;
break;
case tlckDATA:
linelock->l2linesize = L2DATASLOTSIZE;
break;
default:
jfs_err("UFO tlock:0x%p", tlck);
}
/*
* update tlock vector
*/
grantLock:
tlck->type |= type;
return tlck;
/*
* page is being locked by another transaction:
*/
waitLock:
/* Only locks on ipimap or ipaimap should reach here */
/* assert(jfs_ip->fileset == AGGREGATE_I); */
if (jfs_ip->fileset != AGGREGATE_I) {
printk(KERN_ERR "txLock: trying to lock locked page!");
print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
ip, sizeof(*ip), 0);
print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
mp, sizeof(*mp), 0);
print_hex_dump(KERN_ERR, "Locker's tblock: ",
DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
sizeof(struct tblock), 0);
print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
tlck, sizeof(*tlck), 0);
BUG();
}
INCREMENT(stattx.waitlock); /* statistics */
TXN_UNLOCK();
release_metapage(mp);
TXN_LOCK();
xtid = tlck->tid; /* reacquire after dropping TXN_LOCK */
jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
tid, xtid, lid);
/* Recheck everything since dropping TXN_LOCK */
if (xtid && (tlck->mp == mp) && (mp->lid == lid))
TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
else
TXN_UNLOCK();
jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);
return NULL;
}
/*
* NAME: txRelease()
*
* FUNCTION: Release buffers associated with transaction locks, but don't
 *	mark homeok yet. This allows other transactions to modify
 *	buffers, but won't let them go to disk until the commit record
* actually gets written.
*
* PARAMETER:
* tblk -
*
* RETURN: Errors from subroutines.
*/
static void txRelease(struct tblock * tblk)
{
struct metapage *mp;
lid_t lid;
struct tlock *tlck;
TXN_LOCK();
for (lid = tblk->next; lid; lid = tlck->next) {
tlck = lid_to_tlock(lid);
if ((mp = tlck->mp) != NULL &&
(tlck->type & tlckBTROOT) == 0) {
assert(mp->xflag & COMMIT_PAGE);
mp->lid = 0;
}
}
/*
* wakeup transactions waiting on a page locked
* by the current transaction
*/
TXN_WAKEUP(&tblk->waitor);
TXN_UNLOCK();
}
/*
* NAME: txUnlock()
*
* FUNCTION: Initiates pageout of pages modified by tid in journalled
* objects and frees their lockwords.
*/
static void txUnlock(struct tblock * tblk)
{
struct tlock *tlck;
struct linelock *linelock;
lid_t lid, next, llid, k;
struct metapage *mp;
struct jfs_log *log;
int difft, diffp;
unsigned long flags;
jfs_info("txUnlock: tblk = 0x%p", tblk);
log = JFS_SBI(tblk->sb)->log;
/*
* mark page under tlock homeok (its log has been written):
*/
for (lid = tblk->next; lid; lid = next) {
tlck = lid_to_tlock(lid);
next = tlck->next;
jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
/* unbind page from tlock */
if ((mp = tlck->mp) != NULL &&
(tlck->type & tlckBTROOT) == 0) {
assert(mp->xflag & COMMIT_PAGE);
/* hold buffer
*/
hold_metapage(mp);
assert(mp->nohomeok > 0);
_metapage_homeok(mp);
/* inherit younger/larger clsn */
LOGSYNC_LOCK(log, flags);
if (mp->clsn) {
logdiff(difft, tblk->clsn, log);
logdiff(diffp, mp->clsn, log);
if (difft > diffp)
mp->clsn = tblk->clsn;
} else
mp->clsn = tblk->clsn;
LOGSYNC_UNLOCK(log, flags);
assert(!(tlck->flag & tlckFREEPAGE));
put_metapage(mp);
}
/* insert tlock, and linelock(s) of the tlock if any,
* at head of freelist
*/
TXN_LOCK();
llid = ((struct linelock *) & tlck->lock)->next;
while (llid) {
linelock = (struct linelock *) lid_to_tlock(llid);
k = linelock->next;
txLockFree(llid);
llid = k;
}
txLockFree(lid);
TXN_UNLOCK();
}
tblk->next = tblk->last = 0;
/*
* remove tblock from logsynclist
* (allocation map pages inherited lsn of tblk and
* has been inserted in logsync list at txUpdateMap())
*/
if (tblk->lsn) {
LOGSYNC_LOCK(log, flags);
log->count--;
list_del(&tblk->synclist);
LOGSYNC_UNLOCK(log, flags);
}
}
/*
* txMaplock()
*
* function: allocate a transaction lock for freed page/entry;
* for freed page, maplock is used as xtlock/dtlock type;
*/
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
lid_t lid;
struct tblock *tblk;
struct tlock *tlck;
struct maplock *maplock;
TXN_LOCK();
/*
* allocate a tlock
*/
lid = txLockAlloc();
tlck = lid_to_tlock(lid);
/*
* initialize tlock
*/
tlck->tid = tid;
/* bind the tlock and the object */
tlck->flag = tlckINODELOCK;
if (S_ISDIR(ip->i_mode))
tlck->flag |= tlckDIRECTORY;
tlck->ip = ip;
tlck->mp = NULL;
tlck->type = type;
/*
* enqueue transaction lock to transaction/inode
*/
/* insert the tlock at tail of transaction tlock list */
if (tid) {
tblk = tid_to_tblock(tid);
if (tblk->next)
lid_to_tlock(tblk->last)->next = lid;
else
tblk->next = lid;
tlck->next = 0;
tblk->last = lid;
}
/* anonymous transaction:
* insert the tlock at head of inode anonymous tlock list
*/
else {
tlck->next = jfs_ip->atlhead;
jfs_ip->atlhead = lid;
if (tlck->next == 0) {
/* This inode's first anonymous transaction */
jfs_ip->atltail = lid;
list_add_tail(&jfs_ip->anon_inode_list,
&TxAnchor.anon_list);
}
}
TXN_UNLOCK();
/* initialize type dependent area for maplock */
maplock = (struct maplock *) & tlck->lock;
maplock->next = 0;
maplock->maxcnt = 0;
maplock->index = 0;
return tlck;
}
/*
* txLinelock()
*
* function: allocate a transaction lock for log vector list
*/
struct linelock *txLinelock(struct linelock * tlock)
{
lid_t lid;
struct tlock *tlck;
struct linelock *linelock;
TXN_LOCK();
/* allocate a TxLock structure */
lid = txLockAlloc();
tlck = lid_to_tlock(lid);
TXN_UNLOCK();
/* initialize linelock */
linelock = (struct linelock *) tlck;
linelock->next = 0;
linelock->flag = tlckLINELOCK;
linelock->maxcnt = TLOCKLONG;
linelock->index = 0;
if (tlck->flag & tlckDIRECTORY)
linelock->flag |= tlckDIRECTORY;
/* append linelock after tlock */
linelock->next = tlock->next;
tlock->next = lid;
return linelock;
}
/*
* transaction commit management
* -----------------------------
*/
/*
* NAME: txCommit()
*
* FUNCTION: commit the changes to the objects specified in
* clist. For journalled segments only the
 *	changes of the caller are committed, i.e. by tid.
* for non-journalled segments the data are flushed to
* disk and then the change to the disk inode and indirect
* blocks committed (so blocks newly allocated to the
* segment will be made a part of the segment atomically).
*
* all of the segments specified in clist must be in
* one file system. no more than 6 segments are needed
* to handle all unix svcs.
*
* if the i_nlink field (i.e. disk inode link count)
* is zero, and the type of inode is a regular file or
 *	directory, or symbolic link, the inode is truncated
* to zero length. the truncation is committed but the
* VM resources are unaffected until it is closed (see
* iput and iclose).
*
* PARAMETER:
*
* RETURN:
*
* serialization:
* on entry the inode lock on each segment is assumed
* to be held.
*
* i/o error:
*/
int txCommit(tid_t tid, /* transaction identifier */
int nip, /* number of inodes to commit */
struct inode **iplist, /* list of inode to commit */
int flag)
{
int rc = 0;
struct commit cd;
struct jfs_log *log;
struct tblock *tblk;
struct lrd *lrd;
struct inode *ip;
struct jfs_inode_info *jfs_ip;
int k, n;
ino_t top;
struct super_block *sb;
jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
/* is read-only file system ? */
if (isReadOnly(iplist[0])) {
rc = -EROFS;
goto TheEnd;
}
sb = cd.sb = iplist[0]->i_sb;
cd.tid = tid;
if (tid == 0)
tid = txBegin(sb, 0);
tblk = tid_to_tblock(tid);
/*
* initialize commit structure
*/
log = JFS_SBI(sb)->log;
cd.log = log;
/* initialize log record descriptor in commit */
lrd = &cd.lrd;
lrd->logtid = cpu_to_le32(tblk->logtid);
lrd->backchain = 0;
tblk->xflag |= flag;
if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
tblk->xflag |= COMMIT_LAZY;
/*
* prepare non-journaled objects for commit
*
* flush data pages of non-journaled file
* to prevent the file getting non-initialized disk blocks
* in case of crash.
* (new blocks - )
*/
cd.iplist = iplist;
cd.nip = nip;
/*
* acquire transaction lock on (on-disk) inodes
*
* update on-disk inode from in-memory inode
* acquiring transaction locks for AFTER records
* on the on-disk inode of file object
*
* sort the inodes array by inode number in descending order
* to prevent deadlock when acquiring transaction lock
* of on-disk inodes on multiple on-disk inode pages by
* multiple concurrent transactions
*/
for (k = 0; k < cd.nip; k++) {
top = (cd.iplist[k])->i_ino;
for (n = k + 1; n < cd.nip; n++) {
ip = cd.iplist[n];
if (ip->i_ino > top) {
top = ip->i_ino;
cd.iplist[n] = cd.iplist[k];
cd.iplist[k] = ip;
}
}
ip = cd.iplist[k];
jfs_ip = JFS_IP(ip);
/*
* BUGBUG - This code has temporarily been removed. The
* intent is to ensure that any file data is written before
* the metadata is committed to the journal. This prevents
* uninitialized data from appearing in a file after the
* journal has been replayed. (The uninitialized data
* could be sensitive data removed by another user.)
*
* The problem now is that we are holding the IWRITELOCK
* on the inode, and calling filemap_fdatawrite on an
* unmapped page will cause a deadlock in jfs_get_block.
*
* The long term solution is to pare down the use of
* IWRITELOCK. We are currently holding it too long.
* We could also be smarter about which data pages need
* to be written before the transaction is committed and
* when we don't need to worry about it at all.
*
* if ((!S_ISDIR(ip->i_mode))
* && (tblk->flag & COMMIT_DELETE) == 0)
* filemap_write_and_wait(ip->i_mapping);
*/
/*
* Mark inode as not dirty. It will still be on the dirty
* inode list, but we'll know not to commit it again unless
* it gets marked dirty again
*/
clear_cflag(COMMIT_Dirty, ip);
/* inherit anonymous tlock(s) of inode */
if (jfs_ip->atlhead) {
lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
tblk->next = jfs_ip->atlhead;
if (!tblk->last)
tblk->last = jfs_ip->atltail;
jfs_ip->atlhead = jfs_ip->atltail = 0;
TXN_LOCK();
list_del_init(&jfs_ip->anon_inode_list);
TXN_UNLOCK();
}
/*
* acquire transaction lock on on-disk inode page
* (become first tlock of the tblk's tlock list)
*/
if (((rc = diWrite(tid, ip))))
goto out;
}
/*
* write log records from transaction locks
*
* txUpdateMap() resets XAD_NEW in XAD.
*/
txLog(log, tblk, &cd);
/*
* Ensure that inode isn't reused before
* lazy commit thread finishes processing
*/
if (tblk->xflag & COMMIT_DELETE) {
ihold(tblk->u.ip);
/*
* Avoid a rare deadlock
*
* If the inode is locked, we may be blocked in
* jfs_commit_inode. If so, we don't want the
* lazy_commit thread doing the last iput() on the inode
* since that may block on the locked inode. Instead,
* commit the transaction synchronously, so the last iput
* will be done by the calling thread (or later)
*/
/*
* I believe this code is no longer needed. Splitting I_LOCK
* into two bits, I_NEW and I_SYNC should prevent this
* deadlock as well. But since I don't have a JFS testload
* to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
* Joern
*/
if (tblk->u.ip->i_state & I_SYNC)
tblk->xflag &= ~COMMIT_LAZY;
}
ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
((tblk->u.ip->i_nlink == 0) &&
!test_cflag(COMMIT_Nolink, tblk->u.ip)));
/*
* write COMMIT log record
*/
lrd->type = cpu_to_le16(LOG_COMMIT);
lrd->length = 0;
lmLog(log, tblk, lrd, NULL);
lmGroupCommit(log, tblk);
/*
* - transaction is now committed -
*/
/*
* force pages in careful update
* (imap addressing structure update)
*/
if (flag & COMMIT_FORCE)
txForce(tblk);
/*
* update allocation map.
*
* update inode allocation map and inode:
* free pager lock on memory object of inode if any.
* update block allocation map.
*
* txUpdateMap() resets XAD_NEW in XAD.
*/
if (tblk->xflag & COMMIT_FORCE)
txUpdateMap(tblk);
/*
* free transaction locks and pageout/free pages
*/
txRelease(tblk);
if ((tblk->flag & tblkGC_LAZY) == 0)
txUnlock(tblk);
/*
* reset in-memory object state
*/
for (k = 0; k < cd.nip; k++) {
ip = cd.iplist[k];
jfs_ip = JFS_IP(ip);
/*
* reset in-memory inode state
*/
jfs_ip->bxflag = 0;
jfs_ip->blid = 0;
}
out:
if (rc != 0)
txAbort(tid, 1);
TheEnd:
jfs_info("txCommit: tid = %d, returning %d", tid, rc);
return rc;
}
/*
* NAME: txLog()
*
* FUNCTION: Writes AFTER log records for all lines modified
* by tid for segments specified by inodes in comdata.
* Code assumes only WRITELOCKS are recorded in lockwords.
*
* PARAMETERS:
*
* RETURN :
*/
static void txLog(struct jfs_log *log, struct tblock *tblk, struct commit *cd)
{
struct inode *ip;
lid_t lid;
struct tlock *tlck;
struct lrd *lrd = &cd->lrd;
/*
* write log record(s) for each tlock of transaction,
*/
for (lid = tblk->next; lid; lid = tlck->next) {
tlck = lid_to_tlock(lid);
tlck->flag |= tlckLOG;
/* initialize lrd common */
ip = tlck->ip;
lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
/* write log record of page from the tlock */
switch (tlck->type & tlckTYPE) {
case tlckXTREE:
xtLog(log, tblk, lrd, tlck);
break;
case tlckDTREE:
dtLog(log, tblk, lrd, tlck);
break;
case tlckINODE:
diLog(log, tblk, lrd, tlck, cd);
break;
case tlckMAP:
mapLog(log, tblk, lrd, tlck);
break;
case tlckDATA:
dataLog(log, tblk, lrd, tlck);
break;
default:
jfs_err("UFO tlock:0x%p", tlck);
}
}
return;
}
/*
* diLog()
*
* function: log inode tlock and format maplock to update bmap;
*/
static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
struct tlock *tlck, struct commit *cd)
{
struct metapage *mp;
pxd_t *pxd;
struct pxd_lock *pxdlock;
mp = tlck->mp;
/* initialize as REDOPAGE record format */
lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
pxd = &lrd->log.redopage.pxd;
/*
* inode after image
*/
if (tlck->type & tlckENTRY) {
/* log after-image for logredo(): */
lrd->type = cpu_to_le16(LOG_REDOPAGE);
PXDaddress(pxd, mp->index);
PXDlength(pxd,
mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
} else if (tlck->type & tlckFREE) {
/*
* free inode extent
*
* (pages of the freed inode extent have been invalidated and
* a maplock for free of the extent has been formatted at
* txLock() time);
*
* the tlock had been acquired on the inode allocation map page
* (iag) that specifies the freed extent, even though the map
* page is not itself logged, to prevent pageout of the map
* page before the log;
*/
/* log LOG_NOREDOINOEXT of the freed inode extent for
* logredo() to start NoRedoPage filters, and to update
* imap and bmap for free of the extent;
*/
lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
/*
* For the LOG_NOREDOINOEXT record, we need
* to pass the IAG number and inode extent
* index (within that IAG) from which the
* extent is being released. These have been
* passed to us in the iplist[1] and iplist[2].
*/
lrd->log.noredoinoext.iagnum =
cpu_to_le32((u32) (size_t) cd->iplist[1]);
lrd->log.noredoinoext.inoext_idx =
cpu_to_le32((u32) (size_t) cd->iplist[2]);
pxdlock = (struct pxd_lock *) & tlck->lock;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
/* update bmap */
tlck->flag |= tlckUPDATEMAP;
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
} else
jfs_err("diLog: UFO type tlck:0x%p", tlck);
return;
}
/*
* dataLog()
*
* function: log data tlock
*/
static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
struct tlock *tlck)
{
struct metapage *mp;
pxd_t *pxd;
mp = tlck->mp;
/* initialize as REDOPAGE record format */
lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
pxd = &lrd->log.redopage.pxd;
/* log after-image for logredo(): */
lrd->type = cpu_to_le16(LOG_REDOPAGE);
if (jfs_dirtable_inline(tlck->ip)) {
/*
		 * The table has been truncated, so we must have deleted
		 * the last entry; don't bother logging this.
*/
mp->lid = 0;
grab_metapage(mp);
metapage_homeok(mp);
discard_metapage(mp);
tlck->mp = NULL;
return;
}
PXDaddress(pxd, mp->index);
PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
return;
}
/*
* dtLog()
*
* function: log dtree tlock and format maplock to update bmap;
*/
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
struct metapage *mp;
struct pxd_lock *pxdlock;
pxd_t *pxd;
mp = tlck->mp;
/* initialize as REDOPAGE/NOREDOPAGE record format */
lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
pxd = &lrd->log.redopage.pxd;
if (tlck->type & tlckBTROOT)
lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
/*
* page extension via relocation: entry insertion;
* page extension in-place: entry insertion;
* new right page from page split, reinitialized in-line
* root from root page split: entry insertion;
*/
if (tlck->type & (tlckNEW | tlckEXTEND)) {
/* log after-image of the new page for logredo():
* mark log (LOG_NEW) for logredo() to initialize
* freelist and update bmap for alloc of the new page;
*/
lrd->type = cpu_to_le16(LOG_REDOPAGE);
if (tlck->type & tlckEXTEND)
lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
else
lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
PXDaddress(pxd, mp->index);
PXDlength(pxd,
mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* format a maplock for txUpdateMap() to update bPMAP for
* alloc of the new page;
*/
if (tlck->type & tlckBTROOT)
return;
tlck->flag |= tlckUPDATEMAP;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckALLOCPXD;
pxdlock->pxd = *pxd;
pxdlock->index = 1;
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
return;
}
/*
* entry insertion/deletion,
* sibling page link update (old right page before split);
*/
if (tlck->type & (tlckENTRY | tlckRELINK)) {
/* log after-image for logredo(): */
lrd->type = cpu_to_le16(LOG_REDOPAGE);
PXDaddress(pxd, mp->index);
PXDlength(pxd,
mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
return;
}
/*
* page deletion: page has been invalidated
* page relocation: source extent
*
* a maplock for free of the page has been formatted
	 * at txLock() time;
*/
if (tlck->type & (tlckFREE | tlckRELOCATE)) {
/* log LOG_NOREDOPAGE of the deleted page for logredo()
* to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
*/
lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
pxdlock = (struct pxd_lock *) & tlck->lock;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
/* a maplock for txUpdateMap() for free of the page
* has been formatted at txLock() time;
*/
tlck->flag |= tlckUPDATEMAP;
}
return;
}
/*
* xtLog()
*
* function: log xtree tlock and format maplock to update bmap;
*/
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
struct inode *ip;
struct metapage *mp;
xtpage_t *p;
struct xtlock *xtlck;
struct maplock *maplock;
struct xdlistlock *xadlock;
struct pxd_lock *pxdlock;
pxd_t *page_pxd;
int next, lwm, hwm;
ip = tlck->ip;
mp = tlck->mp;
/* initialize as REDOPAGE/NOREDOPAGE record format */
lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
page_pxd = &lrd->log.redopage.pxd;
if (tlck->type & tlckBTROOT) {
lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
p = &JFS_IP(ip)->i_xtroot;
if (S_ISDIR(ip->i_mode))
lrd->log.redopage.type |=
cpu_to_le16(LOG_DIR_XTREE);
} else
p = (xtpage_t *) mp->data;
next = le16_to_cpu(p->header.nextindex);
xtlck = (struct xtlock *) & tlck->lock;
maplock = (struct maplock *) & tlck->lock;
xadlock = (struct xdlistlock *) maplock;
/*
* entry insertion/extension;
* sibling page link update (old right page before split);
*/
if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
/* log after-image for logredo():
* logredo() will update bmap for alloc of new/extended
* extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
* after-image of XADlist;
* logredo() resets (XAD_NEW|XAD_EXTEND) flag when
* applying the after-image to the meta-data page.
*/
lrd->type = cpu_to_le16(LOG_REDOPAGE);
PXDaddress(page_pxd, mp->index);
PXDlength(page_pxd,
mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* format a maplock for txUpdateMap() to update bPMAP
* for alloc of new/extended extents of XAD[lwm:next)
* from the page itself;
* txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
*/
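		/* lwm == 0 means no alloc low-water mark was recorded
		 * for this page
		 */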
lwm = xtlck->lwm.offset;
if (lwm == 0)
lwm = XTPAGEMAXSLOT;
if (lwm == next)
goto out;
if (lwm > next) {
jfs_err("xtLog: lwm > next");
goto out;
}
tlck->flag |= tlckUPDATEMAP;
xadlock->flag = mlckALLOCXADLIST;
xadlock->count = next - lwm;
if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
int i;
pxd_t *pxd;
/*
* Lazy commit may allow xtree to be modified before
* txUpdateMap runs. Copy xad into linelock to
* preserve correct data.
*
			 * We can fit twice as many pxd's as xads in the lock.
*/
xadlock->flag = mlckALLOCPXDLIST;
pxd = xadlock->xdlist = &xtlck->pxdlock;
for (i = 0; i < xadlock->count; i++) {
PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
p->xad[lwm + i].flag &=
~(XAD_NEW | XAD_EXTENDED);
pxd++;
}
} else {
/*
			 * xdlist would point into the inode's xtree; ensure
			 * that the transaction is not committed lazily.
*/
xadlock->flag = mlckALLOCXADLIST;
xadlock->xdlist = &p->xad[lwm];
tblk->xflag &= ~COMMIT_LAZY;
}
jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
tlck->ip, mp, tlck, lwm, xadlock->count);
maplock->index = 1;
out:
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
return;
}
/*
* page deletion: file deletion/truncation (ref. xtTruncate())
*
* (page will be invalidated after log is written and bmap
* is updated from the page);
*/
if (tlck->type & tlckFREE) {
/* LOG_NOREDOPAGE log for NoRedoPage filter:
* if page free from file delete, NoRedoFile filter from
* inode image of zero link count will subsume NoRedoPage
* filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
* if page free from deletion and truncation, LOG_UPDATEMAP
* log for the page itself is generated from processing
* its parent page xad entries;
*/
/* if page free from file truncation, log LOG_NOREDOPAGE
* of the deleted page for logredo() to start NoRedoPage
* filter for the page;
*/
if (tblk->xflag & COMMIT_TRUNCATE) {
/* write NOREDOPAGE for the page */
lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
PXDaddress(page_pxd, mp->index);
PXDlength(page_pxd,
mp->logical_size >> tblk->sb->
s_blocksize_bits);
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, NULL));
if (tlck->type & tlckBTROOT) {
/* Empty xtree must be logged */
lrd->type = cpu_to_le16(LOG_REDOPAGE);
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, tlck));
}
}
/* init LOG_UPDATEMAP of the freed extents
* XAD[XTENTRYSTART:hwm) from the deleted page itself
* for logredo() to update bmap;
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
xtlck = (struct xtlock *) & tlck->lock;
hwm = xtlck->hwm.offset;
lrd->log.updatemap.nxd =
cpu_to_le16(hwm - XTENTRYSTART + 1);
/* reformat linelock for lmLog() */
xtlck->header.offset = XTENTRYSTART;
xtlck->header.length = hwm - XTENTRYSTART + 1;
xtlck->index = 1;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/* format a maplock for txUpdateMap() to update bmap
* to free extents of XAD[XTENTRYSTART:hwm) from the
* deleted page itself;
*/
tlck->flag |= tlckUPDATEMAP;
xadlock->count = hwm - XTENTRYSTART + 1;
if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
int i;
pxd_t *pxd;
/*
* Lazy commit may allow xtree to be modified before
* txUpdateMap runs. Copy xad into linelock to
* preserve correct data.
*
			 * We can fit twice as many pxd's as xads in the lock.
*/
xadlock->flag = mlckFREEPXDLIST;
pxd = xadlock->xdlist = &xtlck->pxdlock;
for (i = 0; i < xadlock->count; i++) {
PXDaddress(pxd,
addressXAD(&p->xad[XTENTRYSTART + i]));
PXDlength(pxd,
lengthXAD(&p->xad[XTENTRYSTART + i]));
pxd++;
}
} else {
/*
			 * xdlist would point into the inode's xtree; ensure
			 * that the transaction is not committed lazily.
*/
xadlock->flag = mlckFREEXADLIST;
xadlock->xdlist = &p->xad[XTENTRYSTART];
tblk->xflag &= ~COMMIT_LAZY;
}
jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
tlck->ip, mp, xadlock->count);
maplock->index = 1;
/* mark page as invalid */
if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
&& !(tlck->type & tlckBTROOT))
tlck->flag |= tlckFREEPAGE;
/*
else (tblk->xflag & COMMIT_PMAP)
? release the page;
*/
return;
}
/*
* page/entry truncation: file truncation (ref. xtTruncate())
*
* |----------+------+------+---------------|
* | | |
* | | hwm - hwm before truncation
* | next - truncation point
* lwm - lwm before truncation
* header ?
*/
if (tlck->type & tlckTRUNCATE) {
pxd_t pxd; /* truncated extent of xad */
int twm;
/*
* For truncation the entire linelock may be used, so it would
* be difficult to store xad list in linelock itself.
* Therefore, we'll just force transaction to be committed
* synchronously, so that xtree pages won't be changed before
* txUpdateMap runs.
*/
tblk->xflag &= ~COMMIT_LAZY;
lwm = xtlck->lwm.offset;
if (lwm == 0)
lwm = XTPAGEMAXSLOT;
hwm = xtlck->hwm.offset;
twm = xtlck->twm.offset;
/*
* write log records
*/
/* log after-image for logredo():
*
* logredo() will update bmap for alloc of new/extended
* extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
* after-image of XADlist;
* logredo() resets (XAD_NEW|XAD_EXTEND) flag when
* applying the after-image to the meta-data page.
*/
lrd->type = cpu_to_le16(LOG_REDOPAGE);
PXDaddress(page_pxd, mp->index);
PXDlength(page_pxd,
mp->logical_size >> tblk->sb->s_blocksize_bits);
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
/*
* truncate entry XAD[twm == next - 1]:
*/
if (twm == next - 1) {
/* init LOG_UPDATEMAP for logredo() to update bmap for
* free of truncated delta extent of the truncated
* entry XAD[next - 1]:
* (xtlck->pxdlock = truncated delta extent);
*/
pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
/* assert(pxdlock->type & tlckTRUNCATE); */
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
lrd->log.updatemap.nxd = cpu_to_le16(1);
lrd->log.updatemap.pxd = pxdlock->pxd;
pxd = pxdlock->pxd; /* save to format maplock */
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, NULL));
}
/*
* free entries XAD[next:hwm]:
*/
if (hwm >= next) {
/* init LOG_UPDATEMAP of the freed extents
* XAD[next:hwm] from the deleted page itself
* for logredo() to update bmap;
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type =
cpu_to_le16(LOG_FREEXADLIST);
xtlck = (struct xtlock *) & tlck->lock;
hwm = xtlck->hwm.offset;
lrd->log.updatemap.nxd =
cpu_to_le16(hwm - next + 1);
/* reformat linelock for lmLog() */
xtlck->header.offset = next;
xtlck->header.length = hwm - next + 1;
xtlck->index = 1;
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, tlck));
}
/*
* format maplock(s) for txUpdateMap() to update bmap
*/
maplock->index = 0;
/*
* allocate entries XAD[lwm:next):
*/
if (lwm < next) {
/* format a maplock for txUpdateMap() to update bPMAP
* for alloc of new/extended extents of XAD[lwm:next)
* from the page itself;
* txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
*/
tlck->flag |= tlckUPDATEMAP;
xadlock->flag = mlckALLOCXADLIST;
xadlock->count = next - lwm;
xadlock->xdlist = &p->xad[lwm];
jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
tlck->ip, mp, xadlock->count, lwm, next);
maplock->index++;
xadlock++;
}
/*
* truncate entry XAD[twm == next - 1]:
*/
if (twm == next - 1) {
/* format a maplock for txUpdateMap() to update bmap
* to free truncated delta extent of the truncated
* entry XAD[next - 1];
* (xtlck->pxdlock = truncated delta extent);
*/
tlck->flag |= tlckUPDATEMAP;
pxdlock = (struct pxd_lock *) xadlock;
pxdlock->flag = mlckFREEPXD;
pxdlock->count = 1;
pxdlock->pxd = pxd;
jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
ip, mp, pxdlock->count, hwm);
maplock->index++;
xadlock++;
}
/*
* free entries XAD[next:hwm]:
*/
if (hwm >= next) {
/* format a maplock for txUpdateMap() to update bmap
			 * to free extents of XAD[next:hwm] from the deleted
* page itself;
*/
tlck->flag |= tlckUPDATEMAP;
xadlock->flag = mlckFREEXADLIST;
xadlock->count = hwm - next + 1;
xadlock->xdlist = &p->xad[next];
jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
tlck->ip, mp, xadlock->count, next, hwm);
maplock->index++;
}
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
}
return;
}
/*
* mapLog()
*
* function: log from maplock of freed data extents;
*/
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
struct pxd_lock *pxdlock;
int i, nlock;
pxd_t *pxd;
/*
* page relocation: free the source page extent
*
* a maplock for txUpdateMap() for free of the page
* has been formatted at txLock() time saving the src
* relocated page address;
*/
if (tlck->type & tlckRELOCATE) {
/* log LOG_NOREDOPAGE of the old relocated page
* for logredo() to start NoRedoPage filter;
*/
lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxd = &lrd->log.redopage.pxd;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
/* (N.B. currently, logredo() does NOT update bmap
* for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
* if page free from relocation, LOG_UPDATEMAP log is
* specifically generated now for logredo()
* to update bmap for free of src relocated page;
* (new flag LOG_RELOCATE may be introduced which will
* inform logredo() to start NORedoPage filter and also
* update block allocation map at the same time, thus
* avoiding an extra log write);
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
lrd->log.updatemap.nxd = cpu_to_le16(1);
lrd->log.updatemap.pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
/* a maplock for txUpdateMap() for free of the page
* has been formatted at txLock() time;
*/
tlck->flag |= tlckUPDATEMAP;
return;
}
	/*
	 * Otherwise it's not a relocate request
	 */
else {
/* log LOG_UPDATEMAP for logredo() to update bmap for
* free of truncated/relocated delta extent of the data;
* e.g.: external EA extent, relocated/truncated extent
* from xtTailgate();
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
pxdlock = (struct pxd_lock *) & tlck->lock;
nlock = pxdlock->index;
for (i = 0; i < nlock; i++, pxdlock++) {
if (pxdlock->flag & mlckALLOCPXD)
lrd->log.updatemap.type =
cpu_to_le16(LOG_ALLOCPXD);
else
lrd->log.updatemap.type =
cpu_to_le16(LOG_FREEPXD);
lrd->log.updatemap.nxd = cpu_to_le16(1);
lrd->log.updatemap.pxd = pxdlock->pxd;
lrd->backchain =
cpu_to_le32(lmLog(log, tblk, lrd, NULL));
jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
(ulong) addressPXD(&pxdlock->pxd),
lengthPXD(&pxdlock->pxd));
}
/* update bmap */
tlck->flag |= tlckUPDATEMAP;
}
}
/*
* txEA()
*
* function: acquire maplock for EA/ACL extents or
* set COMMIT_INLINE flag;
*/
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
struct tlock *tlck = NULL;
struct pxd_lock *maplock = NULL, *pxdlock = NULL;
/*
* format maplock for alloc of new EA extent
*/
if (newea) {
/* Since the newea could be a completely zeroed entry we need to
* check for the two flags which indicate we should actually
* commit new EA data
*/
if (newea->flag & DXD_EXTENT) {
tlck = txMaplock(tid, ip, tlckMAP);
maplock = (struct pxd_lock *) & tlck->lock;
pxdlock = (struct pxd_lock *) maplock;
pxdlock->flag = mlckALLOCPXD;
PXDaddress(&pxdlock->pxd, addressDXD(newea));
PXDlength(&pxdlock->pxd, lengthDXD(newea));
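			/* advance so a free of the old EA extent (below)
			 * is recorded in the second pxd_lock slot
			 */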
pxdlock++;
maplock->index = 1;
} else if (newea->flag & DXD_INLINE) {
tlck = NULL;
set_cflag(COMMIT_Inlineea, ip);
}
}
/*
* format maplock for free of old EA extent
*/
if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
if (tlck == NULL) {
tlck = txMaplock(tid, ip, tlckMAP);
maplock = (struct pxd_lock *) & tlck->lock;
pxdlock = (struct pxd_lock *) maplock;
maplock->index = 0;
}
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, addressDXD(oldea));
PXDlength(&pxdlock->pxd, lengthDXD(oldea));
maplock->index++;
}
}
/*
* txForce()
*
* function: synchronously write pages locked by transaction
* after txLog() but before txUpdateMap();
*/
static void txForce(struct tblock * tblk)
{
struct tlock *tlck;
lid_t lid, next;
struct metapage *mp;
/*
* reverse the order of transaction tlocks in
* careful update order of address index pages
* (right to left, bottom up)
*/
tlck = lid_to_tlock(tblk->next);
lid = tlck->next;
tlck->next = 0;
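	/* the old list head becomes the tail of the reversed list */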
while (lid) {
tlck = lid_to_tlock(lid);
next = tlck->next;
tlck->next = tblk->next;
tblk->next = lid;
lid = next;
}
/*
* synchronously write the page, and
* hold the page for txUpdateMap();
*/
for (lid = tblk->next; lid; lid = next) {
tlck = lid_to_tlock(lid);
next = tlck->next;
if ((mp = tlck->mp) != NULL &&
(tlck->type & tlckBTROOT) == 0) {
assert(mp->xflag & COMMIT_PAGE);
if (tlck->flag & tlckWRITEPAGE) {
tlck->flag &= ~tlckWRITEPAGE;
/* do not release page to freelist */
force_metapage(mp);
#if 0
/*
* The "right" thing to do here is to
* synchronously write the metadata.
* With the current implementation this
* is hard since write_metapage requires
* us to kunmap & remap the page. If we
* have tlocks pointing into the metadata
* pages, we don't want to do this. I think
* we can get by with synchronously writing
* the pages when they are released.
*/
assert(mp->nohomeok);
set_bit(META_dirty, &mp->flag);
set_bit(META_sync, &mp->flag);
#endif
}
}
}
}
/*
* txUpdateMap()
*
* function: update persistent allocation map (and working map
* if appropriate);
*
 * parameter:
 *	tblk	- committed transaction block
*/
static void txUpdateMap(struct tblock * tblk)
{
struct inode *ip;
struct inode *ipimap;
lid_t lid;
struct tlock *tlck;
struct maplock *maplock;
struct pxd_lock pxdlock;
int maptype;
int k, nlock;
struct metapage *mp = NULL;
ipimap = JFS_SBI(tblk->sb)->ipimap;
maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
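	/* COMMIT_PMAP: free blocks in the persistent map only (the working
	 * map is updated at the last reference release); COMMIT_PWMAP: free
	 * in both persistent and working maps
	 */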
/*
* update block allocation map
*
* update allocation state in pmap (and wmap) and
* update lsn of the pmap page;
*/
/*
* scan each tlock/page of transaction for block allocation/free:
*
* for each tlock/page of transaction, update map.
* ? are there tlock for pmap and pwmap at the same time ?
*/
for (lid = tblk->next; lid; lid = tlck->next) {
tlck = lid_to_tlock(lid);
if ((tlck->flag & tlckUPDATEMAP) == 0)
continue;
if (tlck->flag & tlckFREEPAGE) {
/*
* Another thread may attempt to reuse freed space
* immediately, so we want to get rid of the metapage
* before anyone else has a chance to get it.
* Lock metapage, update maps, then invalidate
* the metapage.
*/
mp = tlck->mp;
ASSERT(mp->xflag & COMMIT_PAGE);
grab_metapage(mp);
}
/*
* extent list:
* . in-line PXD list:
* . out-of-line XAD list:
*/
maplock = (struct maplock *) & tlck->lock;
nlock = maplock->index;
for (k = 0; k < nlock; k++, maplock++) {
/*
* allocate blocks in persistent map:
*
* blocks have been allocated from wmap at alloc time;
*/
if (maplock->flag & mlckALLOC) {
txAllocPMap(ipimap, maplock, tblk);
}
/*
* free blocks in persistent and working map:
* blocks will be freed in pmap and then in wmap;
*
* ? tblock specifies the PMAP/PWMAP based upon
* transaction
*
* free blocks in persistent map:
* blocks will be freed from wmap at last reference
* release of the object for regular files;
*
			 * Always free blocks from both persistent & working
* maps for directories
*/
else { /* (maplock->flag & mlckFREE) */
if (tlck->flag & tlckDIRECTORY)
txFreeMap(ipimap, maplock,
tblk, COMMIT_PWMAP);
else
txFreeMap(ipimap, maplock,
tblk, maptype);
}
}
if (tlck->flag & tlckFREEPAGE) {
if (!(tblk->flag & tblkGC_LAZY)) {
/* This is equivalent to txRelease */
ASSERT(mp->lid == lid);
tlck->mp->lid = 0;
}
assert(mp->nohomeok == 1);
metapage_homeok(mp);
discard_metapage(mp);
tlck->mp = NULL;
}
}
/*
* update inode allocation map
*
* update allocation state in pmap and
* update lsn of the pmap page;
* update in-memory inode flag/state
*
* unlock mapper/write lock
*/
if (tblk->xflag & COMMIT_CREATE) {
diUpdatePMap(ipimap, tblk->ino, false, tblk);
/* update persistent block allocation map
* for the allocation of inode extent;
*/
pxdlock.flag = mlckALLOCPXD;
pxdlock.pxd = tblk->u.ixpxd;
pxdlock.index = 1;
txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
} else if (tblk->xflag & COMMIT_DELETE) {
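		/* mark the inode freed in the persistent imap, then drop
		 * the transaction's hold on it; this may be the final iput()
		 */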
ip = tblk->u.ip;
diUpdatePMap(ipimap, ip->i_ino, true, tblk);
iput(ip);
}
}
/*
* txAllocPMap()
*
* function: allocate from persistent map;
*
* parameter:
* ipbmap -
 *	maplock -
* xad list:
* pxd:
*
* maptype -
* allocate from persistent map;
* free from persistent map;
 *	(e.g., tmp file - free from working map at release
* of last reference);
* free from persistent and working map;
*
* lsn - log sequence number;
*/
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct xdlistlock *xadlistlock;
xad_t *xad;
s64 xaddr;
int xlen;
struct pxd_lock *pxdlock;
struct xdlistlock *pxdlistlock;
pxd_t *pxd;
int n;
/*
* allocate from persistent map;
*/
if (maplock->flag & mlckALLOCXADLIST) {
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
xaddr = addressXAD(xad);
xlen = lengthXAD(xad);
dbUpdatePMap(ipbmap, false, xaddr,
(s64) xlen, tblk);
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
}
} else if (maplock->flag & mlckALLOCPXD) {
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
} else { /* (maplock->flag & mlckALLOCPXDLIST) */
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
xlen = lengthPXD(pxd);
dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
tblk);
jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
}
}
/*
* txFreeMap()
*
* function: free from persistent and/or working map;
*
* todo: optimization
*/
void txFreeMap(struct inode *ip,
struct maplock * maplock, struct tblock * tblk, int maptype)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct xdlistlock *xadlistlock;
xad_t *xad;
s64 xaddr;
int xlen;
struct pxd_lock *pxdlock;
struct xdlistlock *pxdlistlock;
pxd_t *pxd;
int n;
jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
tblk, maplock, maptype);
/*
* free from persistent map;
*/
if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
if (maplock->flag & mlckFREEXADLIST) {
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
if (!(xad->flag & XAD_NEW)) {
xaddr = addressXAD(xad);
xlen = lengthXAD(xad);
dbUpdatePMap(ipbmap, true, xaddr,
(s64) xlen, tblk);
jfs_info("freePMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
}
} else if (maplock->flag & mlckFREEPXD) {
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
tblk);
jfs_info("freePMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
xlen = lengthPXD(pxd);
dbUpdatePMap(ipbmap, true, xaddr,
(s64) xlen, tblk);
jfs_info("freePMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
}
}
/*
* free from working map;
*/
if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
if (maplock->flag & mlckFREEXADLIST) {
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
xaddr = addressXAD(xad);
xlen = lengthXAD(xad);
dbFree(ip, xaddr, (s64) xlen);
xad->flag = 0;
jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
} else if (maplock->flag & mlckFREEPXD) {
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbFree(ip, xaddr, (s64) xlen);
jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
} else { /* (maplock->flag & mlckFREEPXDLIST) */
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
xlen = lengthPXD(pxd);
dbFree(ip, xaddr, (s64) xlen);
jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
(ulong) xaddr, xlen);
}
}
}
}
/*
* txFreelock()
*
* function: remove tlock from inode anonymous locklist
*/
void txFreelock(struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct tlock *xtlck, *tlck;
lid_t xlid = 0, lid;
if (!jfs_ip->atlhead)
return;
TXN_LOCK();
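	/* treat the list head as a dummy tlock: ->next (the first field
	 * of struct tlock) overlays jfs_ip->atlhead
	 */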
xtlck = (struct tlock *) &jfs_ip->atlhead;
while ((lid = xtlck->next) != 0) {
tlck = lid_to_tlock(lid);
if (tlck->flag & tlckFREELOCK) {
xtlck->next = tlck->next;
txLockFree(lid);
} else {
xtlck = tlck;
xlid = lid;
}
}
if (jfs_ip->atlhead)
jfs_ip->atltail = xlid;
else {
jfs_ip->atltail = 0;
/*
* If inode was on anon_list, remove it
*/
list_del_init(&jfs_ip->anon_inode_list);
}
TXN_UNLOCK();
}
/*
* txAbort()
*
* function: abort tx before commit;
*
* frees line-locks and segment locks for all
* segments in comdata structure.
* Optionally sets state of file-system to FM_DIRTY in super-block.
 * The log age of in-memory page frames locked by the transaction
 * is reset to 0 (to avoid logwrap).
*/
void txAbort(tid_t tid, int dirty)
{
lid_t lid, next;
struct metapage *mp;
struct tblock *tblk = tid_to_tblock(tid);
struct tlock *tlck;
/*
* free tlocks of the transaction
*/
for (lid = tblk->next; lid; lid = next) {
tlck = lid_to_tlock(lid);
next = tlck->next;
mp = tlck->mp;
JFS_IP(tlck->ip)->xtlid = 0;
if (mp) {
mp->lid = 0;
/*
			 * reset lsn of page to avoid logwrap:
*
* (page may have been previously committed by another
* transaction(s) but has not been paged, i.e.,
* it may be on logsync list even though it has not
* been logged for the current tx.)
*/
if (mp->xflag & COMMIT_PAGE && mp->lsn)
LogSyncRelease(mp);
}
/* insert tlock at head of freelist */
TXN_LOCK();
txLockFree(lid);
TXN_UNLOCK();
}
/* caller will free the transaction block */
tblk->next = tblk->last = 0;
/*
* mark filesystem dirty
*/
if (dirty)
jfs_error(tblk->sb, "\n");
return;
}
/*
 * txLazyCommit()
*
* All transactions except those changing ipimap (COMMIT_FORCE) are
 * processed by this routine. This ensures that the inode and block
* allocation maps are updated in order. For synchronous transactions,
* let the user thread finish processing after txUpdateMap() is called.
*/
static void txLazyCommit(struct tblock * tblk)
{
struct jfs_log *log;
while (((tblk->flag & tblkGC_READY) == 0) &&
((tblk->flag & tblkGC_UNLOCKED) == 0)) {
/* We must have gotten ahead of the user thread
*/
jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
yield();
}
jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
txUpdateMap(tblk);
log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
spin_lock_irq(&log->gclock); // LOGGC_LOCK
tblk->flag |= tblkGC_COMMITTED;
if (tblk->flag & tblkGC_READY)
log->gcrtc--;
wake_up_all(&tblk->gcwait); // LOGGC_WAKEUP
/*
* Can't release log->gclock until we've tested tblk->flag
*/
if (tblk->flag & tblkGC_LAZY) {
spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
txUnlock(tblk);
tblk->flag &= ~tblkGC_LAZY;
txEnd(tblk - TxBlock); /* Convert back to tid */
} else
spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
}
/*
* jfs_lazycommit(void)
*
* To be run as a kernel daemon. If lbmIODone is called in an interrupt
* context, or where blocking is not wanted, this routine will process
* committed transactions from the unlock queue.
*/
int jfs_lazycommit(void *arg)
{
int WorkDone;
struct tblock *tblk;
unsigned long flags;
struct jfs_sb_info *sbi;
do {
LAZY_LOCK(flags);
jfs_commit_thread_waking = 0; /* OK to wake another thread */
while (!list_empty(&TxAnchor.unlock_queue)) {
WorkDone = 0;
list_for_each_entry(tblk, &TxAnchor.unlock_queue,
cqueue) {
sbi = JFS_SBI(tblk->sb);
/*
* For each volume, the transactions must be
* handled in order. If another commit thread
* is handling a tblk for this superblock,
* skip it
*/
if (sbi->commit_state & IN_LAZYCOMMIT)
continue;
sbi->commit_state |= IN_LAZYCOMMIT;
WorkDone = 1;
/*
* Remove transaction from queue
*/
list_del(&tblk->cqueue);
LAZY_UNLOCK(flags);
txLazyCommit(tblk);
LAZY_LOCK(flags);
sbi->commit_state &= ~IN_LAZYCOMMIT;
/*
* Don't continue in the for loop. (We can't
* anyway, it's unsafe!) We want to go back to
* the beginning of the list.
*/
break;
}
/* If there was nothing to do, don't continue */
if (!WorkDone)
break;
}
/* In case a wakeup came while all threads were active */
jfs_commit_thread_waking = 0;
if (freezing(current)) {
LAZY_UNLOCK(flags);
try_to_freeze();
} else {
DECLARE_WAITQUEUE(wq, current);
add_wait_queue(&jfs_commit_thread_wait, &wq);
set_current_state(TASK_INTERRUPTIBLE);
LAZY_UNLOCK(flags);
schedule();
remove_wait_queue(&jfs_commit_thread_wait, &wq);
}
} while (!kthread_should_stop());
if (!list_empty(&TxAnchor.unlock_queue))
jfs_err("jfs_lazycommit being killed w/pending transactions!");
else
jfs_info("jfs_lazycommit being killed");
return 0;
}
void txLazyUnlock(struct tblock * tblk)
{
unsigned long flags;
LAZY_LOCK(flags);
list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
/*
* Don't wake up a commit thread if there is already one servicing
* this superblock, or if the last one we woke up hasn't started yet.
*/
if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
!jfs_commit_thread_waking) {
jfs_commit_thread_waking = 1;
wake_up(&jfs_commit_thread_wait);
}
LAZY_UNLOCK(flags);
}
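/*
 *	LogSyncRelease()
 *
 *	release the transaction's hold on a metapage on the log sync
 *	list; metapage_homeok() allows the page to be written home
 */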
static void LogSyncRelease(struct metapage * mp)
{
struct jfs_log *log = mp->log;
assert(mp->nohomeok);
assert(log);
metapage_homeok(mp);
}
/*
* txQuiesce
*
* Block all new transactions and push anonymous transactions to
* completion
*
* This does almost the same thing as jfs_sync below. We don't
* worry about deadlocking when jfs_tlocks_low is set, since we would
* expect jfs_sync to get us out of that jam.
*/
void txQuiesce(struct super_block *sb)
{
struct inode *ip;
struct jfs_inode_info *jfs_ip;
struct jfs_log *log = JFS_SBI(sb)->log;
tid_t tid;
set_bit(log_QUIESCE, &log->flag);
TXN_LOCK();
restart:
while (!list_empty(&TxAnchor.anon_list)) {
jfs_ip = list_entry(TxAnchor.anon_list.next,
struct jfs_inode_info,
anon_inode_list);
ip = &jfs_ip->vfs_inode;
/*
* inode will be removed from anonymous list
* when it is committed
*/
TXN_UNLOCK();
tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
mutex_lock(&jfs_ip->commit_mutex);
txCommit(tid, 1, &ip, 0);
txEnd(tid);
mutex_unlock(&jfs_ip->commit_mutex);
/*
* Just to be safe. I don't know how
* long we can run without blocking
*/
cond_resched();
TXN_LOCK();
}
/*
* If jfs_sync is running in parallel, there could be some inodes
* on anon_list2. Let's check.
*/
if (!list_empty(&TxAnchor.anon_list2)) {
list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
goto restart;
}
TXN_UNLOCK();
/*
* We may need to kick off the group commit
*/
jfs_flush_journal(log, 0);
}
/*
* txResume()
*
* Allows transactions to start again following txQuiesce
*/
void txResume(struct super_block *sb)
{
struct jfs_log *log = JFS_SBI(sb)->log;
clear_bit(log_QUIESCE, &log->flag);
TXN_WAKEUP(&log->syncwait);
}
/*
* jfs_sync(void)
*
* To be run as a kernel daemon. This is awakened when tlocks run low.
* We write any inodes that have anonymous tlocks so they will become
* available.
*/
int jfs_sync(void *arg)
{
struct inode *ip;
struct jfs_inode_info *jfs_ip;
tid_t tid;
do {
/*
* write each inode on the anonymous inode list
*/
TXN_LOCK();
while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
jfs_ip = list_entry(TxAnchor.anon_list.next,
struct jfs_inode_info,
anon_inode_list);
ip = &jfs_ip->vfs_inode;
if (! igrab(ip)) {
/*
* Inode is being freed
*/
list_del_init(&jfs_ip->anon_inode_list);
} else if (mutex_trylock(&jfs_ip->commit_mutex)) {
/*
* inode will be removed from anonymous list
* when it is committed
*/
TXN_UNLOCK();
tid = txBegin(ip->i_sb, COMMIT_INODE);
txCommit(tid, 1, &ip, 0);
txEnd(tid);
mutex_unlock(&jfs_ip->commit_mutex);
iput(ip);
/*
* Just to be safe. I don't know how
* long we can run without blocking
*/
cond_resched();
TXN_LOCK();
} else {
/* We can't get the commit mutex. It may
* be held by a thread waiting for tlock's
* so let's not block here. Save it to
* put back on the anon_list.
*/
/* Move from anon_list to anon_list2 */
list_move(&jfs_ip->anon_inode_list,
&TxAnchor.anon_list2);
TXN_UNLOCK();
iput(ip);
TXN_LOCK();
}
}
/* Add anon_list2 back to anon_list */
list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
if (freezing(current)) {
TXN_UNLOCK();
try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
TXN_UNLOCK();
schedule();
}
} while (!kthread_should_stop());
jfs_info("jfs_sync being killed");
return 0;
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
int jfs_txanchor_proc_show(struct seq_file *m, void *v)
{
char *freewait;
char *freelockwait;
char *lowlockwait;
freewait =
waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
freelockwait =
waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
lowlockwait =
waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
seq_printf(m,
"JFS TxAnchor\n"
"============\n"
"freetid = %d\n"
"freewait = %s\n"
"freelock = %d\n"
"freelockwait = %s\n"
"lowlockwait = %s\n"
"tlocksInUse = %d\n"
"jfs_tlocks_low = %d\n"
"unlock_queue is %sempty\n",
TxAnchor.freetid,
freewait,
TxAnchor.freelock,
freelockwait,
lowlockwait,
TxAnchor.tlocksInUse,
jfs_tlocks_low,
list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
return 0;
}
#endif
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
int jfs_txstats_proc_show(struct seq_file *m, void *v)
{
seq_printf(m,
"JFS TxStats\n"
"===========\n"
"calls to txBegin = %d\n"
"txBegin blocked by sync barrier = %d\n"
"txBegin blocked by tlocks low = %d\n"
"txBegin blocked by no free tid = %d\n"
"calls to txBeginAnon = %d\n"
"txBeginAnon blocked by sync barrier = %d\n"
"txBeginAnon blocked by tlocks low = %d\n"
"calls to txLockAlloc = %d\n"
"tLockAlloc blocked by no free lock = %d\n",
TxStat.txBegin,
TxStat.txBegin_barrier,
TxStat.txBegin_lockslow,
TxStat.txBegin_freetid,
TxStat.txBeginAnon,
TxStat.txBeginAnon_barrier,
TxStat.txBeginAnon_lockslow,
TxStat.txLockAlloc,
TxStat.txLockAlloc_freelock);
return 0;
}
#endif
| linux-master | fs/jfs/jfs_txnmgr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
/*
* jfs_dtree.c: directory B+-tree manager
*
* B+-tree with variable length key directory:
*
* each directory page is structured as an array of 32-byte
* directory entry slots initialized as a freelist
* to avoid search/compaction of free space at insertion.
* when an entry is inserted, a number of slots are allocated
* from the freelist as required to store variable length data
* of the entry; when the entry is deleted, slots of the entry
* are returned to freelist.
*
* leaf entry stores full name as key and file serial number
* (aka inode number) as data.
 * internal/router entry stores suffix compressed name
* as key and simple extent descriptor as data.
*
* each directory page maintains a sorted entry index table
* which stores the start slot index of sorted entries
* to allow binary search on the table.
*
* directory starts as a root/leaf page in on-disk inode
* inline data area.
 * when it becomes full, it starts a leaf of an external extent
 * of length of 1 block. each time the first leaf becomes full,
 * it is extended rather than split (its size is doubled),
 * until its length becomes 4 KBytes, from then the extent is split
* with new 4 Kbyte extent when it becomes full
* to reduce external fragmentation of small directories.
*
 * a per-directory index table supports linear scan of the directory
 * in pieces by readdir().
*
*
* case-insensitive directory file system
*
* names are stored in case-sensitive way in leaf entry.
* but stored, searched and compared in case-insensitive (uppercase) order
* (i.e., both search key and entry key are folded for search/compare):
* (note that case-sensitive order is BROKEN in storage, e.g.,
 * sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad)
*
 * entries which fold to the same key make up an equivalence class
 * whose members are stored as a contiguous cluster (may cross page boundary)
 * but whose order is arbitrary and acts as duplicates, e.g.,
 * abc, Abc, aBc, abC)
*
 * once a match is found at a leaf, a forward/backward scan is required:
 * in a case-insensitive search, to detect duplicates;
 * in a case-sensitive search, to find the exact match
*
* router entry must be created/stored in case-insensitive way
* in internal entry:
* (right most key of left page and left most key of right page
* are folded, and its suffix compression is propagated as router
* key in parent)
 * (e.g., if split occurs <abc> and <aBd>, <ABD> rather than <aB>
* should be made the router key for the split)
*
* case-insensitive search:
*
* fold search key;
*
* case-insensitive search of B-tree:
* for internal entry, router key is already folded;
* for leaf entry, fold the entry key before comparison.
*
* if (leaf entry case-insensitive match found)
* if (next entry satisfies case-insensitive match)
* return EDUPLICATE;
* if (prev entry satisfies case-insensitive match)
* return EDUPLICATE;
* return match;
* else
* return no match;
*
* serialization:
 * target directory inode lock is held on entry/exit
* of all main directory service routines.
*
* log based recovery:
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dmap.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
/* dtree split parameter */
struct dtsplit {
struct metapage *mp;
s16 index;
s16 nslot;
struct component_name *key;
ddata_t *data;
struct pxdlist *pxdlist;
};
#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)
/* get page buffer for specified block address */
#define DT_GETPAGE(IP, BN, MP, SIZE, P, RC) \
do { \
BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot); \
if (!(RC)) { \
if (((P)->header.nextindex > \
(((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \
((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \
BT_PUTPAGE(MP); \
jfs_error((IP)->i_sb, \
"DT_GETPAGE: dtree page corrupt\n"); \
MP = NULL; \
RC = -EIO; \
} \
} \
} while (0)
/* for consistency */
#define DT_PUTPAGE(MP) BT_PUTPAGE(MP)
#define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)
/*
* forward references
*/
static int dtSplitUp(tid_t tid, struct inode *ip,
struct dtsplit * split, struct btstack * btstack);
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
static int dtExtendPage(tid_t tid, struct inode *ip,
struct dtsplit * split, struct btstack * btstack);
static int dtSplitRoot(tid_t tid, struct inode *ip,
struct dtsplit * split, struct metapage ** rmpp);
static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
dtpage_t * fp, struct btstack * btstack);
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
static int dtReadFirst(struct inode *ip, struct btstack * btstack);
static int dtReadNext(struct inode *ip,
loff_t * offset, struct btstack * btstack);
static int dtCompare(struct component_name * key, dtpage_t * p, int si);
static int ciCompare(struct component_name * key, dtpage_t * p, int si,
int flag);
static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
int flag);
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag);
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock **);
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index);
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
#define ciToUpper(c) UniStrupr((c)->name)
/*
* read_index_page()
*
* Reads a page of a directory's index table.
* Having metadata mapped into the directory inode's address space
* presents a multitude of problems. We avoid this by mapping to
* the absolute address space outside of the *_metapage routines
*/
static struct metapage *read_index_page(struct inode *inode, s64 blkno)
{
int rc;
s64 xaddr;
int xflag;
s32 xlen;
rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
if (rc || (xaddr == 0))
return NULL;
return read_metapage(inode, xaddr, PSIZE, 1);
}
/*
* get_index_page()
*
 * Same as read_index_page(), but gets a new page without reading
*/
static struct metapage *get_index_page(struct inode *inode, s64 blkno)
{
int rc;
s64 xaddr;
int xflag;
s32 xlen;
rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
if (rc || (xaddr == 0))
return NULL;
return get_metapage(inode, xaddr, PSIZE, 1);
}
/*
* find_index()
*
* Returns dtree page containing directory table entry for specified
* index and pointer to its entry.
*
* mp must be released by caller.
*/
static struct dir_table_slot *find_index(struct inode *ip, u32 index,
struct metapage ** mp, s64 *lblock)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
s64 blkno;
s64 offset;
int page_offset;
struct dir_table_slot *slot;
static int maxWarnings = 10;
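	/* rate-limit the invalid-index warnings below */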
if (index < 2) {
if (maxWarnings) {
jfs_warn("find_entry called with index = %d", index);
maxWarnings--;
}
return NULL;
}
if (index >= jfs_ip->next_index) {
jfs_warn("find_entry called with index >= next_index");
return NULL;
}
if (jfs_dirtable_inline(ip)) {
/*
* Inline directory table
*/
*mp = NULL;
slot = &jfs_ip->i_dirtable[index - 2];
} else {
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) <<
JFS_SBI(ip->i_sb)->l2nbperpage;
if (*mp && (*lblock != blkno)) {
release_metapage(*mp);
*mp = NULL;
}
if (!(*mp)) {
*lblock = blkno;
*mp = read_index_page(ip, blkno);
}
if (!(*mp)) {
jfs_err("free_index: error reading directory table");
return NULL;
}
slot =
(struct dir_table_slot *) ((char *) (*mp)->data +
page_offset);
}
return slot;
}
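/*
 *	lock_index()
 *
 *	acquire a transaction line lock covering one directory index
 *	table slot within the metapage
 */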
static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
u32 index)
{
struct tlock *tlck;
struct linelock *llck;
struct lv *lv;
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (struct linelock *) tlck->lock;
if (llck->index >= llck->maxcnt)
llck = txLinelock(llck);
lv = &llck->lv[llck->index];
/*
* Linelock slot size is twice the size of directory table
* slot size. 512 entries per page.
*/
lv->offset = ((index - 2) & 511) >> 1;
lv->length = 1;
llck->index++;
}
/*
* add_index()
*
* Adds an entry to the directory index table. This is used to provide
* each directory entry with a persistent index in which to resume
* directory traversals
*/
static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
{
struct super_block *sb = ip->i_sb;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
u64 blkno;
struct dir_table_slot *dirtab_slot;
u32 index;
struct linelock *llck;
struct lv *lv;
struct metapage *mp;
s64 offset;
uint page_offset;
struct tlock *tlck;
s64 xaddr;
ASSERT(DO_INDEX(ip));
if (jfs_ip->next_index < 2) {
jfs_warn("add_index: next_index = %d. Resetting!",
jfs_ip->next_index);
jfs_ip->next_index = 2;
}
index = jfs_ip->next_index++;
if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
/*
		 * i_size reflects the size of the index table: 8 bytes per entry.
*/
ip->i_size = (loff_t) (index - 1) << 3;
/*
* dir table fits inline within inode
*/
dirtab_slot = &jfs_ip->i_dirtable[index-2];
dirtab_slot->flag = DIR_INDEX_VALID;
dirtab_slot->slot = slot;
DTSaddress(dirtab_slot, bn);
set_cflag(COMMIT_Dirtable, ip);
return index;
}
if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
struct dir_table_slot temp_table[12];
/*
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (dquot_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
/*
* Save the table, we're going to overwrite it with the
* xtree root
*/
memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
/*
* Initialize empty x-tree
*/
xtInitRoot(tid, ip);
/*
* Add the first block to the xtree
*/
if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
/* This really shouldn't fail */
jfs_warn("add_index: xtInsert failed!");
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
mp = get_index_page(ip, 0);
if (!mp) {
jfs_err("add_index: get_metapage failed!");
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
goto clean_up;
}
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (struct linelock *) & tlck->lock;
ASSERT(llck->index == 0);
lv = &llck->lv[0];
lv->offset = 0;
lv->length = 6; /* tlckDATA slot size is 16 bytes */
llck->index++;
memcpy(mp->data, temp_table, sizeof(temp_table));
mark_metapage_dirty(mp);
release_metapage(mp);
/*
* Logging is now directed by xtree tlocks
*/
clear_cflag(COMMIT_Dirtable, ip);
}
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
if (page_offset == 0) {
/*
* This will be the beginning of a new page
*/
xaddr = 0;
if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
jfs_warn("add_index: xtInsert failed!");
goto clean_up;
}
ip->i_size += PSIZE;
if ((mp = get_index_page(ip, blkno)))
memset(mp->data, 0, PSIZE); /* Just looks better */
else
xtTruncate(tid, ip, offset, COMMIT_PWMAP);
} else
mp = read_index_page(ip, blkno);
if (!mp) {
jfs_err("add_index: get/read_metapage failed!");
goto clean_up;
}
lock_index(tid, ip, mp, index);
dirtab_slot =
(struct dir_table_slot *) ((char *) mp->data + page_offset);
dirtab_slot->flag = DIR_INDEX_VALID;
dirtab_slot->slot = slot;
DTSaddress(dirtab_slot, bn);
mark_metapage_dirty(mp);
release_metapage(mp);
return index;
clean_up:
jfs_ip->next_index--;
return 0;
}
/*
* free_index()
*
* Marks an entry to the directory index table as free.
*/
static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
{
struct dir_table_slot *dirtab_slot;
s64 lblock;
struct metapage *mp = NULL;
dirtab_slot = find_index(ip, index, &mp, &lblock);
if (!dirtab_slot)
return;
dirtab_slot->flag = DIR_INDEX_FREE;
dirtab_slot->slot = dirtab_slot->addr1 = 0;
dirtab_slot->addr2 = cpu_to_le32(next);
if (mp) {
lock_index(tid, ip, mp, index);
mark_metapage_dirty(mp);
release_metapage(mp);
} else
set_cflag(COMMIT_Dirtable, ip);
}
/*
* modify_index()
*
* Changes an entry in the directory index table
*/
static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
int slot, struct metapage ** mp, s64 *lblock)
{
struct dir_table_slot *dirtab_slot;
dirtab_slot = find_index(ip, index, mp, lblock);
if (!dirtab_slot)
return;
DTSaddress(dirtab_slot, bn);
dirtab_slot->slot = slot;
if (*mp) {
lock_index(tid, ip, *mp, index);
mark_metapage_dirty(*mp);
} else
set_cflag(COMMIT_Dirtable, ip);
}
/*
* read_index()
*
* reads a directory table slot
*/
static int read_index(struct inode *ip, u32 index,
struct dir_table_slot * dirtab_slot)
{
s64 lblock;
struct metapage *mp = NULL;
struct dir_table_slot *slot;
slot = find_index(ip, index, &mp, &lblock);
if (!slot) {
return -EIO;
}
memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
if (mp)
release_metapage(mp);
return 0;
}
/*
* dtSearch()
*
* function:
* Search for the entry with specified key
*
* parameter:
*
* return: 0 - search result on stack, leaf page pinned;
* errno - I/O error
*/
int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
struct btstack * btstack, int flag)
{
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn;
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
int base, index, lim;
struct btframe *btsp;
pxd_t *pxd;
int psize = 288; /* initial in-line directory */
ino_t inumber;
struct component_name ciKey;
struct super_block *sb = ip->i_sb;
ciKey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
GFP_NOFS);
if (!ciKey.name) {
rc = -ENOMEM;
goto dtSearch_Exit2;
}
/* uppercase search key for c-i directory */
UniStrcpy(ciKey.name, key->name);
ciKey.namlen = key->namlen;
/* only uppercase if case-insensitive support is on */
if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
ciToUpper(&ciKey);
}
BT_CLR(btstack); /* reset stack */
/* init level count for max pages to split */
btstack->nsplit = 1;
/*
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
*
* if entry with search key K is not found
* internal page search find the entry with largest key Ki
* less than K which point to the child page to search;
* leaf page search find the entry with smallest key Kj
* greater than K so that the returned index is the position of
* the entry to be shifted right for insertion of new entry.
* for empty tree, search key is greater than any key of the tree.
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
/* get/pin the page to search */
DT_GETPAGE(ip, bn, mp, psize, p, rc);
if (rc)
goto dtSearch_Exit1;
/* get sorted entry table of the page */
stbl = DT_GETSTBL(p);
/*
* binary search with search key K on the current page.
*/
for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
index = base + (lim >> 1);
if (p->header.flag & BT_LEAF) {
/* uppercase leaf name to compare */
cmp =
ciCompare(&ciKey, p, stbl[index],
JFS_SBI(sb)->mntflag);
} else {
/* router key is in uppercase */
cmp = dtCompare(&ciKey, p, stbl[index]);
}
if (cmp == 0) {
/*
* search hit
*/
/* search hit - leaf page:
* return the entry found
*/
if (p->header.flag & BT_LEAF) {
inumber = le32_to_cpu(
((struct ldtentry *) & p->slot[stbl[index]])->inumber);
/*
* search for JFS_LOOKUP
*/
if (flag == JFS_LOOKUP) {
*data = inumber;
rc = 0;
goto out;
}
/*
* search for JFS_CREATE
*/
if (flag == JFS_CREATE) {
*data = inumber;
rc = -EEXIST;
goto out;
}
/*
* search for JFS_REMOVE or JFS_RENAME
*/
if ((flag == JFS_REMOVE ||
flag == JFS_RENAME) &&
*data != inumber) {
rc = -ESTALE;
goto out;
}
/*
* JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
*/
/* save search result */
*data = inumber;
btsp = btstack->top;
btsp->bn = bn;
btsp->index = index;
btsp->mp = mp;
rc = 0;
goto dtSearch_Exit1;
}
/* search hit - internal page:
* descend/search its child page
*/
goto getChild;
}
if (cmp > 0) {
base = index + 1;
--lim;
}
}
/*
* search miss
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or (maxindex + 1) index.
*/
/*
* search miss - leaf page
*
* return location of entry (base) where new entry with
* search key K is to be inserted.
*/
if (p->header.flag & BT_LEAF) {
/*
* search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
*/
if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
flag == JFS_RENAME) {
rc = -ENOENT;
goto out;
}
/*
* search for JFS_CREATE|JFS_FINDDIR:
*
* save search result
*/
*data = 0;
btsp = btstack->top;
btsp->bn = bn;
btsp->index = base;
btsp->mp = mp;
rc = 0;
goto dtSearch_Exit1;
}
/*
* search miss - internal page
*
* if base is non-zero, decrement base by one to get the parent
* entry of the child page to search.
*/
index = base ? base - 1 : base;
/*
* go down to child page
*/
getChild:
/* update max. number of pages to split */
if (BT_STACK_FULL(btstack)) {
/* Something's corrupted, mark filesystem dirty so
* chkdsk will fix it.
*/
jfs_error(sb, "stack overrun!\n");
BT_STACK_DUMP(btstack);
rc = -EIO;
goto out;
}
btstack->nsplit++;
/* push (bn, index) of the parent page/entry */
BT_PUSH(btstack, bn, index);
/* get the child page block number */
pxd = (pxd_t *) & p->slot[stbl[index]];
bn = addressPXD(pxd);
psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the parent page */
DT_PUTPAGE(mp);
}
out:
DT_PUTPAGE(mp);
dtSearch_Exit1:
kfree(ciKey.name);
dtSearch_Exit2:
return rc;
}
/*
* dtInsert()
*
* function: insert an entry to directory tree
*
* parameter:
*
* return: 0 - success;
* errno - failure;
*/
int dtInsert(tid_t tid, struct inode *ip,
struct component_name * name, ino_t * fsn, struct btstack * btstack)
{
int rc = 0;
struct metapage *mp; /* meta-page buffer */
dtpage_t *p; /* base B+-tree index page */
s64 bn;
int index;
struct dtsplit split; /* split information */
ddata_t data;
struct dt_lock *dtlck;
int n;
struct tlock *tlck;
struct lv *lv;
/*
* retrieve search result
*
* dtSearch() returns (leaf page pinned, index at which to insert).
* n.b. dtSearch() may return index of (maxindex + 1) of
* the full page.
*/
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
/*
* insert entry for new key
*/
if (DO_INDEX(ip)) {
if (JFS_IP(ip)->next_index == DIREND) {
DT_PUTPAGE(mp);
return -EMLINK;
}
n = NDTLEAF(name->namlen);
data.leaf.tid = tid;
data.leaf.ip = ip;
} else {
n = NDTLEAF_LEGACY(name->namlen);
data.leaf.ip = NULL; /* signifies legacy directory format */
}
data.leaf.ino = *fsn;
/*
* leaf page does not have enough room for new entry:
*
* extend/split the leaf page;
*
* dtSplitUp() will insert the entry and unpin the leaf page.
*/
if (n > p->header.freecnt) {
split.mp = mp;
split.index = index;
split.nslot = n;
split.key = name;
split.data = &data;
rc = dtSplitUp(tid, ip, &split, btstack);
return rc;
}
/*
* leaf page does have enough room for new entry:
*
* insert the new data entry into the leaf page;
*/
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
dtInsertEntry(p, index, name, &data, &dtlck);
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
n = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + n;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
dtlck->index++;
}
/* unpin the leaf page */
DT_PUTPAGE(mp);
return 0;
}
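
/*
 * Informal usage sketch (cf. jfs_create(); error handling, locking and
 * the inode setup are elided, and the identifiers are borrowed from
 * that caller): search first, then insert under the same transaction
 * using the btstack that dtSearch() filled in:
 *
 *	tid = txBegin(dip->i_sb, 0);
 *	rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE);
 *	if (!rc) {
 *		ino = ip->i_ino;
 *		rc = dtInsert(tid, dip, &dname, &ino, &btstack);
 *	}
 *	if (!rc)
 *		rc = txCommit(tid, 2, &iplist[0], 0);
 *	txEnd(tid);
 */
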
/*
* dtSplitUp()
*
* function: propagate insertion bottom up;
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* leaf page unpinned;
*/
static int dtSplitUp(tid_t tid,
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int rc = 0;
struct metapage *smp;
dtpage_t *sp; /* split page */
struct metapage *rmp;
dtpage_t *rp; /* new right page split from sp */
pxd_t rpxd; /* new right page extent descriptor */
struct metapage *lmp;
dtpage_t *lp; /* left child page */
int skip; /* index of entry of insertion */
struct btframe *parent; /* parent page entry on traverse stack */
s64 xaddr, nxaddr;
int xlen, xsize;
struct pxdlist pxdlist;
pxd_t *pxd;
struct component_name key = { 0, NULL };
ddata_t *data = split->data;
int n;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int quota_allocation = 0;
/* get split page */
smp = split->mp;
sp = DT_PAGE(ip, smp);
key.name = kmalloc_array(JFS_NAME_MAX + 2, sizeof(wchar_t), GFP_NOFS);
if (!key.name) {
DT_PUTPAGE(smp);
rc = -ENOMEM;
goto dtSplitUp_Exit;
}
/*
* split leaf page
*
* The split routines insert the new entry, and
* acquire txLock as appropriate.
*/
/*
* split root leaf page:
*/
if (sp->header.flag & BT_ROOT) {
/*
* allocate a single extent child page
*/
xlen = 1;
n = sbi->bsize >> L2DTSLOTSIZE;
n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
if (n <= split->nslot)
xlen++;
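/*
 * Worked example (informal; assumes 4 KiB blocks with the usual
 * 32-byte slots, i.e. L2DTSLOTSIZE = 5 and DTROOTMAXSLOT = 9): one
 * block holds n = 4096 >> 5 = 128 slots, of which (128 + 31) >> 5 = 4
 * go to the stbl and DTROOTMAXSLOT - freecnt are needed for the header
 * plus the entries copied out of the root; only if the remainder
 * cannot also hold the new entry (n <= split->nslot) is a second
 * block allocated.
 */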
if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
DT_PUTPAGE(smp);
goto freeKeyName;
}
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
PXDaddress(pxd, xaddr);
PXDlength(pxd, xlen);
split->pxdlist = &pxdlist;
rc = dtSplitRoot(tid, ip, split, &rmp);
if (rc)
dbFree(ip, xaddr, xlen);
else
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
if (!DO_INDEX(ip))
ip->i_size = xlen << sbi->l2bsize;
goto freeKeyName;
}
/*
* extend first leaf page
*
* extend the 1st extent if less than buffer page size
 * (dtExtendPage() returns the leaf page unpinned)
*/
pxd = &sp->header.self;
xlen = lengthPXD(pxd);
xsize = xlen << sbi->l2bsize;
if (xsize < PSIZE) {
xaddr = addressPXD(pxd);
n = xsize >> L2DTSLOTSIZE;
n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
if ((n + sp->header.freecnt) <= split->nslot)
n = xlen + (xlen << 1);
else
n = xlen;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, n);
if (rc)
goto extendOut;
quota_allocation += n;
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
(s64) n, &nxaddr)))
goto extendOut;
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
PXDaddress(pxd, nxaddr);
PXDlength(pxd, xlen + n);
split->pxdlist = &pxdlist;
if ((rc = dtExtendPage(tid, ip, split, btstack))) {
nxaddr = addressPXD(pxd);
if (xaddr != nxaddr) {
/* free relocated extent */
xlen = lengthPXD(pxd);
dbFree(ip, nxaddr, (s64) xlen);
} else {
/* free extended delta */
xlen = lengthPXD(pxd) - n;
xaddr = addressPXD(pxd) + xlen;
dbFree(ip, xaddr, (s64) n);
}
} else if (!DO_INDEX(ip))
ip->i_size = lengthPXD(pxd) << sbi->l2bsize;
extendOut:
DT_PUTPAGE(smp);
goto freeKeyName;
}
/*
* split leaf page <sp> into <sp> and a new right page <rp>.
*
* return <rp> pinned and its extent descriptor <rpxd>
*/
/*
* allocate new directory page extent and
* new index page(s) to cover page split(s)
*
* allocation hint: ?
*/
n = btstack->nsplit;
pxdlist.maxnpxd = pxdlist.npxd = 0;
xlen = sbi->nbperpage;
for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
PXDaddress(pxd, xaddr);
PXDlength(pxd, xlen);
pxdlist.maxnpxd++;
continue;
}
DT_PUTPAGE(smp);
/* undo allocation */
goto splitOut;
}
split->pxdlist = &pxdlist;
if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
DT_PUTPAGE(smp);
/* undo allocation */
goto splitOut;
}
if (!DO_INDEX(ip))
ip->i_size += PSIZE;
/*
* propagate up the router entry for the leaf page just split
*
* insert a router entry for the new page into the parent page,
* propagate the insert/split up the tree by walking back the stack
* of (bn of parent page, index of child page entry in parent page)
* that were traversed during the search for the page that split.
*
* the propagation of insert/split up the tree stops if the root
* splits or the page inserted into doesn't have to split to hold
* the new entry.
*
* the parent entry for the split page remains the same, and
* a new entry is inserted at its right with the first key and
* block number of the new right page.
*
 * At most four pages are pinned at any time:
* two children, left parent and right parent (when the parent splits).
* keep the child pages pinned while working on the parent.
* make sure that all pins are released at exit.
*/
while ((parent = BT_POP(btstack)) != NULL) {
/* parent page specified by stack frame <parent> */
/* keep current child pages (<lp>, <rp>) pinned */
lmp = smp;
lp = sp;
/*
* insert router entry in parent for new right child page <rp>
*/
/* get the parent page <sp> */
DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
if (rc) {
DT_PUTPAGE(lmp);
DT_PUTPAGE(rmp);
goto splitOut;
}
/*
* The new key entry goes ONE AFTER the index of parent entry,
* because the split was to the right.
*/
skip = parent->index + 1;
/*
* compute the key for the router entry
*
* key suffix compression:
* for internal pages that have leaf pages as children,
* retain only what's needed to distinguish between
* the new entry and the entry on the page to its left.
* If the keys compare equal, retain the entire key.
*
* note that compression is performed only at computing
* router key at the lowest internal level.
* further compression of the key between pairs of higher
* level internal pages loses too much information and
* the search may fail.
* (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
* results in two adjacent parent entries (a)(xx).
* if split occurs between these two entries, and
* if compression is applied, the router key of parent entry
* of right page (x) will divert search for x into right
* subtree and miss x in the left subtree.)
*
* the entire key must be retained for the next-to-leftmost
* internal key at any level of the tree, or search may fail
* (e.g., ?)
*/
switch (rp->header.flag & BT_TYPE) {
case BT_LEAF:
/*
* compute the length of prefix for suffix compression
* between last entry of left page and first entry
* of right page
*/
if ((sp->header.flag & BT_ROOT && skip > 1) ||
sp->header.prev != 0 || skip > 1) {
/* compute uppercase router prefix key */
rc = ciGetLeafPrefixKey(lp,
lp->header.nextindex-1,
rp, 0, &key,
sbi->mntflag);
if (rc) {
DT_PUTPAGE(lmp);
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
goto splitOut;
}
} else {
/* next to leftmost entry of
lowest internal level */
/* compute uppercase router key */
dtGetKey(rp, 0, &key, sbi->mntflag);
key.name[key.namlen] = 0;
if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
ciToUpper(&key);
}
n = NDTINTERNAL(key.namlen);
break;
case BT_INTERNAL:
dtGetKey(rp, 0, &key, sbi->mntflag);
n = NDTINTERNAL(key.namlen);
break;
default:
jfs_err("dtSplitUp(): UFO!");
break;
}
/* unpin left child page */
DT_PUTPAGE(lmp);
/*
* compute the data for the router entry
*/
data->xd = rpxd; /* child page xd */
/*
* parent page is full - split the parent page
*/
if (n > sp->header.freecnt) {
/* init for parent page split */
split->mp = smp;
split->index = skip; /* index at insert */
split->nslot = n;
split->key = &key;
/* split->data = data; */
/* unpin right child page */
DT_PUTPAGE(rmp);
/* The split routines insert the new entry,
* acquire txLock as appropriate.
* return <rp> pinned and its block number <rbn>.
*/
rc = (sp->header.flag & BT_ROOT) ?
dtSplitRoot(tid, ip, split, &rmp) :
dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
if (rc) {
DT_PUTPAGE(smp);
goto splitOut;
}
/* smp and rmp are pinned */
}
/*
* parent page is not full - insert router entry in parent page
*/
else {
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the parent page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root parent page */
if (!(sp->header.flag & BT_ROOT)) {
lv++;
n = skip >> L2DTSLOTSIZE;
lv->offset = sp->header.stblindex + n;
lv->length =
((sp->header.nextindex -
1) >> L2DTSLOTSIZE) - n + 1;
dtlck->index++;
}
dtInsertEntry(sp, skip, &key, data, &dtlck);
/* exit propagate up */
break;
}
}
/* unpin current split and its right page */
DT_PUTPAGE(smp);
DT_PUTPAGE(rmp);
/*
* free remaining extents allocated for split
*/
splitOut:
n = pxdlist.npxd;
pxd = &pxdlist.pxd[n];
for (; n < pxdlist.maxnpxd; n++, pxd++)
dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));
freeKeyName:
kfree(key.name);
/* Rollback quota allocation */
if (rc && quota_allocation)
dquot_free_block(ip, quota_allocation);
dtSplitUp_Exit:
return rc;
}

/*
* dtSplitPage()
*
* function: Split a non-root page of a btree.
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return split and new page pinned;
*/
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
int rc = 0;
struct metapage *smp;
dtpage_t *sp;
struct metapage *rmp;
dtpage_t *rp; /* new right page allocated */
s64 rbn; /* new right page block number */
struct metapage *mp;
dtpage_t *p;
s64 nextbn;
struct pxdlist *pxdlist;
pxd_t *pxd;
int skip, nextindex, half, left, nxt, off, si;
struct ldtentry *ldtentry;
struct idtentry *idtentry;
u8 *stbl;
struct dtslot *f;
int fsi, stblsize;
int n;
struct dt_lock *sdtlck, *rdtlck;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *slv, *rlv, *lv;
/* get split page */
smp = split->mp;
sp = DT_PAGE(ip, smp);
/*
* allocate the new right page for the split
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
rmp = get_metapage(ip, rbn, PSIZE, 1);
if (rmp == NULL)
return -EIO;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return rc;
}
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
BT_MARK_DIRTY(rmp, ip);
/*
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
rdtlck = (struct dt_lock *) & tlck->lock;
rp = (dtpage_t *) rmp->data;
*rpp = rp;
rp->header.self = *pxd;
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the split page
*
* action:
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
sdtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of split page */
ASSERT(sdtlck->index == 0);
slv = & sdtlck->lv[0];
slv->offset = 0;
slv->length = 1;
sdtlck->index++;
/*
* initialize/update sibling pointers between sp and rp
*/
nextbn = le64_to_cpu(sp->header.next);
rp->header.next = cpu_to_le64(nextbn);
rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
sp->header.next = cpu_to_le64(rbn);
/*
* initialize new right page
*/
rp->header.flag = sp->header.flag;
/* compute sorted entry table at start of extent data area */
rp->header.nextindex = 0;
rp->header.stblindex = 1;
n = PSIZE >> L2DTSLOTSIZE;
rp->header.maxslot = n;
stblsize = (n + 31) >> L2DTSLOTSIZE; /* in unit of slot */
/* init freelist */
fsi = rp->header.stblindex + stblsize;
rp->header.freelist = fsi;
rp->header.freecnt = rp->header.maxslot - fsi;
/*
* sequential append at tail: append without split
*
* If splitting the last page on a level because of appending
 * an entry to it (skip is maxentry), it's likely that the access is
* sequential. Adding an empty page on the side of the level is less
* work and can push the fill factor much higher than normal.
* If we're wrong it's no big deal, we'll just do the split the right
* way next time.
* (It may look like it's equally easy to do a similar hack for
* reverse sorted data, that is, split the tree left,
* but it's not. Be my guest.)
*/
if (nextbn == 0 && split->index == sp->header.nextindex) {
/* linelock header + stbl (first slot) of new page */
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 2;
rdtlck->index++;
/*
* initialize freelist of new right page
*/
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* insert entry at the first entry of the new right page */
dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);
goto out;
}
/*
* non-sequential insert (at possibly middle page)
*/
/*
* update prev pointer of previous right sibling page;
*/
if (nextbn != 0) {
DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc) {
discard_metapage(rmp);
return rc;
}
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the next page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of previous right sibling page */
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.prev = cpu_to_le64(rbn);
DT_PUTPAGE(mp);
}
/*
* split the data between the split and right pages.
*/
skip = split->index;
half = (PSIZE >> L2DTSLOTSIZE) >> 1; /* swag */
left = 0;
/*
* compute fill factor for split pages
*
* <nxt> traces the next entry to move to rp
* <off> traces the next entry to stay in sp
*/
stbl = (u8 *) & sp->slot[sp->header.stblindex];
nextindex = sp->header.nextindex;
for (nxt = off = 0; nxt < nextindex; ++off) {
if (off == skip)
/* check for fill factor with new entry size */
n = split->nslot;
else {
si = stbl[nxt];
switch (sp->header.flag & BT_TYPE) {
case BT_LEAF:
ldtentry = (struct ldtentry *) & sp->slot[si];
if (DO_INDEX(ip))
n = NDTLEAF(ldtentry->namlen);
else
n = NDTLEAF_LEGACY(ldtentry->namlen);
break;
case BT_INTERNAL:
idtentry = (struct idtentry *) & sp->slot[si];
n = NDTINTERNAL(idtentry->namlen);
break;
default:
break;
}
++nxt; /* advance to next entry to move in sp */
}
left += n;
if (left >= half)
break;
}
/* <nxt> points to the 1st entry to move */
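/*
 * e.g. (informal) with 4 KiB pages and 32-byte slots, half = 64;
 * if every entry occupied 3 slots, the loop above would leave about
 * the first 22 entries on <sp> and move the rest to <rp>.
 */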
/*
* move entries to right page
*
* dtMoveEntry() initializes rp and reserves entry for insertion
*
* split page moved out entries are linelocked;
* new/right page moved in entries are linelocked;
*/
/* linelock header + stbl of new right page */
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 5;
rdtlck->index++;
dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
sp->header.nextindex = nxt;
/*
* finalize freelist of new right page
*/
fsi = rp->header.freelist;
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/*
* Update directory index table for entries now in right page
*/
if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
s64 lblock;
mp = NULL;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
/*
* the skipped index was on the left page,
*/
if (skip <= off) {
/* insert the new entry in the split page */
dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);
/* linelock stbl of split page */
if (sdtlck->index >= sdtlck->maxcnt)
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[sdtlck->index];
n = skip >> L2DTSLOTSIZE;
slv->offset = sp->header.stblindex + n;
slv->length =
((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
sdtlck->index++;
}
/*
* the skipped index was on the right page,
*/
else {
/* adjust the skip index to reflect the new position */
skip -= nxt;
/* insert the new entry in the right page */
dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
}
out:
*rmpp = rmp;
*rpxdp = *pxd;
return rc;
}

/*
* dtExtendPage()
*
* function: extend 1st/only directory leaf page
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return extended page pinned;
*/
static int dtExtendPage(tid_t tid,
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct super_block *sb = ip->i_sb;
int rc;
struct metapage *smp, *pmp, *mp;
dtpage_t *sp, *pp;
struct pxdlist *pxdlist;
pxd_t *pxd, *tpxd;
int xlen, xsize;
int newstblindex, newstblsize;
int oldstblindex, oldstblsize;
int fsi, last;
struct dtslot *f;
struct btframe *parent;
int n;
struct dt_lock *dtlck;
s64 xaddr, txaddr;
struct tlock *tlck;
struct pxd_lock *pxdlock;
struct lv *lv;
uint type;
struct ldtentry *ldtentry;
u8 *stbl;
/* get page to extend */
smp = split->mp;
sp = DT_PAGE(ip, smp);
/* get parent/root page */
parent = BT_POP(btstack);
DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
if (rc)
return (rc);
/*
* extend the extent
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
xaddr = addressPXD(pxd);
tpxd = &sp->header.self;
txaddr = addressPXD(tpxd);
/* in-place extension */
if (xaddr == txaddr) {
type = tlckEXTEND;
}
/* relocation */
else {
type = tlckNEW;
/* save moved extent descriptor for later free */
tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = sp->header.self;
pxdlock->index = 1;
/*
* Update directory index table to reflect new page address
*/
if (DO_INDEX(ip)) {
s64 lblock;
mp = NULL;
stbl = DT_GETSTBL(sp);
for (n = 0; n < sp->header.nextindex; n++) {
ldtentry =
(struct ldtentry *) & sp->slot[stbl[n]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
xaddr, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
}
/*
* extend the page
*/
sp->header.self = *pxd;
jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the extended/leaf page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | type);
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[0];
/* update buffer extent descriptor of extended page */
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
/*
* copy old stbl to new stbl at start of extended area
*/
oldstblindex = sp->header.stblindex;
oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
newstblindex = sp->header.maxslot;
n = xsize >> L2DTSLOTSIZE;
newstblsize = (n + 31) >> L2DTSLOTSIZE;
memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
sp->header.nextindex);
/*
* in-line extension: linelock old area of extended page
*/
if (type == tlckEXTEND) {
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
lv++;
/* linelock new stbl of extended page */
lv->offset = newstblindex;
lv->length = newstblsize;
}
/*
* relocation: linelock whole relocated area
*/
else {
lv->offset = 0;
lv->length = sp->header.maxslot + newstblsize;
}
dtlck->index++;
sp->header.maxslot = n;
sp->header.stblindex = newstblindex;
/* sp->header.nextindex remains the same */
/*
* add old stbl region at head of freelist
*/
fsi = oldstblindex;
f = &sp->slot[fsi];
last = sp->header.freelist;
for (n = 0; n < oldstblsize; n++, fsi++, f++) {
f->next = last;
last = fsi;
}
sp->header.freelist = last;
sp->header.freecnt += oldstblsize;
/*
* append free region of newly extended area at tail of freelist
*/
/* init free region of newly extended area */
fsi = n = newstblindex + newstblsize;
f = &sp->slot[fsi];
for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* append new free region at tail of old freelist */
fsi = sp->header.freelist;
if (fsi == -1)
sp->header.freelist = n;
else {
do {
f = &sp->slot[fsi];
fsi = f->next;
} while (fsi != -1);
f->next = n;
}
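/*
 * e.g. if the old freelist ended ... -> 7 -> -1 and the extension
 * added free slots n .. maxslot - 1, the list now ends
 * ... -> 7 -> n -> n + 1 -> ... -> maxslot - 1 -> -1.
 */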
sp->header.freecnt += sp->header.maxslot - n;
/*
* insert the new entry
*/
dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);
BT_MARK_DIRTY(pmp, ip);
/*
* linelock any freeslots residing in old extent
*/
if (type == tlckEXTEND) {
n = sp->header.maxslot >> 2;
if (sp->header.freelist < n)
dtLinelockFreelist(sp, n, &dtlck);
}
/*
* update parent entry on the parent/root page
*/
/*
* acquire a transaction lock on the parent/root page
*/
tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[dtlck->index];
/* linelock parent entry - 1st slot */
lv->offset = 1;
lv->length = 1;
dtlck->index++;
/* update the parent pxd for page extension */
tpxd = (pxd_t *) & pp->slot[1];
*tpxd = *pxd;
DT_PUTPAGE(pmp);
return 0;
}

/*
* dtSplitRoot()
*
* function:
* split the full root page into
* original/root/split page and new right page
* i.e., root remains fixed in tree anchor (inode) and
* the root is copied to a single new right child page
* since root page << non-root page, and
* the split root page contains a single entry for the
* new right child page.
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return new page pinned;
*/
static int dtSplitRoot(tid_t tid,
struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
{
struct super_block *sb = ip->i_sb;
struct metapage *smp;
dtroot_t *sp;
struct metapage *rmp;
dtpage_t *rp;
s64 rbn;
int xlen;
int xsize;
struct dtslot *f;
s8 *stbl;
int fsi, stblsize, n;
struct idtentry *s;
pxd_t *ppxd;
struct pxdlist *pxdlist;
pxd_t *pxd;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int rc;
/* get split root page */
smp = split->mp;
sp = &JFS_IP(ip)->i_dtroot;
/*
* allocate/initialize a single (right) child page
*
 * N.B. at the first split, an extent of one (or two) blocks, just
 * large enough to fit the new entry, is allocated; at subsequent
 * splits, a full page is allocated;
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
rmp = get_metapage(ip, rbn, xsize, 1);
if (!rmp)
return -EIO;
rp = rmp->data;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return rc;
}
BT_MARK_DIRTY(rmp, ip);
/*
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
dtlck = (struct dt_lock *) & tlck->lock;
rp->header.flag =
(sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
rp->header.self = *pxd;
/* initialize sibling pointers */
rp->header.next = 0;
rp->header.prev = 0;
/*
* move in-line root page into new right page extent
*/
/* linelock header + copied entries + new stbl (1st slot) in new page */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 10; /* 1 + 8 + 1 */
dtlck->index++;
n = xsize >> L2DTSLOTSIZE;
rp->header.maxslot = n;
stblsize = (n + 31) >> L2DTSLOTSIZE;
/* copy old stbl to new stbl at start of extended area */
rp->header.stblindex = DTROOTMAXSLOT;
stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
memcpy(stbl, sp->header.stbl, sp->header.nextindex);
rp->header.nextindex = sp->header.nextindex;
/* copy old data area to start of new data area */
memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);
/*
* append free region of newly extended area at tail of freelist
*/
/* init free region of newly extended area */
fsi = n = DTROOTMAXSLOT + stblsize;
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* append new free region at tail of old freelist */
fsi = sp->header.freelist;
if (fsi == -1)
rp->header.freelist = n;
else {
rp->header.freelist = fsi;
do {
f = &rp->slot[fsi];
fsi = f->next;
} while (fsi != -1);
f->next = n;
}
rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;
/*
* Update directory index table for entries now in right page
*/
if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
s64 lblock;
struct metapage *mp = NULL;
struct ldtentry *ldtentry;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
/*
* insert the new entry into the new right/child page
* (skip index in the new right page will not change)
*/
dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
/*
* reset parent/root page
*
 * set the 1st entry offset to 0, which forces the left-most key
* at any level of the tree to be less than any search key.
*
* The btree comparison code guarantees that the left-most key on any
* level of the tree is never used, so it doesn't need to be filled in.
*/
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the root page (in-memory inode)
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
/* update page header of root */
if (sp->header.flag & BT_LEAF) {
sp->header.flag &= ~BT_LEAF;
sp->header.flag |= BT_INTERNAL;
}
/* init the first entry */
s = (struct idtentry *) & sp->slot[DTENTRYSTART];
ppxd = (pxd_t *) s;
*ppxd = *pxd;
s->next = -1;
s->namlen = 0;
stbl = sp->header.stbl;
stbl[0] = DTENTRYSTART;
sp->header.nextindex = 1;
/* init freelist */
fsi = DTENTRYSTART + 1;
f = &sp->slot[fsi];
/* init free region of remaining area */
for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
f->next = fsi;
f->next = -1;
sp->header.freelist = DTENTRYSTART + 1;
sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);
*rmpp = rmp;
return 0;
}

/*
* dtDelete()
*
* function: delete the entry(s) referenced by a key.
*
* parameter:
*
* return:
*/
int dtDelete(tid_t tid,
struct inode *ip, struct component_name * key, ino_t * ino, int flag)
{
int rc = 0;
s64 bn;
struct metapage *mp, *imp;
dtpage_t *p;
int index;
struct btstack btstack;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int i;
struct ldtentry *ldtentry;
u8 *stbl;
u32 table_index, next_index;
struct metapage *nmp;
dtpage_t *np;
/*
* search for the entry to delete:
*
* dtSearch() returns (leaf page pinned, index at which to delete).
*/
if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
return rc;
/* retrieve search result */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/*
 * We need to put the index of the next entry into the
 * directory index table so that a readdir can be resumed from
 * this entry.
*/
if (DO_INDEX(ip)) {
stbl = DT_GETSTBL(p);
ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
table_index = le32_to_cpu(ldtentry->index);
if (index == (p->header.nextindex - 1)) {
/*
* Last entry in this leaf page
*/
if ((p->header.flag & BT_ROOT)
|| (p->header.next == 0))
next_index = -1;
else {
/* Read next leaf page */
DT_GETPAGE(ip, le64_to_cpu(p->header.next),
nmp, PSIZE, np, rc);
if (rc)
next_index = -1;
else {
stbl = DT_GETSTBL(np);
ldtentry = (struct ldtentry *) & np->slot[stbl[0]];
next_index =
le32_to_cpu(ldtentry->index);
DT_PUTPAGE(nmp);
}
}
} else {
ldtentry =
(struct ldtentry *) & p->slot[stbl[index + 1]];
next_index = le32_to_cpu(ldtentry->index);
}
free_index(tid, ip, table_index, next_index);
}
/*
* the leaf page becomes empty, delete the page
*/
if (p->header.nextindex == 1) {
/* delete empty page */
rc = dtDeleteUp(tid, ip, mp, p, &btstack);
}
/*
* the leaf page has other entries remaining:
*
* delete the entry from the leaf page.
*/
else {
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/*
* Do not assume that dtlck->index will be zero. During a
* rename within a directory, this transaction may have
* modified this page already when adding the new entry.
*/
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
i + 1;
dtlck->index++;
}
/* free the leaf entry */
dtDeleteEntry(p, index, &dtlck);
/*
* Update directory index table for entries moved in stbl
*/
if (DO_INDEX(ip) && index < p->header.nextindex) {
s64 lblock;
imp = NULL;
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
ldtentry =
(struct ldtentry *) & p->slot[stbl[i]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
bn, i, &imp, &lblock);
}
if (imp)
release_metapage(imp);
}
DT_PUTPAGE(mp);
}
return rc;
}

/*
* dtDeleteUp()
*
* function:
* free empty pages as propagating deletion up the tree
*
* parameter:
*
* return:
*/
static int dtDeleteUp(tid_t tid, struct inode *ip,
struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
{
int rc = 0;
struct metapage *mp;
dtpage_t *p;
int index, nextindex;
int xlen;
struct btframe *parent;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
struct pxd_lock *pxdlock;
int i;
/*
* keep the root leaf page which has become empty
*/
if (BT_IS_ROOT(fmp)) {
/*
* reset the root
*
* dtInitRoot() acquires txlock on the root
*/
dtInitRoot(tid, ip, PARENT(ip));
DT_PUTPAGE(fmp);
return 0;
}
/*
* free the non-root leaf page
*/
/*
* acquire a transaction lock on the page
*
* write FREEXTENT|NOREDOPAGE log record
* N.B. linelock is overlaid as freed extent descriptor, and
* the buffer page is freed;
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = fp->header.self;
pxdlock->index = 1;
/* update sibling pointers */
if ((rc = dtRelink(tid, ip, fp))) {
BT_PUTPAGE(fmp);
return rc;
}
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
/*
* propagate page deletion up the directory tree
*
* If the delete from the parent page makes it empty,
* continue all the way up the tree.
* stop if the root page is reached (which is never deleted) or
* if the entry deletion does not empty the page.
*/
while ((parent = BT_POP(btstack)) != NULL) {
/* pin the parent page <sp> */
DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* free the extent of the child page deleted
*/
index = parent->index;
/*
* delete the entry for the child page from parent
*/
nextindex = p->header.nextindex;
/*
* the parent has the single entry being deleted:
*
* free the parent page which has become empty.
*/
if (nextindex == 1) {
/*
* keep the root internal page which has become empty
*/
if (p->header.flag & BT_ROOT) {
/*
* reset the root
*
* dtInitRoot() acquires txlock on the root
*/
dtInitRoot(tid, ip, PARENT(ip));
DT_PUTPAGE(mp);
return 0;
}
/*
* free the parent page
*/
else {
/*
* acquire a transaction lock on the page
*
* write FREEXTENT|NOREDOPAGE log record
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = p->header.self;
pxdlock->index = 1;
/* update sibling pointers */
if ((rc = dtRelink(tid, ip, p))) {
DT_PUTPAGE(mp);
return rc;
}
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
/* propagate up */
continue;
}
}
/*
* the parent has other entries remaining:
*
* delete the router entry from the parent page.
*/
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the page
*
* action: router entry deletion
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
i + 1;
dtlck->index++;
}
/* free the router entry */
dtDeleteEntry(p, index, &dtlck);
/* reset key of new leftmost entry of level (for consistency) */
if (index == 0 &&
((p->header.flag & BT_ROOT) || p->header.prev == 0))
dtTruncateEntry(p, 0, &dtlck);
/* unpin the parent page */
DT_PUTPAGE(mp);
/* exit propagation up */
break;
}
if (!DO_INDEX(ip))
ip->i_size -= PSIZE;
return 0;
}

/*
* dtRelink()
*
 * function:
 * link around a freed page: given the sibling chain
 * prev <-> p <-> next, point prev->header.next at next and
 * next->header.prev at prev, so lateral scans skip the freed page.
 *
 * parameter:
 * p: page to be freed
*
* return:
*/
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
{
int rc;
struct metapage *mp;
s64 nextbn, prevbn;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
nextbn = le64_to_cpu(p->header.next);
prevbn = le64_to_cpu(p->header.prev);
/* update prev pointer of the next page */
if (nextbn != 0) {
DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the next page
*
* action: update prev pointer;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.prev = cpu_to_le64(prevbn);
DT_PUTPAGE(mp);
}
/* update next pointer of the previous page */
if (prevbn != 0) {
DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the prev page
*
* action: update next pointer;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.next = cpu_to_le64(nextbn);
DT_PUTPAGE(mp);
}
return 0;
}

/*
* dtInitRoot()
*
* initialize directory root (inline in inode)
*/
void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
dtroot_t *p;
int fsi;
struct dtslot *f;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
u16 xflag_save;
/*
 * If this was previously a non-empty directory, we need to remove
* the old directory table.
*/
if (DO_INDEX(ip)) {
if (!jfs_dirtable_inline(ip)) {
struct tblock *tblk = tid_to_tblock(tid);
/*
* We're playing games with the tid's xflag. If
* we're removing a regular file, the file's xtree
* is committed with COMMIT_PMAP, but we always
 * commit the directory's xtree with COMMIT_PWMAP.
*/
xflag_save = tblk->xflag;
tblk->xflag = 0;
/*
* xtTruncate isn't guaranteed to fully truncate
* the xtree. The caller needs to check i_size
* after committing the transaction to see if
* additional truncation is needed. The
 * COMMIT_Stale flag tells the caller that we
* initiated the truncation.
*/
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
set_cflag(COMMIT_Stale, ip);
tblk->xflag = xflag_save;
} else
ip->i_size = 1;
jfs_ip->next_index = 2;
} else
ip->i_size = IDATASIZE;
/*
* acquire a transaction lock on the root
*
* action: directory initialization;
*/
tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
tlckDTREE | tlckENTRY | tlckBTROOT);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
p = &jfs_ip->i_dtroot;
p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
p->header.nextindex = 0;
/* init freelist */
fsi = 1;
f = &p->slot[fsi];
/* init data area of root */
for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
f->next = fsi;
f->next = -1;
p->header.freelist = 1;
p->header.freecnt = 8;
/* init '..' entry */
p->header.idotdot = cpu_to_le32(idotdot);
return;
}

/*
* add_missing_indices()
*
* function: Fix dtree page in which one or more entries has an invalid index.
* fsck.jfs should really fix this, but it currently does not.
* Called from jfs_readdir when bad index is detected.
*/
static void add_missing_indices(struct inode *inode, s64 bn)
{
struct ldtentry *d;
struct dt_lock *dtlck;
int i;
uint index;
struct lv *lv;
struct metapage *mp;
dtpage_t *p;
int rc;
s8 *stbl;
tid_t tid;
struct tlock *tlck;
tid = txBegin(inode->i_sb, 0);
DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);
if (rc) {
printk(KERN_ERR "DT_GETPAGE failed!\n");
goto end;
}
BT_MARK_DIRTY(mp, inode);
ASSERT(p->header.flag & BT_LEAF);
tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
if (BT_IS_ROOT(mp))
tlck->type |= tlckBTROOT;
dtlck = (struct dt_lock *) &tlck->lock;
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
d = (struct ldtentry *) &p->slot[stbl[i]];
index = le32_to_cpu(d->index);
if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
d->index = cpu_to_le32(add_index(tid, inode, bn, i));
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = &dtlck->lv[dtlck->index];
lv->offset = stbl[i];
lv->length = 1;
dtlck->index++;
}
}
DT_PUTPAGE(mp);
(void) txCommit(tid, 1, &inode, 0);
end:
txEnd(tid);
}

/*
* Buffer to hold directory entry info while traversing a dtree page
 * before being fed to dir_emit()
*/
struct jfs_dirent {
loff_t position;
int ino;
u16 name_len;
char name[];
};

/*
* function to determine next variable-sized jfs_dirent in buffer
*/
static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
{
return (struct jfs_dirent *)
((char *)dirent +
((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
sizeof (loff_t) - 1) &
~(sizeof (loff_t) - 1)));
}
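
/*
 * e.g. (informal; on 64-bit, sizeof(struct jfs_dirent) == 16) a
 * 5-byte name gives (16 + 5 + 1 + 7) & ~7 = 24, i.e. each record,
 * including its reserved terminator byte, is rounded up to a
 * multiple of sizeof(loff_t).
 */
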
/*
* jfs_readdir()
*
* function: read directory entries sequentially
* from the specified entry offset
*
* parameter:
*
* return: offset = (pn, index) of start entry
* of next jfs_readdir()/dtRead()
*/
int jfs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *ip = file_inode(file);
struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
int rc = 0;
loff_t dtpos; /* legacy OS/2 style position */
struct dtoffset {
s16 pn;
s16 index;
s32 unused;
} *dtoffset = (struct dtoffset *) &dtpos;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
s8 *stbl;
struct btstack btstack;
int i, next;
struct ldtentry *d;
struct dtslot *t;
int d_namleft, len, outlen;
unsigned long dirent_buf;
char *name_ptr;
u32 dir_index;
int do_index = 0;
uint loop_count = 0;
struct jfs_dirent *jfs_dirent;
int jfs_dirents;
int overflow, fix_page, page_fixed = 0;
static int unique_pos = 2; /* If we can't fix broken index */
if (ctx->pos == DIREND)
return 0;
if (DO_INDEX(ip)) {
/*
* persistent index is stored in directory entries.
* Special cases: 0 = .
* 1 = ..
* -1 = End of directory
*/
do_index = 1;
dir_index = (u32) ctx->pos;
/*
* NFSv4 reserves cookies 1 and 2 for . and .. so the value
* we return to the vfs is one greater than the one we use
* internally.
*/
if (dir_index)
dir_index--;
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
if (dtEmpty(ip) ||
(dir_index >= JFS_IP(ip)->next_index)) {
/* Stale position. Directory has shrunk */
ctx->pos = DIREND;
return 0;
}
repeat:
rc = read_index(ip, dir_index, &dirtab_slot);
if (rc) {
ctx->pos = DIREND;
return rc;
}
if (dirtab_slot.flag == DIR_INDEX_FREE) {
if (loop_count++ > JFS_IP(ip)->next_index) {
jfs_err("jfs_readdir detected infinite loop!");
ctx->pos = DIREND;
return 0;
}
dir_index = le32_to_cpu(dirtab_slot.addr2);
if (dir_index == -1) {
ctx->pos = DIREND;
return 0;
}
goto repeat;
}
bn = addressDTS(&dirtab_slot);
index = dirtab_slot.slot;
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
ctx->pos = DIREND;
return 0;
}
if (p->header.flag & BT_INTERNAL) {
jfs_err("jfs_readdir: bad index table");
DT_PUTPAGE(mp);
ctx->pos = DIREND;
return 0;
}
} else {
if (dir_index == 0) {
/*
* self "."
*/
ctx->pos = 1;
if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
return 0;
}
/*
* parent ".."
*/
ctx->pos = 2;
if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
return 0;
/*
* Find first entry of left-most leaf
*/
if (dtEmpty(ip)) {
ctx->pos = DIREND;
return 0;
}
if ((rc = dtReadFirst(ip, &btstack)))
return rc;
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
}
} else {
/*
* Legacy filesystem - OS/2 & Linux JFS < 0.3.6
*
* pn = 0; index = 1: First entry "."
* pn = 0; index = 2: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
dtpos = ctx->pos;
if (dtpos < 2) {
/* build "." entry */
ctx->pos = 1;
if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
return 0;
dtoffset->index = 2;
ctx->pos = dtpos;
}
if (dtoffset->pn == 0) {
if (dtoffset->index == 2) {
/* build ".." entry */
if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
return 0;
} else {
jfs_err("jfs_readdir called with invalid offset!");
}
dtoffset->pn = 1;
dtoffset->index = 0;
ctx->pos = dtpos;
}
if (dtEmpty(ip)) {
ctx->pos = DIREND;
return 0;
}
if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) {
jfs_err("jfs_readdir: unexpected rc = %d from dtReadNext",
rc);
ctx->pos = DIREND;
return 0;
}
/* get start leaf page and index */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/* offset beyond directory eof ? */
if (bn < 0) {
ctx->pos = DIREND;
return 0;
}
}
dirent_buf = __get_free_page(GFP_KERNEL);
if (dirent_buf == 0) {
DT_PUTPAGE(mp);
jfs_warn("jfs_readdir: __get_free_page failed!");
ctx->pos = DIREND;
return -ENOMEM;
}
while (1) {
jfs_dirent = (struct jfs_dirent *) dirent_buf;
jfs_dirents = 0;
overflow = fix_page = 0;
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
d = (struct ldtentry *) & p->slot[stbl[i]];
if (((long) jfs_dirent + d->namlen + 1) >
(dirent_buf + PAGE_SIZE)) {
/* DBCS codepages could overrun dirent_buf */
index = i;
overflow = 1;
break;
}
d_namleft = d->namlen;
name_ptr = jfs_dirent->name;
jfs_dirent->ino = le32_to_cpu(d->inumber);
if (do_index) {
len = min(d_namleft, DTLHDRDATALEN);
jfs_dirent->position = le32_to_cpu(d->index);
/*
* d->index should always be valid, but it
* isn't. fsck.jfs doesn't create the
* directory index for the lost+found
* directory. Rather than let it go,
* we can try to fix it.
*/
if ((jfs_dirent->position < 2) ||
(jfs_dirent->position >=
JFS_IP(ip)->next_index)) {
if (!page_fixed && !isReadOnly(ip)) {
fix_page = 1;
/*
* setting overflow and setting
* index to i will cause the
* same page to be processed
* again starting here
*/
overflow = 1;
index = i;
break;
}
jfs_dirent->position = unique_pos++;
}
/*
* We add 1 to the index because we may
* use a value of 2 internally, and NFSv4
* doesn't like that.
*/
jfs_dirent->position++;
} else {
jfs_dirent->position = dtpos;
len = min(d_namleft, DTLHDRDATALEN_LEGACY);
}
/* copy the name of head/only segment */
outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
codepage);
jfs_dirent->name_len = outlen;
/* copy name in the additional segment(s) */
next = d->next;
while (next >= 0) {
t = (struct dtslot *) & p->slot[next];
name_ptr += outlen;
d_namleft -= len;
/* Sanity Check */
if (d_namleft == 0) {
jfs_error(ip->i_sb,
"JFS:Dtree error: ino = %ld, bn=%lld, index = %d\n",
(long)ip->i_ino,
(long long)bn,
i);
goto skip_one;
}
len = min(d_namleft, DTSLOTDATALEN);
outlen = jfs_strfromUCS_le(name_ptr, t->name,
len, codepage);
jfs_dirent->name_len += outlen;
next = t->next;
}
jfs_dirents++;
jfs_dirent = next_jfs_dirent(jfs_dirent);
skip_one:
if (!do_index)
dtoffset->index++;
}
if (!overflow) {
/* Point to next leaf page */
if (p->header.flag & BT_ROOT)
bn = 0;
else {
bn = le64_to_cpu(p->header.next);
index = 0;
/* update offset (pn:index) for new page */
if (!do_index) {
dtoffset->pn++;
dtoffset->index = 0;
}
}
page_fixed = 0;
}
/* unpin previous leaf page */
DT_PUTPAGE(mp);
jfs_dirent = (struct jfs_dirent *) dirent_buf;
while (jfs_dirents--) {
ctx->pos = jfs_dirent->position;
if (!dir_emit(ctx, jfs_dirent->name,
jfs_dirent->name_len,
jfs_dirent->ino, DT_UNKNOWN))
goto out;
jfs_dirent = next_jfs_dirent(jfs_dirent);
}
if (fix_page) {
add_missing_indices(ip, bn);
page_fixed = 1;
}
if (!overflow && (bn == 0)) {
ctx->pos = DIREND;
break;
}
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
free_page(dirent_buf);
return rc;
}
}
out:
free_page(dirent_buf);
return rc;
}

/*
* dtReadFirst()
*
* function: get the leftmost page of the directory
*/
static int dtReadFirst(struct inode *ip, struct btstack * btstack)
{
int rc = 0;
s64 bn;
int psize = 288; /* initial in-line directory */
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
struct btframe *btsp;
pxd_t *xd;
BT_CLR(btstack); /* reset stack */
/*
* descend leftmost path of the tree
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
DT_GETPAGE(ip, bn, mp, psize, p, rc);
if (rc)
return rc;
/*
* leftmost leaf page
*/
if (p->header.flag & BT_LEAF) {
/* return leftmost entry */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = 0;
btsp->mp = mp;
return 0;
}
/*
* descend down to leftmost child page
*/
if (BT_STACK_FULL(btstack)) {
DT_PUTPAGE(mp);
jfs_error(ip->i_sb, "btstack overrun\n");
BT_STACK_DUMP(btstack);
return -EIO;
}
/* push (bn, index) of the parent page/entry */
BT_PUSH(btstack, bn, 0);
/* get the leftmost entry */
stbl = DT_GETSTBL(p);
xd = (pxd_t *) & p->slot[stbl[0]];
/* get the child page block address */
bn = addressPXD(xd);
psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the parent page */
DT_PUTPAGE(mp);
}
}

/*
* dtReadNext()
*
* function: get the page of the specified offset (pn:index)
*
* return: if (offset > eof), bn = -1;
*
* note: if index > nextindex of the target leaf page,
* start with 1st entry of next leaf page;
*/
static int dtReadNext(struct inode *ip, loff_t * offset,
struct btstack * btstack)
{
int rc = 0;
struct dtoffset {
s16 pn;
s16 index;
s32 unused;
} *dtoffset = (struct dtoffset *) offset;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
int pn;
s8 *stbl;
struct btframe *btsp, *parent;
pxd_t *xd;
/*
* get leftmost leaf page pinned
*/
if ((rc = dtReadFirst(ip, btstack)))
return rc;
/* get leaf page */
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
/* get the start offset (pn:index) */
pn = dtoffset->pn - 1; /* Now pn = 0 represents leftmost leaf */
index = dtoffset->index;
/* start at leftmost page ? */
if (pn == 0) {
/* offset beyond eof ? */
if (index < p->header.nextindex)
goto out;
if (p->header.flag & BT_ROOT) {
bn = -1;
goto out;
}
/* start with 1st entry of next leaf page */
dtoffset->pn++;
dtoffset->index = index = 0;
goto a;
}
/* start at non-leftmost page: scan parent pages for large pn */
if (p->header.flag & BT_ROOT) {
bn = -1;
goto out;
}
/* start after next leaf page ? */
if (pn > 1)
goto b;
/* get leaf page pn = 1 */
a:
bn = le64_to_cpu(p->header.next);
/* unpin leaf page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
goto c;
/*
* scan last internal page level to get target leaf page
*/
b:
/* unpin leftmost leaf page */
DT_PUTPAGE(mp);
/* get left most parent page */
btsp = btstack->top;
parent = btsp - 1;
bn = parent->bn;
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* scan parent pages at last internal page level */
while (pn >= p->header.nextindex) {
pn -= p->header.nextindex;
/* get next parent page address */
bn = le64_to_cpu(p->header.next);
/* unpin current parent page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
/* get next parent page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* update parent page stack frame */
parent->bn = bn;
}
/* get leaf page address */
stbl = DT_GETSTBL(p);
xd = (pxd_t *) & p->slot[stbl[pn]];
bn = addressPXD(xd);
/* unpin parent page */
DT_PUTPAGE(mp);
/*
* get target leaf page
*/
c:
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* leaf page has been completed:
* start with 1st entry of next leaf page
*/
if (index >= p->header.nextindex) {
bn = le64_to_cpu(p->header.next);
/* unpin leaf page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
/* get next leaf page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* start with 1st entry of next leaf page */
dtoffset->pn++;
dtoffset->index = 0;
}
out:
/* return target leaf page pinned */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = dtoffset->index;
btsp->mp = mp;
return 0;
}
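
/*
 * Control flow above (informal summary): pn == 0 is the leftmost leaf
 * itself; label 'a' follows header.next to leaf pn == 1; label 'b'
 * scans the last internal level to locate a leaf with pn > 1; label
 * 'c' pins the target leaf, advancing to the next leaf when index is
 * past nextindex.
 */
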
/*
* dtCompare()
*
* function: compare search key with an internal entry
*
* return:
* < 0 if k is < record
* = 0 if k is = record
* > 0 if k is > record
*/
static int dtCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si)
{ /* entry slot index */
wchar_t *kname;
__le16 *name;
int klen, namlen, len, rc;
struct idtentry *ih;
struct dtslot *t;
/*
* force the left-most key on internal pages, at any level of
* the tree, to be less than any search key.
* this obviates having to update the leftmost key on an internal
* page when the user inserts a new key in the tree smaller than
* anything that has been stored.
*
* (? if/when dtSearch() narrows down to 1st entry (index = 0),
* at any internal page at any level of the tree,
* it descends to child of the entry anyway -
* ? make the entry as min size dummy entry)
*
* if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
* return (1);
*/
kname = key->name;
klen = key->namlen;
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
len = min(namlen, DTIHDRDATALEN);
/* compare with head/only segment */
len = min(klen, len);
if ((rc = UniStrncmp_le(kname, name, len)))
return rc;
klen -= len;
namlen -= len;
/* compare with additional segment(s) */
kname += len;
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
if ((rc = UniStrncmp_le(kname, name, len)))
return rc;
klen -= len;
namlen -= len;
kname += len;
si = t->next;
}
return (klen - namlen);
}
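
/*
 * e.g. key "ab" vs. stored name "abc": every compared character
 * matches, so the result is klen - namlen = -1 (the key sorts before
 * the record); a character mismatch instead returns the
 * UniStrncmp_le() result directly.
 */
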
/*
* ciCompare()
*
 * function: compare search key with a (leaf/internal) entry
*
* return:
* < 0 if k is < record
* = 0 if k is = record
* > 0 if k is > record
*/
static int ciCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si, /* entry slot index */
int flag)
{
wchar_t *kname, x;
__le16 *name;
int klen, namlen, len, rc;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int i;
/*
* force the left-most key on internal pages, at any level of
* the tree, to be less than any search key.
* this obviates having to update the leftmost key on an internal
* page when the user inserts a new key in the tree smaller than
* anything that has been stored.
*
* (? if/when dtSearch() narrows down to 1st entry (index = 0),
* at any internal page at any level of the tree,
* it descends to child of the entry anyway -
* ? make the entry as min size dummy entry)
*
* if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
* return (1);
*/
kname = key->name;
klen = key->namlen;
/*
* leaf page entry
*/
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
name = lh->name;
namlen = lh->namlen;
if (flag & JFS_DIR_INDEX)
len = min(namlen, DTLHDRDATALEN);
else
len = min(namlen, DTLHDRDATALEN_LEGACY);
}
/*
* internal page entry
*/
else {
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
len = min(namlen, DTIHDRDATALEN);
}
/* compare with head/only segment */
len = min(klen, len);
for (i = 0; i < len; i++, kname++, name++) {
/* only uppercase if case-insensitive support is on */
if ((flag & JFS_OS2) == JFS_OS2)
x = UniToupper(le16_to_cpu(*name));
else
x = le16_to_cpu(*name);
if ((rc = *kname - x))
return rc;
}
klen -= len;
namlen -= len;
/* compare with additional segment(s) */
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
for (i = 0; i < len; i++, kname++, name++) {
/* only uppercase if case-insensitive support is on */
if ((flag & JFS_OS2) == JFS_OS2)
x = UniToupper(le16_to_cpu(*name));
else
x = le16_to_cpu(*name);
if ((rc = *kname - x))
return rc;
}
klen -= len;
namlen -= len;
si = t->next;
}
return (klen - namlen);
}

/*
* ciGetLeafPrefixKey()
*
* function: compute prefix of suffix compression
* from two adjacent leaf entries
* across page boundary
*
* return: non-zero on error
*
*/
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag)
{
int klen, namlen;
wchar_t *pl, *pr, *kname;
struct component_name lkey;
struct component_name rkey;
lkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
GFP_KERNEL);
if (lkey.name == NULL)
return -ENOMEM;
rkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
GFP_KERNEL);
if (rkey.name == NULL) {
kfree(lkey.name);
return -ENOMEM;
}
/* get left and right key */
dtGetKey(lp, li, &lkey, flag);
lkey.name[lkey.namlen] = 0;
if ((flag & JFS_OS2) == JFS_OS2)
ciToUpper(&lkey);
dtGetKey(rp, ri, &rkey, flag);
rkey.name[rkey.namlen] = 0;
if ((flag & JFS_OS2) == JFS_OS2)
ciToUpper(&rkey);
/* compute prefix */
klen = 0;
kname = key->name;
namlen = min(lkey.namlen, rkey.namlen);
for (pl = lkey.name, pr = rkey.name;
namlen; pl++, pr++, namlen--, klen++, kname++) {
*kname = *pr;
if (*pl != *pr) {
key->namlen = klen + 1;
goto free_names;
}
}
/* l->namlen <= r->namlen since l <= r */
if (lkey.namlen < rkey.namlen) {
*kname = *pr;
key->namlen = klen + 1;
} else /* l->namlen == r->namlen */
key->namlen = klen;
free_names:
kfree(lkey.name);
kfree(rkey.name);
return 0;
}
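
/*
 * e.g. left key "cat", right key "cow": 'c' matches and is copied,
 * then 'a' != 'o' stops the scan, giving the router key "co" - the
 * shortest prefix of the right key that still sorts above every name
 * on the left page.
 */
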
/*
* dtGetKey()
*
* function: get key of the entry
*/
static void dtGetKey(dtpage_t * p, int i, /* entry index */
struct component_name * key, int flag)
{
int si;
s8 *stbl;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int namlen, len;
wchar_t *kname;
__le16 *name;
/* get entry */
stbl = DT_GETSTBL(p);
si = stbl[i];
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
namlen = lh->namlen;
name = lh->name;
if (flag & JFS_DIR_INDEX)
len = min(namlen, DTLHDRDATALEN);
else
len = min(namlen, DTLHDRDATALEN_LEGACY);
} else {
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
namlen = ih->namlen;
name = ih->name;
len = min(namlen, DTIHDRDATALEN);
}
key->namlen = namlen;
kname = key->name;
/*
* move head/only segment
*/
UniStrncpy_from_le(kname, name, len);
/*
* move additional segment(s)
*/
while (si >= 0) {
/* get next segment */
t = &p->slot[si];
kname += len;
namlen -= len;
len = min(namlen, DTSLOTDATALEN);
UniStrncpy_from_le(kname, t->name, len);
si = t->next;
}
}

/*
* dtInsertEntry()
*
* function: allocate free slot(s) and
* write a leaf/internal entry
*
 * return: none; the allocated head slot index is recorded in stbl[index]
*/
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock ** dtlock)
{
struct dtslot *h, *t;
struct ldtentry *lh = NULL;
struct idtentry *ih = NULL;
int hsi, fsi, klen, len, nextindex;
wchar_t *kname;
__le16 *name;
s8 *stbl;
pxd_t *xd;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
s64 bn = 0;
struct metapage *mp = NULL;
klen = key->namlen;
kname = key->name;
/* allocate a free slot */
hsi = fsi = p->header.freelist;
h = &p->slot[fsi];
p->header.freelist = h->next;
--p->header.freecnt;
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = hsi;
/* write head/only segment */
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) h;
lh->next = h->next;
lh->inumber = cpu_to_le32(data->leaf.ino);
lh->namlen = klen;
name = lh->name;
if (data->leaf.ip) {
len = min(klen, DTLHDRDATALEN);
if (!(p->header.flag & BT_ROOT))
bn = addressPXD(&p->header.self);
lh->index = cpu_to_le32(add_index(data->leaf.tid,
data->leaf.ip,
bn, index));
} else
len = min(klen, DTLHDRDATALEN_LEGACY);
} else {
ih = (struct idtentry *) h;
ih->next = h->next;
xd = (pxd_t *) ih;
*xd = data->xd;
ih->namlen = klen;
name = ih->name;
len = min(klen, DTIHDRDATALEN);
}
UniStrncpy_to_le(name, kname, len);
n = 1;
xsi = hsi;
/* write additional segment(s) */
t = h;
klen -= len;
while (klen) {
/* get free slot */
fsi = p->header.freelist;
t = &p->slot[fsi];
p->header.freelist = t->next;
--p->header.freecnt;
/* is next slot contiguous ? */
if (fsi != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = fsi;
n = 0;
}
kname += len;
len = min(klen, DTSLOTDATALEN);
UniStrncpy_to_le(t->name, kname, len);
n++;
xsi = fsi;
klen -= len;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* terminate last/only segment */
if (h == t) {
/* single segment entry */
if (p->header.flag & BT_LEAF)
lh->next = -1;
else
ih->next = -1;
} else
/* multi-segment entry */
t->next = -1;
/* if insert into middle, shift right succeeding entries in stbl */
stbl = DT_GETSTBL(p);
nextindex = p->header.nextindex;
if (index < nextindex) {
memmove(stbl + index + 1, stbl + index, nextindex - index);
if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
s64 lblock;
/*
* Need to update slot number for entries that moved
* in the stbl
*/
mp = NULL;
for (n = index + 1; n <= nextindex; n++) {
lh = (struct ldtentry *) & (p->slot[stbl[n]]);
modify_index(data->leaf.tid, data->leaf.ip,
le32_to_cpu(lh->index), bn, n,
&mp, &lblock);
}
if (mp)
release_metapage(mp);
}
}
stbl[index] = hsi;
/* advance next available entry index of stbl */
++p->header.nextindex;
}
/*
* dtMoveEntry()
*
* function: move entries from split/left page to new/right page
*
* nextindex of dst page and freelist/freecnt of both pages
* are updated.
*/
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index)
{
int ssi, next; /* src slot index */
int di; /* dst entry index */
int dsi; /* dst slot index */
s8 *sstbl, *dstbl; /* sorted entry table */
int snamlen, len;
struct ldtentry *slh, *dlh = NULL;
struct idtentry *sih, *dih = NULL;
struct dtslot *h, *s, *d;
struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
struct lv *slv, *dlv;
int xssi, ns, nd;
int sfsi;
sstbl = (s8 *) & sp->slot[sp->header.stblindex];
dstbl = (s8 *) & dp->slot[dp->header.stblindex];
dsi = dp->header.freelist; /* first (whole page) free slot */
sfsi = sp->header.freelist;
/* linelock destination entry slot */
dlv = & ddtlck->lv[ddtlck->index];
dlv->offset = dsi;
/* linelock source entry slot */
slv = & sdtlck->lv[sdtlck->index];
slv->offset = sstbl[si];
xssi = slv->offset - 1;
/*
* move entries
*/
ns = nd = 0;
for (di = 0; si < sp->header.nextindex; si++, di++) {
ssi = sstbl[si];
dstbl[di] = dsi;
/* is next slot contiguous ? */
if (ssi != xssi + 1) {
/* close current linelock */
slv->length = ns;
sdtlck->index++;
/* open new linelock */
if (sdtlck->index < sdtlck->maxcnt)
slv++;
else {
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
ns = 0;
}
/*
* move head/only segment of an entry
*/
/* get dst slot */
h = d = &dp->slot[dsi];
/* get src slot and move */
s = &sp->slot[ssi];
if (sp->header.flag & BT_LEAF) {
/* get source entry */
slh = (struct ldtentry *) s;
dlh = (struct ldtentry *) h;
snamlen = slh->namlen;
if (do_index) {
len = min(snamlen, DTLHDRDATALEN);
dlh->index = slh->index; /* little-endian */
} else
len = min(snamlen, DTLHDRDATALEN_LEGACY);
memcpy(dlh, slh, 6 + len * 2);
next = slh->next;
/* update dst head/only segment next field */
dsi++;
dlh->next = dsi;
} else {
sih = (struct idtentry *) s;
snamlen = sih->namlen;
len = min(snamlen, DTIHDRDATALEN);
dih = (struct idtentry *) h;
memcpy(dih, sih, 10 + len * 2);
next = sih->next;
dsi++;
dih->next = dsi;
}
/* free src head/only segment */
s->next = sfsi;
s->cnt = 1;
sfsi = ssi;
ns++;
nd++;
xssi = ssi;
/*
* move additional segment(s) of the entry
*/
snamlen -= len;
while ((ssi = next) >= 0) {
/* is next slot contiguous ? */
if (ssi != xssi + 1) {
/* close current linelock */
slv->length = ns;
sdtlck->index++;
/* open new linelock */
if (sdtlck->index < sdtlck->maxcnt)
slv++;
else {
sdtlck =
(struct dt_lock *)
txLinelock(sdtlck);
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
ns = 0;
}
/* get next source segment */
s = &sp->slot[ssi];
/* get next destination free slot */
d++;
len = min(snamlen, DTSLOTDATALEN);
UniStrncpy_le(d->name, s->name, len);
ns++;
nd++;
xssi = ssi;
dsi++;
d->next = dsi;
/* free source segment */
next = s->next;
s->next = sfsi;
s->cnt = 1;
sfsi = ssi;
snamlen -= len;
} /* end while */
/* terminate dst last/only segment */
if (h == d) {
/* single segment entry */
if (dp->header.flag & BT_LEAF)
dlh->next = -1;
else
dih->next = -1;
} else
/* multi-segment entry */
d->next = -1;
} /* end for */
/* close current linelock */
slv->length = ns;
sdtlck->index++;
*sdtlock = sdtlck;
dlv->length = nd;
ddtlck->index++;
*ddtlock = ddtlck;
/* update source header */
sp->header.freelist = sfsi;
sp->header.freecnt += nd;
/* update destination header */
dp->header.nextindex = di;
dp->header.freelist = dsi;
dp->header.freecnt -= nd;
}
/*
* dtDeleteEntry()
*
* function: free a (leaf/internal) entry
*
* log freelist header, stbl, and each segment slot of entry
* (even though last/only segment next field is modified,
* physical image logging requires all segment slots of
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
s8 *stbl;
struct dtslot *t;
int si, freecnt;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
stbl = DT_GETSTBL(p);
fsi = stbl[fi];
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
/* get the head/only segment */
t = &p->slot[fsi];
if (p->header.flag & BT_LEAF)
si = ((struct ldtentry *) t)->next;
else
si = ((struct idtentry *) t)->next;
t->next = si;
t->cnt = 1;
n = freecnt = 1;
xsi = fsi;
/* find the last/only segment */
while (si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
freecnt++;
t = &p->slot[si];
t->cnt = 1;
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* update freelist */
t->next = p->header.freelist;
p->header.freelist = fsi;
p->header.freecnt += freecnt;
/* if delete from middle,
* shift left the succeeding entries in the stbl
*/
si = p->header.nextindex;
if (fi < si - 1)
memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1);
p->header.nextindex--;
}
/*
* dtTruncateEntry()
*
* function: truncate a (leaf/internal) entry
*
* log freelist header, stbl, and each segment slot of entry
* (even though last/only segment next field is modified,
* physical image logging requires all segment slots of
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
{
int tsi; /* truncate entry slot index */
s8 *stbl;
struct dtslot *t;
int si, freecnt;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int fsi, xsi, n;
/* get free entry slot index */
stbl = DT_GETSTBL(p);
tsi = stbl[ti];
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = tsi;
/* get the head/only segment */
t = &p->slot[tsi];
ASSERT(p->header.flag & BT_INTERNAL);
((struct idtentry *) t)->namlen = 0;
si = ((struct idtentry *) t)->next;
((struct idtentry *) t)->next = -1;
n = 1;
freecnt = 0;
fsi = si;
xsi = tsi;
/* find the last/only segment */
while (si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
freecnt++;
t = &p->slot[si];
t->cnt = 1;
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* update freelist */
if (freecnt == 0)
return;
t->next = p->header.freelist;
p->header.freelist = fsi;
p->header.freecnt += freecnt;
}
/*
* dtLinelockFreelist()
*/
static void dtLinelockFreelist(dtpage_t * p, /* directory page */
int m, /* max slot index */
struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
struct dtslot *t;
int si;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
fsi = p->header.freelist;
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
n = 1;
xsi = fsi;
t = &p->slot[fsi];
si = t->next;
/* find the last/only segment */
while (si < m && si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
t = &p->slot[si];
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
}
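/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * "is next slot contiguous ?" pattern repeated above in dtInsertEntry(),
 * dtMoveEntry(), dtDeleteEntry(), dtTruncateEntry() and
 * dtLinelockFreelist() is run-length coalescing -- consecutive slot
 * indices are logged as a single (offset, length) linelock vector, and a
 * gap opens a new one.  The struct and helper names here are hypothetical.
 */
#include <stdio.h>

struct lv_run { int offset, length; };

static int coalesce(const int *slots, int n, struct lv_run *out)
{
	int i, nruns = 0;

	for (i = 0; i < n; i++) {
		if (nruns && slots[i] == out[nruns - 1].offset +
					 out[nruns - 1].length)
			out[nruns - 1].length++;	/* contiguous: extend current run */
		else {
			out[nruns].offset = slots[i];	/* gap: open a new run */
			out[nruns].length = 1;
			nruns++;
		}
	}
	return nruns;
}

int main(void)
{
	int slots[] = { 4, 5, 6, 9, 10, 14 };
	struct lv_run runs[6];
	int i, n = coalesce(slots, 6, runs);

	for (i = 0; i < n; i++)		/* prints (4,3) (9,2) (14,1) */
		printf("lv: offset=%d length=%d\n", runs[i].offset, runs[i].length);
	return 0;
}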
/*
* NAME: dtModify
*
* FUNCTION: Modify the inode number part of a directory entry
*
* PARAMETERS:
* tid - Transaction id
* ip - Inode of parent directory
* key - Name of entry to be modified
* orig_ino - Original inode number expected in entry
* new_ino - New inode number to put into entry
* flag - JFS_RENAME
*
* RETURNS:
* -ESTALE - If entry found does not match orig_ino passed in
* -ENOENT - If no entry can be found to match key
* 0 - If successfully modified entry
*/
int dtModify(tid_t tid, struct inode *ip,
struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
{
int rc;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
struct btstack btstack;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
s8 *stbl;
int entry_si; /* entry slot index */
struct ldtentry *entry;
/*
* search for the entry to modify:
*
* dtSearch() returns (leaf page pinned, index at which to modify).
*/
if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag)))
return rc;
/* retrieve search result */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page of named entry
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/* get slot index of the entry */
stbl = DT_GETSTBL(p);
entry_si = stbl[index];
/* linelock entry */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = entry_si;
lv->length = 1;
dtlck->index++;
/* get the head/only segment */
entry = (struct ldtentry *) & p->slot[entry_si];
/* substitute the inode number of the entry */
entry->inumber = cpu_to_le32(new_ino);
/* unpin the leaf page */
DT_PUTPAGE(mp);
return 0;
}
| linux-master | fs/jfs/jfs_dtree.c |
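/*
 * Illustrative userspace sketch, not part of the kernel source: how
 * dtInsertEntry() above splits a name across directory slots (and
 * dtGetKey() reassembles it) -- a head segment stored in the entry
 * itself plus a chain of DTSLOTDATALEN-character continuation slots.
 * The capacities below mirror the constants used above and should be
 * treated as assumptions for this example.
 */
#include <stdio.h>

#define DTLHDRDATALEN	11	/* head-segment capacity, indexed leaf entry */
#define DTSLOTDATALEN	15	/* capacity of each continuation slot */

static int dt_slots_for_name(int namlen)
{
	int head = namlen < DTLHDRDATALEN ? namlen : DTLHDRDATALEN;
	int left = namlen - head;	/* characters spilling past the head */
	int slots = 1;			/* the head/only segment */

	while (left > 0) {
		slots++;		/* one continuation slot per chunk */
		left -= DTSLOTDATALEN;
	}
	return slots;
}

int main(void)
{
	int n;

	for (n = 1; n <= 60; n += 13)
		printf("namlen %2d -> %d slot(s)\n", n, dt_slots_for_name(n));
	return 0;
}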
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/quotaops.h>
#include <linux/exportfs.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_inode.h"
#include "jfs_dinode.h"
#include "jfs_dmap.h"
#include "jfs_unicode.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
/*
* forward references
*/
const struct dentry_operations jfs_ci_dentry_operations;
static s64 commitZeroLink(tid_t, struct inode *);
/*
* NAME: free_ea_wmap(inode)
*
* FUNCTION: free uncommitted extended attributes from working map
*
*/
static inline void free_ea_wmap(struct inode *inode)
{
dxd_t *ea = &JFS_IP(inode)->ea;
if (ea->flag & DXD_EXTENT) {
/* free EA pages from cache */
invalidate_dxd_metapages(inode, *ea);
dbFree(inode, addressDXD(ea), lengthDXD(ea));
}
ea->flag = 0;
}
/*
* NAME: jfs_create(dip, dentry, mode)
*
* FUNCTION: create a regular file in the parent directory <dip>
* with name = <from dentry> and mode = <mode>
*
* PARAMETER: dip - parent directory vnode
* dentry - dentry of new file
* mode - create mode (rwxrwxrwx).
* excl - exclusive create flag
*
* RETURN: Errors from subroutines
*
*/
static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
struct dentry *dentry, umode_t mode, bool excl)
{
int rc = 0;
tid_t tid; /* transaction id */
struct inode *ip = NULL; /* inode of new file */
ino_t ino;
struct component_name dname; /* name of new file */
struct btstack btstack;
struct inode *iplist[2];
struct tblock *tblk;
jfs_info("jfs_create: dip:0x%p name:%pd", dip, dentry);
rc = dquot_initialize(dip);
if (rc)
goto out1;
/*
* search parent directory for entry/freespace
* (dtSearch() returns parent directory page pinned)
*/
if ((rc = get_UCSname(&dname, dentry)))
goto out1;
/*
* Either ialloc() or txBegin() may block. Deadlock can occur if we
* block there while holding dtree page, so we allocate the inode &
* begin the transaction before we search the directory.
*/
ip = ialloc(dip, mode);
if (IS_ERR(ip)) {
rc = PTR_ERR(ip);
goto out2;
}
tid = txBegin(dip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
rc = jfs_init_acl(tid, ip, dip);
if (rc)
goto out3;
rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
}
if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) {
jfs_err("jfs_create: dtSearch returned %d", rc);
txAbort(tid, 0);
goto out3;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_CREATE;
tblk->ino = ip->i_ino;
tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
iplist[0] = dip;
iplist[1] = ip;
/*
* initialize the child XAD tree root in-line in inode
*/
xtInitRoot(tid, ip);
/*
* create entry in parent directory for the new file
* (dtInsert() releases parent directory page)
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
if (rc == -EIO) {
jfs_err("jfs_create: dtInsert returned -EIO");
txAbort(tid, 1); /* Marks Filesystem dirty */
} else
txAbort(tid, 0); /* Filesystem full */
goto out3;
}
ip->i_op = &jfs_file_inode_operations;
ip->i_fop = &jfs_file_operations;
ip->i_mapping->a_ops = &jfs_aops;
mark_inode_dirty(ip);
dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
out3:
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
out2:
free_UCSname(&dname);
out1:
jfs_info("jfs_create: rc:%d", rc);
return rc;
}
/*
* NAME: jfs_mkdir(dip, dentry, mode)
*
* FUNCTION: create a child directory in the parent directory <dip>
* with name = <from dentry> and mode = <mode>
*
* PARAMETER: dip - parent directory vnode
* dentry - dentry of child directory
* mode - create mode (rwxrwxrwx).
*
* RETURN: Errors from subroutines
*
* note:
* EACCES: user needs search+write permission on the parent directory
*/
static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
struct inode *ip = NULL; /* child directory inode */
ino_t ino;
struct component_name dname; /* child directory name */
struct btstack btstack;
struct inode *iplist[2];
struct tblock *tblk;
jfs_info("jfs_mkdir: dip:0x%p name:%pd", dip, dentry);
rc = dquot_initialize(dip);
if (rc)
goto out1;
/*
* search parent directory for entry/freespace
* (dtSearch() returns parent directory page pinned)
*/
if ((rc = get_UCSname(&dname, dentry)))
goto out1;
/*
* Either ialloc() or txBegin() may block. Deadlock can occur if we
* block there while holding dtree page, so we allocate the inode &
* begin the transaction before we search the directory.
*/
ip = ialloc(dip, S_IFDIR | mode);
if (IS_ERR(ip)) {
rc = PTR_ERR(ip);
goto out2;
}
tid = txBegin(dip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
rc = jfs_init_acl(tid, ip, dip);
if (rc)
goto out3;
rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
}
if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) {
jfs_err("jfs_mkdir: dtSearch returned %d", rc);
txAbort(tid, 0);
goto out3;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_CREATE;
tblk->ino = ip->i_ino;
tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
iplist[0] = dip;
iplist[1] = ip;
/*
* initialize the child directory in-line in inode
*/
dtInitRoot(tid, ip, dip->i_ino);
/*
* create entry in parent directory for child directory
* (dtInsert() releases parent directory page)
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
if (rc == -EIO) {
jfs_err("jfs_mkdir: dtInsert returned -EIO");
txAbort(tid, 1); /* Marks Filesystem dirty */
} else
txAbort(tid, 0); /* Filesystem full */
goto out3;
}
set_nlink(ip, 2); /* for '.' */
ip->i_op = &jfs_dir_inode_operations;
ip->i_fop = &jfs_dir_operations;
mark_inode_dirty(ip);
/* update parent directory inode */
inc_nlink(dip); /* for '..' from child directory */
dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
out3:
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
out2:
free_UCSname(&dname);
out1:
jfs_info("jfs_mkdir: rc:%d", rc);
return rc;
}
/*
* NAME: jfs_rmdir(dip, dentry)
*
* FUNCTION: remove a link to child directory
*
* PARAMETER: dip - parent inode
* dentry - child directory dentry
*
* RETURN: -EINVAL - if name is . or ..
* -EINVAL - if . or .. exist but are invalid.
* errors from subroutines
*
* note:
* if other threads have the directory open when the last link
* is removed, the "." and ".." entries, if present, are removed before
* rmdir() returns and no new entries may be created in the directory,
* but the directory is not removed until the last reference to
* the directory is released (cf. unlink() of a regular file).
*/
static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
{
int rc;
tid_t tid; /* transaction id */
struct inode *ip = d_inode(dentry);
ino_t ino;
struct component_name dname;
struct inode *iplist[2];
struct tblock *tblk;
jfs_info("jfs_rmdir: dip:0x%p name:%pd", dip, dentry);
/* Init inode for quota operations. */
rc = dquot_initialize(dip);
if (rc)
goto out;
rc = dquot_initialize(ip);
if (rc)
goto out;
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
rc = -ENOTEMPTY;
goto out;
}
if ((rc = get_UCSname(&dname, dentry))) {
goto out;
}
tid = txBegin(dip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
iplist[0] = dip;
iplist[1] = ip;
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = ip;
/*
* delete the entry of target directory from parent directory
*/
ino = ip->i_ino;
if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) {
jfs_err("jfs_rmdir: dtDelete returned %d", rc);
if (rc == -EIO)
txAbort(tid, 1);
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
goto out2;
}
/* update parent directory's link count corresponding
* to ".." entry of the target directory deleted
*/
dip->i_mtime = inode_set_ctime_current(dip);
inode_dec_link_count(dip);
/*
* OS/2 could have created EA and/or ACL
*/
/* free EA from both persistent and working map */
if (JFS_IP(ip)->ea.flag & DXD_EXTENT) {
/* free EA pages */
txEA(tid, ip, &JFS_IP(ip)->ea, NULL);
}
JFS_IP(ip)->ea.flag = 0;
/* free ACL from both persistent and working map */
if (JFS_IP(ip)->acl.flag & DXD_EXTENT) {
/* free ACL pages */
txEA(tid, ip, &JFS_IP(ip)->acl, NULL);
}
JFS_IP(ip)->acl.flag = 0;
/* mark the target directory as deleted */
clear_nlink(ip);
mark_inode_dirty(ip);
rc = txCommit(tid, 2, &iplist[0], 0);
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
/*
* Truncating the directory index table is not guaranteed. It
* may need to be done iteratively
*/
if (test_cflag(COMMIT_Stale, dip)) {
if (dip->i_size > 1)
jfs_truncate_nolock(dip, 0);
clear_cflag(COMMIT_Stale, dip);
}
out2:
free_UCSname(&dname);
out:
jfs_info("jfs_rmdir: rc:%d", rc);
return rc;
}
/*
* NAME: jfs_unlink(dip, dentry)
*
* FUNCTION: remove a link to object <vp> named by <name>
* from parent directory <dvp>
*
* PARAMETER: dip - inode of parent directory
* dentry - dentry of object to be removed
*
* RETURN: errors from subroutines
*
* note:
* temporary file: if one or more processes have the file open
* when the last link is removed, the link will be removed before
* unlink() returns, but the removal of the file contents will be
* postponed until all references to the file are closed.
*
* JFS does NOT support unlink() on directories.
*
*/
static int jfs_unlink(struct inode *dip, struct dentry *dentry)
{
int rc;
tid_t tid; /* transaction id */
struct inode *ip = d_inode(dentry);
ino_t ino;
struct component_name dname; /* object name */
struct inode *iplist[2];
struct tblock *tblk;
s64 new_size = 0;
int commit_flag;
jfs_info("jfs_unlink: dip:0x%p name:%pd", dip, dentry);
/* Init inode for quota operations. */
rc = dquot_initialize(dip);
if (rc)
goto out;
rc = dquot_initialize(ip);
if (rc)
goto out;
if ((rc = get_UCSname(&dname, dentry)))
goto out;
IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
tid = txBegin(dip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
iplist[0] = dip;
iplist[1] = ip;
/*
* delete the entry of target file from parent directory
*/
ino = ip->i_ino;
if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) {
jfs_err("jfs_unlink: dtDelete returned %d", rc);
if (rc == -EIO)
txAbort(tid, 1); /* Marks FS Dirty */
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
IWRITE_UNLOCK(ip);
goto out1;
}
ASSERT(ip->i_nlink);
dip->i_mtime = inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip));
mark_inode_dirty(dip);
/* update target's inode */
inode_dec_link_count(ip);
/*
* commit zero link count object
*/
if (ip->i_nlink == 0) {
assert(!test_cflag(COMMIT_Nolink, ip));
/* free block resources */
if ((new_size = commitZeroLink(tid, ip)) < 0) {
txAbort(tid, 1); /* Marks FS Dirty */
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
IWRITE_UNLOCK(ip);
rc = new_size;
goto out1;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = ip;
}
/*
* Incomplete truncate of file data can
* result in timing problems unless we synchronously commit the
* transaction.
*/
if (new_size)
commit_flag = COMMIT_SYNC;
else
commit_flag = 0;
/*
* If xtTruncate was incomplete, commit synchronously to avoid
* timing complications
*/
rc = txCommit(tid, 2, &iplist[0], commit_flag);
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
while (new_size && (rc == 0)) {
tid = txBegin(dip->i_sb, 0);
mutex_lock(&JFS_IP(ip)->commit_mutex);
new_size = xtTruncate_pmap(tid, ip, new_size);
if (new_size < 0) {
txAbort(tid, 1); /* Marks FS Dirty */
rc = new_size;
} else
rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC);
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
}
if (ip->i_nlink == 0)
set_cflag(COMMIT_Nolink, ip);
IWRITE_UNLOCK(ip);
/*
* Truncating the directory index table is not guaranteed. It
* may need to be done iteratively
*/
if (test_cflag(COMMIT_Stale, dip)) {
if (dip->i_size > 1)
jfs_truncate_nolock(dip, 0);
clear_cflag(COMMIT_Stale, dip);
}
out1:
free_UCSname(&dname);
out:
jfs_info("jfs_unlink: rc:%d", rc);
return rc;
}
/*
* NAME: commitZeroLink()
*
* FUNCTION: for a non-directory, called by jfs_unlink() and
* jfs_rename(): truncate a regular file or symbolic
* link to zero length. return 0 if the type is not
* one of these.
*
* if the file is currently associated with a VM segment
* only permanent disk and inode map resources are freed,
* and neither the inode nor indirect blocks are modified
* so that the resources can be later freed in the work
* map by ctrunc1.
* if there is no VM segment on entry, the resources are
* freed in both work and permanent map.
* (for a temporary file, the memory object may remain cached
* even after the last link is dropped, while its reference
* count is still > 0)
*
* PARAMETERS: tid - transaction id
* ip - inode to truncate
*
* RETURN: Errors from subroutines
*/
static s64 commitZeroLink(tid_t tid, struct inode *ip)
{
int filetype;
struct tblock *tblk;
jfs_info("commitZeroLink: tid = %d, ip = 0x%p", tid, ip);
filetype = ip->i_mode & S_IFMT;
switch (filetype) {
case S_IFREG:
break;
case S_IFLNK:
/* fast symbolic link */
if (ip->i_size < IDATASIZE) {
ip->i_size = 0;
return 0;
}
break;
default:
assert(filetype != S_IFDIR);
return 0;
}
set_cflag(COMMIT_Freewmap, ip);
/* mark transaction of block map update type */
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_PMAP;
/*
* free EA
*/
if (JFS_IP(ip)->ea.flag & DXD_EXTENT)
/* acquire maplock on EA to be freed from block map */
txEA(tid, ip, &JFS_IP(ip)->ea, NULL);
/*
* free ACL
*/
if (JFS_IP(ip)->acl.flag & DXD_EXTENT)
/* acquire maplock on EA to be freed from block map */
txEA(tid, ip, &JFS_IP(ip)->acl, NULL);
/*
* free xtree/data (truncate to zero length):
* free xtree/data pages from cache if COMMIT_PWMAP,
* free xtree/data blocks from persistent block map, and
* free xtree/data blocks from working block map if COMMIT_PWMAP;
*/
if (ip->i_size)
return xtTruncate_pmap(tid, ip, 0);
return 0;
}
/*
* NAME: jfs_free_zero_link()
*
* FUNCTION: for a non-directory, called by jfs_evict_inode():
* free resources of a file from cache and the WORKING map
* for a file previously committed with zero link count
* while associated with a pager object.
*
* PARAMETER: ip - pointer to inode of file.
*/
void jfs_free_zero_link(struct inode *ip)
{
int type;
jfs_info("jfs_free_zero_link: ip = 0x%p", ip);
/* return if not a regular file or symbolic link, or if the size
* is already ok.
*/
type = ip->i_mode & S_IFMT;
switch (type) {
case S_IFREG:
break;
case S_IFLNK:
/* if it's contained in the inode, there is nothing to do */
if (ip->i_size < IDATASIZE)
return;
break;
default:
return;
}
/*
* free EA
*/
if (JFS_IP(ip)->ea.flag & DXD_EXTENT) {
s64 xaddr = addressDXD(&JFS_IP(ip)->ea);
int xlen = lengthDXD(&JFS_IP(ip)->ea);
struct maplock maplock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
/* free EA pages from cache */
invalidate_dxd_metapages(ip, JFS_IP(ip)->ea);
/* free EA extent from working block map */
maplock.index = 1;
pxdlock = (struct pxd_lock *) & maplock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, xlen);
txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
}
/*
* free ACL
*/
if (JFS_IP(ip)->acl.flag & DXD_EXTENT) {
s64 xaddr = addressDXD(&JFS_IP(ip)->acl);
int xlen = lengthDXD(&JFS_IP(ip)->acl);
struct maplock maplock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
invalidate_dxd_metapages(ip, JFS_IP(ip)->acl);
/* free ACL extent from working block map */
maplock.index = 1;
pxdlock = (struct pxd_lock *) & maplock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, xlen);
txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
}
/*
* free xtree/data (truncate to zero length):
* free xtree/data pages from cache, and
* free xtree/data blocks from working block map;
*/
if (ip->i_size)
xtTruncate(0, ip, 0, COMMIT_WMAP);
}
/*
* NAME: jfs_link(old_dentry, dir, dentry)
*
* FUNCTION: create a link to the object named by <old_dentry>
* in the parent directory <dir>
*
* PARAMETER: old_dentry - dentry of the existing target object
* dir - parent directory of the new link
* dentry - dentry of the new link
*
* RETURN: Errors from subroutines
*
* note:
* JFS does NOT support link() on directories (to prevent circular
* path in the directory hierarchy);
* EPERM: the target object is a directory, and either the caller
* does not have appropriate privileges or the implementation prohibits
* using link() on directories [XPG4.2].
*
* JFS does NOT support links between file systems:
* EXDEV: target object and new link are on different file systems and
* implementation does not support links between file systems [XPG4.2].
*/
static int jfs_link(struct dentry *old_dentry,
struct inode *dir, struct dentry *dentry)
{
int rc;
tid_t tid;
struct inode *ip = d_inode(old_dentry);
ino_t ino;
struct component_name dname;
struct btstack btstack;
struct inode *iplist[2];
jfs_info("jfs_link: %pd %pd", old_dentry, dentry);
rc = dquot_initialize(dir);
if (rc)
goto out;
if (isReadOnly(ip)) {
jfs_error(ip->i_sb, "read-only filesystem\n");
return -EROFS;
}
tid = txBegin(ip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
/*
* scan parent directory for entry/freespace
*/
if ((rc = get_UCSname(&dname, dentry)))
goto out_tx;
if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
goto free_dname;
/*
* create entry for new link in parent directory
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack)))
goto free_dname;
/* update object inode */
inc_nlink(ip); /* for new link */
inode_set_ctime_current(ip);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
ihold(ip);
iplist[0] = ip;
iplist[1] = dir;
rc = txCommit(tid, 2, &iplist[0], 0);
if (rc) {
drop_nlink(ip); /* never instantiated */
iput(ip);
} else
d_instantiate(dentry, ip);
free_dname:
free_UCSname(&dname);
out_tx:
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dir)->commit_mutex);
out:
jfs_info("jfs_link: rc:%d", rc);
return rc;
}
/*
* NAME: jfs_symlink(dip, dentry, name)
*
* FUNCTION: creates a symbolic link to <symlink> by name <name>
* in directory <dip>
*
* PARAMETER: dip - parent directory vnode
* dentry - dentry of symbolic link
* name - the path name of the existing object
* that will be the source of the link
*
* RETURN: errors from subroutines
*
* note:
* ENAMETOOLONG: pathname resolution of a symbolic link produced
* an intermediate result whose length exceeds PATH_MAX [XPG4.2]
*/
static int jfs_symlink(struct mnt_idmap *idmap, struct inode *dip,
struct dentry *dentry, const char *name)
{
int rc;
tid_t tid;
ino_t ino = 0;
struct component_name dname;
u32 ssize; /* source pathname size */
struct btstack btstack;
struct inode *ip;
s64 xlen = 0;
int bmask = 0, xsize;
s64 xaddr;
struct metapage *mp;
struct super_block *sb;
struct tblock *tblk;
struct inode *iplist[2];
jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
rc = dquot_initialize(dip);
if (rc)
goto out1;
ssize = strlen(name) + 1;
/*
* search parent directory for entry/freespace
* (dtSearch() returns parent directory page pinned)
*/
if ((rc = get_UCSname(&dname, dentry)))
goto out1;
/*
* allocate on-disk/in-memory inode for symbolic link:
* (ialloc() returns a new, locked inode)
*/
ip = ialloc(dip, S_IFLNK | 0777);
if (IS_ERR(ip)) {
rc = PTR_ERR(ip);
goto out2;
}
tid = txBegin(dip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
if (rc)
goto out3;
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_CREATE;
tblk->ino = ip->i_ino;
tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
/* fix symlink access permission
* (dir_create() ANDs in the u.u_cmask,
* but symlinks really need to be 777 access)
*/
ip->i_mode |= 0777;
/*
* write symbolic link target path name
*/
xtInitRoot(tid, ip);
/*
* write source path name inline in on-disk inode (fast symbolic link)
*/
if (ssize <= IDATASIZE) {
ip->i_op = &jfs_fast_symlink_inode_operations;
ip->i_link = JFS_IP(ip)->i_inline_all;
memcpy(ip->i_link, name, ssize);
ip->i_size = ssize - 1;
/*
* if symlink is > 128 bytes, we don't have the space to
* store inline extended attributes
*/
if (ssize > sizeof (JFS_IP(ip)->i_inline))
JFS_IP(ip)->mode2 &= ~INLINEEA;
jfs_info("jfs_symlink: fast symlink added ssize:%u name:%s ",
ssize, name);
}
/*
* write source path name in a single extent
*/
else {
jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
ip->i_op = &jfs_symlink_inode_operations;
inode_nohighmem(ip);
ip->i_mapping->a_ops = &jfs_aops;
/*
* even though the data of symlink object (source
* path name) is treated as non-journaled user data,
* it is read/written thru buffer cache for performance.
*/
sb = ip->i_sb;
bmask = JFS_SBI(sb)->bsize - 1;
xsize = (ssize + bmask) & ~bmask;
xaddr = 0;
xlen = xsize >> JFS_SBI(sb)->l2bsize;
if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) {
txAbort(tid, 0);
goto out3;
}
ip->i_size = ssize - 1;
while (ssize) {
/* This is kind of silly since PATH_MAX == 4K */
u32 copy_size = min_t(u32, ssize, PSIZE);
mp = get_metapage(ip, xaddr, PSIZE, 1);
if (mp == NULL) {
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
rc = -EIO;
txAbort(tid, 0);
goto out3;
}
memcpy(mp->data, name, copy_size);
flush_metapage(mp);
ssize -= copy_size;
name += copy_size;
xaddr += JFS_SBI(sb)->nbperpage;
}
}
/*
* create entry for symbolic link in parent directory
*/
rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE);
if (rc == 0) {
ino = ip->i_ino;
rc = dtInsert(tid, dip, &dname, &ino, &btstack);
}
if (rc) {
if (xlen)
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
txAbort(tid, 0);
/* discard new inode */
goto out3;
}
mark_inode_dirty(ip);
dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
*/
iplist[0] = dip;
iplist[1] = ip;
rc = txCommit(tid, 2, &iplist[0], 0);
out3:
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
out2:
free_UCSname(&dname);
out1:
jfs_info("jfs_symlink: rc:%d", rc);
return rc;
}
/*
* NAME: jfs_rename
*
* FUNCTION: rename a file or directory
*/
static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct btstack btstack;
ino_t ino;
struct component_name new_dname;
struct inode *new_ip;
struct component_name old_dname;
struct inode *old_ip;
int rc;
tid_t tid;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
int ipcount;
struct inode *iplist[4];
struct tblock *tblk;
s64 new_size = 0;
int commit_flag;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
jfs_info("jfs_rename: %pd %pd", old_dentry, new_dentry);
rc = dquot_initialize(old_dir);
if (rc)
goto out1;
rc = dquot_initialize(new_dir);
if (rc)
goto out1;
old_ip = d_inode(old_dentry);
new_ip = d_inode(new_dentry);
if ((rc = get_UCSname(&old_dname, old_dentry)))
goto out1;
if ((rc = get_UCSname(&new_dname, new_dentry)))
goto out2;
/*
* Make sure source inode number is what we think it is
*/
rc = dtSearch(old_dir, &old_dname, &ino, &btstack, JFS_LOOKUP);
if (rc || (ino != old_ip->i_ino)) {
rc = -ENOENT;
goto out3;
}
/*
* Make sure dest inode number (if any) is what we think it is
*/
rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_LOOKUP);
if (!rc) {
if ((!new_ip) || (ino != new_ip->i_ino)) {
rc = -ESTALE;
goto out3;
}
} else if (rc != -ENOENT)
goto out3;
else if (new_ip) {
/* no entry exists, but one was expected */
rc = -ESTALE;
goto out3;
}
if (S_ISDIR(old_ip->i_mode)) {
if (new_ip) {
if (!dtEmpty(new_ip)) {
rc = -ENOTEMPTY;
goto out3;
}
}
} else if (new_ip) {
IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
/* Init inode for quota operations. */
rc = dquot_initialize(new_ip);
if (rc)
goto out_unlock;
}
/*
* The real work starts here
*/
tid = txBegin(new_dir->i_sb, 0);
/*
* How do we know the locking is safe from deadlocks?
* The vfs does the hard part for us. Any time we are taking nested
* commit_mutexes, the vfs already has i_mutex held on the parent.
* Here, the vfs has already taken i_mutex on both old_dir and new_dir.
*/
mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD);
if (old_dir != new_dir)
mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex,
COMMIT_MUTEX_SECOND_PARENT);
if (new_ip) {
mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex,
COMMIT_MUTEX_VICTIM);
/*
* Change existing directory entry to new inode number
*/
ino = new_ip->i_ino;
rc = dtModify(tid, new_dir, &new_dname, &ino,
old_ip->i_ino, JFS_RENAME);
if (rc)
goto out_tx;
drop_nlink(new_ip);
if (S_ISDIR(new_ip->i_mode)) {
drop_nlink(new_ip);
if (new_ip->i_nlink) {
mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
if (old_dir != new_dir)
mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
if (!S_ISDIR(old_ip->i_mode) && new_ip)
IWRITE_UNLOCK(new_ip);
jfs_error(new_ip->i_sb,
"new_ip->i_nlink != 0\n");
return -EIO;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = new_ip;
} else if (new_ip->i_nlink == 0) {
assert(!test_cflag(COMMIT_Nolink, new_ip));
/* free block resources */
if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
txAbort(tid, 1); /* Marks FS Dirty */
rc = new_size;
goto out_tx;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = new_ip;
} else {
inode_set_ctime_current(new_ip);
mark_inode_dirty(new_ip);
}
} else {
/*
* Add new directory entry
*/
rc = dtSearch(new_dir, &new_dname, &ino, &btstack,
JFS_CREATE);
if (rc) {
jfs_err("jfs_rename didn't expect dtSearch to fail w/rc = %d",
rc);
goto out_tx;
}
ino = old_ip->i_ino;
rc = dtInsert(tid, new_dir, &new_dname, &ino, &btstack);
if (rc) {
if (rc == -EIO)
jfs_err("jfs_rename: dtInsert returned -EIO");
goto out_tx;
}
if (S_ISDIR(old_ip->i_mode))
inc_nlink(new_dir);
}
/*
* Remove old directory entry
*/
ino = old_ip->i_ino;
rc = dtDelete(tid, old_dir, &old_dname, &ino, JFS_REMOVE);
if (rc) {
jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
rc);
txAbort(tid, 1); /* Marks Filesystem dirty */
goto out_tx;
}
if (S_ISDIR(old_ip->i_mode)) {
drop_nlink(old_dir);
if (old_dir != new_dir) {
/*
* Change inode number of parent for moved directory
*/
JFS_IP(old_ip)->i_dtroot.header.idotdot =
cpu_to_le32(new_dir->i_ino);
/* Linelock header of dtree */
tlck = txLock(tid, old_ip,
(struct metapage *) &JFS_IP(old_ip)->bxflag,
tlckDTREE | tlckBTROOT | tlckRELINK);
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
}
}
/*
* Update ctime on changed/moved inodes & mark dirty
*/
inode_set_ctime_current(old_ip);
mark_inode_dirty(old_ip);
new_dir->i_mtime = inode_set_ctime_current(new_dir);
mark_inode_dirty(new_dir);
/* Build list of inodes modified by this transaction */
ipcount = 0;
iplist[ipcount++] = old_ip;
if (new_ip)
iplist[ipcount++] = new_ip;
iplist[ipcount++] = old_dir;
if (old_dir != new_dir) {
iplist[ipcount++] = new_dir;
old_dir->i_mtime = inode_set_ctime_current(old_dir);
mark_inode_dirty(old_dir);
}
/*
* Incomplete truncate of file data can
* result in timing problems unless we synchronously commit the
* transaction.
*/
if (new_size)
commit_flag = COMMIT_SYNC;
else
commit_flag = 0;
rc = txCommit(tid, ipcount, iplist, commit_flag);
out_tx:
txEnd(tid);
if (new_ip)
mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
if (old_dir != new_dir)
mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
while (new_size && (rc == 0)) {
tid = txBegin(new_ip->i_sb, 0);
mutex_lock(&JFS_IP(new_ip)->commit_mutex);
new_size = xtTruncate_pmap(tid, new_ip, new_size);
if (new_size < 0) {
txAbort(tid, 1);
rc = new_size;
} else
rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC);
txEnd(tid);
mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
}
if (new_ip && (new_ip->i_nlink == 0))
set_cflag(COMMIT_Nolink, new_ip);
/*
* Truncating the directory index table is not guaranteed. It
* may need to be done iteratively
*/
if (test_cflag(COMMIT_Stale, old_dir)) {
if (old_dir->i_size > 1)
jfs_truncate_nolock(old_dir, 0);
clear_cflag(COMMIT_Stale, old_dir);
}
out_unlock:
if (new_ip && !S_ISDIR(new_ip->i_mode))
IWRITE_UNLOCK(new_ip);
out3:
free_UCSname(&new_dname);
out2:
free_UCSname(&old_dname);
out1:
jfs_info("jfs_rename: returning %d", rc);
return rc;
}
/*
* NAME: jfs_mknod
*
* FUNCTION: Create a special file (device)
*/
static int jfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jfs_inode_info *jfs_ip;
struct btstack btstack;
struct component_name dname;
ino_t ino;
struct inode *ip;
struct inode *iplist[2];
int rc;
tid_t tid;
struct tblock *tblk;
jfs_info("jfs_mknod: %pd", dentry);
rc = dquot_initialize(dir);
if (rc)
goto out;
if ((rc = get_UCSname(&dname, dentry)))
goto out;
ip = ialloc(dir, mode);
if (IS_ERR(ip)) {
rc = PTR_ERR(ip);
goto out1;
}
jfs_ip = JFS_IP(ip);
tid = txBegin(dir->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
rc = jfs_init_acl(tid, ip, dir);
if (rc)
goto out3;
rc = jfs_init_security(tid, ip, dir, &dentry->d_name);
if (rc) {
txAbort(tid, 0);
goto out3;
}
if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE))) {
txAbort(tid, 0);
goto out3;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_CREATE;
tblk->ino = ip->i_ino;
tblk->u.ixpxd = JFS_IP(ip)->ixpxd;
ino = ip->i_ino;
if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack))) {
txAbort(tid, 0);
goto out3;
}
ip->i_op = &jfs_file_inode_operations;
jfs_ip->dev = new_encode_dev(rdev);
init_special_inode(ip, ip->i_mode, rdev);
mark_inode_dirty(ip);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
iplist[0] = dir;
iplist[1] = ip;
rc = txCommit(tid, 2, iplist, 0);
out3:
txEnd(tid);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
mutex_unlock(&JFS_IP(dir)->commit_mutex);
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
out1:
free_UCSname(&dname);
out:
jfs_info("jfs_mknod: returning %d", rc);
return rc;
}
static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags)
{
struct btstack btstack;
ino_t inum;
struct inode *ip;
struct component_name key;
int rc;
jfs_info("jfs_lookup: name = %pd", dentry);
if ((rc = get_UCSname(&key, dentry)))
return ERR_PTR(rc);
rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP);
free_UCSname(&key);
if (rc == -ENOENT) {
ip = NULL;
} else if (rc) {
jfs_err("jfs_lookup: dtSearch returned %d", rc);
ip = ERR_PTR(rc);
} else {
ip = jfs_iget(dip->i_sb, inum);
if (IS_ERR(ip))
jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum);
}
return d_splice_alias(ip, dentry);
}
static struct inode *jfs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
struct inode *inode;
if (ino == 0)
return ERR_PTR(-ESTALE);
inode = jfs_iget(sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return inode;
}
struct dentry *jfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
jfs_nfs_get_inode);
}
struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
jfs_nfs_get_inode);
}
struct dentry *jfs_get_parent(struct dentry *dentry)
{
unsigned long parent_ino;
parent_ino =
le32_to_cpu(JFS_IP(d_inode(dentry))->i_dtroot.header.idotdot);
return d_obtain_alias(jfs_iget(dentry->d_sb, parent_ino));
}
const struct inode_operations jfs_dir_inode_operations = {
.create = jfs_create,
.lookup = jfs_lookup,
.link = jfs_link,
.unlink = jfs_unlink,
.symlink = jfs_symlink,
.mkdir = jfs_mkdir,
.rmdir = jfs_rmdir,
.mknod = jfs_mknod,
.rename = jfs_rename,
.listxattr = jfs_listxattr,
.setattr = jfs_setattr,
.fileattr_get = jfs_fileattr_get,
.fileattr_set = jfs_fileattr_set,
#ifdef CONFIG_JFS_POSIX_ACL
.get_inode_acl = jfs_get_acl,
.set_acl = jfs_set_acl,
#endif
};
WRAP_DIR_ITER(jfs_readdir) // FIXME!
const struct file_operations jfs_dir_operations = {
.read = generic_read_dir,
.iterate_shared = shared_jfs_readdir,
.fsync = jfs_fsync,
.unlocked_ioctl = jfs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = generic_file_llseek,
};
static int jfs_ci_hash(const struct dentry *dir, struct qstr *this)
{
unsigned long hash;
int i;
hash = init_name_hash(dir);
for (i=0; i < this->len; i++)
hash = partial_name_hash(tolower(this->name[i]), hash);
this->hash = end_name_hash(hash);
return 0;
}
static int jfs_ci_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
int i, result = 1;
if (len != name->len)
goto out;
for (i=0; i < len; i++) {
if (tolower(str[i]) != tolower(name->name[i]))
goto out;
}
result = 0;
out:
return result;
}
static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
{
/*
* This is not a negative dentry, so it is always valid.
*
* Note, rename() to an existing directory entry will have ->d_inode,
* and will use the existing name, which isn't the name specified by
* the user.
*
* We may be able to drop this positive dentry here, but dropping a
* positive dentry isn't a good idea. So it's unsupported, like
* rename("filename", "FILENAME"), for now.
*/
if (d_really_is_positive(dentry))
return 1;
/*
* This may be nfsd (or something); either way, we can't see the
* intent of the lookup. Since it could be for creation, drop it.
*/
if (!flags)
return 0;
/*
* Drop the negative dentry to make sure the case-sensitive name
* specified by the user is used if this is for creation.
*/
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return 1;
}
const struct dentry_operations jfs_ci_dentry_operations =
{
.d_hash = jfs_ci_hash,
.d_compare = jfs_ci_compare,
.d_revalidate = jfs_ci_revalidate,
};
| linux-master | fs/jfs/namei.c |
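/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * invariant behind jfs_ci_hash()/jfs_ci_compare() above -- names that
 * compare equal case-insensitively must also hash identically, so each
 * byte is lowercased before it is mixed in.  partial_name_hash() is
 * kernel-internal; the mixer below is a stand-in, not the kernel's hash.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static unsigned long ci_hash(const char *s)
{
	unsigned long hash = 0;

	for (; *s; s++)		/* lowercase first, as jfs_ci_hash() does */
		hash = hash * 31 + tolower((unsigned char)*s);
	return hash;
}

static int ci_compare(const char *a, const char *b)
{
	size_t i, len = strlen(a);

	if (len != strlen(b))
		return 1;	/* mismatch, mirroring jfs_ci_compare() */
	for (i = 0; i < len; i++)
		if (tolower((unsigned char)a[i]) != tolower((unsigned char)b[i]))
			return 1;
	return 0;
}

int main(void)
{
	printf("hash(README) = %lu\n", ci_hash("README"));
	printf("hash(readme) = %lu\n", ci_hash("readme"));	/* same value */
	printf("compare      = %d\n", ci_compare("README", "readme"));
	return 0;
}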
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2002-2004
* Copyright (C) Andreas Gruenbacher, 2001
* Copyright (C) Linus Torvalds, 1991, 1992
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include "jfs_incore.h"
#include "jfs_txnmgr.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
struct posix_acl *jfs_get_acl(struct inode *inode, int type, bool rcu)
{
struct posix_acl *acl;
char *ea_name;
int size;
char *value = NULL;
if (rcu)
return ERR_PTR(-ECHILD);
switch(type) {
case ACL_TYPE_ACCESS:
ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
return ERR_PTR(-EINVAL);
}
size = __jfs_getxattr(inode, ea_name, NULL, 0);
if (size > 0) {
value = kmalloc(size, GFP_KERNEL);
if (!value)
return ERR_PTR(-ENOMEM);
size = __jfs_getxattr(inode, ea_name, value, size);
}
if (size < 0) {
if (size == -ENODATA)
acl = NULL;
else
acl = ERR_PTR(size);
} else {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
}
kfree(value);
return acl;
}
static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
struct posix_acl *acl)
{
char *ea_name;
int rc;
int size = 0;
char *value = NULL;
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
return -EINVAL;
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
value = kmalloc(size, GFP_KERNEL);
if (!value)
return -ENOMEM;
rc = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (rc < 0)
goto out;
}
rc = __jfs_setxattr(tid, inode, ea_name, value, size, 0);
out:
kfree(value);
if (!rc)
set_cached_acl(inode, type, acl);
return rc;
}
int jfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int rc;
tid_t tid;
int update_mode = 0;
struct inode *inode = d_inode(dentry);
umode_t mode = inode->i_mode;
tid = txBegin(inode->i_sb, 0);
mutex_lock(&JFS_IP(inode)->commit_mutex);
if (type == ACL_TYPE_ACCESS && acl) {
rc = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode, &acl);
if (rc)
goto end_tx;
if (mode != inode->i_mode)
update_mode = 1;
}
rc = __jfs_set_acl(tid, inode, type, acl);
if (!rc) {
if (update_mode) {
inode->i_mode = mode;
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
rc = txCommit(tid, 1, &inode, 0);
}
end_tx:
txEnd(tid);
mutex_unlock(&JFS_IP(inode)->commit_mutex);
return rc;
}
int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
{
struct posix_acl *default_acl, *acl;
int rc = 0;
rc = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (rc)
return rc;
if (default_acl) {
rc = __jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, default_acl);
posix_acl_release(default_acl);
} else {
inode->i_default_acl = NULL;
}
if (acl) {
if (!rc)
rc = __jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
} else {
inode->i_acl = NULL;
}
JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) |
inode->i_mode;
return rc;
}
| linux-master | fs/jfs/acl.c |
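/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * buffer sizing used by __jfs_set_acl() above.  posix_acl_xattr_size()
 * is a fixed header plus one fixed-size record per ACL entry; the field
 * layout below mirrors the posix_acl_xattr wire format and is an
 * assumption for this example.
 */
#include <stdio.h>
#include <stdint.h>

struct xattr_header { uint32_t a_version; };
struct xattr_entry { uint16_t e_tag; uint16_t e_perm; uint32_t e_id; };

static size_t acl_xattr_size(int a_count)
{
	return sizeof(struct xattr_header) +
	       (size_t)a_count * sizeof(struct xattr_entry);
}

int main(void)
{
	int n;

	for (n = 0; n <= 4; n++)	/* 4 + 8*n bytes */
		printf("%d ACL entries -> %zu bytes\n", n, acl_xattr_size(n));
	return 0;
}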
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/fs.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_xattr.h"
const struct inode_operations jfs_fast_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = jfs_setattr,
.listxattr = jfs_listxattr,
};
const struct inode_operations jfs_symlink_inode_operations = {
.get_link = page_get_link,
.setattr = jfs_setattr,
.listxattr = jfs_listxattr,
};
| linux-master | fs/jfs/symlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2002
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_dmap.h"
#include "jfs_txnmgr.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
int rc = 0;
rc = file_write_and_wait_range(file, start, end);
if (rc)
return rc;
inode_lock(inode);
if (!(inode->i_state & I_DIRTY_ALL) ||
(datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
inode_unlock(inode);
return rc;
}
rc |= jfs_commit_inode(inode, 1);
inode_unlock(inode);
return rc ? -EIO : 0;
}
static int jfs_open(struct inode *inode, struct file *file)
{
int rc;
if ((rc = dquot_file_open(inode, file)))
return rc;
/*
* We attempt to allow only one "active" file open per allocation
* group. Otherwise, appending to files in parallel can cause
* fragmentation within the files.
*
* If the file is empty, it was probably just created and going
* to be written to. If it has a size, we'll hold off until the
* file is actually grown.
*/
if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
(inode->i_size == 0)) {
struct jfs_inode_info *ji = JFS_IP(inode);
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag == -1) {
struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
}
spin_unlock_irq(&ji->ag_lock);
}
return 0;
}
static int jfs_release(struct inode *inode, struct file *file)
{
struct jfs_inode_info *ji = JFS_IP(inode);
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag != -1) {
struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
atomic_dec(&bmap->db_active[ji->active_ag]);
ji->active_ag = -1;
}
spin_unlock_irq(&ji->ag_lock);
return 0;
}
int jfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int rc;
rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (rc)
return rc;
if (is_quota_modification(&nop_mnt_idmap, inode, iattr)) {
rc = dquot_initialize(inode);
if (rc)
return rc;
}
if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
(iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
rc = dquot_transfer(&nop_mnt_idmap, inode, iattr);
if (rc)
return rc;
}
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
rc = inode_newsize_ok(inode, iattr->ia_size);
if (rc)
return rc;
truncate_setsize(inode, iattr->ia_size);
jfs_truncate(inode);
}
setattr_copy(&nop_mnt_idmap, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE)
rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
return rc;
}
const struct inode_operations jfs_file_inode_operations = {
.listxattr = jfs_listxattr,
.setattr = jfs_setattr,
.fileattr_get = jfs_fileattr_get,
.fileattr_set = jfs_fileattr_set,
#ifdef CONFIG_JFS_POSIX_ACL
.get_inode_acl = jfs_get_acl,
.set_acl = jfs_set_acl,
#endif
};
const struct file_operations jfs_file_operations = {
.open = jfs_open,
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fsync = jfs_fsync,
.release = jfs_release,
.unlocked_ioctl = jfs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
| linux-master | fs/jfs/file.c |
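/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * per-allocation-group accounting that jfs_open()/jfs_release() above
 * keep in ji->active_ag and bmap->db_active[].  Opening an empty file
 * for write pins one AG; releasing the file unpins it.  Locking and the
 * real array size are elided, and the helper names are hypothetical.
 */
#include <stdio.h>

#define MAX_AG 8

static int db_active[MAX_AG];	/* writers currently targeting each AG */

static void open_for_write(int *active_ag, int ag)
{
	if (*active_ag == -1) {	/* first active open for this inode */
		*active_ag = ag;
		db_active[ag]++;
	}
}

static void release_file(int *active_ag)
{
	if (*active_ag != -1) {
		db_active[*active_ag]--;
		*active_ag = -1;
	}
}

int main(void)
{
	int inode_ag = -1;	/* ji->active_ag starts out unset */

	open_for_write(&inode_ag, 3);
	printf("db_active[3] = %d after open\n", db_active[3]);
	release_file(&inode_ag);
	printf("db_active[3] = %d after release\n", db_active[3]);
	return 0;
}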
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) Tino Reichardt, 2012
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_discard.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
/*
* NAME: jfs_issue_discard()
*
* FUNCTION: TRIM the specified block range on device, if supported
*
* PARAMETERS:
* ip - pointer to in-core inode
* blkno - starting block number to be trimmed (0..N)
* nblocks - number of blocks to be trimmed
*
* RETURN VALUES:
* none
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)
{
struct super_block *sb = ip->i_sb;
int r = 0;
r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0);
if (unlikely(r != 0)) {
jfs_err("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!",
sb, (unsigned long long)blkno,
(unsigned long long)nblocks, r);
}
jfs_info("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d",
sb, (unsigned long long)blkno,
(unsigned long long)nblocks, r);
return;
}
/*
* NAME: jfs_ioc_trim()
*
* FUNCTION: attempt to discard (TRIM) all free blocks from the
* filesystem.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* range - the range, given by user space
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct super_block *sb = ipbmap->i_sb;
int agno, agno_end;
u64 start, end, minlen;
u64 trimmed = 0;
/*
* convert byte values to the block size of the filesystem:
* start: first byte to trim
* len: number of bytes to trim from start
* minlen: minimum extent length in bytes
*/
start = range->start >> sb->s_blocksize_bits;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
if (minlen == 0)
minlen = 1;
if (minlen > bmp->db_agsize ||
start >= bmp->db_mapsize ||
range->len < sb->s_blocksize)
return -EINVAL;
if (end >= bmp->db_mapsize)
end = bmp->db_mapsize - 1;
/*
* trim all allocation groups (AGs) within the range
*/
agno = BLKTOAG(start, JFS_SBI(ip->i_sb));
agno_end = BLKTOAG(end, JFS_SBI(ip->i_sb));
while (agno <= agno_end) {
trimmed += dbDiscardAG(ip, agno, minlen);
agno++;
}
range->len = trimmed << sb->s_blocksize_bits;
return 0;
}
| linux-master | fs/jfs/jfs_discard.c |
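/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * byte-to-block conversion at the top of jfs_ioc_trim() above.  A 4 KiB
 * block size (s_blocksize_bits = 12) and the sample fstrim_range values
 * are assumptions for this example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;			/* log2(4096) */
	unsigned long long r_start = 1ULL << 20;	/* range->start: 1 MiB */
	unsigned long long r_len = 8ULL << 20;		/* range->len: 8 MiB */
	unsigned long long r_minlen = 64ULL << 10;	/* range->minlen: 64 KiB */

	unsigned long long start = r_start >> blkbits;
	unsigned long long end = start + (r_len >> blkbits) - 1;
	unsigned long long minlen = r_minlen >> blkbits;

	if (minlen == 0)		/* jfs_ioc_trim() clamps to one block */
		minlen = 1;
	printf("start=%llu end=%llu minlen=%llu (blocks)\n",
	       start, end, minlen);
	return 0;
}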
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
/*
* jfs_logmgr.c: log manager
*
* for related information, see transaction manager (jfs_txnmgr.c), and
* recovery manager (jfs_logredo.c).
*
* note: for detail, RTFS.
*
* log buffer manager:
* special purpose buffer manager supporting log i/o requirements.
* per log serial pageout of logpage
* queuing i/o requests and redrive i/o at iodone
* maintain current logpage buffer
* no caching since append only
* appropriate jfs buffer cache buffers as needed
*
* group commit:
* transactions which wrote COMMIT records in the same in-memory
* log page during the pageout of previous/current log page(s) are
* committed together by the pageout of the page.
*
* TBD lazy commit:
* transactions are committed asynchronously when the log page
* containing their COMMIT record is paged out as it becomes full;
*
* serialization:
* . a per log lock serialize log write.
* . a per log lock serialize group commit.
* . a per log lock serialize log open/close;
*
* TBD log integrity:
* careful-write (ping-pong) of last logpage to recover from crash
* in overwrite.
* detection of split (out-of-order) write of physical sectors
* of last logpage via timestamp at end of each sector
* with its mirror data array at the trailer.
*
* alternatives:
* lsn - 64-bit monotonically increasing integer vs
* 32-bit lspn and page eor.
*/
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/buffer_head.h> /* for sync_blockdev() */
#include <linux/bio.h>
#include <linux/freezer.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
/*
* lbuf's ready to be redriven. Protected by log_redrive_lock (jfsIO thread)
*/
static struct lbuf *log_redrive_list;
static DEFINE_SPINLOCK(log_redrive_lock);
/*
* log read/write serialization (per log)
*/
#define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock)
#define LOG_LOCK(log) mutex_lock(&((log)->loglock))
#define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock))
/*
* log group commit serialization (per log)
*/
#define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock)
#define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock)
#define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock)
#define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait)
/*
* log sync serialization (per log)
*/
#define LOGSYNC_DELTA(logsize) min((logsize)/8, 128*LOGPSIZE)
#define LOGSYNC_BARRIER(logsize) ((logsize)/4)
/*
#define LOGSYNC_DELTA(logsize) min((logsize)/4, 256*LOGPSIZE)
#define LOGSYNC_BARRIER(logsize) ((logsize)/2)
*/
/*
* log buffer cache synchronization
*/
static DEFINE_SPINLOCK(jfsLCacheLock);
#define LCACHE_LOCK(flags) spin_lock_irqsave(&jfsLCacheLock, flags)
#define LCACHE_UNLOCK(flags) spin_unlock_irqrestore(&jfsLCacheLock, flags)
/*
* See __SLEEP_COND in jfs_locks.h
*/
#define LCACHE_SLEEP_COND(wq, cond, flags) \
do { \
if (cond) \
break; \
__SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \
} while (0)
#define LCACHE_WAKEUP(event) wake_up(event)
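/*
 * Illustrative sketch (not part of the original source): the
 * sleep-until-condition pattern behind LCACHE_SLEEP_COND/__SLEEP_COND
 * (see jfs_locks.h), rendered with POSIX threads: the lock is dropped
 * while sleeping and retaken before the condition is re-checked.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;

/* caller holds `lock`; returns with `lock` held and cond() true */
static void sleep_cond(int (*cond)(void))
{
	while (!cond())
		pthread_cond_wait(&wq, &lock);	/* drops, sleeps, retakes */
}
#endif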
/*
* lbuf buffer cache (lCache) control
*/
/* log buffer manager pageout control (cumulative, inclusive) */
#define lbmREAD 0x0001
#define lbmWRITE 0x0002 /* enqueue at tail of write queue;
* init pageout if at head of queue;
*/
#define lbmRELEASE 0x0004 /* remove from write queue
* at completion of pageout;
* do not free/recycle it yet:
* caller will free it;
*/
#define lbmSYNC 0x0008 /* do not return to freelist
* when removed from write queue;
*/
#define lbmFREE 0x0010 /* return to freelist
* at completion of pageout;
* the buffer may be recycled;
*/
#define lbmDONE 0x0020
#define lbmERROR 0x0040
#define lbmGC 0x0080 /* lbmIODone to perform post-GC processing
* of log page
*/
#define lbmDIRECT 0x0100
/*
* Global list of active external journals
*/
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log;
static DEFINE_MUTEX(jfs_log_mutex);
/*
* forward references
*/
static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk,
struct lrd * lrd, struct tlock * tlck);
static int lmNextPage(struct jfs_log * log);
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
int activate);
static int open_inline_log(struct super_block *sb);
static int open_dummy_log(struct super_block *sb);
static int lbmLogInit(struct jfs_log * log);
static void lbmLogShutdown(struct jfs_log * log);
static struct lbuf *lbmAllocate(struct jfs_log * log, int);
static void lbmFree(struct lbuf * bp);
static void lbmfree(struct lbuf * bp);
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp);
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block);
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
static int lbmIOWait(struct lbuf * bp, int flag);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf * bp);
static void lmGCwrite(struct jfs_log * log, int cant_block);
static int lmLogSync(struct jfs_log * log, int hard_sync);
/*
* statistics
*/
#ifdef CONFIG_JFS_STATISTICS
static struct lmStat {
uint commit; /* # of commit */
uint pagedone; /* # of page written */
uint submitted; /* # of pages submitted */
uint full_page; /* # of full pages submitted */
uint partial_page; /* # of partial pages submitted */
} lmStat;
#endif
static void write_special_inodes(struct jfs_log *log,
int (*writer)(struct address_space *))
{
struct jfs_sb_info *sbi;
list_for_each_entry(sbi, &log->sb_list, log_list) {
writer(sbi->ipbmap->i_mapping);
writer(sbi->ipimap->i_mapping);
writer(sbi->direct_inode->i_mapping);
}
}
/*
* NAME: lmLog()
*
* FUNCTION: write a log record;
*
 * PARAMETER:	log	- log structure
 *		tblk	- transaction block (may be NULL)
 *		lrd	- log record descriptor
 *		tlck	- transaction lock (may be NULL)
*
* RETURN: lsn - offset to the next log record to write (end-of-log);
* -1 - error;
*
* note: todo: log error handler
*/
int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
int lsn;
int diffp, difft;
struct metapage *mp = NULL;
unsigned long flags;
jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
log, tblk, lrd, tlck);
LOG_LOCK(log);
/* log by (out-of-transaction) JFS ? */
if (tblk == NULL)
goto writeRecord;
/* log from page ? */
if (tlck == NULL ||
tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL)
goto writeRecord;
/*
* initialize/update page/transaction recovery lsn
*/
lsn = log->lsn;
LOGSYNC_LOCK(log, flags);
/*
* initialize page lsn if first log write of the page
*/
if (mp->lsn == 0) {
mp->log = log;
mp->lsn = lsn;
log->count++;
/* insert page at tail of logsynclist */
list_add_tail(&mp->synclist, &log->synclist);
}
/*
* initialize/update lsn of tblock of the page
*
* transaction inherits oldest lsn of pages associated
* with allocation/deallocation of resources (their
* log records are used to reconstruct allocation map
* at recovery time: inode for inode allocation map,
* B+-tree index of extent descriptors for block
* allocation map);
* allocation map pages inherit transaction lsn at
* commit time to allow forwarding log syncpt past log
* records associated with allocation/deallocation of
* resources only after persistent map of these map pages
* have been updated and propagated to home.
*/
/*
* initialize transaction lsn:
*/
if (tblk->lsn == 0) {
/* inherit lsn of its first page logged */
tblk->lsn = mp->lsn;
log->count++;
/* insert tblock after the page on logsynclist */
list_add(&tblk->synclist, &mp->synclist);
}
/*
* update transaction lsn:
*/
else {
/* inherit oldest/smallest lsn of page */
logdiff(diffp, mp->lsn, log);
logdiff(difft, tblk->lsn, log);
if (diffp < difft) {
/* update tblock lsn with page lsn */
tblk->lsn = mp->lsn;
/* move tblock after page on logsynclist */
list_move(&tblk->synclist, &mp->synclist);
}
}
LOGSYNC_UNLOCK(log, flags);
/*
* write the log record
*/
writeRecord:
lsn = lmWriteRecord(log, tblk, lrd, tlck);
/*
* forward log syncpt if log reached next syncpt trigger
*/
logdiff(diffp, lsn, log);
if (diffp >= log->nextsync)
lsn = lmLogSync(log, 0);
/* update end-of-log lsn */
log->lsn = lsn;
LOG_UNLOCK(log);
/* return end-of-log address */
return lsn;
}
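/*
 * Illustrative sketch (not part of the original source): logdiff(),
 * used above and defined in jfs_logmgr.h, measures the distance from
 * the last sync point to an lsn on the circular log.  The same idea as
 * a plain function, assuming byte offsets into a log of logsize bytes:
 */
#if 0
static int log_distance(int lsn, int syncpt, int logsize)
{
	int diff = lsn - syncpt;

	if (diff < 0)		/* lsn wrapped past the end of the log */
		diff += logsize;
	return diff;
}
#endif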
/*
* NAME: lmWriteRecord()
*
* FUNCTION: move the log record to current log page
*
 * PARAMETER:	log	- log structure
 *		tblk	- transaction block (may be NULL)
 *		lrd	- log record descriptor
 *		tlck	- transaction lock (may be NULL)
*
* RETURN: end-of-log address
*
* serialization: LOG_LOCK() held on entry/exit
*/
static int
lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
int lsn = 0; /* end-of-log address */
struct lbuf *bp; /* dst log page buffer */
struct logpage *lp; /* dst log page */
caddr_t dst; /* destination address in log page */
int dstoffset; /* end-of-log offset in log page */
int freespace; /* free space in log page */
caddr_t p; /* src meta-data page */
caddr_t src;
int srclen;
int nbytes; /* number of bytes to move */
int i;
int len;
struct linelock *linelock;
struct lv *lv;
struct lvd *lvd;
int l2linesize;
len = 0;
/* retrieve destination log page to write */
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = log->eor;
/* any log data to write ? */
if (tlck == NULL)
goto moveLrd;
/*
* move log record data
*/
/* retrieve source meta-data page to log */
if (tlck->flag & tlckPAGELOCK) {
p = (caddr_t) (tlck->mp->data);
linelock = (struct linelock *) & tlck->lock;
}
/* retrieve source in-memory inode to log */
else if (tlck->flag & tlckINODELOCK) {
if (tlck->type & tlckDTREE)
p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
else
p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
linelock = (struct linelock *) & tlck->lock;
}
else {
jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
return 0; /* Probably should trap */
}
l2linesize = linelock->l2linesize;
moveData:
ASSERT(linelock->index <= linelock->maxcnt);
lv = linelock->lv;
for (i = 0; i < linelock->index; i++, lv++) {
if (lv->length == 0)
continue;
/* is page full ? */
if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) {
/* page become full: move on to next page */
lmNextPage(log);
bp = log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
}
/*
* move log vector data
*/
src = (u8 *) p + (lv->offset << l2linesize);
srclen = lv->length << l2linesize;
len += srclen;
while (srclen > 0) {
freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
nbytes = min(freespace, srclen);
dst = (caddr_t) lp + dstoffset;
memcpy(dst, src, nbytes);
dstoffset += nbytes;
/* is page not full ? */
if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
break;
/* page become full: move on to next page */
lmNextPage(log);
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
srclen -= nbytes;
src += nbytes;
}
/*
* move log vector descriptor
*/
len += 4;
lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
lvd->offset = cpu_to_le16(lv->offset);
lvd->length = cpu_to_le16(lv->length);
dstoffset += 4;
jfs_info("lmWriteRecord: lv offset:%d length:%d",
lv->offset, lv->length);
}
if ((i = linelock->next)) {
linelock = (struct linelock *) lid_to_tlock(i);
goto moveData;
}
/*
* move log record descriptor
*/
moveLrd:
lrd->length = cpu_to_le16(len);
src = (caddr_t) lrd;
srclen = LOGRDSIZE;
while (srclen > 0) {
freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
nbytes = min(freespace, srclen);
dst = (caddr_t) lp + dstoffset;
memcpy(dst, src, nbytes);
dstoffset += nbytes;
srclen -= nbytes;
/* are there more to move than freespace of page ? */
if (srclen)
goto pageFull;
/*
* end of log record descriptor
*/
/* update last log record eor */
log->eor = dstoffset;
bp->l_eor = dstoffset;
lsn = (log->page << L2LOGPSIZE) + dstoffset;
if (lrd->type & cpu_to_le16(LOG_COMMIT)) {
tblk->clsn = lsn;
jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn,
bp->l_eor);
INCREMENT(lmStat.commit); /* # of commit */
/*
* enqueue tblock for group commit:
*
* enqueue tblock of non-trivial/synchronous COMMIT
* at tail of group commit queue
* (trivial/asynchronous COMMITs are ignored by
* group commit.)
*/
LOGGC_LOCK(log);
/* init tblock gc state */
tblk->flag = tblkGC_QUEUE;
tblk->bp = log->bp;
tblk->pn = log->page;
tblk->eor = log->eor;
/* enqueue transaction to commit queue */
list_add_tail(&tblk->cqueue, &log->cqueue);
LOGGC_UNLOCK(log);
}
jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x",
le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);
/* page not full ? */
if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
return lsn;
pageFull:
/* page become full: move on to next page */
lmNextPage(log);
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
src += nbytes;
}
return lsn;
}
/*
* NAME: lmNextPage()
*
* FUNCTION: write current page and allocate next page.
*
* PARAMETER: log
*
* RETURN: 0
*
* serialization: LOG_LOCK() held on entry/exit
*/
static int lmNextPage(struct jfs_log * log)
{
struct logpage *lp;
int lspn; /* log sequence page number */
int pn; /* current page number */
struct lbuf *bp;
struct lbuf *nextbp;
struct tblock *tblk;
/* get current log page number and log sequence page number */
pn = log->page;
bp = log->bp;
lp = (struct logpage *) bp->l_ldata;
lspn = le32_to_cpu(lp->h.page);
LOGGC_LOCK(log);
/*
* write or queue the full page at the tail of write queue
*/
/* get the tail tblk on commit queue */
if (list_empty(&log->cqueue))
tblk = NULL;
else
tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);
	/* every tblk that has a COMMIT record on the current page,
	 * and has not been committed, must be on the commit queue,
	 * since a tblk is queued on the commit queue at the time
	 * of writing its COMMIT record on the page, before the
	 * page becomes full (even though the tblk thread that
	 * wrote the COMMIT record may currently be suspended);
	 */
/* is page bound with outstanding tail tblk ? */
if (tblk && tblk->pn == pn) {
/* mark tblk for end-of-page */
tblk->flag |= tblkGC_EOP;
if (log->cflag & logGC_PAGEOUT) {
/* if page is not already on write queue,
* just enqueue (no lbmWRITE to prevent redrive)
* buffer to wqueue to ensure correct serial order
* of the pages since log pages will be added
* continuously
*/
if (bp->l_wqnext == NULL)
lbmWrite(log, bp, 0, 0);
} else {
/*
* No current GC leader, initiate group commit
*/
log->cflag |= logGC_PAGEOUT;
lmGCwrite(log, 0);
}
}
/* page is not bound with outstanding tblk:
* init write or mark it to be redriven (lbmWRITE)
*/
else {
/* finalize the page */
bp->l_ceor = bp->l_eor;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
}
LOGGC_UNLOCK(log);
/*
* allocate/initialize next page
*/
/* if log wraps, the first data page of log is 2
* (0 never used, 1 is superblock).
*/
log->page = (pn == log->size - 1) ? 2 : pn + 1;
log->eor = LOGPHDRSIZE; /* ? valid page empty/full at logRedo() */
/* allocate/initialize next log page buffer */
nextbp = lbmAllocate(log, log->page);
nextbp->l_eor = log->eor;
log->bp = nextbp;
/* initialize next log page */
lp = (struct logpage *) nextbp->l_ldata;
lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
return 0;
}
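/*
 * Illustrative sketch (not part of the original source): the circular
 * page advance used by lmNextPage() above.  Pages 0 (reserved) and 1
 * (log superblock) are never reused, so wrapping off the last page
 * returns to page 2.
 */
#if 0
static int next_log_page(int pn, int log_size_in_pages)
{
	return (pn == log_size_in_pages - 1) ? 2 : pn + 1;
}
#endif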
/*
* NAME: lmGroupCommit()
*
* FUNCTION: group commit
* initiate pageout of the pages with COMMIT in the order of
* page number - redrive pageout of the page at the head of
* pageout queue until full page has been written.
*
 * RETURN:	0	- success
 *		-EIO	- group commit I/O error
*
* NOTE:
* LOGGC_LOCK serializes log group commit queue, and
* transaction blocks on the commit queue.
* N.B. LOG_LOCK is NOT held during lmGroupCommit().
*/
int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
{
int rc = 0;
LOGGC_LOCK(log);
/* group committed already ? */
if (tblk->flag & tblkGC_COMMITTED) {
if (tblk->flag & tblkGC_ERROR)
rc = -EIO;
LOGGC_UNLOCK(log);
return rc;
}
jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);
if (tblk->xflag & COMMIT_LAZY)
tblk->flag |= tblkGC_LAZY;
if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
(!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
|| jfs_tlocks_low)) {
/*
* No pageout in progress
*
* start group commit as its group leader.
*/
log->cflag |= logGC_PAGEOUT;
lmGCwrite(log, 0);
}
if (tblk->xflag & COMMIT_LAZY) {
/*
* Lazy transactions can leave now
*/
LOGGC_UNLOCK(log);
return 0;
}
/* lmGCwrite gives up LOGGC_LOCK, check again */
if (tblk->flag & tblkGC_COMMITTED) {
if (tblk->flag & tblkGC_ERROR)
rc = -EIO;
LOGGC_UNLOCK(log);
return rc;
}
/* upcount transaction waiting for completion
*/
log->gcrtc++;
tblk->flag |= tblkGC_READY;
__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
LOGGC_LOCK(log), LOGGC_UNLOCK(log));
/* removed from commit queue */
if (tblk->flag & tblkGC_ERROR)
rc = -EIO;
LOGGC_UNLOCK(log);
return rc;
}
/*
* NAME: lmGCwrite()
*
* FUNCTION: group commit write
* initiate write of log page, building a group of all transactions
* with commit records on that page.
*
* RETURN: None
*
* NOTE:
* LOGGC_LOCK must be held by caller.
* N.B. LOG_LOCK is NOT held during lmGroupCommit().
*/
static void lmGCwrite(struct jfs_log * log, int cant_write)
{
struct lbuf *bp;
struct logpage *lp;
int gcpn; /* group commit page number */
struct tblock *tblk;
struct tblock *xtblk = NULL;
/*
* build the commit group of a log page
*
* scan commit queue and make a commit group of all
* transactions with COMMIT records on the same log page.
*/
/* get the head tblk on the commit queue */
gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;
list_for_each_entry(tblk, &log->cqueue, cqueue) {
if (tblk->pn != gcpn)
break;
xtblk = tblk;
/* state transition: (QUEUE, READY) -> COMMIT */
tblk->flag |= tblkGC_COMMIT;
}
tblk = xtblk; /* last tblk of the page */
/*
* pageout to commit transactions on the log page.
*/
bp = (struct lbuf *) tblk->bp;
lp = (struct logpage *) bp->l_ldata;
/* is page already full ? */
if (tblk->flag & tblkGC_EOP) {
/* mark page to free at end of group commit of the page */
tblk->flag &= ~tblkGC_EOP;
tblk->flag |= tblkGC_FREE;
bp->l_ceor = bp->l_eor;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
cant_write);
INCREMENT(lmStat.full_page);
}
/* page is not yet full */
else {
bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
INCREMENT(lmStat.partial_page);
}
}
/*
* NAME: lmPostGC()
*
* FUNCTION: group commit post-processing
* Processes transactions after their commit records have been written
* to disk, redriving log I/O if necessary.
*
* RETURN: None
*
* NOTE:
 *	This routine is called at interrupt time by lbmIODone
*/
static void lmPostGC(struct lbuf * bp)
{
unsigned long flags;
struct jfs_log *log = bp->l_log;
struct logpage *lp;
struct tblock *tblk, *temp;
//LOGGC_LOCK(log);
spin_lock_irqsave(&log->gclock, flags);
/*
* current pageout of group commit completed.
*
* remove/wakeup transactions from commit queue who were
* group committed with the current log page
*/
list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
if (!(tblk->flag & tblkGC_COMMIT))
break;
/* if transaction was marked GC_COMMIT then
* it has been shipped in the current pageout
* and made it to disk - it is committed.
*/
if (bp->l_flag & lbmERROR)
tblk->flag |= tblkGC_ERROR;
/* remove it from the commit queue */
list_del(&tblk->cqueue);
tblk->flag &= ~tblkGC_QUEUE;
if (tblk == log->flush_tblk) {
/* we can stop flushing the log now */
clear_bit(log_FLUSH, &log->flag);
log->flush_tblk = NULL;
}
jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
tblk->flag);
if (!(tblk->xflag & COMMIT_FORCE))
/*
* Hand tblk over to lazy commit thread
*/
txLazyUnlock(tblk);
else {
/* state transition: COMMIT -> COMMITTED */
tblk->flag |= tblkGC_COMMITTED;
if (tblk->flag & tblkGC_READY)
log->gcrtc--;
LOGGC_WAKEUP(tblk);
}
/* was page full before pageout ?
* (and this is the last tblk bound with the page)
*/
if (tblk->flag & tblkGC_FREE)
lbmFree(bp);
/* did page become full after pageout ?
* (and this is the last tblk bound with the page)
*/
else if (tblk->flag & tblkGC_EOP) {
/* finalize the page */
lp = (struct logpage *) bp->l_ldata;
bp->l_ceor = bp->l_eor;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
jfs_info("lmPostGC: calling lbmWrite");
lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
1);
}
}
	/* are there any transactions that have entered lmGroupCommit()
	 * (whose COMMITs are after that of the last log page written)?
	 * They are waiting for a new group commit (above at (SLEEP 1)),
	 * or lazy transactions are on a full (queued) log page;
	 * select the latest ready transaction as new group leader and
	 * wake her up to lead her group.
	 */
if ((!list_empty(&log->cqueue)) &&
((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
/*
* Call lmGCwrite with new group leader
*/
lmGCwrite(log, 1);
/* no transaction are ready yet (transactions are only just
* queued (GC_QUEUE) and not entered for group commit yet).
* the first transaction entering group commit
* will elect herself as new group leader.
*/
else
log->cflag &= ~logGC_PAGEOUT;
//LOGGC_UNLOCK(log);
spin_unlock_irqrestore(&log->gclock, flags);
return;
}
/*
* NAME: lmLogSync()
*
* FUNCTION: write log SYNCPT record for specified log
* if new sync address is available
 *	(normally the case if sync() is executed by a background
 *	process).
 *	calculate the new value of log->nextsync, which determines
 *	when this code is called again.
*
* PARAMETERS: log - log structure
* hard_sync - 1 to force all metadata to be written
*
 * RETURN:	lsn	- end-of-log address
*
* serialization: LOG_LOCK() held on entry/exit
*/
static int lmLogSync(struct jfs_log * log, int hard_sync)
{
int logsize;
int written; /* written since last syncpt */
int free; /* free space left available */
int delta; /* additional delta to write normally */
int more; /* additional write granted */
struct lrd lrd;
int lsn;
struct logsyncblk *lp;
unsigned long flags;
/* push dirty metapages out to disk */
if (hard_sync)
write_special_inodes(log, filemap_fdatawrite);
else
write_special_inodes(log, filemap_flush);
/*
* forward syncpt
*/
/* if last sync is same as last syncpt,
* invoke sync point forward processing to update sync.
*/
if (log->sync == log->syncpt) {
LOGSYNC_LOCK(log, flags);
if (list_empty(&log->synclist))
log->sync = log->lsn;
else {
lp = list_entry(log->synclist.next,
struct logsyncblk, synclist);
log->sync = lp->lsn;
}
LOGSYNC_UNLOCK(log, flags);
}
/* if sync is different from last syncpt,
* write a SYNCPT record with syncpt = sync.
* reset syncpt = sync
*/
if (log->sync != log->syncpt) {
lrd.logtid = 0;
lrd.backchain = 0;
lrd.type = cpu_to_le16(LOG_SYNCPT);
lrd.length = 0;
lrd.log.syncpt.sync = cpu_to_le32(log->sync);
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
log->syncpt = log->sync;
} else
lsn = log->lsn;
/*
* setup next syncpt trigger (SWAG)
*/
logsize = log->logsize;
logdiff(written, lsn, log);
free = logsize - written;
delta = LOGSYNC_DELTA(logsize);
more = min(free / 2, delta);
if (more < 2 * LOGPSIZE) {
jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
/*
* log wrapping
*
	 * option 1 - panic ? No!
* option 2 - shutdown file systems
* associated with log ?
* option 3 - extend log ?
* option 4 - second chance
*
* mark log wrapped, and continue.
* when all active transactions are completed,
* mark log valid for recovery.
* if crashed during invalid state, log state
* implies invalid log, forcing fsck().
*/
/* mark log state log wrap in log superblock */
/* log->state = LOGWRAP; */
/* reset sync point computation */
log->syncpt = log->sync = lsn;
log->nextsync = delta;
} else
/* next syncpt trigger = written + more */
log->nextsync = written + more;
/* if number of bytes written from last sync point is more
* than 1/4 of the log size, stop new transactions from
* starting until all current transactions are completed
* by setting syncbarrier flag.
*/
if (!test_bit(log_SYNCBARRIER, &log->flag) &&
(written > LOGSYNC_BARRIER(logsize)) && log->active) {
set_bit(log_SYNCBARRIER, &log->flag);
jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
log->syncpt);
/*
* We may have to initiate group commit
*/
jfs_flush_journal(log, 0);
}
return lsn;
}
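/*
 * Illustrative sketch (not part of the original source): the next-syncpt
 * trigger computed above, with a worked example.  For a 16 MB log and
 * LOGPSIZE = 4096, LOGSYNC_DELTA() is min(16MB/8, 128 * 4KB) = 512 KB;
 * if 1 MB has been written since the last syncpt, free = 15 MB, so
 * more = min(free/2, delta) = 512 KB and nextsync = written + more.
 */
#if 0
static int next_sync_trigger(int logsize, int written)
{
	int delta = (logsize / 8 < 128 * 4096) ? logsize / 8 : 128 * 4096;
	int free = logsize - written;
	int more = (free / 2 < delta) ? free / 2 : delta;

	return written + more;	/* the near-wrap case is handled above */
}
#endif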
/*
* NAME: jfs_syncpt
*
* FUNCTION: write log SYNCPT record for specified log
*
* PARAMETERS: log - log structure
* hard_sync - set to 1 to force metadata to be written
*/
void jfs_syncpt(struct jfs_log *log, int hard_sync)
{
	LOG_LOCK(log);
if (!test_bit(log_QUIESCE, &log->flag))
lmLogSync(log, hard_sync);
LOG_UNLOCK(log);
}
/*
* NAME: lmLogOpen()
*
* FUNCTION: open the log on first open;
* insert filesystem in the active list of the log.
*
 * PARAMETER:	sb	- super block
 *
 * RETURN:	0	- success
 *		errors from subroutines
 *
 * serialization: jfs_log_mutex serializes open of shared (external/dummy) logs
*/
int lmLogOpen(struct super_block *sb)
{
int rc;
struct block_device *bdev;
struct jfs_log *log;
struct jfs_sb_info *sbi = JFS_SBI(sb);
if (sbi->flag & JFS_NOINTEGRITY)
return open_dummy_log(sb);
if (sbi->mntflag & JFS_INLINELOG)
return open_inline_log(sb);
mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
if (log->bdev->bd_dev == sbi->logdev) {
if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
jfs_warn("wrong uuid on JFS journal");
mutex_unlock(&jfs_log_mutex);
return -EINVAL;
}
/*
* add file system to log active file system list
*/
if ((rc = lmLogFileSystem(log, sbi, 1))) {
mutex_unlock(&jfs_log_mutex);
return rc;
}
goto journal_found;
}
}
if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&log->sb_list);
init_waitqueue_head(&log->syncwait);
/*
* external log as separate logical volume
*
* file systems to log may have n-to-1 relationship;
*/
bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
log, NULL);
if (IS_ERR(bdev)) {
rc = PTR_ERR(bdev);
goto free;
}
log->bdev = bdev;
uuid_copy(&log->uuid, &sbi->loguuid);
/*
* initialize log:
*/
if ((rc = lmLogInit(log)))
goto close;
list_add(&log->journal_list, &jfs_external_logs);
/*
* add file system to log active file system list
*/
if ((rc = lmLogFileSystem(log, sbi, 1)))
goto shutdown;
journal_found:
LOG_LOCK(log);
list_add(&sbi->log_list, &log->sb_list);
sbi->log = log;
LOG_UNLOCK(log);
mutex_unlock(&jfs_log_mutex);
return 0;
/*
* unwind on error
*/
shutdown: /* unwind lbmLogInit() */
list_del(&log->journal_list);
lbmLogShutdown(log);
close: /* close external log device */
blkdev_put(bdev, log);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
kfree(log);
jfs_warn("lmLogOpen: exit(%d)", rc);
return rc;
}
static int open_inline_log(struct super_block *sb)
{
struct jfs_log *log;
int rc;
if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&log->sb_list);
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
log->bdev = sb->s_bdev;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
log->l2bsize = sb->s_blocksize_bits;
ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);
/*
* initialize log.
*/
if ((rc = lmLogInit(log))) {
kfree(log);
jfs_warn("lmLogOpen: exit(%d)", rc);
return rc;
}
list_add(&JFS_SBI(sb)->log_list, &log->sb_list);
JFS_SBI(sb)->log = log;
return rc;
}
static int open_dummy_log(struct super_block *sb)
{
int rc;
mutex_lock(&jfs_log_mutex);
if (!dummy_log) {
dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
if (!dummy_log) {
mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&dummy_log->sb_list);
init_waitqueue_head(&dummy_log->syncwait);
dummy_log->no_integrity = 1;
/* Make up some stuff */
dummy_log->base = 0;
dummy_log->size = 1024;
rc = lmLogInit(dummy_log);
if (rc) {
kfree(dummy_log);
dummy_log = NULL;
mutex_unlock(&jfs_log_mutex);
return rc;
}
}
LOG_LOCK(dummy_log);
list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
JFS_SBI(sb)->log = dummy_log;
LOG_UNLOCK(dummy_log);
mutex_unlock(&jfs_log_mutex);
return 0;
}
/*
* NAME: lmLogInit()
*
* FUNCTION: log initialization at first log open.
*
* logredo() (or logformat()) should have been run previously.
* initialize the log from log superblock.
* set the log state in the superblock to LOGMOUNT and
* write SYNCPT log record.
*
* PARAMETER: log - log structure
*
* RETURN: 0 - if ok
* -EINVAL - bad log magic number or superblock dirty
* error returned from logwait()
*
* serialization: single first open thread
*/
int lmLogInit(struct jfs_log * log)
{
int rc = 0;
struct lrd lrd;
struct logsuper *logsuper;
struct lbuf *bpsuper;
struct lbuf *bp;
struct logpage *lp;
int lsn = 0;
jfs_info("lmLogInit: log:0x%p", log);
/* initialize the group commit serialization lock */
LOGGC_LOCK_INIT(log);
/* allocate/initialize the log write serialization lock */
LOG_LOCK_INIT(log);
LOGSYNC_LOCK_INIT(log);
INIT_LIST_HEAD(&log->synclist);
INIT_LIST_HEAD(&log->cqueue);
log->flush_tblk = NULL;
log->count = 0;
/*
* initialize log i/o
*/
if ((rc = lbmLogInit(log)))
return rc;
if (!test_bit(log_INLINELOG, &log->flag))
log->l2bsize = L2LOGPSIZE;
/* check for disabled journaling to disk */
if (log->no_integrity) {
/*
* Journal pages will still be filled. When the time comes
* to actually do the I/O, the write is not done, and the
* endio routine is called directly.
*/
bp = lbmAllocate(log , 0);
log->bp = bp;
bp->l_pn = bp->l_eor = 0;
} else {
/*
* validate log superblock
*/
if ((rc = lbmRead(log, 1, &bpsuper)))
goto errout10;
logsuper = (struct logsuper *) bpsuper->l_ldata;
if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
jfs_warn("*** Log Format Error ! ***");
rc = -EINVAL;
goto errout20;
}
/* logredo() should have been run successfully. */
if (logsuper->state != cpu_to_le32(LOGREDONE)) {
jfs_warn("*** Log Is Dirty ! ***");
rc = -EINVAL;
goto errout20;
}
/* initialize log from log superblock */
if (test_bit(log_INLINELOG,&log->flag)) {
if (log->size != le32_to_cpu(logsuper->size)) {
rc = -EINVAL;
goto errout20;
}
jfs_info("lmLogInit: inline log:0x%p base:0x%Lx size:0x%x",
log, (unsigned long long)log->base, log->size);
} else {
if (!uuid_equal(&logsuper->uuid, &log->uuid)) {
jfs_warn("wrong uuid on JFS log device");
rc = -EINVAL;
goto errout20;
}
log->size = le32_to_cpu(logsuper->size);
log->l2bsize = le32_to_cpu(logsuper->l2bsize);
jfs_info("lmLogInit: external log:0x%p base:0x%Lx size:0x%x",
log, (unsigned long long)log->base, log->size);
}
log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
/*
* initialize for log append write mode
*/
/* establish current/end-of-log page/buffer */
if ((rc = lbmRead(log, log->page, &bp)))
goto errout20;
lp = (struct logpage *) bp->l_ldata;
jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
le32_to_cpu(logsuper->end), log->page, log->eor,
le16_to_cpu(lp->h.eor));
log->bp = bp;
bp->l_pn = log->page;
bp->l_eor = log->eor;
/* if current page is full, move on to next page */
if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
lmNextPage(log);
/*
* initialize log syncpoint
*/
/*
* write the first SYNCPT record with syncpoint = 0
* (i.e., log redo up to HERE !);
* remove current page from lbm write queue at end of pageout
* (to write log superblock update), but do not release to
* freelist;
*/
lrd.logtid = 0;
lrd.backchain = 0;
lrd.type = cpu_to_le16(LOG_SYNCPT);
lrd.length = 0;
lrd.log.syncpt.sync = 0;
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
bp = log->bp;
bp->l_ceor = bp->l_eor;
lp = (struct logpage *) bp->l_ldata;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
if ((rc = lbmIOWait(bp, 0)))
goto errout30;
/*
* update/write superblock
*/
logsuper->state = cpu_to_le32(LOGMOUNT);
log->serial = le32_to_cpu(logsuper->serial) + 1;
logsuper->serial = cpu_to_le32(log->serial);
lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
if ((rc = lbmIOWait(bpsuper, lbmFREE)))
goto errout30;
}
/* initialize logsync parameters */
log->logsize = (log->size - 2) << L2LOGPSIZE;
log->lsn = lsn;
log->syncpt = lsn;
log->sync = log->syncpt;
log->nextsync = LOGSYNC_DELTA(log->logsize);
jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
log->lsn, log->syncpt, log->sync);
/*
* initialize for lazy/group commit
*/
log->clsn = lsn;
return 0;
/*
* unwind on error
*/
errout30: /* release log page */
log->wqueue = NULL;
bp->l_wqnext = NULL;
lbmFree(bp);
errout20: /* release log superblock */
lbmFree(bpsuper);
errout10: /* unwind lbmLogInit() */
lbmLogShutdown(log);
jfs_warn("lmLogInit: exit(%d)", rc);
return rc;
}
/*
* NAME: lmLogClose()
*
* FUNCTION: remove file system <ipmnt> from active list of log <iplog>
* and close it on last close.
*
* PARAMETER: sb - superblock
*
* RETURN: errors from subroutines
*
* serialization:
*/
int lmLogClose(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
struct block_device *bdev;
int rc = 0;
jfs_info("lmLogClose: log:0x%p", log);
mutex_lock(&jfs_log_mutex);
LOG_LOCK(log);
list_del(&sbi->log_list);
LOG_UNLOCK(log);
sbi->log = NULL;
/*
* We need to make sure all of the "written" metapages
* actually make it to disk
*/
sync_blockdev(sb->s_bdev);
if (test_bit(log_INLINELOG, &log->flag)) {
/*
* in-line log in host file system
*/
rc = lmLogShutdown(log);
kfree(log);
goto out;
}
if (!log->no_integrity)
lmLogFileSystem(log, sbi, 0);
if (!list_empty(&log->sb_list))
goto out;
/*
* TODO: ensure that the dummy_log is in a state to allow
* lbmLogShutdown to deallocate all the buffers and call
* kfree against dummy_log. For now, leave dummy_log & its
	 * buffers in memory, and reuse them if another no-integrity mount
* is requested.
*/
if (log->no_integrity)
goto out;
/*
* external log as separate logical volume
*/
list_del(&log->journal_list);
bdev = log->bdev;
rc = lmLogShutdown(log);
blkdev_put(bdev, log);
kfree(log);
out:
mutex_unlock(&jfs_log_mutex);
jfs_info("lmLogClose: exit(%d)", rc);
return rc;
}
/*
* NAME: jfs_flush_journal()
*
* FUNCTION: initiate write of any outstanding transactions to the journal
* and optionally wait until they are all written to disk
*
* wait == 0 flush until latest txn is committed, don't wait
* wait == 1 flush until latest txn is committed, wait
* wait > 1 flush until all txn's are complete, wait
*/
void jfs_flush_journal(struct jfs_log *log, int wait)
{
int i;
struct tblock *target = NULL;
/* jfs_write_inode may call us during read-only mount */
if (!log)
return;
jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);
LOGGC_LOCK(log);
if (!list_empty(&log->cqueue)) {
/*
* This ensures that we will keep writing to the journal as long
* as there are unwritten commit records
*/
target = list_entry(log->cqueue.prev, struct tblock, cqueue);
if (test_bit(log_FLUSH, &log->flag)) {
/*
* We're already flushing.
* if flush_tblk is NULL, we are flushing everything,
* so leave it that way. Otherwise, update it to the
* latest transaction
*/
if (log->flush_tblk)
log->flush_tblk = target;
} else {
/* Only flush until latest transaction is committed */
log->flush_tblk = target;
set_bit(log_FLUSH, &log->flag);
/*
* Initiate I/O on outstanding transactions
*/
if (!(log->cflag & logGC_PAGEOUT)) {
log->cflag |= logGC_PAGEOUT;
lmGCwrite(log, 0);
}
}
}
if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
/* Flush until all activity complete */
set_bit(log_FLUSH, &log->flag);
log->flush_tblk = NULL;
}
if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
DECLARE_WAITQUEUE(__wait, current);
add_wait_queue(&target->gcwait, &__wait);
set_current_state(TASK_UNINTERRUPTIBLE);
LOGGC_UNLOCK(log);
schedule();
LOGGC_LOCK(log);
remove_wait_queue(&target->gcwait, &__wait);
}
LOGGC_UNLOCK(log);
if (wait < 2)
return;
write_special_inodes(log, filemap_fdatawrite);
/*
* If there was recent activity, we may need to wait
* for the lazycommit thread to catch up
*/
if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
for (i = 0; i < 200; i++) { /* Too much? */
msleep(250);
write_special_inodes(log, filemap_fdatawrite);
if (list_empty(&log->cqueue) &&
list_empty(&log->synclist))
break;
}
}
assert(list_empty(&log->cqueue));
#ifdef CONFIG_JFS_DEBUG
if (!list_empty(&log->synclist)) {
struct logsyncblk *lp;
printk(KERN_ERR "jfs_flush_journal: synclist not empty\n");
list_for_each_entry(lp, &log->synclist, synclist) {
if (lp->xflag & COMMIT_PAGE) {
struct metapage *mp = (struct metapage *)lp;
print_hex_dump(KERN_ERR, "metapage: ",
DUMP_PREFIX_ADDRESS, 16, 4,
mp, sizeof(struct metapage), 0);
print_hex_dump(KERN_ERR, "page: ",
DUMP_PREFIX_ADDRESS, 16,
sizeof(long), mp->page,
sizeof(struct page), 0);
} else
print_hex_dump(KERN_ERR, "tblock:",
DUMP_PREFIX_ADDRESS, 16, 4,
lp, sizeof(struct tblock), 0);
}
}
#else
WARN_ON(!list_empty(&log->synclist));
#endif
clear_bit(log_FLUSH, &log->flag);
}
/*
* NAME: lmLogShutdown()
*
* FUNCTION: log shutdown at last LogClose().
*
* write log syncpt record.
* update super block to set redone flag to 0.
*
* PARAMETER: log - log inode
*
* RETURN: 0 - success
*
* serialization: single last close thread
*/
int lmLogShutdown(struct jfs_log * log)
{
int rc;
struct lrd lrd;
int lsn;
struct logsuper *logsuper;
struct lbuf *bpsuper;
struct lbuf *bp;
struct logpage *lp;
jfs_info("lmLogShutdown: log:0x%p", log);
jfs_flush_journal(log, 2);
/*
* write the last SYNCPT record with syncpoint = 0
* (i.e., log redo up to HERE !)
*/
lrd.logtid = 0;
lrd.backchain = 0;
lrd.type = cpu_to_le16(LOG_SYNCPT);
lrd.length = 0;
lrd.log.syncpt.sync = 0;
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
bp = log->bp;
lp = (struct logpage *) bp->l_ldata;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
lbmIOWait(log->bp, lbmFREE);
log->bp = NULL;
/*
* synchronous update log superblock
* mark log state as shutdown cleanly
* (i.e., Log does not need to be replayed).
*/
if ((rc = lbmRead(log, 1, &bpsuper)))
goto out;
logsuper = (struct logsuper *) bpsuper->l_ldata;
logsuper->state = cpu_to_le32(LOGREDONE);
logsuper->end = cpu_to_le32(lsn);
lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
rc = lbmIOWait(bpsuper, lbmFREE);
jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
lsn, log->page, log->eor);
out:
/*
* shutdown per log i/o
*/
lbmLogShutdown(log);
if (rc) {
jfs_warn("lmLogShutdown: exit(%d)", rc);
}
return rc;
}
/*
* NAME: lmLogFileSystem()
*
* FUNCTION: insert (<activate> = true)/remove (<activate> = false)
* file system into/from log active file system list.
*
 * PARAMETERS:	log	- pointer to the log
 *		sbi	- jfs superblock info of the file system
 *		activate - insert/remove device from active list.
 *
 * RETURN:	0	- success
 *		errors returned by lbmIOWait().
*/
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
int activate)
{
int rc = 0;
int i;
struct logsuper *logsuper;
struct lbuf *bpsuper;
uuid_t *uuid = &sbi->uuid;
/*
* insert/remove file system device to log active file system list.
*/
if ((rc = lbmRead(log, 1, &bpsuper)))
return rc;
logsuper = (struct logsuper *) bpsuper->l_ldata;
if (activate) {
for (i = 0; i < MAX_ACTIVE; i++)
if (uuid_is_null(&logsuper->active[i].uuid)) {
uuid_copy(&logsuper->active[i].uuid, uuid);
sbi->aggregate = i;
break;
}
if (i == MAX_ACTIVE) {
jfs_warn("Too many file systems sharing journal!");
lbmFree(bpsuper);
return -EMFILE; /* Is there a better rc? */
}
} else {
for (i = 0; i < MAX_ACTIVE; i++)
if (uuid_equal(&logsuper->active[i].uuid, uuid)) {
uuid_copy(&logsuper->active[i].uuid,
&uuid_null);
break;
}
if (i == MAX_ACTIVE) {
jfs_warn("Somebody stomped on the journal!");
lbmFree(bpsuper);
return -EIO;
}
}
/*
* synchronous write log superblock:
*
* write sidestream bypassing write queue:
* at file system mount, log super block is updated for
* activation of the file system before any log record
* (MOUNT record) of the file system, and at file system
* unmount, all meta data for the file system has been
* flushed before log super block is updated for deactivation
* of the file system.
*/
lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
rc = lbmIOWait(bpsuper, lbmFREE);
return rc;
}
/*
* log buffer manager (lbm)
* ------------------------
*
* special purpose buffer manager supporting log i/o requirements.
*
* per log write queue:
 *	log pageout occurs in serial order via a fifo write queue,
 *	restricted to a single i/o in progress at any one time.
 *	the queue is a circular singly-linked list
 *	(log->wqueue points to the tail, and buffers are linked via
 *	the bp->l_wqnext field), and
 *	maintains log pages in pageout or waiting for pageout in serial order.
*/
/*
* lbmLogInit()
*
* initialize per log I/O setup at lmLogInit()
*/
static int lbmLogInit(struct jfs_log * log)
{ /* log inode */
int i;
struct lbuf *lbuf;
jfs_info("lbmLogInit: log:0x%p", log);
/* initialize current buffer cursor */
log->bp = NULL;
/* initialize log device write queue */
log->wqueue = NULL;
/*
* Each log has its own buffer pages allocated to it. These are
* not managed by the page cache. This ensures that a transaction
* writing to the log does not block trying to allocate a page from
* the page cache (for the log). This would be bad, since page
* allocation waits on the kswapd thread that may be committing inodes
* which would cause log activity. Was that clear? I'm trying to
* avoid deadlock here.
*/
init_waitqueue_head(&log->free_wait);
log->lbuf_free = NULL;
for (i = 0; i < LOGPAGES;) {
char *buffer;
uint offset;
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto error;
buffer = page_address(page);
for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
if (lbuf == NULL) {
if (offset == 0)
__free_page(page);
goto error;
}
if (offset) /* we already have one reference */
get_page(page);
lbuf->l_offset = offset;
lbuf->l_ldata = buffer + offset;
lbuf->l_page = page;
lbuf->l_log = log;
init_waitqueue_head(&lbuf->l_ioevent);
lbuf->l_freelist = log->lbuf_free;
log->lbuf_free = lbuf;
i++;
}
}
return (0);
error:
lbmLogShutdown(log);
return -ENOMEM;
}
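/*
 * Illustrative sketch (not part of the original source): the buffer
 * carving done by lbmLogInit() above, in user space with malloc standing
 * in for alloc_page()/get_page().  Each page is split into
 * PAGE_SIZE/LOGPSIZE buffers pushed onto a singly-linked freelist (on
 * 4 KB-page systems that is one buffer per page).
 */
#if 0
#include <stdlib.h>

#define EX_PAGE_SIZE	4096
#define EX_LOGPSIZE	4096

struct ex_lbuf {
	void *data;
	struct ex_lbuf *freelist;
};

static struct ex_lbuf *carve_page(struct ex_lbuf *freelist)
{
	char *page = malloc(EX_PAGE_SIZE);
	unsigned int offset;

	if (!page)
		return freelist;
	for (offset = 0; offset < EX_PAGE_SIZE; offset += EX_LOGPSIZE) {
		struct ex_lbuf *b = malloc(sizeof(*b));

		if (!b)
			break;	/* the real code unwinds fully on failure */
		b->data = page + offset;
		b->freelist = freelist;	/* push onto freelist head */
		freelist = b;
	}
	return freelist;
}
#endif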
/*
* lbmLogShutdown()
*
* finalize per log I/O setup at lmLogShutdown()
*/
static void lbmLogShutdown(struct jfs_log * log)
{
struct lbuf *lbuf;
jfs_info("lbmLogShutdown: log:0x%p", log);
lbuf = log->lbuf_free;
while (lbuf) {
struct lbuf *next = lbuf->l_freelist;
__free_page(lbuf->l_page);
kfree(lbuf);
lbuf = next;
}
}
/*
* lbmAllocate()
*
* allocate an empty log buffer
*/
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{
struct lbuf *bp;
unsigned long flags;
/*
* recycle from log buffer freelist if any
*/
LCACHE_LOCK(flags);
LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
log->lbuf_free = bp->l_freelist;
LCACHE_UNLOCK(flags);
bp->l_flag = 0;
bp->l_wqnext = NULL;
bp->l_freelist = NULL;
bp->l_pn = pn;
bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));
bp->l_ceor = 0;
return bp;
}
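/*
 * Illustrative sketch (not part of the original source): the log page
 * number to disk block mapping used by lbmAllocate() above.  Assuming
 * L2LOGPSIZE = 12 (4 KB log pages) and a 512-byte-block device
 * (l2bsize = 9), page pn starts pn << 3 blocks past log->base.
 */
#if 0
static long long log_page_to_blkno(long long base, int pn,
				   int l2logpsize, int l2bsize)
{
	return base + ((long long)pn << (l2logpsize - l2bsize));
}
#endif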
/*
* lbmFree()
*
* release a log buffer to freelist
*/
static void lbmFree(struct lbuf * bp)
{
unsigned long flags;
LCACHE_LOCK(flags);
lbmfree(bp);
LCACHE_UNLOCK(flags);
}
static void lbmfree(struct lbuf * bp)
{
struct jfs_log *log = bp->l_log;
assert(bp->l_wqnext == NULL);
/*
* return the buffer to head of freelist
*/
bp->l_freelist = log->lbuf_free;
log->lbuf_free = bp;
wake_up(&log->free_wait);
return;
}
/*
* NAME: lbmRedrive
*
* FUNCTION: add a log buffer to the log redrive list
*
* PARAMETER:
* bp - log buffer
*
* NOTES:
* Takes log_redrive_lock.
*/
static inline void lbmRedrive(struct lbuf *bp)
{
unsigned long flags;
spin_lock_irqsave(&log_redrive_lock, flags);
bp->l_redrive_next = log_redrive_list;
log_redrive_list = bp;
spin_unlock_irqrestore(&log_redrive_lock, flags);
wake_up_process(jfsIOthread);
}
/*
* lbmRead()
*/
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
{
struct bio *bio;
struct lbuf *bp;
/*
* allocate a log buffer
*/
*bpp = bp = lbmAllocate(log, pn);
jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn);
bp->l_flag |= lbmREAD;
bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
submit_bio(bio);
}
wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
return 0;
}
/*
* lbmWrite()
*
* buffer at head of pageout queue stays after completion of
* partial-page pageout and redriven by explicit initiation of
* pageout by caller until full-page pageout is completed and
* released.
*
* device driver i/o done redrives pageout of new buffer at
* head of pageout queue when current buffer at head of pageout
* queue is released at the completion of its full-page pageout.
*
* LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
* LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
*/
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
int cant_block)
{
struct lbuf *tail;
unsigned long flags;
jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn);
/* map the logical block address to physical block address */
bp->l_blkno =
log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
LCACHE_LOCK(flags); /* disable+lock */
/*
* initialize buffer for device driver
*/
bp->l_flag = flag;
/*
* insert bp at tail of write queue associated with log
*
* (request is either for bp already/currently at head of queue
* or new bp to be inserted at tail)
*/
tail = log->wqueue;
/* is buffer not already on write queue ? */
if (bp->l_wqnext == NULL) {
/* insert at tail of wqueue */
if (tail == NULL) {
log->wqueue = bp;
bp->l_wqnext = bp;
} else {
log->wqueue = bp;
bp->l_wqnext = tail->l_wqnext;
tail->l_wqnext = bp;
}
tail = bp;
}
/* is buffer at head of wqueue and for write ? */
if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) {
LCACHE_UNLOCK(flags); /* unlock+enable */
return;
}
LCACHE_UNLOCK(flags); /* unlock+enable */
if (cant_block)
lbmRedrive(bp);
else if (flag & lbmSYNC)
lbmStartIO(bp);
else {
LOGGC_UNLOCK(log);
lbmStartIO(bp);
LOGGC_LOCK(log);
}
}
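/*
 * Illustrative sketch (not part of the original source): the write
 * queue manipulated by lbmWrite() above is a circular singly-linked
 * list whose queue pointer tracks the TAIL, so tail->next is the head.
 * A minimal rendering of the tail insert, with a generic node standing
 * in for struct lbuf:
 */
#if 0
struct node {
	struct node *next;	/* plays the role of l_wqnext */
};

static void tail_insert(struct node **queue, struct node *n)
{
	struct node *tail = *queue;

	if (tail == NULL) {
		n->next = n;		/* single element points at itself */
	} else {
		n->next = tail->next;	/* new tail links to old head */
		tail->next = n;
	}
	*queue = n;			/* queue pointer tracks the tail */
}
#endif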
/*
* lbmDirectWrite()
*
* initiate pageout bypassing write queue for sidestream
* (e.g., log superblock) write;
*/
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
{
jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
bp, flag, bp->l_pn);
/*
* initialize buffer for device driver
*/
bp->l_flag = flag | lbmDIRECT;
/* map the logical block address to physical block address */
bp->l_blkno =
log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
/*
* initiate pageout of the page
*/
lbmStartIO(bp);
}
/*
* NAME: lbmStartIO()
*
* FUNCTION: Interface to DD strategy routine
*
* RETURN: none
*
* serialization: LCACHE_LOCK() is NOT held during log i/o;
*/
static void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
struct jfs_log *log = bp->l_log;
jfs_info("lbmStartIO");
bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
submit_bio(bio);
INCREMENT(lmStat.submitted);
}
}
/*
* lbmIOWait()
*/
static int lbmIOWait(struct lbuf * bp, int flag)
{
unsigned long flags;
int rc = 0;
jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
LCACHE_LOCK(flags); /* disable+lock */
LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags);
rc = (bp->l_flag & lbmERROR) ? -EIO : 0;
if (flag & lbmFREE)
lbmfree(bp);
LCACHE_UNLOCK(flags); /* unlock+enable */
jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
return rc;
}
/*
* lbmIODone()
*
* executed at INTIODONE level
*/
static void lbmIODone(struct bio *bio)
{
struct lbuf *bp = bio->bi_private;
struct lbuf *nextbp, *tail;
struct jfs_log *log;
unsigned long flags;
/*
* get back jfs buffer bound to the i/o buffer
*/
jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag);
LCACHE_LOCK(flags); /* disable+lock */
bp->l_flag |= lbmDONE;
if (bio->bi_status) {
bp->l_flag |= lbmERROR;
jfs_err("lbmIODone: I/O error in JFS log");
}
bio_put(bio);
/*
* pagein completion
*/
if (bp->l_flag & lbmREAD) {
bp->l_flag &= ~lbmREAD;
LCACHE_UNLOCK(flags); /* unlock+enable */
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
return;
}
/*
* pageout completion
*
* the bp at the head of write queue has completed pageout.
*
* if single-commit/full-page pageout, remove the current buffer
* from head of pageout queue, and redrive pageout with
* the new buffer at head of pageout queue;
* otherwise, the partial-page pageout buffer stays at
* the head of pageout queue to be redriven for pageout
* by lmGroupCommit() until full-page pageout is completed.
*/
bp->l_flag &= ~lbmWRITE;
INCREMENT(lmStat.pagedone);
/* update committed lsn */
log = bp->l_log;
log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor;
if (bp->l_flag & lbmDIRECT) {
LCACHE_WAKEUP(&bp->l_ioevent);
LCACHE_UNLOCK(flags);
return;
}
tail = log->wqueue;
/* single element queue */
if (bp == tail) {
/* remove head buffer of full-page pageout
* from log device write queue
*/
if (bp->l_flag & lbmRELEASE) {
log->wqueue = NULL;
bp->l_wqnext = NULL;
}
}
/* multi element queue */
else {
/* remove head buffer of full-page pageout
* from log device write queue
*/
if (bp->l_flag & lbmRELEASE) {
nextbp = tail->l_wqnext = bp->l_wqnext;
bp->l_wqnext = NULL;
/*
* redrive pageout of next page at head of write queue:
* redrive next page without any bound tblk
* (i.e., page w/o any COMMIT records), or
* first page of new group commit which has been
* queued after current page (subsequent pageout
* is performed synchronously, except page without
* any COMMITs) by lmGroupCommit() as indicated
* by lbmWRITE flag;
*/
if (nextbp->l_flag & lbmWRITE) {
/*
* We can't do the I/O at interrupt time.
* The jfsIO thread can do it
*/
lbmRedrive(nextbp);
}
}
}
/*
* synchronous pageout:
*
* buffer has not necessarily been removed from write queue
* (e.g., synchronous write of partial-page with COMMIT):
* leave buffer for i/o initiator to dispose
*/
if (bp->l_flag & lbmSYNC) {
LCACHE_UNLOCK(flags); /* unlock+enable */
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
}
/*
* Group Commit pageout:
*/
else if (bp->l_flag & lbmGC) {
LCACHE_UNLOCK(flags);
lmPostGC(bp);
}
/*
* asynchronous pageout:
*
* buffer must have been removed from write queue:
* insert buffer at head of freelist where it can be recycled
*/
else {
assert(bp->l_flag & lbmRELEASE);
assert(bp->l_flag & lbmFREE);
lbmfree(bp);
LCACHE_UNLOCK(flags); /* unlock+enable */
}
}
int jfsIOWait(void *arg)
{
struct lbuf *bp;
do {
spin_lock_irq(&log_redrive_lock);
while ((bp = log_redrive_list)) {
log_redrive_list = bp->l_redrive_next;
bp->l_redrive_next = NULL;
spin_unlock_irq(&log_redrive_lock);
lbmStartIO(bp);
spin_lock_irq(&log_redrive_lock);
}
if (freezing(current)) {
spin_unlock_irq(&log_redrive_lock);
try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&log_redrive_lock);
schedule();
}
} while (!kthread_should_stop());
jfs_info("jfsIOWait being killed!");
return 0;
}
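/*
 * Illustrative sketch (not part of the original source): the redrive
 * list drained by jfsIOWait() above is a LIFO stack pushed by
 * lbmRedrive() (possibly from interrupt context) under
 * log_redrive_lock.  The push/pop pair with the locking elided:
 */
#if 0
struct rnode {
	struct rnode *redrive_next;
};

static struct rnode *redrive_list;

static void redrive_push(struct rnode *n)	/* as in lbmRedrive() */
{
	n->redrive_next = redrive_list;
	redrive_list = n;
}

static struct rnode *redrive_pop(void)		/* as in jfsIOWait() */
{
	struct rnode *n = redrive_list;

	if (n)
		redrive_list = n->redrive_next;
	return n;
}
#endif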
/*
* NAME: lmLogFormat()/jfs_logform()
*
* FUNCTION: format file system log
*
* PARAMETERS:
* log - volume log
* logAddress - start address of log space in FS block
* logSize - length of log space in FS block;
*
* RETURN: 0 - success
* -EIO - i/o error
*
* XXX: We're synchronously writing one page at a time. This needs to
* be improved by writing multiple pages at once.
*/
int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
{
int rc = -EIO;
struct jfs_sb_info *sbi;
struct logsuper *logsuper;
struct logpage *lp;
int lspn; /* log sequence page number */
struct lrd *lrd_ptr;
int npages = 0;
struct lbuf *bp;
jfs_info("lmLogFormat: logAddress:%Ld logSize:%d",
(long long)logAddress, logSize);
sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list);
/* allocate a log buffer */
bp = lbmAllocate(log, 1);
npages = logSize >> sbi->l2nbperpage;
/*
* log space:
*
* page 0 - reserved;
* page 1 - log superblock;
* page 2 - log data page: A SYNC log record is written
* into this page at logform time;
* pages 3-N - log data page: set to empty log data pages;
*/
/*
* init log superblock: log page 1
*/
logsuper = (struct logsuper *) bp->l_ldata;
logsuper->magic = cpu_to_le32(LOGMAGIC);
logsuper->version = cpu_to_le32(LOGVERSION);
logsuper->state = cpu_to_le32(LOGREDONE);
logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */
logsuper->size = cpu_to_le32(npages);
logsuper->bsize = cpu_to_le32(sbi->bsize);
logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);
bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
bp->l_blkno = logAddress + sbi->nbperpage;
lbmStartIO(bp);
if ((rc = lbmIOWait(bp, 0)))
goto exit;
/*
* init pages 2 to npages-1 as log data pages:
*
* log page sequence number (lpsn) initialization:
*
* pn: 0 1 2 3 n-1
* +-----+-----+=====+=====+===.....===+=====+
* lspn: N-1 0 1 N-2
* <--- N page circular file ---->
*
* the N (= npages-2) data pages of the log is maintained as
* a circular file for the log records;
* lpsn grows by 1 monotonically as each log page is written
* to the circular file of the log;
* and setLogpage() will not reset the page number even if
	 * the eor is equal to LOGPHDRSIZE. In order for the binary search
	 * to still work in the find-log-end process, we have to simulate
	 * the log wrap situation at log format time.
* The 1st log page written will have the highest lpsn. Then
* the succeeding log pages will have ascending order of
* the lspn starting from 0, ... (N-2)
*/
lp = (struct logpage *) bp->l_ldata;
/*
	 * initialize 1st log page to be written: lpsn = N - 1;
	 * a SYNCPT log record is written to this page
*/
lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);
lrd_ptr = (struct lrd *) &lp->data;
lrd_ptr->logtid = 0;
lrd_ptr->backchain = 0;
lrd_ptr->type = cpu_to_le16(LOG_SYNCPT);
lrd_ptr->length = 0;
lrd_ptr->log.syncpt.sync = 0;
bp->l_blkno += sbi->nbperpage;
bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
lbmStartIO(bp);
if ((rc = lbmIOWait(bp, 0)))
goto exit;
/*
* initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
*/
for (lspn = 0; lspn < npages - 3; lspn++) {
lp->h.page = lp->t.page = cpu_to_le32(lspn);
lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
bp->l_blkno += sbi->nbperpage;
bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
lbmStartIO(bp);
if ((rc = lbmIOWait(bp, 0)))
goto exit;
}
rc = 0;
exit:
/*
* finalize log
*/
/* release the buffer */
lbmFree(bp);
return rc;
}
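/*
 * Illustrative sketch (not part of the original source): the lspn
 * numbering written by lmLogFormat() above.  With npages total pages
 * (page 0 reserved, page 1 superblock), the first data page (page 2)
 * gets lspn N - 1 = npages - 3 to simulate a log that has already
 * wrapped, and pages 3..npages-1 get lspn 0..npages-4.
 */
#if 0
static int lspn_of_page(int pn, int npages)
{
	/* valid for data pages only: 2 <= pn < npages */
	return (pn == 2) ? npages - 3 : pn - 3;
}
#endif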
#ifdef CONFIG_JFS_STATISTICS
int jfs_lmstats_proc_show(struct seq_file *m, void *v)
{
seq_printf(m,
"JFS Logmgr stats\n"
"================\n"
"commits = %d\n"
"writes submitted = %d\n"
"writes completed = %d\n"
"full pages submitted = %d\n"
"partial pages submitted = %d\n",
lmStat.commit,
lmStat.submitted,
lmStat.pagedone,
lmStat.full_page,
lmStat.partial_page);
return 0;
}
#endif /* CONFIG_JFS_STATISTICS */
| linux-master | fs/jfs/jfs_logmgr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
/*
* Module: jfs_mount.c
*
* note: file system in transition to aggregate/fileset:
*
* file system mount is interpreted as the mount of aggregate,
* if not already mounted, and mount of the single/only fileset in
* the aggregate;
*
* a file system/aggregate is represented by an internal inode
* (aka mount inode) initialized with aggregate superblock;
* each vfs represents a fileset, and points to its "fileset inode
* allocation map inode" (aka fileset inode):
 * (an aggregate itself is structured recursively as a fileset:
* an internal vfs is constructed and points to its "fileset inode
* allocation map inode" (aka aggregate inode) where each inode
* represents a fileset inode) so that inode number is mapped to
* on-disk inode in uniform way at both aggregate and fileset level;
*
* each vnode/inode of a fileset is linked to its vfs (to facilitate
* per fileset inode operations, e.g., unmount of a fileset, etc.);
* each inode points to the mount inode (to facilitate access to
* per aggregate information, e.g., block size, etc.) as well as
* its file set inode.
*
* aggregate
* ipmnt
* mntvfs -> fileset ipimap+ -> aggregate ipbmap -> aggregate ipaimap;
* fileset vfs -> vp(1) <-> ... <-> vp(n) <->vproot;
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/log2.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
/*
* forward references
*/
static int chkSuper(struct super_block *);
static int logMOUNT(struct super_block *sb);
/*
* NAME: jfs_mount(sb)
*
* FUNCTION: vfs_mount()
*
* PARAMETER: sb - super block
*
* RETURN: -EBUSY - device already mounted or open for write
* -EBUSY - cvrdvp already mounted;
* -EBUSY - mount table full
* -ENOTDIR- cvrdvp not directory on a device mount
* -ENXIO - device open failure
*/
int jfs_mount(struct super_block *sb)
{
int rc = 0; /* Return code */
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct inode *ipaimap = NULL;
struct inode *ipaimap2 = NULL;
struct inode *ipimap = NULL;
struct inode *ipbmap = NULL;
/*
* read/validate superblock
* (initialize mount inode from the superblock)
*/
if ((rc = chkSuper(sb))) {
goto out;
}
ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
if (ipaimap == NULL) {
jfs_err("jfs_mount: Failed to read AGGREGATE_I");
rc = -EIO;
goto out;
}
sbi->ipaimap = ipaimap;
jfs_info("jfs_mount: ipaimap:0x%p", ipaimap);
/*
* initialize aggregate inode allocation map
*/
if ((rc = diMount(ipaimap))) {
jfs_err("jfs_mount: diMount(ipaimap) failed w/rc = %d", rc);
goto err_ipaimap;
}
/*
* open aggregate block allocation map
*/
ipbmap = diReadSpecial(sb, BMAP_I, 0);
if (ipbmap == NULL) {
rc = -EIO;
goto err_umount_ipaimap;
}
jfs_info("jfs_mount: ipbmap:0x%p", ipbmap);
sbi->ipbmap = ipbmap;
/*
* initialize aggregate block allocation map
*/
if ((rc = dbMount(ipbmap))) {
jfs_err("jfs_mount: dbMount failed w/rc = %d", rc);
goto err_ipbmap;
}
/*
* open the secondary aggregate inode allocation map
*
* This is a duplicate of the aggregate inode allocation map.
*
* hand craft a vfs in the same fashion as we did to read ipaimap.
* By adding INOSPEREXT (32) to the inode number, we are telling
* diReadSpecial that we are reading from the secondary aggregate
* inode table. This also creates a unique entry in the inode hash
* table.
*/
if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
if (!ipaimap2) {
jfs_err("jfs_mount: Failed to read AGGREGATE_I");
rc = -EIO;
goto err_umount_ipbmap;
}
sbi->ipaimap2 = ipaimap2;
jfs_info("jfs_mount: ipaimap2:0x%p", ipaimap2);
/*
* initialize secondary aggregate inode allocation map
*/
if ((rc = diMount(ipaimap2))) {
jfs_err("jfs_mount: diMount(ipaimap2) failed, rc = %d",
rc);
goto err_ipaimap2;
}
} else
/* Secondary aggregate inode table is not valid */
sbi->ipaimap2 = NULL;
/*
* mount (the only/single) fileset
*/
/*
* open fileset inode allocation map (aka fileset inode)
*/
ipimap = diReadSpecial(sb, FILESYSTEM_I, 0);
if (ipimap == NULL) {
jfs_err("jfs_mount: Failed to read FILESYSTEM_I");
/* open fileset secondary inode allocation map */
rc = -EIO;
goto err_umount_ipaimap2;
}
jfs_info("jfs_mount: ipimap:0x%p", ipimap);
/* map further access of per fileset inodes by the fileset inode */
sbi->ipimap = ipimap;
/* initialize fileset inode allocation map */
if ((rc = diMount(ipimap))) {
jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
goto err_ipimap;
}
return rc;
/*
* unwind on error
*/
err_ipimap:
/* close fileset inode allocation map inode */
diFreeSpecial(ipimap);
err_umount_ipaimap2:
/* close secondary aggregate inode allocation map */
if (ipaimap2)
diUnmount(ipaimap2, 1);
err_ipaimap2:
/* close aggregate inodes */
if (ipaimap2)
diFreeSpecial(ipaimap2);
err_umount_ipbmap: /* close aggregate block allocation map */
dbUnmount(ipbmap, 1);
err_ipbmap: /* close aggregate inodes */
diFreeSpecial(ipbmap);
err_umount_ipaimap: /* close aggregate inode allocation map */
diUnmount(ipaimap, 1);
err_ipaimap: /* close aggregate inodes */
diFreeSpecial(ipaimap);
out:
if (rc)
jfs_err("Mount JFS Failure: %d", rc);
return rc;
}
/*
* NAME: jfs_mount_rw(sb, remount)
*
* FUNCTION: Completes read-write mount, or remounts read-only volume
* as read-write
*/
int jfs_mount_rw(struct super_block *sb, int remount)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
int rc;
/*
* If we are re-mounting a previously read-only volume, we want to
* re-read the inode and block maps, since fsck.jfs may have updated
* them.
*/
if (remount) {
if (chkSuper(sb) || (sbi->state != FM_CLEAN))
return -EINVAL;
truncate_inode_pages(sbi->ipimap->i_mapping, 0);
truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
IWRITE_LOCK(sbi->ipimap, RDWRLOCK_IMAP);
diUnmount(sbi->ipimap, 1);
if ((rc = diMount(sbi->ipimap))) {
IWRITE_UNLOCK(sbi->ipimap);
jfs_err("jfs_mount_rw: diMount failed!");
return rc;
}
IWRITE_UNLOCK(sbi->ipimap);
dbUnmount(sbi->ipbmap, 1);
if ((rc = dbMount(sbi->ipbmap))) {
jfs_err("jfs_mount_rw: dbMount failed!");
return rc;
}
}
/*
* open/initialize log
*/
if ((rc = lmLogOpen(sb)))
return rc;
/*
* update file system superblock;
*/
if ((rc = updateSuper(sb, FM_MOUNT))) {
jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc);
lmLogClose(sb);
return rc;
}
/*
* write MOUNT log record of the file system
*/
logMOUNT(sb);
return rc;
}
/*
* chkSuper()
*
* validate the superblock of the file system to be mounted and
* get the file system parameters.
*
* returns
 * 0 with the sb info fields set if check successful
* error code if not successful
*/
static int chkSuper(struct super_block *sb)
{
int rc = 0;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_superblock *j_sb;
struct buffer_head *bh;
int AIM_bytesize, AIT_bytesize;
int expected_AIM_bytesize, expected_AIT_bytesize;
s64 AIM_byte_addr, AIT_byte_addr, fsckwsp_addr;
s64 byte_addr_diff0, byte_addr_diff1;
s32 bsize;
if ((rc = readSuper(sb, &bh)))
return rc;
j_sb = (struct jfs_superblock *)bh->b_data;
/*
* validate superblock
*/
/* validate fs signature */
if (strncmp(j_sb->s_magic, JFS_MAGIC, 4) ||
le32_to_cpu(j_sb->s_version) > JFS_VERSION) {
rc = -EINVAL;
goto out;
}
bsize = le32_to_cpu(j_sb->s_bsize);
if (bsize != PSIZE) {
jfs_err("Only 4K block size supported!");
rc = -EINVAL;
goto out;
}
jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
(unsigned long long) le64_to_cpu(j_sb->s_size));
/* validate the descriptors for Secondary AIM and AIT */
if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) !=
cpu_to_le32(JFS_BAD_SAIT)) {
expected_AIM_bytesize = 2 * PSIZE;
AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize;
expected_AIT_bytesize = 4 * PSIZE;
AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize;
AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize;
AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize;
byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr;
fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize;
byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr;
if ((AIM_bytesize != expected_AIM_bytesize) ||
(AIT_bytesize != expected_AIT_bytesize) ||
(byte_addr_diff0 != AIM_bytesize) ||
(byte_addr_diff1 <= AIT_bytesize))
j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
}
if ((j_sb->s_flag & cpu_to_le32(JFS_GROUPCOMMIT)) !=
cpu_to_le32(JFS_GROUPCOMMIT))
j_sb->s_flag |= cpu_to_le32(JFS_GROUPCOMMIT);
/* validate fs state */
if (j_sb->s_state != cpu_to_le32(FM_CLEAN) &&
!sb_rdonly(sb)) {
jfs_err("jfs_mount: Mount Failure: File System Dirty.");
rc = -EINVAL;
goto out;
}
sbi->state = le32_to_cpu(j_sb->s_state);
sbi->mntflag = le32_to_cpu(j_sb->s_flag);
/*
* JFS always does I/O by 4K pages. Don't tell the buffer cache
* that we use anything else (leave s_blocksize alone).
*/
sbi->bsize = bsize;
sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
/* check some fields for possible corruption */
if (sbi->l2bsize != ilog2((u32)bsize) ||
j_sb->pad != 0 ||
le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
rc = -EINVAL;
jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
goto out;
}
/*
* For now, ignore s_pbsize, l2bfactor. All I/O going through buffer
* cache.
*/
sbi->nbperpage = PSIZE >> sbi->l2bsize;
sbi->l2nbperpage = L2PSIZE - sbi->l2bsize;
sbi->l2niperblk = sbi->l2bsize - L2DISIZE;
if (sbi->mntflag & JFS_INLINELOG)
sbi->logpxd = j_sb->s_logpxd;
else {
sbi->logdev = new_decode_dev(le32_to_cpu(j_sb->s_logdev));
uuid_copy(&sbi->uuid, &j_sb->s_uuid);
uuid_copy(&sbi->loguuid, &j_sb->s_loguuid);
}
sbi->fsckpxd = j_sb->s_fsckpxd;
sbi->ait2 = j_sb->s_ait2;
out:
brelse(bh);
return rc;
}
/*
* updateSuper()
*
 * synchronously update the superblock if the file system is mounted read-write.
*/
int updateSuper(struct super_block *sb, uint state)
{
struct jfs_superblock *j_sb;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct buffer_head *bh;
int rc;
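	/*
	 * With JFS_NOINTEGRITY the on-disk state is pinned at FM_DIRTY for
	 * the life of the mount: the real state is shadowed in sbi->p_state
	 * and only written back when the volume goes FM_CLEAN at unmount.
	 */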
if (sbi->flag & JFS_NOINTEGRITY) {
if (state == FM_DIRTY) {
sbi->p_state = state;
return 0;
} else if (state == FM_MOUNT) {
sbi->p_state = sbi->state;
state = FM_DIRTY;
} else if (state == FM_CLEAN) {
state = sbi->p_state;
} else
jfs_err("updateSuper: bad state");
} else if (sbi->state == FM_DIRTY)
return 0;
if ((rc = readSuper(sb, &bh)))
return rc;
j_sb = (struct jfs_superblock *)bh->b_data;
j_sb->s_state = cpu_to_le32(state);
sbi->state = state;
if (state == FM_MOUNT) {
/* record log's dev_t and mount serial number */
j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
} else if (state == FM_CLEAN) {
/*
* If this volume is shared with OS/2, OS/2 will need to
* recalculate DASD usage, since we don't deal with it.
*/
if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
}
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
return 0;
}
/*
* readSuper()
*
* read superblock by raw sector address
*/
int readSuper(struct super_block *sb, struct buffer_head **bpp)
{
/* read in primary superblock */
*bpp = sb_bread(sb, SUPER1_OFF >> sb->s_blocksize_bits);
if (*bpp)
return 0;
/* read in secondary/replicated superblock */
*bpp = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
if (*bpp)
return 0;
return -EIO;
}
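/*
 * Editor's sketch (not in the original): sb_bread() takes a block
 * number, while SUPER1_OFF/SUPER2_OFF above are byte addresses; this
 * helper names the conversion done inline in readSuper().
 */
static inline sector_t jfs_super_blkno(struct super_block *sb,
				       loff_t byte_off)
{
	return byte_off >> sb->s_blocksize_bits;
}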
/*
* logMOUNT()
*
* function: write a MOUNT log record for file system.
*
* MOUNT record keeps logredo() from processing log records
* for this file system past this point in log.
* it is harmless if mount fails.
*
* note: MOUNT record is at aggregate level, not at fileset level,
* since log records of previous mounts of a fileset
* (e.g., AFTER record of extent allocation) have to be processed
* to update block allocation map at aggregate level.
*/
static int logMOUNT(struct super_block *sb)
{
struct jfs_log *log = JFS_SBI(sb)->log;
struct lrd lrd;
lrd.logtid = 0;
lrd.backchain = 0;
lrd.type = cpu_to_le16(LOG_MOUNT);
lrd.length = 0;
lrd.aggregate = cpu_to_le32(new_encode_dev(sb->s_bdev->bd_dev));
lmLog(log, NULL, &lrd, NULL);
return 0;
}
| linux-master | fs/jfs/jfs_mount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2005
*/
/*
* jfs_xtree.c: extent allocation descriptor B+-tree manager
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dmap.h"
#include "jfs_dinode.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
* xtree local flag
*/
#define XT_INSERT 0x00000001
/*
* xtree key/entry comparison: extent offset
*
* return:
* -1: k < start of extent
* 0: start_of_extent <= k <= end_of_extent
* 1: k > end_of_extent
*/
#define XT_CMP(CMP, K, X, OFFSET64)\
{\
OFFSET64 = offsetXAD(X);\
(CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
((K) < OFFSET64) ? -1 : 0;\
}
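/*
 * Function-form equivalent of XT_CMP, for readability only (an editor's
 * sketch; the macro is what the code uses, since it also hands the
 * decoded 64-bit offset back to its caller).
 */
static inline int xt_cmp(s64 k, const xad_t *x)
{
	s64 off = offsetXAD(x);

	if (k < off)
		return -1;
	return (k >= off + lengthXAD(x)) ? 1 : 0;
}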
/* write a xad entry */
#define XT_PUTENTRY(XAD, FLAG, OFF, LEN, ADDR)\
{\
(XAD)->flag = (FLAG);\
XADoffset((XAD), (OFF));\
XADlength((XAD), (LEN));\
XADaddress((XAD), (ADDR));\
}
#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
/* get page buffer for specified block address */
/* ToDo: Replace this ugly macro with a function */
#define XT_GETPAGE(IP, BN, MP, SIZE, P, RC) \
do { \
BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot); \
if (!(RC)) { \
if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) || \
(le16_to_cpu((P)->header.nextindex) > \
le16_to_cpu((P)->header.maxentry)) || \
(le16_to_cpu((P)->header.maxentry) > \
(((BN) == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) { \
jfs_error((IP)->i_sb, \
"XT_GETPAGE: xtree page corrupt\n"); \
BT_PUTPAGE(MP); \
MP = NULL; \
RC = -EIO; \
} \
} \
} while (0)
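/*
 * The sanity check buried in XT_GETPAGE, written out as a function (an
 * editor's sketch toward the ToDo above; the macro form remains because
 * BT_GETPAGE needs the page type and inode member at compile time).
 */
static inline bool xt_page_sane(const xtpage_t *p, s64 bn)
{
	u16 next = le16_to_cpu(p->header.nextindex);
	u16 max = le16_to_cpu(p->header.maxentry);

	return next >= XTENTRYSTART && next <= max &&
	       max <= ((bn == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE);
}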
/* for consistency */
#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
#define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot)
/* xtree entry parameter descriptor */
struct xtsplit {
struct metapage *mp;
s16 index;
u8 flag;
s64 off;
s64 addr;
int len;
struct pxdlist *pxdlist;
};
/*
* statistics
*/
#ifdef CONFIG_JFS_STATISTICS
static struct {
uint search;
uint fastSearch;
uint split;
} xtStat;
#endif
/*
* forward references
*/
static int xtSearch(struct inode *ip, s64 xoff, s64 *next, int *cmpp,
struct btstack * btstack, int flag);
static int xtSplitUp(tid_t tid,
struct inode *ip,
struct xtsplit * split, struct btstack * btstack);
static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
struct metapage ** rmpp, s64 * rbnp);
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
/*
* xtLookup()
*
* function: map a single page into a physical extent;
*/
int xtLookup(struct inode *ip, s64 lstart,
s64 llen, int *pflag, s64 * paddr, s32 * plen, int no_check)
{
int rc = 0;
struct btstack btstack;
int cmp;
s64 bn;
struct metapage *mp;
xtpage_t *p;
int index;
xad_t *xad;
s64 next, size, xoff, xend;
int xlen;
s64 xaddr;
*paddr = 0;
*plen = llen;
if (!no_check) {
/* is lookup offset beyond eof ? */
size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
JFS_SBI(ip->i_sb)->l2bsize;
if (lstart >= size)
return 0;
}
/*
* search for the xad entry covering the logical extent
*/
//search:
if ((rc = xtSearch(ip, lstart, &next, &cmp, &btstack, 0))) {
jfs_err("xtLookup: xtSearch returned %d", rc);
return rc;
}
/*
* compute the physical extent covering logical extent
*
* N.B. search may have failed (e.g., hole in sparse file),
* and returned the index of the next entry.
*/
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/* is xad found covering start of logical extent ?
* lstart is a page start address,
* i.e., lstart cannot start in a hole;
*/
if (cmp) {
if (next)
*plen = min(next - lstart, llen);
goto out;
}
/*
* lxd covered by xad
*/
xad = &p->xad[index];
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
xend = xoff + xlen;
xaddr = addressXAD(xad);
/* initialize new pxd */
*pflag = xad->flag;
*paddr = xaddr + (lstart - xoff);
/* a page must be fully covered by an xad */
*plen = min(xend - lstart, llen);
out:
XT_PUTPAGE(mp);
return rc;
}
/*
* xtSearch()
*
* function: search for the xad entry covering specified offset.
*
* parameters:
* ip - file object;
* xoff - extent offset;
* nextp - address of next extent (if any) for search miss
* cmpp - comparison result:
* btstack - traverse stack;
* flag - search process flag (XT_INSERT);
*
* returns:
* btstack contains (bn, index) of search path traversed to the entry.
* *cmpp is set to result of comparison with the entry returned.
* the page containing the entry is pinned at exit.
*/
static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
int *cmpp, struct btstack * btstack, int flag)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
struct metapage *mp; /* page buffer */
xtpage_t *p; /* page */
xad_t *xad;
int base, index, lim, btindex;
struct btframe *btsp;
int nsplit = 0; /* number of pages to split */
s64 t64;
s64 next = 0;
INCREMENT(xtStat.search);
BT_CLR(btstack);
btstack->nsplit = 0;
/*
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
*
 * if entry with search key K is not found
 * internal page search finds the entry with the largest key Ki
 * less than K, which points to the child page to search;
 * leaf page search finds the entry with the smallest key Kj
 * greater than K, so that the returned index is the position of
 * the entry to be shifted right for insertion of the new entry.
* for empty tree, search key is greater than any key of the tree.
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
/* get/pin the page to search */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* try sequential access heuristics with the previous
* access entry in target leaf page:
* once search narrowed down into the target leaf,
* key must either match an entry in the leaf or
* key entry does not exist in the tree;
*/
//fastSearch:
if ((jfs_ip->btorder & BT_SEQUENTIAL) &&
(p->header.flag & BT_LEAF) &&
(index = jfs_ip->btindex) <
le16_to_cpu(p->header.nextindex)) {
xad = &p->xad[index];
t64 = offsetXAD(xad);
if (xoff < t64 + lengthXAD(xad)) {
if (xoff >= t64) {
*cmpp = 0;
goto out;
}
/* stop sequential access heuristics */
goto binarySearch;
} else { /* (t64 + lengthXAD(xad)) <= xoff */
/* try next sequential entry */
index++;
if (index <
le16_to_cpu(p->header.nextindex)) {
xad++;
t64 = offsetXAD(xad);
if (xoff < t64 + lengthXAD(xad)) {
if (xoff >= t64) {
*cmpp = 0;
goto out;
}
/* miss: key falls between
* previous and this entry
*/
*cmpp = 1;
next = t64;
goto out;
}
/* (xoff >= t64 + lengthXAD(xad));
* matching entry may be further out:
* stop heuristic search
*/
/* stop sequential access heuristics */
goto binarySearch;
}
/* (index == p->header.nextindex);
* miss: key entry does not exist in
* the target leaf/tree
*/
*cmpp = 1;
goto out;
}
/*
* if hit, return index of the entry found, and
* if miss, where new entry with search key is
* to be inserted;
*/
out:
/* compute number of pages to split */
if (flag & XT_INSERT) {
if (p->header.nextindex == /* little-endian */
p->header.maxentry)
nsplit++;
else
nsplit = 0;
btstack->nsplit = nsplit;
}
/* save search result */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = index;
btsp->mp = mp;
/* update sequential access heuristics */
jfs_ip->btindex = index;
if (nextp)
*nextp = next;
INCREMENT(xtStat.fastSearch);
return 0;
}
/* well, ... full search now */
binarySearch:
lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
/*
* binary search with search key K on the current page
*/
for (base = XTENTRYSTART; lim; lim >>= 1) {
index = base + (lim >> 1);
XT_CMP(cmp, xoff, &p->xad[index], t64);
if (cmp == 0) {
/*
* search hit
*/
/* search hit - leaf page:
* return the entry found
*/
if (p->header.flag & BT_LEAF) {
*cmpp = cmp;
/* compute number of pages to split */
if (flag & XT_INSERT) {
if (p->header.nextindex ==
p->header.maxentry)
nsplit++;
else
nsplit = 0;
btstack->nsplit = nsplit;
}
/* save search result */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = index;
btsp->mp = mp;
/* init sequential access heuristics */
btindex = jfs_ip->btindex;
if (index == btindex ||
index == btindex + 1)
jfs_ip->btorder = BT_SEQUENTIAL;
else
jfs_ip->btorder = BT_RANDOM;
jfs_ip->btindex = index;
return 0;
}
/* search hit - internal page:
* descend/search its child page
*/
if (index < le16_to_cpu(p->header.nextindex)-1)
next = offsetXAD(&p->xad[index + 1]);
goto next;
}
if (cmp > 0) {
base = index + 1;
--lim;
}
}
/*
* search miss
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or maxentry index.
*/
if (base < le16_to_cpu(p->header.nextindex))
next = offsetXAD(&p->xad[base]);
/*
* search miss - leaf page:
*
* return location of entry (base) where new entry with
* search key K is to be inserted.
*/
if (p->header.flag & BT_LEAF) {
*cmpp = cmp;
/* compute number of pages to split */
if (flag & XT_INSERT) {
if (p->header.nextindex ==
p->header.maxentry)
nsplit++;
else
nsplit = 0;
btstack->nsplit = nsplit;
}
/* save search result */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = base;
btsp->mp = mp;
/* init sequential access heuristics */
btindex = jfs_ip->btindex;
if (base == btindex || base == btindex + 1)
jfs_ip->btorder = BT_SEQUENTIAL;
else
jfs_ip->btorder = BT_RANDOM;
jfs_ip->btindex = base;
if (nextp)
*nextp = next;
return 0;
}
/*
* search miss - non-leaf page:
*
* if base is non-zero, decrement base by one to get the parent
* entry of the child page to search.
*/
index = base ? base - 1 : base;
/*
* go down to child page
*/
next:
/* update number of pages to split */
if (p->header.nextindex == p->header.maxentry)
nsplit++;
else
nsplit = 0;
/* push (bn, index) of the parent page/entry */
if (BT_STACK_FULL(btstack)) {
jfs_error(ip->i_sb, "stack overrun!\n");
XT_PUTPAGE(mp);
return -EIO;
}
BT_PUSH(btstack, bn, index);
/* get the child page block number */
bn = addressXAD(&p->xad[index]);
/* unpin the parent page */
XT_PUTPAGE(mp);
}
}
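/*
 * Editor's sketch of the binary-search convention used in xtSearch()
 * above, reduced to plain keys: on a miss, 'base' lands on the smallest
 * index whose key is greater than the search key, i.e. the insertion
 * point in a leaf, or one past the child entry to descend into for an
 * internal page.
 */
static inline int xt_miss_base(const s64 *keys, int nkeys, s64 k)
{
	int base = 0, lim, index;

	for (lim = nkeys; lim; lim >>= 1) {
		index = base + (lim >> 1);
		if (keys[index] == k)
			return index;		/* hit */
		if (keys[index] < k) {
			base = index + 1;	/* search upper half */
			--lim;
		}
	}
	return base;				/* miss */
}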
/*
* xtInsert()
*
 * function: insert an entry for a new extent;
*
* parameter:
* tid - transaction id;
* ip - file object;
* xflag - extent flag (XAD_NOTRECORDED):
* xoff - extent offset;
* xlen - extent length;
* xaddrp - extent address pointer (in/out):
* if (*xaddrp)
* caller allocated data extent at *xaddrp;
* else
* allocate data extent and return its xaddr;
* flag -
*
 * return: 0 for success; -errno on error;
*/
int xtInsert(tid_t tid, /* transaction id */
struct inode *ip, int xflag, s64 xoff, s32 xlen, s64 * xaddrp,
int flag)
{
int rc = 0;
s64 xaddr, hint;
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index, nextindex;
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
int cmp;
s64 next;
struct tlock *tlck;
struct xtlock *xtlck;
jfs_info("xtInsert: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
/*
* search for the entry location at which to insert:
*
 * xtFastSearch() and xtSearch() both return (leaf page
* pinned, index at which to insert).
* n.b. xtSearch() may return index of maxentry of
* the full page.
*/
if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT)))
return rc;
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/* This test must follow XT_GETSEARCH since mp must be valid if
* we branch to out: */
if ((cmp == 0) || (next && (xlen > next - xoff))) {
rc = -EEXIST;
goto out;
}
/*
* allocate data extent requested
*
* allocation hint: last xad
*/
if ((xaddr = *xaddrp) == 0) {
if (index > XTENTRYSTART) {
xad = &p->xad[index - 1];
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
if ((rc = dquot_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
dquot_free_block(ip, xlen);
goto out;
}
}
/*
* insert entry for new extent
*/
xflag |= XAD_NEW;
/*
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
nextindex = le16_to_cpu(p->header.nextindex);
if (nextindex == le16_to_cpu(p->header.maxentry)) {
split.mp = mp;
split.index = index;
split.flag = xflag;
split.off = xoff;
split.len = xlen;
split.addr = xaddr;
split.pxdlist = NULL;
if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
dquot_free_block(ip, xlen);
}
return rc;
}
*xaddrp = xaddr;
return 0;
}
/*
* insert the new entry into the leaf page
*/
/*
* acquire a transaction lock on the leaf page;
*
* action: xad insertion/extension;
*/
BT_MARK_DIRTY(mp, ip);
/* if insert into middle, shift right remaining entries. */
if (index < nextindex)
memmove(&p->xad[index + 1], &p->xad[index],
(nextindex - index) * sizeof(xad_t));
/* insert the new entry: mark the entry NEW */
xad = &p->xad[index];
XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
/* advance next available entry index */
le16_add_cpu(&p->header.nextindex, 1);
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index,
(int)xtlck->lwm.offset) : index;
xtlck->lwm.length =
le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
}
*xaddrp = xaddr;
out:
/* unpin the leaf page */
XT_PUTPAGE(mp);
return rc;
}
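/*
 * Illustrative wrapper (an editor's sketch, hypothetical helper): per
 * the in/out contract of xaddrp documented above, passing a zero extent
 * address asks xtInsert() to allocate the data extent itself via
 * dbAlloc() and return the address it chose.
 */
static inline int xt_insert_alloc(tid_t tid, struct inode *ip,
				  s64 xoff, s32 xlen, s64 *xaddrp)
{
	*xaddrp = 0;	/* 0 == "allocate for me" */
	return xtInsert(tid, ip, 0, xoff, xlen, xaddrp, 0);
}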
/*
* xtSplitUp()
*
* function:
 * split full pages while propagating the insertion up the tree
*
* parameter:
* tid - transaction id;
* ip - file object;
* split - entry parameter descriptor;
* btstack - traverse stack from xtSearch()
*
 * return: 0 for success; -errno on error;
*/
static int
xtSplitUp(tid_t tid,
struct inode *ip, struct xtsplit * split, struct btstack * btstack)
{
int rc = 0;
struct metapage *smp;
xtpage_t *sp; /* split page */
struct metapage *rmp;
s64 rbn; /* new right page block number */
struct metapage *rcmp;
xtpage_t *rcp; /* right child page */
s64 rcbn; /* right child page block number */
int skip; /* index of entry of insertion */
int nextindex; /* next available entry index of p */
struct btframe *parent; /* parent page entry on traverse stack */
xad_t *xad;
s64 xaddr;
int xlen;
int nsplit; /* number of pages split */
struct pxdlist pxdlist;
pxd_t *pxd;
struct tlock *tlck;
struct xtlock *xtlck;
smp = split->mp;
sp = XT_PAGE(ip, smp);
/* is inode xtree root extension/inline EA area free ? */
if ((sp->header.flag & BT_ROOT) && (!S_ISDIR(ip->i_mode)) &&
(le16_to_cpu(sp->header.maxentry) < XTROOTMAXSLOT) &&
(JFS_IP(ip)->mode2 & INLINEEA)) {
sp->header.maxentry = cpu_to_le16(XTROOTMAXSLOT);
JFS_IP(ip)->mode2 &= ~INLINEEA;
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the leaf page;
*
* action: xad insertion/extension;
*/
/* if insert into middle, shift right remaining entries. */
skip = split->index;
nextindex = le16_to_cpu(sp->header.nextindex);
if (skip < nextindex)
memmove(&sp->xad[skip + 1], &sp->xad[skip],
(nextindex - skip) * sizeof(xad_t));
/* insert the new entry: mark the entry NEW */
xad = &sp->xad[skip];
XT_PUTENTRY(xad, split->flag, split->off, split->len,
split->addr);
/* advance next available entry index */
le16_add_cpu(&sp->header.nextindex, 1);
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(skip, (int)xtlck->lwm.offset) : skip;
xtlck->lwm.length =
le16_to_cpu(sp->header.nextindex) -
xtlck->lwm.offset;
}
return 0;
}
/*
* allocate new index blocks to cover index page split(s)
*
* allocation hint: ?
*/
if (split->pxdlist == NULL) {
nsplit = btstack->nsplit;
split->pxdlist = &pxdlist;
pxdlist.maxnpxd = pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
xlen = JFS_SBI(ip->i_sb)->nbperpage;
for (; nsplit > 0; nsplit--, pxd++) {
if ((rc = dbAlloc(ip, (s64) 0, (s64) xlen, &xaddr))
== 0) {
PXDaddress(pxd, xaddr);
PXDlength(pxd, xlen);
pxdlist.maxnpxd++;
continue;
}
/* undo allocation */
XT_PUTPAGE(smp);
return rc;
}
}
/*
* Split leaf page <sp> into <sp> and a new right page <rp>.
*
* The split routines insert the new entry into the leaf page,
* and acquire txLock as appropriate.
* return <rp> pinned and its block number <rpbn>.
*/
rc = (sp->header.flag & BT_ROOT) ?
xtSplitRoot(tid, ip, split, &rmp) :
xtSplitPage(tid, ip, split, &rmp, &rbn);
XT_PUTPAGE(smp);
if (rc)
return -EIO;
/*
* propagate up the router entry for the leaf page just split
*
* insert a router entry for the new page into the parent page,
* propagate the insert/split up the tree by walking back the stack
* of (bn of parent page, index of child page entry in parent page)
* that were traversed during the search for the page that split.
*
* the propagation of insert/split up the tree stops if the root
* splits or the page inserted into doesn't have to split to hold
* the new entry.
*
* the parent entry for the split page remains the same, and
* a new entry is inserted at its right with the first key and
* block number of the new right page.
*
* There are a maximum of 3 pages pinned at any time:
* right child, left parent and right parent (when the parent splits)
* to keep the child page pinned while working on the parent.
* make sure that all pins are released at exit.
*/
while ((parent = BT_POP(btstack)) != NULL) {
/* parent page specified by stack frame <parent> */
/* keep current child pages <rcp> pinned */
rcmp = rmp;
rcbn = rbn;
rcp = XT_PAGE(ip, rcmp);
/*
* insert router entry in parent for new right child page <rp>
*/
/* get/pin the parent page <sp> */
XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
if (rc) {
XT_PUTPAGE(rcmp);
return rc;
}
/*
* The new key entry goes ONE AFTER the index of parent entry,
* because the split was to the right.
*/
skip = parent->index + 1;
/*
* split or shift right remaining entries of the parent page
*/
nextindex = le16_to_cpu(sp->header.nextindex);
/*
* parent page is full - split the parent page
*/
if (nextindex == le16_to_cpu(sp->header.maxentry)) {
/* init for parent page split */
split->mp = smp;
split->index = skip; /* index at insert */
split->flag = XAD_NEW;
split->off = offsetXAD(&rcp->xad[XTENTRYSTART]);
split->len = JFS_SBI(ip->i_sb)->nbperpage;
split->addr = rcbn;
/* unpin previous right child page */
XT_PUTPAGE(rcmp);
/* The split routines insert the new entry,
* and acquire txLock as appropriate.
* return <rp> pinned and its block number <rpbn>.
*/
rc = (sp->header.flag & BT_ROOT) ?
xtSplitRoot(tid, ip, split, &rmp) :
xtSplitPage(tid, ip, split, &rmp, &rbn);
if (rc) {
XT_PUTPAGE(smp);
return rc;
}
XT_PUTPAGE(smp);
/* keep new child page <rp> pinned */
}
/*
* parent page is not full - insert in parent page
*/
else {
/*
* insert router entry in parent for the right child
* page from the first entry of the right child page:
*/
/*
* acquire a transaction lock on the parent page;
*
* action: router xad insertion;
*/
BT_MARK_DIRTY(smp, ip);
/*
* if insert into middle, shift right remaining entries
*/
if (skip < nextindex)
memmove(&sp->xad[skip + 1], &sp->xad[skip],
(nextindex -
skip) << L2XTSLOTSIZE);
/* insert the router entry */
xad = &sp->xad[skip];
XT_PUTENTRY(xad, XAD_NEW,
offsetXAD(&rcp->xad[XTENTRYSTART]),
JFS_SBI(ip->i_sb)->nbperpage, rcbn);
/* advance next available entry index. */
le16_add_cpu(&sp->header.nextindex, 1);
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, smp,
tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(skip, (int)xtlck->lwm.offset) : skip;
xtlck->lwm.length =
le16_to_cpu(sp->header.nextindex) -
xtlck->lwm.offset;
}
/* unpin parent page */
XT_PUTPAGE(smp);
/* exit propagate up */
break;
}
}
/* unpin current right page */
XT_PUTPAGE(rmp);
return 0;
}
/*
* xtSplitPage()
*
* function:
* split a full non-root page into
* original/split/left page and new right page
* i.e., the original/split page remains as left page.
*
* parameter:
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp,
 * s64 *rbnp,
 *
 * return:
 * 0 for success; -errno on error. On success, the new right page
 * is returned pinned via *rmpp and its block number via *rbnp.
*/
static int
xtSplitPage(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp, s64 * rbnp)
{
int rc = 0;
struct metapage *smp;
xtpage_t *sp;
struct metapage *rmp;
xtpage_t *rp; /* new right page allocated */
s64 rbn; /* new right page block number */
struct metapage *mp;
xtpage_t *p;
s64 nextbn;
int skip, maxentry, middle, righthalf, n;
xad_t *xad;
struct pxdlist *pxdlist;
pxd_t *pxd;
struct tlock *tlck;
struct xtlock *sxtlck = NULL, *rxtlck = NULL;
int quota_allocation = 0;
smp = split->mp;
sp = XT_PAGE(ip, smp);
INCREMENT(xtStat.split);
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc)
goto clean_up;
quota_allocation += lengthPXD(pxd);
/*
* allocate the new right page for the split
*/
rmp = get_metapage(ip, rbn, PSIZE, 1);
if (rmp == NULL) {
rc = -EIO;
goto clean_up;
}
jfs_info("xtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
BT_MARK_DIRTY(rmp, ip);
/*
* action: new page;
*/
rp = (xtpage_t *) rmp->data;
rp->header.self = *pxd;
rp->header.flag = sp->header.flag & BT_TYPE;
rp->header.maxentry = sp->header.maxentry; /* little-endian */
rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
BT_MARK_DIRTY(smp, ip);
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
/*
* acquire a transaction lock on the new right page;
*/
tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
rxtlck = (struct xtlock *) & tlck->lock;
rxtlck->lwm.offset = XTENTRYSTART;
/*
* acquire a transaction lock on the split page
*/
tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
sxtlck = (struct xtlock *) & tlck->lock;
}
/*
* initialize/update sibling pointers of <sp> and <rp>
*/
nextbn = le64_to_cpu(sp->header.next);
rp->header.next = cpu_to_le64(nextbn);
rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
sp->header.next = cpu_to_le64(rbn);
skip = split->index;
/*
* sequential append at tail (after last entry of last page)
*
* if splitting the last page on a level because of appending
 * an entry to it (skip is maxentry), it's likely that the access is
* sequential. adding an empty page on the side of the level is less
* work and can push the fill factor much higher than normal.
* if we're wrong it's no big deal - we will do the split the right
* way next time.
* (it may look like it's equally easy to do a similar hack for
* reverse sorted data, that is, split the tree left, but it's not.
* Be my guest.)
*/
if (nextbn == 0 && skip == le16_to_cpu(sp->header.maxentry)) {
/*
* acquire a transaction lock on the new/right page;
*
* action: xad insertion;
*/
/* insert entry at the first entry of the new right page */
xad = &rp->xad[XTENTRYSTART];
XT_PUTENTRY(xad, split->flag, split->off, split->len,
split->addr);
rp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);
if (!test_cflag(COMMIT_Nolink, ip)) {
/* rxtlck->lwm.offset = XTENTRYSTART; */
rxtlck->lwm.length = 1;
}
*rmpp = rmp;
*rbnp = rbn;
jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
return 0;
}
/*
* non-sequential insert (at possibly middle page)
*/
/*
* update previous pointer of old next/right page of <sp>
*/
if (nextbn != 0) {
XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc) {
XT_PUTPAGE(rmp);
goto clean_up;
}
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the next page;
*
* action:sibling pointer update;
*/
if (!test_cflag(COMMIT_Nolink, ip))
tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
p->header.prev = cpu_to_le64(rbn);
/* sibling page may have been updated previously, or
* it may be updated later;
*/
XT_PUTPAGE(mp);
}
/*
* split the data between the split and new/right pages
*/
maxentry = le16_to_cpu(sp->header.maxentry);
middle = maxentry >> 1;
righthalf = maxentry - middle;
/*
* skip index in old split/left page - insert into left page:
*/
if (skip <= middle) {
/* move right half of split page to the new right page */
memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
righthalf << L2XTSLOTSIZE);
/* shift right tail of left half to make room for new entry */
if (skip < middle)
memmove(&sp->xad[skip + 1], &sp->xad[skip],
(middle - skip) << L2XTSLOTSIZE);
/* insert new entry */
xad = &sp->xad[skip];
XT_PUTENTRY(xad, split->flag, split->off, split->len,
split->addr);
/* update page header */
sp->header.nextindex = cpu_to_le16(middle + 1);
if (!test_cflag(COMMIT_Nolink, ip)) {
sxtlck->lwm.offset = (sxtlck->lwm.offset) ?
min(skip, (int)sxtlck->lwm.offset) : skip;
}
rp->header.nextindex =
cpu_to_le16(XTENTRYSTART + righthalf);
}
/*
* skip index in new right page - insert into right page:
*/
else {
/* move left head of right half to right page */
n = skip - middle;
memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
n << L2XTSLOTSIZE);
/* insert new entry */
n += XTENTRYSTART;
xad = &rp->xad[n];
XT_PUTENTRY(xad, split->flag, split->off, split->len,
split->addr);
/* move right tail of right half to right page */
if (skip < maxentry)
memmove(&rp->xad[n + 1], &sp->xad[skip],
(maxentry - skip) << L2XTSLOTSIZE);
/* update page header */
sp->header.nextindex = cpu_to_le16(middle);
if (!test_cflag(COMMIT_Nolink, ip)) {
sxtlck->lwm.offset = (sxtlck->lwm.offset) ?
min(middle, (int)sxtlck->lwm.offset) : middle;
}
rp->header.nextindex = cpu_to_le16(XTENTRYSTART +
righthalf + 1);
}
if (!test_cflag(COMMIT_Nolink, ip)) {
sxtlck->lwm.length = le16_to_cpu(sp->header.nextindex) -
sxtlck->lwm.offset;
/* rxtlck->lwm.offset = XTENTRYSTART; */
rxtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
XTENTRYSTART;
}
*rmpp = rmp;
*rbnp = rbn;
jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
return rc;
clean_up:
/* Rollback quota allocation. */
if (quota_allocation)
dquot_free_block(ip, quota_allocation);
return (rc);
}
/*
* xtSplitRoot()
*
* function:
* split the full root page into original/root/split page and new
* right page
 * i.e., the root remains fixed in the tree anchor (inode) and its
 * entries are copied to a single new right child page, since a root
 * page is much smaller than a non-root page; the split root page then
 * contains a single entry for the new right child page.
*
* parameter:
* int tid,
* struct inode *ip,
* struct xtsplit *split,
* struct metapage **rmpp)
*
 * return:
 * 0 for success; -errno on error. On success, the new right child
 * page is returned pinned via *rmpp.
*/
static int
xtSplitRoot(tid_t tid,
struct inode *ip, struct xtsplit * split, struct metapage ** rmpp)
{
xtpage_t *sp;
struct metapage *rmp;
xtpage_t *rp;
s64 rbn;
int skip, nextindex;
xad_t *xad;
pxd_t *pxd;
struct pxdlist *pxdlist;
struct tlock *tlck;
struct xtlock *xtlck;
int rc;
sp = &JFS_IP(ip)->i_xtroot;
INCREMENT(xtStat.split);
/*
* allocate a single (right) child page
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
rmp = get_metapage(ip, rbn, PSIZE, 1);
if (rmp == NULL)
return -EIO;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return rc;
}
jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
/*
* acquire a transaction lock on the new right page;
*
* action: new page;
*/
BT_MARK_DIRTY(rmp, ip);
rp = (xtpage_t *) rmp->data;
rp->header.flag =
(sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
rp->header.self = *pxd;
rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
rp->header.maxentry = cpu_to_le16(PSIZE >> L2XTSLOTSIZE);
/* initialize sibling pointers */
rp->header.next = 0;
rp->header.prev = 0;
/*
* copy the in-line root page into new right page extent
*/
nextindex = le16_to_cpu(sp->header.maxentry);
memmove(&rp->xad[XTENTRYSTART], &sp->xad[XTENTRYSTART],
(nextindex - XTENTRYSTART) << L2XTSLOTSIZE);
/*
* insert the new entry into the new right/child page
* (skip index in the new right page will not change)
*/
skip = split->index;
/* if insert into middle, shift right remaining entries */
if (skip != nextindex)
memmove(&rp->xad[skip + 1], &rp->xad[skip],
(nextindex - skip) * sizeof(xad_t));
xad = &rp->xad[skip];
XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr);
/* update page header */
rp->header.nextindex = cpu_to_le16(nextindex + 1);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = XTENTRYSTART;
xtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
XTENTRYSTART;
}
/*
* reset the root
*
* init root with the single entry for the new right page
* set the 1st entry offset to 0, which force the left-most key
* at any level of the tree to be less than any search key.
*/
/*
* acquire a transaction lock on the root page (in-memory inode);
*
* action: root split;
*/
BT_MARK_DIRTY(split->mp, ip);
xad = &sp->xad[XTENTRYSTART];
XT_PUTENTRY(xad, XAD_NEW, 0, JFS_SBI(ip->i_sb)->nbperpage, rbn);
/* update page header of root */
sp->header.flag &= ~BT_LEAF;
sp->header.flag |= BT_INTERNAL;
sp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, split->mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = XTENTRYSTART;
xtlck->lwm.length = 1;
}
*rmpp = rmp;
jfs_info("xtSplitRoot: sp:0x%p rp:0x%p", sp, rp);
return 0;
}
/*
* xtExtend()
*
* function: extend in-place;
*
* note: existing extent may or may not have been committed.
* caller is responsible for pager buffer cache update, and
* working block allocation map update;
* update pmap: alloc whole extended extent;
*/
int xtExtend(tid_t tid, /* transaction id */
struct inode *ip, s64 xoff, /* delta extent offset */
s32 xlen, /* delta extent length */
int flag)
{
int rc = 0;
int cmp;
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index, nextindex, len;
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
s64 xaddr;
struct tlock *tlck;
struct xtlock *xtlck = NULL;
jfs_info("xtExtend: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
/* there must exist extent to be extended */
if ((rc = xtSearch(ip, xoff - 1, NULL, &cmp, &btstack, XT_INSERT)))
return rc;
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
if (cmp != 0) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "xtSearch did not find extent\n");
return -EIO;
}
/* extension must be contiguous */
xad = &p->xad[index];
if ((offsetXAD(xad) + lengthXAD(xad)) != xoff) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "extension is not contiguous\n");
return -EIO;
}
/*
* acquire a transaction lock on the leaf page;
*
* action: xad insertion/extension;
*/
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
/* extend will overflow extent ? */
xlen = lengthXAD(xad) + xlen;
if ((len = xlen - MAXXLEN) <= 0)
goto extendOld;
/*
* extent overflow: insert entry for new extent
*/
//insertNew:
xoff = offsetXAD(xad) + MAXXLEN;
xaddr = addressXAD(xad) + MAXXLEN;
nextindex = le16_to_cpu(p->header.nextindex);
/*
* if the leaf page is full, insert the new entry and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
if (nextindex == le16_to_cpu(p->header.maxentry)) {
/* xtSplitUp() unpins leaf pages */
split.mp = mp;
split.index = index + 1;
split.flag = XAD_NEW;
split.off = xoff; /* split offset */
split.len = len;
split.addr = xaddr;
split.pxdlist = NULL;
if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
* resides on the new child page;
*/
if (p->header.flag & BT_INTERNAL) {
ASSERT(p->header.nextindex ==
cpu_to_le16(XTENTRYSTART + 1));
xad = &p->xad[XTENTRYSTART];
bn = addressXAD(xad);
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
}
}
/*
* insert the new entry into the leaf page
*/
else {
/* insert the new entry: mark the entry NEW */
xad = &p->xad[index + 1];
XT_PUTENTRY(xad, XAD_NEW, xoff, len, xaddr);
/* advance next available entry index */
le16_add_cpu(&p->header.nextindex, 1);
}
/* get back old entry */
xad = &p->xad[index];
xlen = MAXXLEN;
/*
* extend old extent
*/
extendOld:
XADlength(xad, xlen);
if (!(xad->flag & XAD_NEW))
xad->flag |= XAD_EXTENDED;
if (!test_cflag(COMMIT_Nolink, ip)) {
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index,
(int)xtlck->lwm.offset) : index;
xtlck->lwm.length =
le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
}
/* unpin the leaf page */
XT_PUTPAGE(mp);
return rc;
}
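/*
 * Worked example of the MAXXLEN overflow path in xtExtend() above
 * (editor's illustration, hypothetical numbers):
 *
 *	old XAD:  off=0, len=MAXXLEN-16, addr=A
 *	extend by 64  =>  combined xlen = MAXXLEN+48, so len = 48
 *
 * the old XAD is capped at MAXXLEN (off=0, addr=A unchanged) and a new
 * entry is inserted for the overflow: off=MAXXLEN, len=48,
 * addr=A+MAXXLEN.
 */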
/*
* xtUpdate()
*
* function: update XAD;
*
* update extent for allocated_but_not_recorded or
* compressed extent;
*
* parameter:
* nxad - new XAD;
* logical extent of the specified XAD must be completely
* contained by an existing XAD;
*/
int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
{ /* new XAD */
int rc = 0;
int cmp;
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index0, index, newindex, nextindex;
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad, *lxad, *rxad;
int xflag;
s64 nxoff, xoff;
int nxlen, xlen, lxlen, rxlen;
s64 nxaddr, xaddr;
struct tlock *tlck;
struct xtlock *xtlck = NULL;
int newpage = 0;
/* there must exist extent to be tailgated */
nxoff = offsetXAD(nxad);
nxlen = lengthXAD(nxad);
nxaddr = addressXAD(nxad);
if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT)))
return rc;
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
if (cmp != 0) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "Could not find extent\n");
return -EIO;
}
BT_MARK_DIRTY(mp, ip);
/*
* acquire tlock of the leaf page containing original entry
*/
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
xad = &p->xad[index0];
xflag = xad->flag;
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
xaddr = addressXAD(xad);
/* nXAD must be completely contained within XAD */
if ((xoff > nxoff) ||
(nxoff + nxlen > xoff + xlen)) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb,
"nXAD in not completely contained within XAD\n");
return -EIO;
}
index = index0;
newindex = index + 1;
nextindex = le16_to_cpu(p->header.nextindex);
if (xoff < nxoff)
goto coalesceRight;
/*
* coalesce with left XAD
*/
/* is XAD first entry of page ? */
if (index == XTENTRYSTART)
goto replace;
/* is nXAD logically and physically contiguous with lXAD ? */
lxad = &p->xad[index - 1];
lxlen = lengthXAD(lxad);
if (!(lxad->flag & XAD_NOTRECORDED) &&
(nxoff == offsetXAD(lxad) + lxlen) &&
(nxaddr == addressXAD(lxad) + lxlen) &&
(lxlen + nxlen < MAXXLEN)) {
/* extend right lXAD */
index0 = index - 1;
XADlength(lxad, lxlen + nxlen);
/* If we just merged two extents together, need to make sure the
* right extent gets logged. If the left one is marked XAD_NEW,
* then we know it will be logged. Otherwise, mark as
* XAD_EXTENDED
*/
if (!(lxad->flag & XAD_NEW))
lxad->flag |= XAD_EXTENDED;
if (xlen > nxlen) {
/* truncate XAD */
XADoffset(xad, xoff + nxlen);
XADlength(xad, xlen - nxlen);
XADaddress(xad, xaddr + nxlen);
goto out;
} else { /* (xlen == nxlen) */
/* remove XAD */
if (index < nextindex - 1)
memmove(&p->xad[index], &p->xad[index + 1],
(nextindex - index -
1) << L2XTSLOTSIZE);
p->header.nextindex =
cpu_to_le16(le16_to_cpu(p->header.nextindex) -
1);
index = index0;
newindex = index + 1;
nextindex = le16_to_cpu(p->header.nextindex);
xoff = nxoff = offsetXAD(lxad);
xlen = nxlen = lxlen + nxlen;
xaddr = nxaddr = addressXAD(lxad);
goto coalesceRight;
}
}
/*
* replace XAD with nXAD
*/
replace: /* (nxoff == xoff) */
if (nxlen == xlen) {
/* replace XAD with nXAD:recorded */
*xad = *nxad;
xad->flag = xflag & ~XAD_NOTRECORDED;
goto coalesceRight;
} else /* (nxlen < xlen) */
goto updateLeft;
/*
* coalesce with right XAD
*/
coalesceRight: /* (xoff <= nxoff) */
/* is XAD last entry of page ? */
if (newindex == nextindex) {
if (xoff == nxoff)
goto out;
goto updateRight;
}
/* is nXAD logically and physically contiguous with rXAD ? */
rxad = &p->xad[index + 1];
rxlen = lengthXAD(rxad);
if (!(rxad->flag & XAD_NOTRECORDED) &&
(nxoff + nxlen == offsetXAD(rxad)) &&
(nxaddr + nxlen == addressXAD(rxad)) &&
(rxlen + nxlen < MAXXLEN)) {
/* extend left rXAD */
XADoffset(rxad, nxoff);
XADlength(rxad, rxlen + nxlen);
XADaddress(rxad, nxaddr);
/* If we just merged two extents together, need to make sure
* the left extent gets logged. If the right one is marked
* XAD_NEW, then we know it will be logged. Otherwise, mark as
* XAD_EXTENDED
*/
if (!(rxad->flag & XAD_NEW))
rxad->flag |= XAD_EXTENDED;
if (xlen > nxlen)
/* truncate XAD */
XADlength(xad, xlen - nxlen);
else { /* (xlen == nxlen) */
/* remove XAD */
memmove(&p->xad[index], &p->xad[index + 1],
(nextindex - index - 1) << L2XTSLOTSIZE);
p->header.nextindex =
cpu_to_le16(le16_to_cpu(p->header.nextindex) -
1);
}
goto out;
} else if (xoff == nxoff)
goto out;
if (xoff >= nxoff) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "xoff >= nxoff\n");
return -EIO;
}
/*
* split XAD into (lXAD, nXAD):
*
* |---nXAD--->
* --|----------XAD----------|--
* |-lXAD-|
*/
updateRight: /* (xoff < nxoff) */
/* truncate old XAD as lXAD:not_recorded */
xad = &p->xad[index];
XADlength(xad, nxoff - xoff);
/* insert nXAD:recorded */
if (nextindex == le16_to_cpu(p->header.maxentry)) {
/* xtSplitUp() unpins leaf pages */
split.mp = mp;
split.index = newindex;
split.flag = xflag & ~XAD_NOTRECORDED;
split.off = nxoff;
split.len = nxlen;
split.addr = nxaddr;
split.pxdlist = NULL;
if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
* resides on the new child page;
*/
if (p->header.flag & BT_INTERNAL) {
ASSERT(p->header.nextindex ==
cpu_to_le16(XTENTRYSTART + 1));
xad = &p->xad[XTENTRYSTART];
bn = addressXAD(xad);
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
} else {
/* is nXAD on new page ? */
if (newindex >
(le16_to_cpu(p->header.maxentry) >> 1)) {
newindex =
newindex -
le16_to_cpu(p->header.nextindex) +
XTENTRYSTART;
newpage = 1;
}
}
} else {
/* if insert into middle, shift right remaining entries */
if (newindex < nextindex)
memmove(&p->xad[newindex + 1], &p->xad[newindex],
(nextindex - newindex) << L2XTSLOTSIZE);
/* insert the entry */
xad = &p->xad[newindex];
*xad = *nxad;
xad->flag = xflag & ~XAD_NOTRECORDED;
/* advance next available entry index. */
p->header.nextindex =
cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
}
/*
* does nXAD force 3-way split ?
*
* |---nXAD--->|
* --|----------XAD-------------|--
* |-lXAD-| |-rXAD -|
*/
if (nxoff + nxlen == xoff + xlen)
goto out;
/* reorient nXAD as XAD for further split XAD into (nXAD, rXAD) */
if (newpage) {
/* close out old page */
if (!test_cflag(COMMIT_Nolink, ip)) {
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(index0, (int)xtlck->lwm.offset) : index0;
xtlck->lwm.length =
le16_to_cpu(p->header.nextindex) -
xtlck->lwm.offset;
}
bn = le64_to_cpu(p->header.next);
XT_PUTPAGE(mp);
/* get new right page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
index0 = index = newindex;
} else
index++;
newindex = index + 1;
nextindex = le16_to_cpu(p->header.nextindex);
xlen = xlen - (nxoff - xoff);
xoff = nxoff;
xaddr = nxaddr;
/* recompute split pages */
if (nextindex == le16_to_cpu(p->header.maxentry)) {
XT_PUTPAGE(mp);
if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT)))
return rc;
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
if (cmp != 0) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "xtSearch failed\n");
return -EIO;
}
if (index0 != index) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "unexpected value of index\n");
return -EIO;
}
}
/*
* split XAD into (nXAD, rXAD)
*
* ---nXAD---|
* --|----------XAD----------|--
* |-rXAD-|
*/
updateLeft: /* (nxoff == xoff) && (nxlen < xlen) */
/* update old XAD with nXAD:recorded */
xad = &p->xad[index];
*xad = *nxad;
xad->flag = xflag & ~XAD_NOTRECORDED;
/* insert rXAD:not_recorded */
xoff = xoff + nxlen;
xlen = xlen - nxlen;
xaddr = xaddr + nxlen;
if (nextindex == le16_to_cpu(p->header.maxentry)) {
/*
printf("xtUpdate.updateLeft.split p:0x%p\n", p);
*/
/* xtSplitUp() unpins leaf pages */
split.mp = mp;
split.index = newindex;
split.flag = xflag;
split.off = xoff;
split.len = xlen;
split.addr = xaddr;
split.pxdlist = NULL;
if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
* resides on the new child page;
*/
if (p->header.flag & BT_INTERNAL) {
ASSERT(p->header.nextindex ==
cpu_to_le16(XTENTRYSTART + 1));
xad = &p->xad[XTENTRYSTART];
bn = addressXAD(xad);
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
}
}
} else {
/* if insert into middle, shift right remaining entries */
if (newindex < nextindex)
memmove(&p->xad[newindex + 1], &p->xad[newindex],
(nextindex - newindex) << L2XTSLOTSIZE);
/* insert the entry */
xad = &p->xad[newindex];
XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
/* advance next available entry index. */
p->header.nextindex =
cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
}
out:
if (!test_cflag(COMMIT_Nolink, ip)) {
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(index0, (int)xtlck->lwm.offset) : index0;
xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
xtlck->lwm.offset;
}
/* unpin the leaf page */
XT_PUTPAGE(mp);
return rc;
}
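/*
 * Worked example of the 3-way split handled by xtUpdate() above
 * (editor's illustration, hypothetical numbers):
 *
 *	XAD:  off=100, len=30, not recorded
 *	nXAD: off=110, len=10, recorded
 *
 * updateRight truncates the old entry to lXAD (off=100, len=10) and
 * inserts nXAD; since nXAD ends before the old extent did, updateLeft
 * then inserts rXAD (off=120, len=10, still not recorded).
 */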
/*
* xtAppend()
*
 * function: grow in append mode from the contiguous region specified;
*
* parameter:
* tid - transaction id;
* ip - file object;
* xflag - extent flag:
* xoff - extent offset;
* maxblocks - max extent length;
* xlen - extent length (in/out);
* xaddrp - extent address pointer (in/out):
* flag -
*
 * return: 0 for success; -errno on error;
*/
int xtAppend(tid_t tid, /* transaction id */
struct inode *ip, int xflag, s64 xoff, s32 maxblocks,
s32 * xlenp, /* (in/out) */
s64 * xaddrp, /* (in/out) */
int flag)
{
int rc = 0;
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn, xaddr;
int index, nextindex;
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
int cmp;
struct tlock *tlck;
struct xtlock *xtlck;
int nsplit, nblocks, xlen;
struct pxdlist pxdlist;
pxd_t *pxd;
s64 next;
xaddr = *xaddrp;
xlen = *xlenp;
jfs_info("xtAppend: xoff:0x%lx maxblocks:%d xlen:%d xaddr:0x%lx",
(ulong) xoff, maxblocks, xlen, (ulong) xaddr);
/*
* search for the entry location at which to insert:
*
 * xtFastSearch() and xtSearch() both return (leaf page
* pinned, index at which to insert).
* n.b. xtSearch() may return index of maxentry of
* the full page.
*/
if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT)))
return rc;
/* retrieve search result */
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
if (cmp == 0) {
rc = -EEXIST;
goto out;
}
if (next)
xlen = min(xlen, (int)(next - xoff));
//insert:
/*
* insert entry for new extent
*/
xflag |= XAD_NEW;
/*
* if the leaf page is full, split the page and
* propagate up the router entry for the new page from split
*
* The xtSplitUp() will insert the entry and unpin the leaf page.
*/
nextindex = le16_to_cpu(p->header.nextindex);
if (nextindex < le16_to_cpu(p->header.maxentry))
goto insertLeaf;
/*
* allocate new index blocks to cover index page split(s)
*/
nsplit = btstack.nsplit;
split.pxdlist = &pxdlist;
pxdlist.maxnpxd = pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
nblocks = JFS_SBI(ip->i_sb)->nbperpage;
for (; nsplit > 0; nsplit--, pxd++, xaddr += nblocks, maxblocks -= nblocks) {
if ((rc = dbAllocBottomUp(ip, xaddr, (s64) nblocks)) == 0) {
PXDaddress(pxd, xaddr);
PXDlength(pxd, nblocks);
pxdlist.maxnpxd++;
continue;
}
/* undo allocation */
goto out;
}
xlen = min(xlen, maxblocks);
/*
* allocate data extent requested
*/
if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
goto out;
split.mp = mp;
split.index = index;
split.flag = xflag;
split.off = xoff;
split.len = xlen;
split.addr = xaddr;
if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
/* undo data extent allocation */
dbFree(ip, *xaddrp, (s64) * xlenp);
return rc;
}
*xaddrp = xaddr;
*xlenp = xlen;
return 0;
/*
* insert the new entry into the leaf page
*/
insertLeaf:
/*
* allocate data extent requested
*/
if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
goto out;
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page;
*
* action: xad insertion/extension;
*/
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (struct xtlock *) & tlck->lock;
/* insert the new entry: mark the entry NEW */
xad = &p->xad[index];
XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);
/* advance next available entry index */
le16_add_cpu(&p->header.nextindex, 1);
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index,(int) xtlck->lwm.offset) : index;
xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
xtlck->lwm.offset;
*xaddrp = xaddr;
*xlenp = xlen;
out:
/* unpin the leaf page */
XT_PUTPAGE(mp);
return rc;
}
/*
* xtInitRoot()
*
* initialize file root (inline in inode)
*/
void xtInitRoot(tid_t tid, struct inode *ip)
{
xtpage_t *p;
/*
* acquire a transaction lock on the root
*
* action:
*/
txLock(tid, ip, (struct metapage *) &JFS_IP(ip)->bxflag,
tlckXTREE | tlckNEW);
p = &JFS_IP(ip)->i_xtroot;
p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
p->header.nextindex = cpu_to_le16(XTENTRYSTART);
if (S_ISDIR(ip->i_mode))
p->header.maxentry = cpu_to_le16(XTROOTINITSLOT_DIR);
else {
p->header.maxentry = cpu_to_le16(XTROOTINITSLOT);
ip->i_size = 0;
}
return;
}
/*
* We can run into a deadlock truncating a file with a large number of
* xtree pages (large fragmented file). A robust fix would entail a
* reservation system where we would reserve a number of metadata pages
* and tlocks which we would be guaranteed without a deadlock. Without
* this, a partial fix is to limit number of metadata pages we will lock
* in a single transaction. Currently we will truncate the file so that
* no more than 50 leaf pages will be locked. The caller of xtTruncate
* will be responsible for ensuring that the current transaction gets
* committed, and that subsequent transactions are created to truncate
* the file further if needed.
*/
#define MAX_TRUNCATE_LEAVES 50
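/*
 * Editor's sketch of the caller pattern this limit implies (assumed
 * names from jfs_txnmgr.h; see jfs_truncate() in jfs_inode.c for the
 * real thing): keep truncating in bounded transactions until the
 * requested size is reached.
 *
 *	do {
 *		tid = txBegin(ip->i_sb, 0);
 *		newsize = xtTruncate(tid, ip, length,
 *				     COMMIT_TRUNCATE | COMMIT_PWMAP);
 *		if (newsize < 0) {
 *			txEnd(tid);
 *			break;
 *		}
 *		txCommit(tid, 1, &ip, 0);
 *		txEnd(tid);
 *	} while (newsize > length);
 */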
/*
* xtTruncate()
*
* function:
* traverse for truncation logging backward bottom up;
* terminate at the last extent entry at the current subtree
 * root page covering the new (smaller) size.
* truncation may occur within the last extent entry.
*
* parameter:
* int tid,
* struct inode *ip,
* s64 newsize,
* int type) {PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
*
 * return: the resulting size (may be larger than newsize when the
 * MAX_TRUNCATE_LEAVES limit is hit); -errno on error.
 *
* note:
* PWMAP:
* 1. truncate (non-COMMIT_NOLINK file)
* by jfs_truncate() or jfs_open(O_TRUNC):
* xtree is updated;
* 2. truncate index table of directory when last entry removed
* map update via tlock at commit time;
* PMAP:
* Call xtTruncate_pmap instead
* WMAP:
* 1. remove (free zero link count) on last reference release
* (pmap has been freed at commit zero link count);
* 2. truncate (COMMIT_NOLINK file, i.e., tmp file):
* xtree is updated;
* map update directly at truncation time;
*
* if (DELETE)
* no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
* else if (TRUNCATE)
* must write LOG_NOREDOPAGE for deleted index page;
*
* pages may already have been tlocked by anonymous transactions
* during file growth (i.e., write) before truncation;
*
* except for the last truncated entry, deleted entries remain as is
* in the page (nextindex is updated) for other uses
* (e.g., logging/updating the allocation map): this avoids copying
* the page info but delays freeing of the pages;
*
*/
s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
{
int rc = 0;
s64 teof;
struct metapage *mp;
xtpage_t *p;
s64 bn;
int index, nextindex;
xad_t *xad;
s64 xoff, xaddr;
int xlen, len, freexlen;
struct btstack btstack;
struct btframe *parent;
struct tblock *tblk = NULL;
struct tlock *tlck = NULL;
struct xtlock *xtlck = NULL;
struct xdlistlock xadlock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
s64 nfreed;
int freed, log;
int locked_leaves = 0;
/* save object truncation type */
if (tid) {
tblk = tid_to_tblock(tid);
tblk->xflag |= flag;
}
nfreed = 0;
flag &= COMMIT_MAP;
assert(flag != COMMIT_PMAP);
if (flag == COMMIT_PWMAP)
log = 1;
else {
log = 0;
xadlock.flag = mlckFREEXADLIST;
xadlock.index = 1;
}
/*
* if the newsize is not an integral number of pages,
* the file between newsize and next page boundary will
* be cleared.
* if truncating into a file hole, it will cause
* a full block to be allocated for the logical block.
*/
/*
* release page blocks of truncated region <teof, eof>
*
* free the data blocks from the leaf index blocks.
* delete the parent index entries corresponding to
* the freed child data/index blocks.
* free the index blocks themselves which aren't needed
* in new sized file.
*
* index blocks are updated only if the blocks are to be
* retained in the new sized file.
* if type is PMAP, the data and index pages are NOT
* freed, and the data and index blocks are NOT freed
* from working map.
* (this will allow continued access of data/index of
* temporary file (zerolink count file truncated to zero-length)).
*/
teof = (newsize + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
JFS_SBI(ip->i_sb)->l2bsize;
/* clear stack */
BT_CLR(&btstack);
/*
* start with root
*
* root resides in the inode
*/
bn = 0;
/*
* first access of each page:
*/
getPage:
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
/* Since this is the rightmost page at this level, and we may have
* already freed a page that was formerly to the right, let's make
* sure that the next pointer is zero.
*/
if (p->header.next) {
if (log)
/*
* Make sure this change to the header is logged.
* If we really truncate this leaf, the flag
* will be changed to tlckTRUNCATE
*/
tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
BT_MARK_DIRTY(mp, ip);
p->header.next = 0;
}
if (p->header.flag & BT_INTERNAL)
goto getChild;
/*
* leaf page
*/
freed = 0;
/* does region covered by leaf page precede Teof ? */
xad = &p->xad[index];
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
if (teof >= xoff + xlen) {
XT_PUTPAGE(mp);
goto getParent;
}
/* (re)acquire tlock of the leaf page */
if (log) {
if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
/*
* We need to limit the size of the transaction
* to avoid exhausting pagecache & tlocks
*/
XT_PUTPAGE(mp);
newsize = (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
goto getParent;
}
tlck = txLock(tid, ip, mp, tlckXTREE);
tlck->type = tlckXTREE | tlckTRUNCATE;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
}
BT_MARK_DIRTY(mp, ip);
/*
* scan backward leaf page entries
*/
for (; index >= XTENTRYSTART; index--) {
xad = &p->xad[index];
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
xaddr = addressXAD(xad);
/*
* The "data" for a directory is indexed by the block
* device's address space. This metadata must be invalidated
* here
*/
if (S_ISDIR(ip->i_mode) && (teof == 0))
invalidate_xad_metapages(ip, *xad);
/*
* entry beyond eof: continue scan of current page
* xad
* ---|---=======------->
* eof
*/
if (teof < xoff) {
nfreed += xlen;
continue;
}
/*
* (xoff <= teof): last entry to be deleted from page;
* If other entries remain in page: keep and update the page.
*/
/*
* eof == entry_start: delete the entry
* xad
* -------|=======------->
* eof
*
*/
if (teof == xoff) {
nfreed += xlen;
if (index == XTENTRYSTART)
break;
nextindex = index;
}
/*
* eof within the entry: truncate the entry.
* xad
* -------===|===------->
* eof
*/
else if (teof < xoff + xlen) {
/* update truncated entry */
len = teof - xoff;
freexlen = xlen - len;
XADlength(xad, len);
/* save pxd of truncated extent in tlck */
xaddr += len;
if (log) { /* COMMIT_PWMAP */
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(index, (int)xtlck->lwm.offset) : index;
xtlck->lwm.length = index + 1 -
xtlck->lwm.offset;
xtlck->twm.offset = index;
pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, freexlen);
}
/* free truncated extent */
else { /* COMMIT_WMAP */
pxdlock = (struct pxd_lock *) & xadlock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, freexlen);
txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP);
/* reset map lock */
xadlock.flag = mlckFREEXADLIST;
}
/* current entry is new last entry; */
nextindex = index + 1;
nfreed += freexlen;
}
/*
* eof beyond the entry:
* xad
* -------=======---|--->
* eof
*/
else { /* (xoff + xlen < teof) */
nextindex = index + 1;
}
if (nextindex < le16_to_cpu(p->header.nextindex)) {
if (!log) { /* COMMIT_WMAP */
xadlock.xdlist = &p->xad[nextindex];
xadlock.count =
le16_to_cpu(p->header.nextindex) -
nextindex;
txFreeMap(ip, (struct maplock *) & xadlock,
NULL, COMMIT_WMAP);
}
p->header.nextindex = cpu_to_le16(nextindex);
}
XT_PUTPAGE(mp);
/* assert(freed == 0); */
goto getParent;
} /* end scan of leaf page entries */
freed = 1;
/*
* leaf page has become empty: free the page if type != PMAP
*/
if (log) { /* COMMIT_PWMAP */
/* txCommit() with tlckFREE:
* free data extents covered by leaf [XTENTRYSTART:hwm);
* invalidate leaf if COMMIT_PWMAP;
* if (TRUNCATE), will write LOG_NOREDOPAGE;
*/
tlck->type = tlckXTREE | tlckFREE;
} else { /* COMMIT_WMAP */
/* free data extents covered by leaf */
xadlock.xdlist = &p->xad[XTENTRYSTART];
xadlock.count =
le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP);
}
if (p->header.flag & BT_ROOT) {
p->header.flag &= ~BT_INTERNAL;
p->header.flag |= BT_LEAF;
p->header.nextindex = cpu_to_le16(XTENTRYSTART);
XT_PUTPAGE(mp); /* debug */
goto out;
} else {
if (log) { /* COMMIT_PWMAP */
/* page will be invalidated at tx completion
*/
XT_PUTPAGE(mp);
} else { /* COMMIT_WMAP */
if (mp->lid)
lid_to_tlock(mp->lid)->flag |= tlckFREELOCK;
/* invalidate empty leaf page */
discard_metapage(mp);
}
}
/*
* the leaf page has become empty: delete the parent entry
* for the leaf page if the parent page is to be kept
* in the new sized file.
*/
/*
* go back up to the parent page
*/
getParent:
/* pop/restore parent entry for the current child page */
if ((parent = BT_POP(&btstack)) == NULL)
/* current page must have been root */
goto out;
/* get back the parent page */
bn = parent->bn;
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
index = parent->index;
/*
* child page was not empty:
*/
if (freed == 0) {
/* has any entry deleted from parent ? */
if (index < le16_to_cpu(p->header.nextindex) - 1) {
/* (re)acquire tlock on the parent page */
if (log) { /* COMMIT_PWMAP */
/* txCommit() with tlckTRUNCATE:
* free child extents covered by parent [index + 1, nextindex);
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
if (!(tlck->type & tlckTRUNCATE)) {
xtlck->hwm.offset =
le16_to_cpu(p->header.nextindex) - 1;
tlck->type =
tlckXTREE | tlckTRUNCATE;
}
} else { /* COMMIT_WMAP */
/* free child extents covered by parent */
xadlock.xdlist = &p->xad[index + 1];
xadlock.count =
le16_to_cpu(p->header.nextindex) -
index - 1;
txFreeMap(ip, (struct maplock *) & xadlock,
NULL, COMMIT_WMAP);
}
BT_MARK_DIRTY(mp, ip);
p->header.nextindex = cpu_to_le16(index + 1);
}
XT_PUTPAGE(mp);
goto getParent;
}
/*
* child page was empty:
*/
nfreed += lengthXAD(&p->xad[index]);
/*
* During working map update, child page's tlock must be handled
* before parent's. This is because the parent's tlock will cause
* the child's disk space to be marked available in the wmap, so
* it's important that the child page be released by that time.
*
* ToDo: tlocks should be on doubly-linked list, so we can
* quickly remove it and add it to the end.
*/
/*
* Move parent page's tlock to the end of the tid's tlock list
*/
if (log && mp->lid && (tblk->last != mp->lid) &&
lid_to_tlock(mp->lid)->tid) {
lid_t lid = mp->lid;
struct tlock *prev;
tlck = lid_to_tlock(lid);
if (tblk->next == lid)
tblk->next = tlck->next;
else {
for (prev = lid_to_tlock(tblk->next);
prev->next != lid;
prev = lid_to_tlock(prev->next)) {
assert(prev->next);
}
prev->next = tlck->next;
}
lid_to_tlock(tblk->last)->next = lid;
tlck->next = 0;
tblk->last = lid;
}
/*
* parent page has become empty: free the page
*/
if (index == XTENTRYSTART) {
if (log) { /* COMMIT_PWMAP */
/* txCommit() with tlckFREE:
* free child extents covered by parent;
* invalidate parent if COMMIT_PWMAP;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset =
le16_to_cpu(p->header.nextindex) - 1;
tlck->type = tlckXTREE | tlckFREE;
} else { /* COMMIT_WMAP */
/* free child extents covered by parent */
xadlock.xdlist = &p->xad[XTENTRYSTART];
xadlock.count =
le16_to_cpu(p->header.nextindex) -
XTENTRYSTART;
txFreeMap(ip, (struct maplock *) & xadlock, NULL,
COMMIT_WMAP);
}
BT_MARK_DIRTY(mp, ip);
if (p->header.flag & BT_ROOT) {
p->header.flag &= ~BT_INTERNAL;
p->header.flag |= BT_LEAF;
p->header.nextindex = cpu_to_le16(XTENTRYSTART);
if (le16_to_cpu(p->header.maxentry) == XTROOTMAXSLOT) {
/*
* Shrink root down to allow inline
* EA (otherwise fsck complains)
*/
p->header.maxentry =
cpu_to_le16(XTROOTINITSLOT);
JFS_IP(ip)->mode2 |= INLINEEA;
}
XT_PUTPAGE(mp); /* debug */
goto out;
} else {
if (log) { /* COMMIT_PWMAP */
/* page will be invalidated at tx completion
*/
XT_PUTPAGE(mp);
} else { /* COMMIT_WMAP */
if (mp->lid)
lid_to_tlock(mp->lid)->flag |=
tlckFREELOCK;
/* invalidate parent page */
discard_metapage(mp);
}
/* parent has become empty and freed:
* go back up to its parent page
*/
/* freed = 1; */
goto getParent;
}
}
/*
* parent page still has entries for front region;
*/
else {
/* try truncate region covered by preceding entry
* (process backward)
*/
index--;
/* go back down to the child page corresponding
* to the entry
*/
goto getChild;
}
/*
* internal page: go down to child page of current entry
*/
getChild:
/* save current parent entry for the child page */
if (BT_STACK_FULL(&btstack)) {
jfs_error(ip->i_sb, "stack overrun!\n");
XT_PUTPAGE(mp);
return -EIO;
}
BT_PUSH(&btstack, bn, index);
/* get child page */
xad = &p->xad[index];
bn = addressXAD(xad);
/*
* first access of each internal entry:
*/
/* release parent page */
XT_PUTPAGE(mp);
/* process the child page */
goto getPage;
out:
/*
* update file resource stat
*/
/* set size
*/
if (S_ISDIR(ip->i_mode) && !newsize)
ip->i_size = 1; /* fsck hates zero-length directories */
else
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
dquot_free_block(ip, nfreed);
/*
* free tlock of invalidated pages
*/
if (flag == COMMIT_WMAP)
txFreelock(ip);
return newsize;
}
/*
* xtTruncate_pmap()
*
* function:
* Perform truncate to zero length for deleted file, leaving the
* xtree and working map untouched. This allows the file to
* be accessed via open file handles, while the delete of the file
* is committed to disk.
*
* parameter:
* tid_t tid,
* struct inode *ip,
* s64 committed_size)
*
* return: new committed size
*
* note:
*
* To avoid deadlock by holding too many transaction locks, the
* truncation may be broken up into multiple transactions.
* The committed_size keeps track of how much of the file has been
* freed from the pmaps.
*/
s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
{
s64 bn;
struct btstack btstack;
int cmp;
int index;
int locked_leaves = 0;
struct metapage *mp;
xtpage_t *p;
struct btframe *parent;
int rc;
struct tblock *tblk;
struct tlock *tlck = NULL;
xad_t *xad;
int xlen;
s64 xoff;
struct xtlock *xtlck = NULL;
/* save object truncation type */
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_PMAP;
/* clear stack */
BT_CLR(&btstack);
if (committed_size) {
xoff = (committed_size >> JFS_SBI(ip->i_sb)->l2bsize) - 1;
rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0);
if (rc)
return rc;
XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
if (cmp != 0) {
XT_PUTPAGE(mp);
jfs_error(ip->i_sb, "did not find extent\n");
return -EIO;
}
} else {
/*
* start with root
*
* root resides in the inode
*/
bn = 0;
/*
* first access of each page:
*/
getPage:
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
if (p->header.flag & BT_INTERNAL)
goto getChild;
}
/*
* leaf page
*/
if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
/*
* We need to limit the size of the transaction
* to avoid exhausting pagecache & tlocks
*/
xad = &p->xad[index];
xoff = offsetXAD(xad);
xlen = lengthXAD(xad);
XT_PUTPAGE(mp);
return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
}
tlck = txLock(tid, ip, mp, tlckXTREE);
tlck->type = tlckXTREE | tlckFREE;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset = index;
XT_PUTPAGE(mp);
/*
* go back up to the parent page
*/
getParent:
/* pop/restore parent entry for the current child page */
if ((parent = BT_POP(&btstack)) == NULL)
/* current page must have been root */
goto out;
/* get back the parent page */
bn = parent->bn;
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
index = parent->index;
/*
* parent page has become empty: free the page
*/
if (index == XTENTRYSTART) {
/* txCommit() with tlckFREE:
* free child extents covered by parent;
* invalidate parent if COMMIT_PWMAP;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
tlck->type = tlckXTREE | tlckFREE;
XT_PUTPAGE(mp);
if (p->header.flag & BT_ROOT) {
goto out;
} else {
goto getParent;
}
}
/*
* parent page still has entries for front region;
*/
else
index--;
/*
* internal page: go down to child page of current entry
*/
getChild:
/* save current parent entry for the child page */
if (BT_STACK_FULL(&btstack)) {
jfs_error(ip->i_sb, "stack overrun!\n");
XT_PUTPAGE(mp);
return -EIO;
}
BT_PUSH(&btstack, bn, index);
/* get child page */
xad = &p->xad[index];
bn = addressXAD(xad);
/*
* first access of each internal entry:
*/
/* release parent page */
XT_PUTPAGE(mp);
/* process the child page */
goto getPage;
out:
return 0;
}
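/*
 * Illustrative sketch (editorial addition): xtTruncate_pmap() is
 * driven the same way, feeding the returned committed size back in
 * until it reaches 0, as jfs_unlink() does after commitZeroLink().
 * Hypothetical helper; commit_mutex handling is elided.
 */
static inline int example_pmap_truncate(struct inode *ip, s64 new_size)
{
	tid_t tid;
	int rc = 0;

	while (new_size && rc == 0) {
		tid = txBegin(ip->i_sb, 0);
		new_size = xtTruncate_pmap(tid, ip, new_size);
		if (new_size < 0) {
			txAbort(tid, 1);	/* marks the fs dirty */
			rc = new_size;
		} else {
			rc = txCommit(tid, 1, &ip, COMMIT_SYNC);
		}
		txEnd(tid);
	}
	return rc;
}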
#ifdef CONFIG_JFS_STATISTICS
int jfs_xtstat_proc_show(struct seq_file *m, void *v)
{
seq_printf(m,
"JFS Xtree statistics\n"
"====================\n"
"searches = %d\n"
"fast searches = %d\n"
"splits = %d\n",
xtStat.search,
xtStat.fastSearch,
xtStat.split);
return 0;
}
#endif
| linux-master | fs/jfs/jfs_xtree.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) International Business Machines Corp., 2000-2004
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
/*
* NAME: jfs_strfromUCS_le()
*
* FUNCTION: Convert little-endian unicode string to character string
*
*/
int jfs_strfromUCS_le(char *to, const __le16 * from,
int len, struct nls_table *codepage)
{
int i;
int outlen = 0;
static int warn_again = 5; /* Only warn up to 5 times total */
int warn = !!warn_again; /* once per string */
if (codepage) {
for (i = 0; (i < len) && from[i]; i++) {
int charlen;
charlen =
codepage->uni2char(le16_to_cpu(from[i]),
&to[outlen],
NLS_MAX_CHARSET_SIZE);
if (charlen > 0)
outlen += charlen;
else
to[outlen++] = '?';
}
} else {
for (i = 0; (i < len) && from[i]; i++) {
if (unlikely(le16_to_cpu(from[i]) & 0xff00)) {
to[i] = '?';
if (unlikely(warn)) {
warn--;
warn_again--;
printk(KERN_ERR
"non-latin1 character 0x%x found in JFS file name\n",
le16_to_cpu(from[i]));
printk(KERN_ERR
"mount with iocharset=utf8 to access\n");
}
}
else
to[i] = (char) (le16_to_cpu(from[i]));
}
outlen = i;
}
to[outlen] = 0;
return outlen;
}
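/*
 * Hypothetical helper (editorial addition) showing safe output sizing
 * for jfs_strfromUCS_le(): with a codepage, each UniChar can expand to
 * up to NLS_MAX_CHARSET_SIZE bytes, plus one byte for the terminating
 * NUL.
 */
static inline char *example_ucs_to_cstring(const __le16 *from, int len,
					   struct nls_table *codepage)
{
	char *buf = kmalloc(len * NLS_MAX_CHARSET_SIZE + 1, GFP_NOFS);

	if (buf)
		jfs_strfromUCS_le(buf, from, len, codepage);
	return buf;
}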
/*
* NAME: jfs_strtoUCS()
*
* FUNCTION: Convert character string to unicode string
*
*/
static int jfs_strtoUCS(wchar_t * to, const unsigned char *from, int len,
struct nls_table *codepage)
{
int charlen;
int i;
if (codepage) {
for (i = 0; len && *from; i++, from += charlen, len -= charlen)
{
charlen = codepage->char2uni(from, len, &to[i]);
if (charlen < 1) {
jfs_err("jfs_strtoUCS: char2uni returned %d.",
charlen);
jfs_err("charset = %s, char = 0x%x",
codepage->charset, *from);
return charlen;
}
}
} else {
for (i = 0; (i < len) && from[i]; i++)
to[i] = (wchar_t) from[i];
}
to[i] = 0;
return i;
}
/*
* NAME: get_UCSname()
*
* FUNCTION: Allocate and translate to unicode string
*
*/
int get_UCSname(struct component_name * uniName, struct dentry *dentry)
{
struct nls_table *nls_tab = JFS_SBI(dentry->d_sb)->nls_tab;
int length = dentry->d_name.len;
if (length > JFS_NAME_MAX)
return -ENAMETOOLONG;
uniName->name =
kmalloc_array(length + 1, sizeof(wchar_t), GFP_NOFS);
if (uniName->name == NULL)
return -ENOMEM;
uniName->namlen = jfs_strtoUCS(uniName->name, dentry->d_name.name,
length, nls_tab);
if (uniName->namlen < 0) {
kfree(uniName->name);
return uniName->namlen;
}
return 0;
}
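/*
 * Typical pairing (editorial sketch, not in this file): get_UCSname()
 * allocates uniName->name, so every caller must release it with
 * free_UCSname() once the lookup is done:
 *
 *	struct component_name key;
 *	int rc = get_UCSname(&key, dentry);
 *	if (rc)
 *		return rc;
 *	... use key.name / key.namlen for the dtree search ...
 *	free_UCSname(&key);
 */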
| linux-master | fs/jfs/jfs_unicode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* QNX6 file system, Linux implementation.
*
* Version : 1.0.0
*
* History :
*
* 01-02-2012 by Kai Bankett ([email protected]) : first release.
*
*/
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "qnx6.h"
static void qnx6_mmi_copy_sb(struct qnx6_super_block *qsb,
struct qnx6_mmi_super_block *sb)
{
qsb->sb_magic = sb->sb_magic;
qsb->sb_checksum = sb->sb_checksum;
qsb->sb_serial = sb->sb_serial;
qsb->sb_blocksize = sb->sb_blocksize;
qsb->sb_num_inodes = sb->sb_num_inodes;
qsb->sb_free_inodes = sb->sb_free_inodes;
qsb->sb_num_blocks = sb->sb_num_blocks;
qsb->sb_free_blocks = sb->sb_free_blocks;
/* the rest of the superblock is the same */
memcpy(&qsb->Inode, &sb->Inode, sizeof(sb->Inode));
memcpy(&qsb->Bitmap, &sb->Bitmap, sizeof(sb->Bitmap));
memcpy(&qsb->Longfile, &sb->Longfile, sizeof(sb->Longfile));
}
struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, int silent)
{
struct buffer_head *bh1, *bh2 = NULL;
struct qnx6_mmi_super_block *sb1, *sb2;
struct qnx6_super_block *qsb = NULL;
struct qnx6_sb_info *sbi;
__u64 offset;
/* Check the superblock signatures
start with the first superblock */
bh1 = sb_bread(s, 0);
if (!bh1) {
pr_err("Unable to read first mmi superblock\n");
return NULL;
}
sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;
sbi = QNX6_SB(s);
if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) {
if (!silent)
pr_err("wrong signature (magic) in superblock #1.\n");
goto out;
}
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb1->sb_checksum) !=
crc32_be(0, (char *)(bh1->b_data + 8), 504)) {
pr_err("superblock #1 checksum error\n");
goto out;
}
/* calculate second superblock blocknumber */
offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) +
(QNX6_SUPERBLOCK_AREA / fs32_to_cpu(sbi, sb1->sb_blocksize));
/* set new blocksize */
if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) {
pr_err("unable to set blocksize\n");
goto out;
}
/* blocksize invalidates bh - pull it back in */
brelse(bh1);
bh1 = sb_bread(s, 0);
if (!bh1)
goto out;
sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;
/* read second superblock */
bh2 = sb_bread(s, offset);
if (!bh2) {
pr_err("unable to read the second superblock\n");
goto out;
}
sb2 = (struct qnx6_mmi_super_block *)bh2->b_data;
if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) {
if (!silent)
pr_err("wrong signature (magic) in superblock #2.\n");
goto out;
}
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb2->sb_checksum)
!= crc32_be(0, (char *)(bh2->b_data + 8), 504)) {
pr_err("superblock #1 checksum error\n");
goto out;
}
qsb = kmalloc(sizeof(*qsb), GFP_KERNEL);
if (!qsb) {
pr_err("unable to allocate memory.\n");
goto out;
}
if (fs64_to_cpu(sbi, sb1->sb_serial) >
fs64_to_cpu(sbi, sb2->sb_serial)) {
/* superblock #1 active */
qnx6_mmi_copy_sb(qsb, sb1);
#ifdef CONFIG_QNX6FS_DEBUG
qnx6_superblock_debug(qsb, s);
#endif
memcpy(bh1->b_data, qsb, sizeof(struct qnx6_super_block));
sbi->sb_buf = bh1;
sbi->sb = (struct qnx6_super_block *)bh1->b_data;
brelse(bh2);
pr_info("superblock #1 active\n");
} else {
/* superblock #2 active */
qnx6_mmi_copy_sb(qsb, sb2);
#ifdef CONFIG_QNX6FS_DEBUG
qnx6_superblock_debug(qsb, s);
#endif
memcpy(bh2->b_data, qsb, sizeof(struct qnx6_super_block));
sbi->sb_buf = bh2;
sbi->sb = (struct qnx6_super_block *)bh2->b_data;
brelse(bh1);
pr_info("superblock #2 active\n");
}
kfree(qsb);
/* offset for mmi_fs is just SUPERBLOCK_AREA bytes */
sbi->s_blks_off = QNX6_SUPERBLOCK_AREA / s->s_blocksize;
/* success */
return sbi->sb;
out:
if (bh1 != NULL)
brelse(bh1);
if (bh2 != NULL)
brelse(bh2);
return NULL;
}
| linux-master | fs/qnx6/super_mmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* QNX6 file system, Linux implementation.
*
* Version : 1.0.0
*
* History :
*
* 01-02-2012 by Kai Bankett ([email protected]) : first release.
* 16-02-2012 pagemap extension by Al Viro
*
*/
#include "qnx6.h"
static unsigned qnx6_lfile_checksum(char *name, unsigned size)
{
unsigned crc = 0;
char *end = name + size;
while (name < end) {
crc = ((crc >> 1) + *(name++)) ^
((crc & 0x00000001) ? 0x80000000 : 0);
}
return crc;
}
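/*
 * Editorial note on the recurrence above: each step shifts the running
 * sum right by one bit, adds the next byte, and XORs the shifted-out
 * low bit back in as bit 31 - a cheap, table-free rolling checksum.
 * It is validated against the on-disk value like this (cf.
 * qnx6_dir_longfilename() below):
 *
 *	if (qnx6_lfile_checksum(lf->lf_fname, lf_size) !=
 *	    fs32_to_cpu(sbi, de->de_checksum))
 *		... checksum mismatch ...
 */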
static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page))
kmap(page);
return page;
}
static unsigned last_entry(struct inode *inode, unsigned long page_nr)
{
unsigned long last_byte = inode->i_size;
last_byte -= page_nr << PAGE_SHIFT;
if (last_byte > PAGE_SIZE)
last_byte = PAGE_SIZE;
return last_byte / QNX6_DIR_ENTRY_SIZE;
}
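/*
 * Worked example (editorial, assuming 4 KiB pages and the 32-byte
 * QNX6_DIR_ENTRY_SIZE): every full directory page holds 4096 / 32 =
 * 128 entries; only the last page of the directory can hold fewer.
 */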
static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
struct qnx6_long_dir_entry *de,
struct page **p)
{
struct qnx6_sb_info *sbi = QNX6_SB(sb);
u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
/* within page */
u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
struct address_space *mapping = sbi->longfile->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page))
return ERR_CAST(page);
kmap(*p = page);
return (struct qnx6_long_filename *)(page_address(page) + offs);
}
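/*
 * Worked example for the mapping above (editorial, assuming 1 KiB
 * filesystem blocks on 4 KiB pages): de_long_inode = 5 gives
 * n = 5 >> 2 = 1 (the second page of the longfile inode) and
 * offs = (5 << 10) & ~PAGE_MASK = 0x400, i.e. the second 1 KiB block
 * within that page.
 */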
static int qnx6_dir_longfilename(struct inode *inode,
struct qnx6_long_dir_entry *de,
struct dir_context *ctx,
unsigned de_inode)
{
struct qnx6_long_filename *lf;
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
struct page *page;
int lf_size;
if (de->de_size != 0xff) {
/* error - long filename entries always have size 0xff
in direntry */
pr_err("invalid direntry size (%i).\n", de->de_size);
return 0;
}
lf = qnx6_longname(s, de, &page);
if (IS_ERR(lf)) {
pr_err("Error reading longname\n");
return 0;
}
lf_size = fs16_to_cpu(sbi, lf->lf_size);
if (lf_size > QNX6_LONG_NAME_MAX) {
pr_debug("file %s\n", lf->lf_fname);
pr_err("Filename too long (%i)\n", lf_size);
qnx6_put_page(page);
return 0;
}
/* calc & validate longfilename checksum
mmi 3g filesystem does not have that checksum */
if (!test_opt(s, MMI_FS) && fs32_to_cpu(sbi, de->de_checksum) !=
qnx6_lfile_checksum(lf->lf_fname, lf_size))
pr_info("long filename checksum error.\n");
pr_debug("qnx6_readdir:%.*s inode:%u\n",
lf_size, lf->lf_fname, de_inode);
if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
qnx6_put_page(page);
return 0;
}
qnx6_put_page(page);
/* success */
return 1;
}
static int qnx6_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode);
unsigned long n = pos >> PAGE_SHIFT;
unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
bool done = false;
ctx->pos = pos;
if (ctx->pos >= inode->i_size)
return 0;
for ( ; !done && n < npages; n++, start = 0) {
struct page *page = qnx6_get_page(inode, n);
int limit = last_entry(inode, n);
struct qnx6_dir_entry *de;
int i = start;
if (IS_ERR(page)) {
pr_err("%s(): read failed\n", __func__);
ctx->pos = (n + 1) << PAGE_SHIFT;
return PTR_ERR(page);
}
de = ((struct qnx6_dir_entry *)page_address(page)) + start;
for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
int size = de->de_size;
u32 no_inode = fs32_to_cpu(sbi, de->de_inode);
if (!no_inode || !size)
continue;
if (size > QNX6_SHORT_NAME_MAX) {
/* long filename detected
get the filename from long filename
structure / block */
if (!qnx6_dir_longfilename(inode,
(struct qnx6_long_dir_entry *)de,
ctx, no_inode)) {
done = true;
break;
}
} else {
pr_debug("%s():%.*s inode:%u\n",
__func__, size, de->de_fname,
no_inode);
if (!dir_emit(ctx, de->de_fname, size,
no_inode, DT_UNKNOWN)) {
done = true;
break;
}
}
}
qnx6_put_page(page);
}
return 0;
}
/*
* check if the long filename is correct.
*/
static unsigned qnx6_long_match(int len, const char *name,
struct qnx6_long_dir_entry *de, struct inode *dir)
{
struct super_block *s = dir->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
struct page *page;
int thislen;
struct qnx6_long_filename *lf = qnx6_longname(s, de, &page);
if (IS_ERR(lf))
return 0;
thislen = fs16_to_cpu(sbi, lf->lf_size);
if (len != thislen) {
qnx6_put_page(page);
return 0;
}
if (memcmp(name, lf->lf_fname, len) == 0) {
qnx6_put_page(page);
return fs32_to_cpu(sbi, de->de_inode);
}
qnx6_put_page(page);
return 0;
}
/*
* check if the filename is correct.
*/
static unsigned qnx6_match(struct super_block *s, int len, const char *name,
struct qnx6_dir_entry *de)
{
struct qnx6_sb_info *sbi = QNX6_SB(s);
if (memcmp(name, de->de_fname, len) == 0)
return fs32_to_cpu(sbi, de->de_inode);
return 0;
}
unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
struct page **res_page)
{
struct super_block *s = dir->i_sb;
struct qnx6_inode_info *ei = QNX6_I(dir);
struct page *page = NULL;
unsigned long start, n;
unsigned long npages = dir_pages(dir);
unsigned ino;
struct qnx6_dir_entry *de;
struct qnx6_long_dir_entry *lde;
*res_page = NULL;
if (npages == 0)
return 0;
start = ei->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
page = qnx6_get_page(dir, n);
if (!IS_ERR(page)) {
int limit = last_entry(dir, n);
int i;
de = (struct qnx6_dir_entry *)page_address(page);
for (i = 0; i < limit; i++, de++) {
if (len <= QNX6_SHORT_NAME_MAX) {
/* short filename */
if (len != de->de_size)
continue;
ino = qnx6_match(s, len, name, de);
if (ino)
goto found;
} else if (de->de_size == 0xff) {
/* deal with long filename */
lde = (struct qnx6_long_dir_entry *)de;
ino = qnx6_long_match(len,
name, lde, dir);
if (ino)
goto found;
} else
pr_err("undefined filename size in inode.\n");
}
qnx6_put_page(page);
}
if (++n >= npages)
n = 0;
} while (n != start);
return 0;
found:
*res_page = page;
ei->i_dir_start_lookup = n;
return ino;
}
const struct file_operations qnx6_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = qnx6_readdir,
.fsync = generic_file_fsync,
};
const struct inode_operations qnx6_dir_inode_operations = {
.lookup = qnx6_lookup,
};
| linux-master | fs/qnx6/dir.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* QNX6 file system, Linux implementation.
*
* Version : 1.0.0
*
* History :
*
* 01-02-2012 by Kai Bankett ([email protected]) : first release.
* 16-02-2012 pagemap extension by Al Viro
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/crc32.h>
#include <linux/mpage.h>
#include "qnx6.h"
static const struct super_operations qnx6_sops;
static void qnx6_put_super(struct super_block *sb);
static struct inode *qnx6_alloc_inode(struct super_block *sb);
static void qnx6_free_inode(struct inode *inode);
static int qnx6_remount(struct super_block *sb, int *flags, char *data);
static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf);
static int qnx6_show_options(struct seq_file *seq, struct dentry *root);
static const struct super_operations qnx6_sops = {
.alloc_inode = qnx6_alloc_inode,
.free_inode = qnx6_free_inode,
.put_super = qnx6_put_super,
.statfs = qnx6_statfs,
.remount_fs = qnx6_remount,
.show_options = qnx6_show_options,
};
static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct qnx6_sb_info *sbi = QNX6_SB(sb);
if (sbi->s_mount_opt & QNX6_MOUNT_MMI_FS)
seq_puts(seq, ",mmi_fs");
return 0;
}
static int qnx6_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
*flags |= SB_RDONLY;
return 0;
}
static unsigned qnx6_get_devblock(struct super_block *sb, __fs32 block)
{
struct qnx6_sb_info *sbi = QNX6_SB(sb);
return fs32_to_cpu(sbi, block) + sbi->s_blks_off;
}
static unsigned qnx6_block_map(struct inode *inode, unsigned iblock);
static int qnx6_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
unsigned phys;
pr_debug("qnx6_get_block inode=[%ld] iblock=[%ld]\n",
inode->i_ino, (unsigned long)iblock);
phys = qnx6_block_map(inode, iblock);
if (phys) {
/* logical block is before EOF */
map_bh(bh, inode->i_sb, phys);
}
return 0;
}
static int qnx6_check_blockptr(__fs32 ptr)
{
if (ptr == ~(__fs32)0) {
pr_err("hit unused blockpointer.\n");
return 0;
}
return 1;
}
static int qnx6_read_folio(struct file *file, struct folio *folio)
{
return mpage_read_folio(folio, qnx6_get_block);
}
static void qnx6_readahead(struct readahead_control *rac)
{
mpage_readahead(rac, qnx6_get_block);
}
/*
* returns the block number for the no-th element in the tree
* inodebits required as there are multiple inodes in one inode block
*/
static unsigned qnx6_block_map(struct inode *inode, unsigned no)
{
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
struct qnx6_inode_info *ei = QNX6_I(inode);
unsigned block = 0;
struct buffer_head *bh;
__fs32 ptr;
int levelptr;
int ptrbits = sbi->s_ptrbits;
int bitdelta;
u32 mask = (1 << ptrbits) - 1;
int depth = ei->di_filelevels;
int i;
bitdelta = ptrbits * depth;
levelptr = no >> bitdelta;
if (levelptr > QNX6_NO_DIRECT_POINTERS - 1) {
pr_err("Requested file block number (%u) too big.", no);
return 0;
}
block = qnx6_get_devblock(s, ei->di_block_ptr[levelptr]);
for (i = 0; i < depth; i++) {
bh = sb_bread(s, block);
if (!bh) {
pr_err("Error reading block (%u)\n", block);
return 0;
}
bitdelta -= ptrbits;
levelptr = (no >> bitdelta) & mask;
ptr = ((__fs32 *)bh->b_data)[levelptr];
if (!qnx6_check_blockptr(ptr)) {
brelse(bh);
return 0;
}
block = qnx6_get_devblock(s, ptr);
brelse(bh);
}
return block;
}
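/*
 * Worked example (editorial, assuming a 1 KiB block size): s_ptrbits =
 * ilog2(1024 / 4) = 8, so each indirect block holds 256 pointers. With
 * di_filelevels == 2, file block 'no' resolves via
 * di_block_ptr[no >> 16], then the ((no >> 8) & 255)-th and
 * (no & 255)-th pointers of the two indirect levels.
 */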
static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct qnx6_sb_info *sbi = QNX6_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = sb->s_magic;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = fs32_to_cpu(sbi, sbi->sb->sb_num_blocks);
buf->f_bfree = fs32_to_cpu(sbi, sbi->sb->sb_free_blocks);
buf->f_files = fs32_to_cpu(sbi, sbi->sb->sb_num_inodes);
buf->f_ffree = fs32_to_cpu(sbi, sbi->sb->sb_free_inodes);
buf->f_bavail = buf->f_bfree;
buf->f_namelen = QNX6_LONG_NAME_MAX;
buf->f_fsid = u64_to_fsid(id);
return 0;
}
/*
* Check the root directory of the filesystem to make sure
* it really _is_ a qnx6 filesystem, and to check the size
* of the directory entry.
*/
static const char *qnx6_checkroot(struct super_block *s)
{
static char match_root[2][3] = {".\0\0", "..\0"};
int i, error = 0;
struct qnx6_dir_entry *dir_entry;
struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping;
struct page *page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
return "error reading root directory";
kmap(page);
dir_entry = page_address(page);
for (i = 0; i < 2; i++) {
/* maximum 3 bytes - due to match_root limitation */
if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
error = 1;
}
qnx6_put_page(page);
if (error)
return "error reading root directory.";
return NULL;
}
#ifdef CONFIG_QNX6FS_DEBUG
void qnx6_superblock_debug(struct qnx6_super_block *sb, struct super_block *s)
{
struct qnx6_sb_info *sbi = QNX6_SB(s);
pr_debug("magic: %08x\n", fs32_to_cpu(sbi, sb->sb_magic));
pr_debug("checksum: %08x\n", fs32_to_cpu(sbi, sb->sb_checksum));
pr_debug("serial: %llx\n", fs64_to_cpu(sbi, sb->sb_serial));
pr_debug("flags: %08x\n", fs32_to_cpu(sbi, sb->sb_flags));
pr_debug("blocksize: %08x\n", fs32_to_cpu(sbi, sb->sb_blocksize));
pr_debug("num_inodes: %08x\n", fs32_to_cpu(sbi, sb->sb_num_inodes));
pr_debug("free_inodes: %08x\n", fs32_to_cpu(sbi, sb->sb_free_inodes));
pr_debug("num_blocks: %08x\n", fs32_to_cpu(sbi, sb->sb_num_blocks));
pr_debug("free_blocks: %08x\n", fs32_to_cpu(sbi, sb->sb_free_blocks));
pr_debug("inode_levels: %02x\n", sb->Inode.levels);
}
#endif
enum {
Opt_mmifs,
Opt_err
};
static const match_table_t tokens = {
{Opt_mmifs, "mmi_fs"},
{Opt_err, NULL}
};
static int qnx6_parse_options(char *options, struct super_block *sb)
{
char *p;
struct qnx6_sb_info *sbi = QNX6_SB(sb);
substring_t args[MAX_OPT_ARGS];
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_mmifs:
set_opt(sbi->s_mount_opt, MMI_FS);
break;
default:
return 0;
}
}
return 1;
}
static struct buffer_head *qnx6_check_first_superblock(struct super_block *s,
int offset, int silent)
{
struct qnx6_sb_info *sbi = QNX6_SB(s);
struct buffer_head *bh;
struct qnx6_super_block *sb;
/* Check the superblock signatures
start with the first superblock */
bh = sb_bread(s, offset);
if (!bh) {
pr_err("unable to read the first superblock\n");
return NULL;
}
sb = (struct qnx6_super_block *)bh->b_data;
if (fs32_to_cpu(sbi, sb->sb_magic) != QNX6_SUPER_MAGIC) {
sbi->s_bytesex = BYTESEX_BE;
if (fs32_to_cpu(sbi, sb->sb_magic) == QNX6_SUPER_MAGIC) {
/* we got a big endian fs */
pr_debug("fs got different endianness.\n");
return bh;
} else
sbi->s_bytesex = BYTESEX_LE;
if (!silent) {
if (offset == 0) {
pr_err("wrong signature (magic) in superblock #1.\n");
} else {
pr_info("wrong signature (magic) at position (0x%lx) - will try alternative position (0x0000).\n",
offset * s->s_blocksize);
}
}
brelse(bh);
return NULL;
}
return bh;
}
static struct inode *qnx6_private_inode(struct super_block *s,
struct qnx6_root_node *p);
static int qnx6_fill_super(struct super_block *s, void *data, int silent)
{
struct buffer_head *bh1 = NULL, *bh2 = NULL;
struct qnx6_super_block *sb1 = NULL, *sb2 = NULL;
struct qnx6_sb_info *sbi;
struct inode *root;
const char *errmsg;
struct qnx6_sb_info *qs;
int ret = -EINVAL;
u64 offset;
int bootblock_offset = QNX6_BOOTBLOCK_SIZE;
qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);
if (!qs)
return -ENOMEM;
s->s_fs_info = qs;
/* Superblock always is 512 Byte long */
if (!sb_set_blocksize(s, QNX6_SUPERBLOCK_SIZE)) {
pr_err("unable to set blocksize\n");
goto outnobh;
}
/* parse the mount-options */
if (!qnx6_parse_options((char *) data, s)) {
pr_err("invalid mount options.\n");
goto outnobh;
}
if (test_opt(s, MMI_FS)) {
sb1 = qnx6_mmi_fill_super(s, silent);
if (sb1)
goto mmi_success;
else
goto outnobh;
}
sbi = QNX6_SB(s);
sbi->s_bytesex = BYTESEX_LE;
/* Check the superblock signatures
start with the first superblock */
bh1 = qnx6_check_first_superblock(s,
bootblock_offset / QNX6_SUPERBLOCK_SIZE, silent);
if (!bh1) {
/* try again without bootblock offset */
bh1 = qnx6_check_first_superblock(s, 0, silent);
if (!bh1) {
pr_err("unable to read the first superblock\n");
goto outnobh;
}
/* seems that no bootblock at partition start */
bootblock_offset = 0;
}
sb1 = (struct qnx6_super_block *)bh1->b_data;
#ifdef CONFIG_QNX6FS_DEBUG
qnx6_superblock_debug(sb1, s);
#endif
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb1->sb_checksum) !=
crc32_be(0, (char *)(bh1->b_data + 8), 504)) {
pr_err("superblock #1 checksum error\n");
goto out;
}
/* set new blocksize */
if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) {
pr_err("unable to set blocksize\n");
goto out;
}
/* blocksize invalidates bh - pull it back in */
brelse(bh1);
bh1 = sb_bread(s, bootblock_offset >> s->s_blocksize_bits);
if (!bh1)
goto outnobh;
sb1 = (struct qnx6_super_block *)bh1->b_data;
/* calculate second superblock blocknumber */
offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) +
(bootblock_offset >> s->s_blocksize_bits) +
(QNX6_SUPERBLOCK_AREA >> s->s_blocksize_bits);
/* set bootblock offset */
sbi->s_blks_off = (bootblock_offset >> s->s_blocksize_bits) +
(QNX6_SUPERBLOCK_AREA >> s->s_blocksize_bits);
/* next the second superblock */
bh2 = sb_bread(s, offset);
if (!bh2) {
pr_err("unable to read the second superblock\n");
goto out;
}
sb2 = (struct qnx6_super_block *)bh2->b_data;
if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) {
if (!silent)
pr_err("wrong signature (magic) in superblock #2.\n");
goto out;
}
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb2->sb_checksum) !=
crc32_be(0, (char *)(bh2->b_data + 8), 504)) {
pr_err("superblock #2 checksum error\n");
goto out;
}
if (fs64_to_cpu(sbi, sb1->sb_serial) >=
fs64_to_cpu(sbi, sb2->sb_serial)) {
/* superblock #1 active */
sbi->sb_buf = bh1;
sbi->sb = (struct qnx6_super_block *)bh1->b_data;
brelse(bh2);
pr_info("superblock #1 active\n");
} else {
/* superblock #2 active */
sbi->sb_buf = bh2;
sbi->sb = (struct qnx6_super_block *)bh2->b_data;
brelse(bh1);
pr_info("superblock #2 active\n");
}
mmi_success:
/* sanity check - limit maximum indirect pointer levels */
if (sb1->Inode.levels > QNX6_PTR_MAX_LEVELS) {
pr_err("too many inode levels (max %i, sb %i)\n",
QNX6_PTR_MAX_LEVELS, sb1->Inode.levels);
goto out;
}
if (sb1->Longfile.levels > QNX6_PTR_MAX_LEVELS) {
pr_err("too many longfilename levels (max %i, sb %i)\n",
QNX6_PTR_MAX_LEVELS, sb1->Longfile.levels);
goto out;
}
s->s_op = &qnx6_sops;
s->s_magic = QNX6_SUPER_MAGIC;
s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
s->s_time_min = 0;
s->s_time_max = U32_MAX;
/* ease the later tree level calculations */
sbi = QNX6_SB(s);
sbi->s_ptrbits = ilog2(s->s_blocksize / 4);
sbi->inodes = qnx6_private_inode(s, &sb1->Inode);
if (!sbi->inodes)
goto out;
sbi->longfile = qnx6_private_inode(s, &sb1->Longfile);
if (!sbi->longfile)
goto out1;
/* prefetch root inode */
root = qnx6_iget(s, QNX6_ROOT_INO);
if (IS_ERR(root)) {
pr_err("get inode failed\n");
ret = PTR_ERR(root);
goto out2;
}
ret = -ENOMEM;
s->s_root = d_make_root(root);
if (!s->s_root)
goto out2;
ret = -EINVAL;
errmsg = qnx6_checkroot(s);
if (errmsg != NULL) {
if (!silent)
pr_err("%s\n", errmsg);
goto out3;
}
return 0;
out3:
dput(s->s_root);
s->s_root = NULL;
out2:
iput(sbi->longfile);
out1:
iput(sbi->inodes);
out:
brelse(bh1);
brelse(bh2);
outnobh:
kfree(qs);
s->s_fs_info = NULL;
return ret;
}
static void qnx6_put_super(struct super_block *sb)
{
struct qnx6_sb_info *qs = QNX6_SB(sb);
brelse(qs->sb_buf);
iput(qs->longfile);
iput(qs->inodes);
kfree(qs);
sb->s_fs_info = NULL;
return;
}
static sector_t qnx6_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, qnx6_get_block);
}
static const struct address_space_operations qnx6_aops = {
.read_folio = qnx6_read_folio,
.readahead = qnx6_readahead,
.bmap = qnx6_bmap
};
static struct inode *qnx6_private_inode(struct super_block *s,
struct qnx6_root_node *p)
{
struct inode *inode = new_inode(s);
if (inode) {
struct qnx6_inode_info *ei = QNX6_I(inode);
struct qnx6_sb_info *sbi = QNX6_SB(s);
inode->i_size = fs64_to_cpu(sbi, p->size);
memcpy(ei->di_block_ptr, p->ptr, sizeof(p->ptr));
ei->di_filelevels = p->levels;
inode->i_mode = S_IFREG | S_IRUSR; /* probably wrong */
inode->i_mapping->a_ops = &qnx6_aops;
}
return inode;
}
struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
{
struct qnx6_sb_info *sbi = QNX6_SB(sb);
struct qnx6_inode_entry *raw_inode;
struct inode *inode;
struct qnx6_inode_info *ei;
struct address_space *mapping;
struct page *page;
u32 n, offs;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ei = QNX6_I(inode);
inode->i_mode = 0;
if (ino == 0) {
pr_err("bad inode number on dev %s: %u is out of range\n",
sb->s_id, ino);
iget_failed(inode);
return ERR_PTR(-EIO);
}
n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
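/*
 * Editorial example (assuming 4 KiB pages and 128-byte on-disk
 * inodes, i.e. QNX6_INODE_SIZE_BITS == 7): 32 inodes fit per page,
 * so ino 33 maps to page n = 1, slot offs = 0.
 */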
mapping = sbi->inodes->i_mapping;
page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page)) {
pr_err("major problem: unable to read inode from dev %s\n",
sb->s_id);
iget_failed(inode);
return ERR_CAST(page);
}
kmap(page);
raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs;
inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode);
i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
i_gid_write(inode, (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid));
inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size);
inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime);
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime);
inode->i_atime.tv_nsec = 0;
inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->di_ctime), 0);
/* calc blocks based on 512 byte blocksize */
inode->i_blocks = (inode->i_size + 511) >> 9;
memcpy(&ei->di_block_ptr, &raw_inode->di_block_ptr,
sizeof(raw_inode->di_block_ptr));
ei->di_filelevels = raw_inode->di_filelevels;
if (S_ISREG(inode->i_mode)) {
inode->i_fop = &generic_ro_fops;
inode->i_mapping->a_ops = &qnx6_aops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &qnx6_dir_inode_operations;
inode->i_fop = &qnx6_dir_operations;
inode->i_mapping->a_ops = &qnx6_aops;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &qnx6_aops;
} else
init_special_inode(inode, inode->i_mode, 0);
qnx6_put_page(page);
unlock_new_inode(inode);
return inode;
}
static struct kmem_cache *qnx6_inode_cachep;
static struct inode *qnx6_alloc_inode(struct super_block *sb)
{
struct qnx6_inode_info *ei;
ei = alloc_inode_sb(sb, qnx6_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
}
static void qnx6_free_inode(struct inode *inode)
{
kmem_cache_free(qnx6_inode_cachep, QNX6_I(inode));
}
static void init_once(void *foo)
{
struct qnx6_inode_info *ei = (struct qnx6_inode_info *) foo;
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
qnx6_inode_cachep = kmem_cache_create("qnx6_inode_cache",
sizeof(struct qnx6_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
if (!qnx6_inode_cachep)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(qnx6_inode_cachep);
}
static struct dentry *qnx6_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, qnx6_fill_super);
}
static struct file_system_type qnx6_fs_type = {
.owner = THIS_MODULE,
.name = "qnx6",
.mount = qnx6_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("qnx6");
static int __init init_qnx6_fs(void)
{
int err;
err = init_inodecache();
if (err)
return err;
err = register_filesystem(&qnx6_fs_type);
if (err) {
destroy_inodecache();
return err;
}
pr_info("QNX6 filesystem 1.0.0 registered.\n");
return 0;
}
static void __exit exit_qnx6_fs(void)
{
unregister_filesystem(&qnx6_fs_type);
destroy_inodecache();
}
module_init(init_qnx6_fs)
module_exit(exit_qnx6_fs)
MODULE_LICENSE("GPL");
| linux-master | fs/qnx6/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* QNX6 file system, Linux implementation.
*
* Version : 1.0.0
*
* History :
*
* 01-02-2012 by Kai Bankett ([email protected]) : first release.
* 16-02-2012 pagemap extension by Al Viro
*
*/
#include "qnx6.h"
struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
unsigned ino;
struct page *page;
struct inode *foundinode = NULL;
const char *name = dentry->d_name.name;
int len = dentry->d_name.len;
if (len > QNX6_LONG_NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
ino = qnx6_find_entry(len, dir, name, &page);
if (ino) {
foundinode = qnx6_iget(dir->i_sb, ino);
qnx6_put_page(page);
if (IS_ERR(foundinode))
pr_debug("lookup->iget -> error %ld\n",
PTR_ERR(foundinode));
} else {
pr_debug("%s(): not found %s\n", __func__, name);
}
return d_splice_alias(foundinode, dentry);
}
| linux-master | fs/qnx6/namei.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Process version 3 NFS requests.
*
* Copyright (C) 1996, 1997, 1998 Olaf Kirch <[email protected]>
*/
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/magic.h>
#include <linux/namei.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"
#include "filecache.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
static int nfs3_ftypes[] = {
0, /* NF3NON */
S_IFREG, /* NF3REG */
S_IFDIR, /* NF3DIR */
S_IFBLK, /* NF3BLK */
S_IFCHR, /* NF3CHR */
S_IFLNK, /* NF3LNK */
S_IFSOCK, /* NF3SOCK */
S_IFIFO, /* NF3FIFO */
};
/*
* NULL call.
*/
static __be32
nfsd3_proc_null(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Get a file's attributes
*/
static __be32
nfsd3_proc_getattr(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
dprintk("nfsd: GETATTR(3) %s\n",
SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* Set a file's attributes
*/
static __be32
nfsd3_proc_setattr(struct svc_rqst *rqstp)
{
struct nfsd3_sattrargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
dprintk("nfsd: SETATTR(3) %s\n",
SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_setattr(rqstp, &resp->fh, &attrs,
argp->check_guard, argp->guardtime);
return rpc_success;
}
/*
* Look up a path name component
*/
static __be32
nfsd3_proc_lookup(struct svc_rqst *rqstp)
{
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
dprintk("nfsd: LOOKUP(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_lookup(rqstp, &resp->dirfh,
argp->name, argp->len,
&resp->fh);
return rpc_success;
}
/*
* Check file access
*/
static __be32
nfsd3_proc_access(struct svc_rqst *rqstp)
{
struct nfsd3_accessargs *argp = rqstp->rq_argp;
struct nfsd3_accessres *resp = rqstp->rq_resp;
dprintk("nfsd: ACCESS(3) %s 0x%x\n",
SVCFH_fmt(&argp->fh),
argp->access);
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
resp->status = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
return rpc_success;
}
/*
* Read a symlink.
*/
static __be32
nfsd3_proc_readlink(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_readlinkres *resp = rqstp->rq_resp;
dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh));
/* Read the symlink. */
fh_copy(&resp->fh, &argp->fh);
resp->len = NFS3_MAXPATHLEN;
resp->pages = rqstp->rq_next_page++;
resp->status = nfsd_readlink(rqstp, &resp->fh,
page_address(*resp->pages), &resp->len);
return rpc_success;
}
/*
* Read a portion of a file.
*/
static __be32
nfsd3_proc_read(struct svc_rqst *rqstp)
{
struct nfsd3_readargs *argp = rqstp->rq_argp;
struct nfsd3_readres *resp = rqstp->rq_resp;
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
argp->count = min_t(u32, argp->count, svc_max_payload(rqstp));
argp->count = min_t(u32, argp->count, rqstp->rq_res.buflen);
if (argp->offset > (u64)OFFSET_MAX)
argp->offset = (u64)OFFSET_MAX;
if (argp->offset + argp->count > (u64)OFFSET_MAX)
argp->count = (u64)OFFSET_MAX - argp->offset;
resp->pages = rqstp->rq_next_page;
/* Obtain buffer pointer for payload.
* 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
* + 1 (xdr opaque byte count) = 26
*/
resp->count = argp->count;
svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
&resp->count, &resp->eof);
return rpc_success;
}
/*
* Write data to a file
*/
static __be32
nfsd3_proc_write(struct svc_rqst *rqstp)
{
struct nfsd3_writeargs *argp = rqstp->rq_argp;
struct nfsd3_writeres *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
unsigned int nvecs;
dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n",
SVCFH_fmt(&argp->fh),
argp->len,
(unsigned long long) argp->offset,
argp->stable? " stable" : "");
resp->status = nfserr_fbig;
if (argp->offset > (u64)OFFSET_MAX ||
argp->offset + argp->len > (u64)OFFSET_MAX)
return rpc_success;
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
rqstp->rq_vec, nvecs, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
return rpc_success;
}
/*
* Implement NFSv3's unchecked, guarded, and exclusive CREATE
* semantics for regular files. Except for the created file,
* this operation is stateless on the server.
*
* Upon return, caller must release @fhp and @resfhp.
*/
static __be32
nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct svc_fh *resfhp, struct nfsd3_createargs *argp)
{
struct iattr *iap = &argp->attrs;
struct dentry *parent, *child;
struct nfsd_attrs attrs = {
.na_iattr = iap,
};
__u32 v_mtime, v_atime;
struct inode *inode;
__be32 status;
int host_err;
if (isdotent(argp->name, argp->len))
return nfserr_exist;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (status != nfs_ok)
return status;
parent = fhp->fh_dentry;
inode = d_inode(parent);
host_err = fh_want_write(fhp);
if (host_err)
return nfserrno(host_err);
inode_lock_nested(inode, I_MUTEX_PARENT);
child = lookup_one_len(argp->name, parent, argp->len);
if (IS_ERR(child)) {
status = nfserrno(PTR_ERR(child));
goto out;
}
if (d_really_is_negative(child)) {
status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (status != nfs_ok)
goto out;
}
status = fh_compose(resfhp, fhp->fh_export, child, fhp);
if (status != nfs_ok)
goto out;
v_mtime = 0;
v_atime = 0;
if (argp->createmode == NFS3_CREATE_EXCLUSIVE) {
u32 *verifier = (u32 *)argp->verf;
/*
* Solaris 7 gets confused (bugid 4218508) if these have
* the high bit set, as do xfs filesystems without the
* "bigtime" feature. So just clear the high bits.
*/
v_mtime = verifier[0] & 0x7fffffff;
v_atime = verifier[1] & 0x7fffffff;
}
if (d_really_is_positive(child)) {
status = nfs_ok;
switch (argp->createmode) {
case NFS3_CREATE_UNCHECKED:
if (!d_is_reg(child))
break;
iap->ia_valid &= ATTR_SIZE;
goto set_attr;
case NFS3_CREATE_GUARDED:
status = nfserr_exist;
break;
case NFS3_CREATE_EXCLUSIVE:
if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
d_inode(child)->i_atime.tv_sec == v_atime &&
d_inode(child)->i_size == 0) {
break;
}
status = nfserr_exist;
}
goto out;
}
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
status = fh_fill_pre_attrs(fhp);
if (status != nfs_ok)
goto out;
host_err = vfs_create(&nop_mnt_idmap, inode, child, iap->ia_mode, true);
if (host_err < 0) {
status = nfserrno(host_err);
goto out;
}
fh_fill_post_attrs(fhp);
/* A newly created file already has a file size of zero. */
if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
iap->ia_valid &= ~ATTR_SIZE;
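/*
 * For exclusive creates, stash the client's verifier in the new
 * file's timestamps; a retransmitted CREATE is then recognized by
 * the verifier comparison on the existing child above.
 */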
if (argp->createmode == NFS3_CREATE_EXCLUSIVE) {
iap->ia_valid = ATTR_MTIME | ATTR_ATIME |
ATTR_MTIME_SET | ATTR_ATIME_SET;
iap->ia_mtime.tv_sec = v_mtime;
iap->ia_atime.tv_sec = v_atime;
iap->ia_mtime.tv_nsec = 0;
iap->ia_atime.tv_nsec = 0;
}
set_attr:
status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
out:
inode_unlock(inode);
if (child && !IS_ERR(child))
dput(child);
fh_drop_write(fhp);
return status;
}
static __be32
nfsd3_proc_create(struct svc_rqst *rqstp)
{
struct nfsd3_createargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
svc_fh *dirfhp, *newfhp;
dprintk("nfsd: CREATE(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
dirfhp = fh_copy(&resp->dirfh, &argp->fh);
newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd3_create_file(rqstp, dirfhp, newfhp, argp);
return rpc_success;
}
/*
* Make directory. This operation is not idempotent.
*/
static __be32
nfsd3_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd3_createargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
dprintk("nfsd: MKDIR(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, S_IFDIR, 0, &resp->fh);
return rpc_success;
}
static __be32
nfsd3_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd3_symlinkargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
if (argp->tlen == 0) {
resp->status = nfserr_inval;
goto out;
}
if (argp->tlen > NFS3_MAXPATHLEN) {
resp->status = nfserr_nametoolong;
goto out;
}
argp->tname = svc_fill_symlink_pathname(rqstp, &argp->first,
page_address(rqstp->rq_arg.pages[0]),
argp->tlen);
if (IS_ERR(argp->tname)) {
resp->status = nfserrno(PTR_ERR(argp->tname));
goto out;
}
dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n",
SVCFH_fmt(&argp->ffh),
argp->flen, argp->fname,
argp->tlen, argp->tname);
fh_copy(&resp->dirfh, &argp->ffh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_symlink(rqstp, &resp->dirfh, argp->fname,
argp->flen, argp->tname, &attrs, &resp->fh);
kfree(argp->tname);
out:
return rpc_success;
}
/*
* Make socket/fifo/device.
*/
static __be32
nfsd3_proc_mknod(struct svc_rqst *rqstp)
{
struct nfsd3_mknodargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
int type;
dev_t rdev = 0;
dprintk("nfsd: MKNOD(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
if (argp->ftype == NF3CHR || argp->ftype == NF3BLK) {
rdev = MKDEV(argp->major, argp->minor);
if (MAJOR(rdev) != argp->major ||
MINOR(rdev) != argp->minor) {
resp->status = nfserr_inval;
goto out;
}
} else if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO) {
resp->status = nfserr_badtype;
goto out;
}
type = nfs3_ftypes[argp->ftype];
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, type, rdev, &resp->fh);
out:
return rpc_success;
}
/*
* Remove file/fifo/socket etc.
*/
static __be32
nfsd3_proc_remove(struct svc_rqst *rqstp)
{
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
dprintk("nfsd: REMOVE(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
/* Unlink. -S_IFDIR means file must not be a directory */
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
argp->name, argp->len);
return rpc_success;
}
/*
* Remove a directory
*/
static __be32
nfsd3_proc_rmdir(struct svc_rqst *rqstp)
{
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
dprintk("nfsd: RMDIR(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
argp->len,
argp->name);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
return rpc_success;
}
static __be32
nfsd3_proc_rename(struct svc_rqst *rqstp)
{
struct nfsd3_renameargs *argp = rqstp->rq_argp;
struct nfsd3_renameres *resp = rqstp->rq_resp;
dprintk("nfsd: RENAME(3) %s %.*s ->\n",
SVCFH_fmt(&argp->ffh),
argp->flen,
argp->fname);
dprintk("nfsd: -> %s %.*s\n",
SVCFH_fmt(&argp->tfh),
argp->tlen,
argp->tname);
fh_copy(&resp->ffh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
&resp->tfh, argp->tname, argp->tlen);
return rpc_success;
}
static __be32
nfsd3_proc_link(struct svc_rqst *rqstp)
{
struct nfsd3_linkargs *argp = rqstp->rq_argp;
struct nfsd3_linkres *resp = rqstp->rq_resp;
dprintk("nfsd: LINK(3) %s ->\n",
SVCFH_fmt(&argp->ffh));
dprintk("nfsd: -> %s %.*s\n",
SVCFH_fmt(&argp->tfh),
argp->tlen,
argp->tname);
fh_copy(&resp->fh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
&resp->fh);
return rpc_success;
}
static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd3_readdirres *resp,
u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
unsigned int sendbuf = min_t(unsigned int, rqstp->rq_res.buflen,
svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
buf->buflen = clamp(count, (u32)(XDR_UNIT * 2), sendbuf);
buf->buflen -= XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
xdr_init_encode_pages(xdr, buf, buf->pages, NULL);
}
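/*
 * Sizing note for the helper above: buflen starts as the client's
 * READDIR count, clamped between 2 XDR words and the available send
 * buffer space, then loses 2 XDR words reserved for the final
 * "no more entries" marker and the eof flag; enough pages to back
 * that many bytes are then claimed from rq_next_page.
 */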
/*
* Read a portion of a directory.
*/
static __be32
nfsd3_proc_readdir(struct svc_rqst *rqstp)
{
struct nfsd3_readdirargs *argp = rqstp->rq_argp;
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
dprintk("nfsd: READDIR(3) %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, (u32) argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
fh_copy(&resp->fh, &argp->fh);
resp->common.err = nfs_ok;
resp->cookie_offset = 0;
resp->rqstp = rqstp;
offset = argp->cookie;
resp->status = nfsd_readdir(rqstp, &resp->fh, &offset,
&resp->common, nfs3svc_encode_entry3);
memcpy(resp->verf, argp->verf, 8);
nfs3svc_encode_cookie3(resp, offset);
/* Recycle only pages that were part of the reply */
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
return rpc_success;
}
/*
* Read a portion of a directory, including file handles and attrs.
* For now, we choose to ignore the dircount parameter.
*/
static __be32
nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
{
struct nfsd3_readdirargs *argp = rqstp->rq_argp;
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, (u32) argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
fh_copy(&resp->fh, &argp->fh);
resp->common.err = nfs_ok;
resp->cookie_offset = 0;
resp->rqstp = rqstp;
offset = argp->cookie;
resp->status = fh_verify(rqstp, &resp->fh, S_IFDIR, NFSD_MAY_NOP);
if (resp->status != nfs_ok)
goto out;
if (resp->fh.fh_export->ex_flags & NFSEXP_NOREADDIRPLUS) {
resp->status = nfserr_notsupp;
goto out;
}
resp->status = nfsd_readdir(rqstp, &resp->fh, &offset,
&resp->common, nfs3svc_encode_entryplus3);
memcpy(resp->verf, argp->verf, 8);
nfs3svc_encode_cookie3(resp, offset);
/* Recycle only pages that were part of the reply */
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
out:
return rpc_success;
}
/*
* Get file system stats
*/
static __be32
nfsd3_proc_fsstat(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_fsstatres *resp = rqstp->rq_resp;
dprintk("nfsd: FSSTAT(3) %s\n",
SVCFH_fmt(&argp->fh));
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0);
fh_put(&argp->fh);
return rpc_success;
}
/*
* Get file system info
*/
static __be32
nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_fsinfores *resp = rqstp->rq_resp;
u32 max_blocksize = svc_max_payload(rqstp);
dprintk("nfsd: FSINFO(3) %s\n",
SVCFH_fmt(&argp->fh));
resp->f_rtmax = max_blocksize;
resp->f_rtpref = max_blocksize;
resp->f_rtmult = PAGE_SIZE;
resp->f_wtmax = max_blocksize;
resp->f_wtpref = max_blocksize;
resp->f_wtmult = PAGE_SIZE;
resp->f_dtpref = max_blocksize;
resp->f_maxfilesize = ~(u32) 0;
resp->f_properties = NFS3_FSF_DEFAULT;
resp->status = fh_verify(rqstp, &argp->fh, 0,
NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT);
/* Check special features of the file system. May request
* different read/write sizes for file systems known to have
* problems with large blocks */
if (resp->status == nfs_ok) {
struct super_block *sb = argp->fh.fh_dentry->d_sb;
		/* Note that we don't special-case remote fs's here */
if (sb->s_magic == MSDOS_SUPER_MAGIC) {
resp->f_properties = NFS3_FSF_BILLYBOY;
}
resp->f_maxfilesize = sb->s_maxbytes;
}
fh_put(&argp->fh);
return rpc_success;
}
/*
* Get pathconf info for the specified file
*/
static __be32
nfsd3_proc_pathconf(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_pathconfres *resp = rqstp->rq_resp;
dprintk("nfsd: PATHCONF(3) %s\n",
SVCFH_fmt(&argp->fh));
/* Set default pathconf */
resp->p_link_max = 255; /* at least */
resp->p_name_max = 255; /* at least */
resp->p_no_trunc = 0;
resp->p_chown_restricted = 1;
resp->p_case_insensitive = 0;
resp->p_case_preserving = 1;
resp->status = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP);
if (resp->status == nfs_ok) {
struct super_block *sb = argp->fh.fh_dentry->d_sb;
		/* Note that we don't special-case remote fs's here */
switch (sb->s_magic) {
case EXT2_SUPER_MAGIC:
resp->p_link_max = EXT2_LINK_MAX;
resp->p_name_max = EXT2_NAME_LEN;
break;
case MSDOS_SUPER_MAGIC:
resp->p_case_insensitive = 1;
resp->p_case_preserving = 0;
break;
}
}
fh_put(&argp->fh);
return rpc_success;
}
/*
* Commit a file (range) to stable storage.
*/
static __be32
nfsd3_proc_commit(struct svc_rqst *rqstp)
{
struct nfsd3_commitargs *argp = rqstp->rq_argp;
struct nfsd3_commitres *resp = rqstp->rq_resp;
struct nfsd_file *nf;
dprintk("nfsd: COMMIT(3) %s %u@%Lu\n",
SVCFH_fmt(&argp->fh),
argp->count,
(unsigned long long) argp->offset);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_file_acquire_gc(rqstp, &resp->fh, NFSD_MAY_WRITE |
NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (resp->status)
goto out;
resp->status = nfsd_commit(rqstp, &resp->fh, nf, argp->offset,
argp->count, resp->verf);
nfsd_file_put(nf);
out:
return rpc_success;
}
/*
* NFSv3 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
#define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat
#define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat
#define nfsd3_mkdirargs nfsd3_createargs
#define nfsd3_readdirplusargs nfsd3_readdirargs
#define nfsd3_fhandleargs nfsd_fhandle
#define nfsd3_attrstatres nfsd3_attrstat
#define nfsd3_wccstatres nfsd3_attrstat
#define nfsd3_createres nfsd3_diropres
#define ST 1 /* status */
#define FH 17 /* filehandle with length */
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define WC (7+pAT) /* WCC attributes */
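/*
 * The estimates above are counts of 32-bit XDR words; they feed the
 * pc_xdrressize fields below, which give the RPC layer a worst-case
 * reply size estimate for each procedure.
 */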
static const struct svc_procedure nfsd_procedures3[22] = {
[NFS3PROC_NULL] = {
.pc_func = nfsd3_proc_null,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
.pc_name = "NULL",
},
[NFS3PROC_GETATTR] = {
.pc_func = nfsd3_proc_getattr,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_getattrres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_attrstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "GETATTR",
},
[NFS3PROC_SETATTR] = {
.pc_func = nfsd3_proc_setattr,
.pc_decode = nfs3svc_decode_sattrargs,
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_sattrargs),
.pc_argzero = sizeof(struct nfsd3_sattrargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
.pc_name = "SETATTR",
},
[NFS3PROC_LOOKUP] = {
.pc_func = nfsd3_proc_lookup,
.pc_decode = nfs3svc_decode_diropargs,
.pc_encode = nfs3svc_encode_lookupres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+pAT+pAT,
.pc_name = "LOOKUP",
},
[NFS3PROC_ACCESS] = {
.pc_func = nfsd3_proc_access,
.pc_decode = nfs3svc_decode_accessargs,
.pc_encode = nfs3svc_encode_accessres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_accessargs),
.pc_argzero = sizeof(struct nfsd3_accessargs),
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1,
.pc_name = "ACCESS",
},
[NFS3PROC_READLINK] = {
.pc_func = nfsd3_proc_readlink,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_readlinkres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
.pc_name = "READLINK",
},
[NFS3PROC_READ] = {
.pc_func = nfsd3_proc_read,
.pc_decode = nfs3svc_decode_readargs,
.pc_encode = nfs3svc_encode_readres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readargs),
.pc_argzero = sizeof(struct nfsd3_readargs),
.pc_ressize = sizeof(struct nfsd3_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
.pc_name = "READ",
},
[NFS3PROC_WRITE] = {
.pc_func = nfsd3_proc_write,
.pc_decode = nfs3svc_decode_writeargs,
.pc_encode = nfs3svc_encode_writeres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_writeargs),
.pc_argzero = sizeof(struct nfsd3_writeargs),
.pc_ressize = sizeof(struct nfsd3_writeres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+4,
.pc_name = "WRITE",
},
[NFS3PROC_CREATE] = {
.pc_func = nfsd3_proc_create,
.pc_decode = nfs3svc_decode_createargs,
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_createargs),
.pc_argzero = sizeof(struct nfsd3_createargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
.pc_name = "CREATE",
},
[NFS3PROC_MKDIR] = {
.pc_func = nfsd3_proc_mkdir,
.pc_decode = nfs3svc_decode_mkdirargs,
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mkdirargs),
.pc_argzero = sizeof(struct nfsd3_mkdirargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
.pc_name = "MKDIR",
},
[NFS3PROC_SYMLINK] = {
.pc_func = nfsd3_proc_symlink,
.pc_decode = nfs3svc_decode_symlinkargs,
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_symlinkargs),
.pc_argzero = sizeof(struct nfsd3_symlinkargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
.pc_name = "SYMLINK",
},
[NFS3PROC_MKNOD] = {
.pc_func = nfsd3_proc_mknod,
.pc_decode = nfs3svc_decode_mknodargs,
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mknodargs),
.pc_argzero = sizeof(struct nfsd3_mknodargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
.pc_name = "MKNOD",
},
[NFS3PROC_REMOVE] = {
.pc_func = nfsd3_proc_remove,
.pc_decode = nfs3svc_decode_diropargs,
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
.pc_name = "REMOVE",
},
[NFS3PROC_RMDIR] = {
.pc_func = nfsd3_proc_rmdir,
.pc_decode = nfs3svc_decode_diropargs,
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
.pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
.pc_name = "RMDIR",
},
[NFS3PROC_RENAME] = {
.pc_func = nfsd3_proc_rename,
.pc_decode = nfs3svc_decode_renameargs,
.pc_encode = nfs3svc_encode_renameres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_renameargs),
.pc_argzero = sizeof(struct nfsd3_renameargs),
.pc_ressize = sizeof(struct nfsd3_renameres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+WC,
.pc_name = "RENAME",
},
[NFS3PROC_LINK] = {
.pc_func = nfsd3_proc_link,
.pc_decode = nfs3svc_decode_linkargs,
.pc_encode = nfs3svc_encode_linkres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_linkargs),
.pc_argzero = sizeof(struct nfsd3_linkargs),
.pc_ressize = sizeof(struct nfsd3_linkres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+pAT+WC,
.pc_name = "LINK",
},
[NFS3PROC_READDIR] = {
.pc_func = nfsd3_proc_readdir,
.pc_decode = nfs3svc_decode_readdirargs,
.pc_encode = nfs3svc_encode_readdirres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirargs),
.pc_argzero = sizeof(struct nfsd3_readdirargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIR",
},
[NFS3PROC_READDIRPLUS] = {
.pc_func = nfsd3_proc_readdirplus,
.pc_decode = nfs3svc_decode_readdirplusargs,
.pc_encode = nfs3svc_encode_readdirres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirplusargs),
.pc_argzero = sizeof(struct nfsd3_readdirplusargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIRPLUS",
},
[NFS3PROC_FSSTAT] = {
.pc_func = nfsd3_proc_fsstat,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_fsstatres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+2*6+1,
.pc_name = "FSSTAT",
},
[NFS3PROC_FSINFO] = {
.pc_func = nfsd3_proc_fsinfo,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_fsinfores,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsinfores),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+12,
.pc_name = "FSINFO",
},
[NFS3PROC_PATHCONF] = {
.pc_func = nfsd3_proc_pathconf,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_pathconfres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
.pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_pathconfres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+6,
.pc_name = "PATHCONF",
},
[NFS3PROC_COMMIT] = {
.pc_func = nfsd3_proc_commit,
.pc_decode = nfs3svc_decode_commitargs,
.pc_encode = nfs3svc_encode_commitres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_commitargs),
.pc_argzero = sizeof(struct nfsd3_commitargs),
.pc_ressize = sizeof(struct nfsd3_commitres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+WC+2,
.pc_name = "COMMIT",
},
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count3[ARRAY_SIZE(nfsd_procedures3)]);
const struct svc_version nfsd_version3 = {
.vs_vers = 3,
.vs_nproc = ARRAY_SIZE(nfsd_procedures3),
.vs_proc = nfsd_procedures3,
.vs_dispatch = nfsd_dispatch,
.vs_count = nfsd_count3,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
};
| linux-master | fs/nfsd/nfs3proc.c |
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | fs/nfsd/trace.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 1995, 1996 Olaf Kirch <[email protected]> */
#include <linux/sched.h>
#include "nfsd.h"
#include "auth.h"
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
return f->flags;
}
return exp->ex_flags;
}
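/*
 * nfsd_setuser() below overrides the task's credentials according to
 * the export's squashing options: ALLSQUASH maps everyone to the
 * anonymous uid/gid and drops all supplementary groups, ROOTSQUASH
 * remaps only root (including GID 0 entries in the group list), and
 * otherwise the client-supplied credential is used unchanged.
 */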
int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
{
struct group_info *rqgi;
struct group_info *gi;
struct cred *new;
int i;
int flags = nfsexp_flags(rqstp, exp);
validate_process_creds();
/* discard any old override before preparing the new set */
revert_creds(get_cred(current_real_cred()));
new = prepare_creds();
if (!new)
return -ENOMEM;
new->fsuid = rqstp->rq_cred.cr_uid;
new->fsgid = rqstp->rq_cred.cr_gid;
rqgi = rqstp->rq_cred.cr_group_info;
if (flags & NFSEXP_ALLSQUASH) {
new->fsuid = exp->ex_anon_uid;
new->fsgid = exp->ex_anon_gid;
gi = groups_alloc(0);
if (!gi)
goto oom;
} else if (flags & NFSEXP_ROOTSQUASH) {
if (uid_eq(new->fsuid, GLOBAL_ROOT_UID))
new->fsuid = exp->ex_anon_uid;
if (gid_eq(new->fsgid, GLOBAL_ROOT_GID))
new->fsgid = exp->ex_anon_gid;
gi = groups_alloc(rqgi->ngroups);
if (!gi)
goto oom;
for (i = 0; i < rqgi->ngroups; i++) {
if (gid_eq(GLOBAL_ROOT_GID, rqgi->gid[i]))
gi->gid[i] = exp->ex_anon_gid;
else
gi->gid[i] = rqgi->gid[i];
}
/* Each thread allocates its own gi, no race */
groups_sort(gi);
} else {
gi = get_group_info(rqgi);
}
if (uid_eq(new->fsuid, INVALID_UID))
new->fsuid = exp->ex_anon_uid;
if (gid_eq(new->fsgid, INVALID_GID))
new->fsgid = exp->ex_anon_gid;
set_groups(new, gi);
put_group_info(gi);
if (!uid_eq(new->fsuid, GLOBAL_ROOT_UID))
new->cap_effective = cap_drop_nfsd_set(new->cap_effective);
else
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
validate_process_creds();
put_cred(override_creds(new));
put_cred(new);
validate_process_creds();
return 0;
oom:
abort_creds(new);
return -ENOMEM;
}
| linux-master | fs/nfsd/auth.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XDR support for nfsd
*
* Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
*/
#include "vfs.h"
#include "xdr.h"
#include "auth.h"
/*
* Mapping of S_IF* types to NFS file types
*/
static const u32 nfs_ftypes[] = {
NFNON, NFCHR, NFCHR, NFBAD,
NFDIR, NFBAD, NFBLK, NFBAD,
NFREG, NFBAD, NFLNK, NFBAD,
NFSOCK, NFBAD, NFLNK, NFBAD,
};
/*
* Basic NFSv2 data types (RFC 1094 Section 2.3)
*/
/**
* svcxdr_encode_stat - Encode an NFSv2 status code
* @xdr: XDR stream
* @status: status value to encode
*
* Return values:
* %false: Send buffer space was exhausted
* %true: Success
*/
bool
svcxdr_encode_stat(struct xdr_stream *xdr, __be32 status)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(status));
if (!p)
return false;
*p = status;
return true;
}
/**
* svcxdr_decode_fhandle - Decode an NFSv2 file handle
* @xdr: XDR stream positioned at an encoded NFSv2 FH
* @fhp: OUT: filled-in server file handle
*
* Return values:
* %false: The encoded file handle was not valid
* %true: @fhp has been initialized
*/
bool
svcxdr_decode_fhandle(struct xdr_stream *xdr, struct svc_fh *fhp)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS_FHSIZE);
if (!p)
return false;
fh_init(fhp, NFS_FHSIZE);
memcpy(&fhp->fh_handle.fh_raw, p, NFS_FHSIZE);
fhp->fh_handle.fh_size = NFS_FHSIZE;
return true;
}
static bool
svcxdr_encode_fhandle(struct xdr_stream *xdr, const struct svc_fh *fhp)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS_FHSIZE);
if (!p)
return false;
memcpy(p, &fhp->fh_handle.fh_raw, NFS_FHSIZE);
return true;
}
static __be32 *
encode_timeval(__be32 *p, const struct timespec64 *time)
{
*p++ = cpu_to_be32((u32)time->tv_sec);
if (time->tv_nsec)
*p++ = cpu_to_be32(time->tv_nsec / NSEC_PER_USEC);
else
*p++ = xdr_zero;
return p;
}
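/*
 * An NFSv2 timestamp is two 32-bit words: seconds and microseconds.
 * encode_timeval() above truncates the kernel's 64-bit seconds and
 * converts nanoseconds down to microseconds to fit that format.
 */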
static bool
svcxdr_decode_filename(struct xdr_stream *xdr, char **name, unsigned int *len)
{
u32 size, i;
__be32 *p;
char *c;
if (xdr_stream_decode_u32(xdr, &size) < 0)
return false;
if (size == 0 || size > NFS_MAXNAMLEN)
return false;
p = xdr_inline_decode(xdr, size);
if (!p)
return false;
*len = size;
*name = (char *)p;
for (i = 0, c = *name; i < size; i++, c++)
if (*c == '\0' || *c == '/')
return false;
return true;
}
static bool
svcxdr_decode_diropargs(struct xdr_stream *xdr, struct svc_fh *fhp,
char **name, unsigned int *len)
{
return svcxdr_decode_fhandle(xdr, fhp) &&
svcxdr_decode_filename(xdr, name, len);
}
static bool
svcxdr_decode_sattr(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct iattr *iap)
{
u32 tmp1, tmp2;
__be32 *p;
p = xdr_inline_decode(xdr, XDR_UNIT * 8);
if (!p)
return false;
iap->ia_valid = 0;
/*
* Some Sun clients put 0xffff in the mode field when they
* mean 0xffffffff.
*/
tmp1 = be32_to_cpup(p++);
if (tmp1 != (u32)-1 && tmp1 != 0xffff) {
iap->ia_valid |= ATTR_MODE;
iap->ia_mode = tmp1;
}
tmp1 = be32_to_cpup(p++);
if (tmp1 != (u32)-1) {
iap->ia_uid = make_kuid(nfsd_user_namespace(rqstp), tmp1);
if (uid_valid(iap->ia_uid))
iap->ia_valid |= ATTR_UID;
}
tmp1 = be32_to_cpup(p++);
if (tmp1 != (u32)-1) {
iap->ia_gid = make_kgid(nfsd_user_namespace(rqstp), tmp1);
if (gid_valid(iap->ia_gid))
iap->ia_valid |= ATTR_GID;
}
tmp1 = be32_to_cpup(p++);
if (tmp1 != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = tmp1;
}
tmp1 = be32_to_cpup(p++);
tmp2 = be32_to_cpup(p++);
if (tmp1 != (u32)-1 && tmp2 != (u32)-1) {
iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
iap->ia_atime.tv_sec = tmp1;
iap->ia_atime.tv_nsec = tmp2 * NSEC_PER_USEC;
}
tmp1 = be32_to_cpup(p++);
tmp2 = be32_to_cpup(p++);
if (tmp1 != (u32)-1 && tmp2 != (u32)-1) {
iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
iap->ia_mtime.tv_sec = tmp1;
iap->ia_mtime.tv_nsec = tmp2 * NSEC_PER_USEC;
/*
* Passing the invalid value useconds=1000000 for mtime
* is a Sun convention for "set both mtime and atime to
* current server time". It's needed to make permissions
* checks for the "touch" program across v2 mounts to
* Solaris and Irix boxes work correctly. See description of
* sattr in section 6.1 of "NFS Illustrated" by
* Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
*/
if (tmp2 == 1000000)
iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET);
}
return true;
}
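/*
 * In the decoder above, an all-ones field is the NFSv2 convention for
 * "leave this attribute unchanged", hence the (u32)-1 comparisons
 * guarding each ATTR_* bit.
 */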
/**
* svcxdr_encode_fattr - Encode NFSv2 file attributes
* @rqstp: Context of a completed RPC transaction
* @xdr: XDR stream
* @fhp: File handle to encode
* @stat: Attributes to encode
*
* Return values:
* %false: Send buffer space was exhausted
* %true: Success
*/
bool
svcxdr_encode_fattr(struct svc_rqst *rqstp, struct xdr_stream *xdr,
const struct svc_fh *fhp, const struct kstat *stat)
{
struct user_namespace *userns = nfsd_user_namespace(rqstp);
struct dentry *dentry = fhp->fh_dentry;
int type = stat->mode & S_IFMT;
struct timespec64 time;
__be32 *p;
u32 fsid;
p = xdr_reserve_space(xdr, XDR_UNIT * 17);
if (!p)
return false;
*p++ = cpu_to_be32(nfs_ftypes[type >> 12]);
*p++ = cpu_to_be32((u32)stat->mode);
*p++ = cpu_to_be32((u32)stat->nlink);
*p++ = cpu_to_be32((u32)from_kuid_munged(userns, stat->uid));
*p++ = cpu_to_be32((u32)from_kgid_munged(userns, stat->gid));
if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN)
*p++ = cpu_to_be32(NFS_MAXPATHLEN);
else
*p++ = cpu_to_be32((u32) stat->size);
*p++ = cpu_to_be32((u32) stat->blksize);
if (S_ISCHR(type) || S_ISBLK(type))
*p++ = cpu_to_be32(new_encode_dev(stat->rdev));
else
*p++ = cpu_to_be32(0xffffffff);
*p++ = cpu_to_be32((u32)stat->blocks);
switch (fsid_source(fhp)) {
case FSIDSOURCE_FSID:
fsid = (u32)fhp->fh_export->ex_fsid;
break;
case FSIDSOURCE_UUID:
fsid = ((u32 *)fhp->fh_export->ex_uuid)[0];
fsid ^= ((u32 *)fhp->fh_export->ex_uuid)[1];
fsid ^= ((u32 *)fhp->fh_export->ex_uuid)[2];
fsid ^= ((u32 *)fhp->fh_export->ex_uuid)[3];
break;
default:
fsid = new_encode_dev(stat->dev);
break;
}
*p++ = cpu_to_be32(fsid);
*p++ = cpu_to_be32((u32)stat->ino);
p = encode_timeval(p, &stat->atime);
time = stat->mtime;
lease_get_mtime(d_inode(dentry), &time);
p = encode_timeval(p, &time);
encode_timeval(p, &stat->ctime);
return true;
}
/*
* XDR decode functions
*/
bool
nfssvc_decode_fhandleargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_fhandle *args = rqstp->rq_argp;
return svcxdr_decode_fhandle(xdr, &args->fh);
}
bool
nfssvc_decode_sattrargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_sattrargs *args = rqstp->rq_argp;
return svcxdr_decode_fhandle(xdr, &args->fh) &&
svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
bool
nfssvc_decode_diropargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_diropargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs(xdr, &args->fh, &args->name, &args->len);
}
bool
nfssvc_decode_readargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_readargs *args = rqstp->rq_argp;
u32 totalcount;
if (!svcxdr_decode_fhandle(xdr, &args->fh))
return false;
if (xdr_stream_decode_u32(xdr, &args->offset) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
/* totalcount is ignored */
if (xdr_stream_decode_u32(xdr, &totalcount) < 0)
return false;
return true;
}
bool
nfssvc_decode_writeargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_writeargs *args = rqstp->rq_argp;
u32 beginoffset, totalcount;
if (!svcxdr_decode_fhandle(xdr, &args->fh))
return false;
/* beginoffset is ignored */
if (xdr_stream_decode_u32(xdr, &beginoffset) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->offset) < 0)
return false;
/* totalcount is ignored */
if (xdr_stream_decode_u32(xdr, &totalcount) < 0)
return false;
/* opaque data */
if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return false;
if (args->len > NFSSVC_MAXBLKSIZE_V2)
return false;
return xdr_stream_subsegment(xdr, &args->payload, args->len);
}
bool
nfssvc_decode_createargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_createargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs(xdr, &args->fh,
&args->name, &args->len) &&
svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
bool
nfssvc_decode_renameargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_renameargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs(xdr, &args->ffh,
&args->fname, &args->flen) &&
svcxdr_decode_diropargs(xdr, &args->tfh,
&args->tname, &args->tlen);
}
bool
nfssvc_decode_linkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_linkargs *args = rqstp->rq_argp;
return svcxdr_decode_fhandle(xdr, &args->ffh) &&
svcxdr_decode_diropargs(xdr, &args->tfh,
&args->tname, &args->tlen);
}
bool
nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_symlinkargs *args = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
if (!svcxdr_decode_diropargs(xdr, &args->ffh, &args->fname, &args->flen))
return false;
if (xdr_stream_decode_u32(xdr, &args->tlen) < 0)
return false;
if (args->tlen == 0)
return false;
args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
args->first.iov_base = xdr_inline_decode(xdr, args->tlen);
if (!args->first.iov_base)
return false;
return svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
bool
nfssvc_decode_readdirargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_readdirargs *args = rqstp->rq_argp;
if (!svcxdr_decode_fhandle(xdr, &args->fh))
return false;
if (xdr_stream_decode_u32(xdr, &args->cookie) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
return true;
}
/*
* XDR encode functions
*/
bool
nfssvc_encode_statres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_stat *resp = rqstp->rq_resp;
return svcxdr_encode_stat(xdr, resp->status);
}
bool
nfssvc_encode_attrstatres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_attrstat *resp = rqstp->rq_resp;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
return false;
break;
}
return true;
}
bool
nfssvc_encode_diropres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_diropres *resp = rqstp->rq_resp;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fhandle(xdr, &resp->fh))
return false;
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
return false;
break;
}
return true;
}
bool
nfssvc_encode_readlinkres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_readlinkres *resp = rqstp->rq_resp;
struct kvec *head = rqstp->rq_res.head;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (xdr_stream_encode_u32(xdr, resp->len) < 0)
return false;
svcxdr_encode_opaque_pages(rqstp, xdr, &resp->page, 0,
resp->len);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->len) < 0)
return false;
break;
}
return true;
}
bool
nfssvc_encode_readres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_readres *resp = rqstp->rq_resp;
struct kvec *head = rqstp->rq_res.head;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages,
rqstp->rq_res.page_base,
resp->count);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->count) < 0)
return false;
break;
}
return true;
}
bool
nfssvc_encode_readdirres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_readdirres *resp = rqstp->rq_resp;
struct xdr_buf *dirlist = &resp->dirlist;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
svcxdr_encode_opaque_pages(rqstp, xdr, dirlist->pages, 0,
dirlist->len);
/* no more entries */
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
if (xdr_stream_encode_bool(xdr, resp->common.err == nfserr_eof) < 0)
return false;
break;
}
return true;
}
bool
nfssvc_encode_statfsres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_statfsres *resp = rqstp->rq_resp;
struct kstatfs *stat = &resp->stats;
__be32 *p;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
p = xdr_reserve_space(xdr, XDR_UNIT * 5);
if (!p)
return false;
*p++ = cpu_to_be32(NFSSVC_MAXBLKSIZE_V2);
*p++ = cpu_to_be32(stat->f_bsize);
*p++ = cpu_to_be32(stat->f_blocks);
*p++ = cpu_to_be32(stat->f_bfree);
*p = cpu_to_be32(stat->f_bavail);
break;
}
return true;
}
/**
* nfssvc_encode_nfscookie - Encode a directory offset cookie
* @resp: readdir result context
* @offset: offset cookie to encode
*
* The buffer space for the offset cookie has already been reserved
* by svcxdr_encode_entry_common().
*/
void nfssvc_encode_nfscookie(struct nfsd_readdirres *resp, u32 offset)
{
__be32 cookie = cpu_to_be32(offset);
if (!resp->cookie_offset)
return;
write_bytes_to_xdr_buf(&resp->dirlist, resp->cookie_offset, &cookie,
sizeof(cookie));
resp->cookie_offset = 0;
}
static bool
svcxdr_encode_entry_common(struct nfsd_readdirres *resp, const char *name,
int namlen, loff_t offset, u64 ino)
{
struct xdr_buf *dirlist = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
/* fileid */
if (xdr_stream_encode_u32(xdr, (u32)ino) < 0)
return false;
/* name */
if (xdr_stream_encode_opaque(xdr, name, min(namlen, NFS2_MAXNAMLEN)) < 0)
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
if (xdr_stream_encode_u32(xdr, ~0U) < 0)
return false;
return true;
}
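/*
 * Cookie back-patching: the entry encoder above writes a ~0U
 * placeholder for the cookie and records its position in
 * resp->cookie_offset. Once the following entry (or the end of the
 * listing) supplies the real directory offset,
 * nfssvc_encode_nfscookie() overwrites the placeholder in place.
 */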
/**
* nfssvc_encode_entry - encode one NFSv2 READDIR entry
* @data: directory context
* @name: name of the object to be encoded
* @namlen: length of that name, in bytes
* @offset: the offset of the previous entry
* @ino: the fileid of this entry
* @d_type: unused
*
* Return values:
* %0: Entry was successfully encoded.
 * %-EINVAL: An encoding problem occurred; secondary status code in resp->common.err
*
* On exit, the following fields are updated:
* - resp->xdr
* - resp->common.err
* - resp->cookie_offset
*/
int nfssvc_encode_entry(void *data, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = data;
struct nfsd_readdirres *resp = container_of(ccd,
struct nfsd_readdirres,
common);
unsigned int starting_length = resp->dirlist.len;
/* The offset cookie for the previous entry */
nfssvc_encode_nfscookie(resp, offset);
if (!svcxdr_encode_entry_common(resp, name, namlen, offset, ino))
goto out_toosmall;
xdr_commit_encode(&resp->xdr);
resp->common.err = nfs_ok;
return 0;
out_toosmall:
resp->cookie_offset = 0;
resp->common.err = nfserr_toosmall;
resp->dirlist.len = starting_length;
return -EINVAL;
}
/*
* XDR release functions
*/
void nfssvc_release_attrstat(struct svc_rqst *rqstp)
{
struct nfsd_attrstat *resp = rqstp->rq_resp;
fh_put(&resp->fh);
}
void nfssvc_release_diropres(struct svc_rqst *rqstp)
{
struct nfsd_diropres *resp = rqstp->rq_resp;
fh_put(&resp->fh);
}
void nfssvc_release_readres(struct svc_rqst *rqstp)
{
struct nfsd_readres *resp = rqstp->rq_resp;
fh_put(&resp->fh);
}
| linux-master | fs/nfsd/nfsxdr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NFS server file handle treatment.
*
* Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
* Portions Copyright (C) 1999 G. Allen Morris III <[email protected]>
* Extensive rewrite by Neil Brown <[email protected]> Southern-Spring 1999
* ... and again Southern-Winter 2001 to support export_operations
*/
#include <linux/exportfs.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "nfsd.h"
#include "vfs.h"
#include "auth.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_FH
/*
* our acceptability function.
* if NOSUBTREECHECK, accept anything
* if not, require that we can walk up to exp->ex_dentry
* doing some checks on the 'x' bits
*/
static int nfsd_acceptable(void *expv, struct dentry *dentry)
{
struct svc_export *exp = expv;
int rv;
struct dentry *tdentry;
struct dentry *parent;
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
return 1;
tdentry = dget(dentry);
while (tdentry != exp->ex_path.dentry && !IS_ROOT(tdentry)) {
/* make sure parents give x permission to user */
int err;
parent = dget_parent(tdentry);
err = inode_permission(&nop_mnt_idmap,
d_inode(parent), MAY_EXEC);
if (err < 0) {
dput(parent);
break;
}
dput(tdentry);
tdentry = parent;
}
if (tdentry != exp->ex_path.dentry)
dprintk("nfsd_acceptable failed at %p %pd\n", tdentry, tdentry);
rv = (tdentry == exp->ex_path.dentry);
dput(tdentry);
return rv;
}
/* Type check. The correct error return for type mismatches does not seem to be
* generally agreed upon. SunOS seems to use EISDIR if file isn't S_IFREG; a
* comment in the NFSv3 spec says this is incorrect (implementation notes for
* the write call).
*/
static inline __be32
nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
umode_t requested)
{
umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
if (requested == 0) /* the caller doesn't care */
return nfs_ok;
if (mode == requested) {
if (mode == S_IFDIR && !d_can_lookup(dentry)) {
WARN_ON_ONCE(1);
return nfserr_notdir;
}
return nfs_ok;
}
/*
* v4 has an error more specific than err_notdir which we should
* return in preference to err_notdir:
*/
if (rqstp->rq_vers == 4 && mode == S_IFLNK)
return nfserr_symlink;
if (requested == S_IFDIR)
return nfserr_notdir;
if (mode == S_IFDIR)
return nfserr_isdir;
return nfserr_inval;
}
static bool nfsd_originating_port_ok(struct svc_rqst *rqstp, int flags)
{
if (flags & NFSEXP_INSECURE_PORT)
return true;
/* We don't require gss requests to use low ports: */
if (rqstp->rq_cred.cr_flavor >= RPC_AUTH_GSS)
return true;
return test_bit(RQ_SECURE, &rqstp->rq_flags);
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
struct svc_export *exp)
{
int flags = nfsexp_flags(rqstp, exp);
/* Check if the request originated from a secure port. */
if (!nfsd_originating_port_ok(rqstp, flags)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
return nfserr_perm;
}
/* Set user creds for this exportpoint */
return nfserrno(nfsd_setuser(rqstp, exp));
}
static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
struct dentry *dentry, struct svc_export *exp)
{
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return nfs_ok;
/*
* v2/v3 clients have no need for the V4ROOT export--they use
	 * the mount protocol instead; also, further V4ROOT checks may be
* in v4-specific code, in which case v2/v3 clients could bypass
* them.
*/
if (!nfsd_v4client(rqstp))
return nfserr_stale;
/*
* We're exposing only the directories and symlinks that have to be
* traversed on the way to real exports:
*/
if (unlikely(!d_is_dir(dentry) &&
!d_is_symlink(dentry)))
return nfserr_stale;
/*
* A pseudoroot export gives permission to access only one
* single directory; the kernel has to make another upcall
* before granting access to anything else under it:
*/
if (unlikely(dentry != exp->ex_path.dentry))
return nfserr_stale;
return nfs_ok;
}
/*
* Use the given filehandle to look up the corresponding export and
* dentry. On success, the results are used to set fh_export and
* fh_dentry.
*/
static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
struct fid *fid = NULL;
struct svc_export *exp;
struct dentry *dentry;
int fileid_type;
int data_left = fh->fh_size/4;
int len;
__be32 error;
error = nfserr_stale;
if (rqstp->rq_vers > 2)
error = nfserr_badhandle;
if (rqstp->rq_vers == 4 && fh->fh_size == 0)
return nfserr_nofilehandle;
if (fh->fh_version != 1)
return error;
if (--data_left < 0)
return error;
if (fh->fh_auth_type != 0)
return error;
len = key_len(fh->fh_fsid_type) / 4;
if (len == 0)
return error;
if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
/* deprecated, convert to type 3 */
len = key_len(FSID_ENCODE_DEV)/4;
fh->fh_fsid_type = FSID_ENCODE_DEV;
/*
* struct knfsd_fh uses host-endian fields, which are
* sometimes used to hold net-endian values. This
* confuses sparse, so we must use __force here to
* keep it from complaining.
*/
fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fh->fh_fsid[0]),
ntohl((__force __be32)fh->fh_fsid[1])));
fh->fh_fsid[1] = fh->fh_fsid[2];
}
data_left -= len;
if (data_left < 0)
return error;
exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_fsid);
fid = (struct fid *)(fh->fh_fsid + len);
error = nfserr_stale;
if (IS_ERR(exp)) {
trace_nfsd_set_fh_dentry_badexport(rqstp, fhp, PTR_ERR(exp));
if (PTR_ERR(exp) == -ENOENT)
return error;
return nfserrno(PTR_ERR(exp));
}
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK) {
/* Elevate privileges so that the lack of 'r' or 'x'
* permission on some parent directory will
* not stop exportfs_decode_fh from being able
* to reconnect a directory into the dentry cache.
* The same problem can affect "SUBTREECHECK" exports,
* but as nfsd_acceptable depends on correct
* access control settings being in effect, we cannot
* fix that case easily.
*/
struct cred *new = prepare_creds();
if (!new) {
error = nfserrno(-ENOMEM);
goto out;
}
new->cap_effective =
cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
put_cred(new);
} else {
error = nfsd_setuser_and_check_port(rqstp, exp);
if (error)
goto out;
}
/*
* Look up the dentry using the NFS file handle.
*/
error = nfserr_stale;
if (rqstp->rq_vers > 2)
error = nfserr_badhandle;
fileid_type = fh->fh_fileid_type;
if (fileid_type == FILEID_ROOT)
dentry = dget(exp->ex_path.dentry);
else {
dentry = exportfs_decode_fh_raw(exp->ex_path.mnt, fid,
data_left, fileid_type,
nfsd_acceptable, exp);
if (IS_ERR_OR_NULL(dentry)) {
trace_nfsd_set_fh_dentry_badhandle(rqstp, fhp,
dentry ? PTR_ERR(dentry) : -ESTALE);
switch (PTR_ERR(dentry)) {
case -ENOMEM:
case -ETIMEDOUT:
break;
default:
dentry = ERR_PTR(-ESTALE);
}
}
}
if (dentry == NULL)
goto out;
if (IS_ERR(dentry)) {
if (PTR_ERR(dentry) != -EINVAL)
error = nfserrno(PTR_ERR(dentry));
goto out;
}
if (d_is_dir(dentry) &&
(dentry->d_flags & DCACHE_DISCONNECTED)) {
printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
dentry);
}
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
switch (rqstp->rq_vers) {
case 4:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
fhp->fh_no_atomic_attr = true;
break;
case 3:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOWCC)
fhp->fh_no_wcc = true;
break;
case 2:
fhp->fh_no_wcc = true;
}
return 0;
out:
exp_put(exp);
return error;
}
/**
* fh_verify - filehandle lookup and access checking
* @rqstp: pointer to current rpc request
* @fhp: filehandle to be verified
* @type: expected type of object pointed to by filehandle
* @access: type of access needed to object
*
* Look up a dentry from the on-the-wire filehandle, check the client's
* access to the export, and set the current task's credentials.
*
* Regardless of success or failure of fh_verify(), fh_put() should be
* called on @fhp when the caller is finished with the filehandle.
*
* fh_verify() may be called multiple times on a given filehandle, for
* example, when processing an NFSv4 compound. The first call will look
* up a dentry using the on-the-wire filehandle. Subsequent calls will
* skip the lookup and just perform the other checks and possibly change
* the current task's credentials.
*
* @type specifies the type of object expected using one of the S_IF*
* constants defined in include/linux/stat.h. The caller may use zero
* to indicate that it doesn't care, or a negative integer to indicate
* that it expects something not of the given type.
*
* @access is formed from the NFSD_MAY_* constants defined in
* fs/nfsd/vfs.h.
*/
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
{
struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
if (!fhp->fh_dentry) {
error = nfsd_set_fh_dentry(rqstp, fhp);
if (error)
goto out;
}
dentry = fhp->fh_dentry;
exp = fhp->fh_export;
trace_nfsd_fh_verify(rqstp, fhp, type, access);
/*
* We still have to do all these permission checks, even when
* fh_dentry is already set:
* - fh_verify may be called multiple times with different
* "access" arguments (e.g. nfsd_proc_create calls
* fh_verify(...,NFSD_MAY_EXEC) first, then later (in
* nfsd_create) calls fh_verify(...,NFSD_MAY_CREATE).
* - in the NFSv4 case, the filehandle may have been filled
* in by fh_compose, and given a dentry, but further
* compound operations performed with that filehandle
* still need permissions checks. In the worst case, a
* mountpoint crossing may have changed the export
* options, and we may now need to use a different uid
* (for example, if different id-squashing options are in
* effect on the new filesystem).
*/
error = check_pseudo_root(rqstp, dentry, exp);
if (error)
goto out;
error = nfsd_setuser_and_check_port(rqstp, exp);
if (error)
goto out;
error = nfsd_mode_check(rqstp, dentry, type);
if (error)
goto out;
/*
* pseudoflavor restrictions are not enforced on NLM,
* which clients virtually always use auth_sys for,
* even while using RPCSEC_GSS for NFS.
*/
if (access & NFSD_MAY_LOCK || access & NFSD_MAY_BYPASS_GSS)
goto skip_pseudoflavor_check;
/*
* Clients may expect to be able to use auth_sys during mount,
* even if they use gss for everything else; see section 2.3.2
* of rfc 2623.
*/
if (access & NFSD_MAY_BYPASS_GSS_ON_ROOT
&& exp->ex_path.dentry == dentry)
goto skip_pseudoflavor_check;
error = check_nfsd_access(exp, rqstp);
if (error)
goto out;
skip_pseudoflavor_check:
/* Finally, check access permissions. */
error = nfsd_permission(rqstp, exp, dentry, access);
out:
trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
if (error == nfserr_stale)
nfsd_stats_fh_stale_inc(exp);
return error;
}
/*
* Compose a file handle for an NFS reply.
*
* Note that when first composed, the dentry may not yet have
* an inode. In this case a call to fh_update should be made
* before the fh goes out on the wire ...
*/
static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry)
{
if (dentry != exp->ex_path.dentry) {
struct fid *fid = (struct fid *)
(fhp->fh_handle.fh_fsid + fhp->fh_handle.fh_size/4 - 1);
int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
int fh_flags = (exp->ex_flags & NFSEXP_NOSUBTREECHECK) ? 0 :
EXPORT_FH_CONNECTABLE;
int fileid_type =
exportfs_encode_fh(dentry, fid, &maxsize, fh_flags);
fhp->fh_handle.fh_fileid_type =
fileid_type > 0 ? fileid_type : FILEID_INVALID;
fhp->fh_handle.fh_size += maxsize * 4;
} else {
fhp->fh_handle.fh_fileid_type = FILEID_ROOT;
}
}
static bool is_root_export(struct svc_export *exp)
{
return exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root;
}
static struct super_block *exp_sb(struct svc_export *exp)
{
return exp->ex_path.dentry->d_sb;
}
static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
{
switch (fsid_type) {
case FSID_DEV:
if (!old_valid_dev(exp_sb(exp)->s_dev))
return false;
fallthrough;
case FSID_MAJOR_MINOR:
case FSID_ENCODE_DEV:
return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV;
case FSID_NUM:
return exp->ex_flags & NFSEXP_FSID;
case FSID_UUID8:
case FSID_UUID16:
if (!is_root_export(exp))
return false;
fallthrough;
case FSID_UUID4_INUM:
case FSID_UUID16_INUM:
return exp->ex_uuid != NULL;
}
return true;
}
static void set_version_and_fsid_type(struct svc_fh *fhp, struct svc_export *exp, struct svc_fh *ref_fh)
{
u8 version;
u8 fsid_type;
retry:
version = 1;
if (ref_fh && ref_fh->fh_export == exp) {
version = ref_fh->fh_handle.fh_version;
fsid_type = ref_fh->fh_handle.fh_fsid_type;
ref_fh = NULL;
switch (version) {
case 0xca:
fsid_type = FSID_DEV;
break;
case 1:
break;
default:
goto retry;
}
/*
* As the fsid -> filesystem mapping was guided by
* user-space, there is no guarantee that the filesystem
* actually supports that fsid type. If it doesn't we
* loop around again without ref_fh set.
*/
if (!fsid_type_ok_for_exp(fsid_type, exp))
goto retry;
} else if (exp->ex_flags & NFSEXP_FSID) {
fsid_type = FSID_NUM;
} else if (exp->ex_uuid) {
if (fhp->fh_maxsize >= 64) {
if (is_root_export(exp))
fsid_type = FSID_UUID16;
else
fsid_type = FSID_UUID16_INUM;
} else {
if (is_root_export(exp))
fsid_type = FSID_UUID8;
else
fsid_type = FSID_UUID4_INUM;
}
} else if (!old_valid_dev(exp_sb(exp)->s_dev))
/* for newer device numbers, we must use a newer fsid format */
fsid_type = FSID_ENCODE_DEV;
else
fsid_type = FSID_DEV;
fhp->fh_handle.fh_version = version;
if (version)
fhp->fh_handle.fh_fsid_type = fsid_type;
}
__be32
fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
struct svc_fh *ref_fh)
{
/* ref_fh is a reference file handle.
* if it is non-null and for the same filesystem, then we should compose
* a filehandle which is of the same version, where possible.
*/
struct inode * inode = d_inode(dentry);
dev_t ex_dev = exp_sb(exp)->s_dev;
dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
MAJOR(ex_dev), MINOR(ex_dev),
(long) d_inode(exp->ex_path.dentry)->i_ino,
dentry,
(inode ? inode->i_ino : 0));
/* Choose filehandle version and fsid type based on
* the reference filehandle (if it is in the same export)
* or the export options.
*/
set_version_and_fsid_type(fhp, exp, ref_fh);
/* If we have a ref_fh, then copy the fh_no_wcc setting from it. */
fhp->fh_no_wcc = ref_fh ? ref_fh->fh_no_wcc : false;
if (ref_fh == fhp)
fh_put(ref_fh);
if (fhp->fh_dentry) {
printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
dentry);
}
if (fhp->fh_maxsize < NFS_FHSIZE)
printk(KERN_ERR "fh_compose: called with maxsize %d! %pd2\n",
fhp->fh_maxsize,
dentry);
fhp->fh_dentry = dget(dentry); /* our internal copy */
fhp->fh_export = exp_get(exp);
fhp->fh_handle.fh_size =
key_len(fhp->fh_handle.fh_fsid_type) + 4;
fhp->fh_handle.fh_auth_type = 0;
mk_fsid(fhp->fh_handle.fh_fsid_type,
fhp->fh_handle.fh_fsid,
ex_dev,
d_inode(exp->ex_path.dentry)->i_ino,
exp->ex_fsid, exp->ex_uuid);
if (inode)
_fh_update(fhp, exp, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
return nfserr_opnotsupp;
}
return 0;
}
/*
* Update file handle information after changing a dentry.
* This is only called by nfsd_create, nfsd_create_v3 and nfsd_proc_create
*/
__be32
fh_update(struct svc_fh *fhp)
{
struct dentry *dentry;
if (!fhp->fh_dentry)
goto out_bad;
dentry = fhp->fh_dentry;
if (d_really_is_negative(dentry))
goto out_negative;
if (fhp->fh_handle.fh_fileid_type != FILEID_ROOT)
return 0;
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
return nfserr_opnotsupp;
return 0;
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");
return nfserr_serverfault;
out_negative:
printk(KERN_ERR "fh_update: %pd2 still negative!\n",
dentry);
return nfserr_serverfault;
}
/**
* fh_fill_pre_attrs - Fill in pre-op attributes
* @fhp: file handle to be updated
*
*/
__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp)
{
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
struct inode *inode;
struct kstat stat;
__be32 err;
if (fhp->fh_no_wcc || fhp->fh_pre_saved)
return nfs_ok;
inode = d_inode(fhp->fh_dentry);
err = fh_getattr(fhp, &stat);
if (err)
return err;
if (v4)
fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
fhp->fh_pre_mtime = stat.mtime;
fhp->fh_pre_ctime = stat.ctime;
fhp->fh_pre_size = stat.size;
fhp->fh_pre_saved = true;
return nfs_ok;
}
/**
* fh_fill_post_attrs - Fill in post-op attributes
* @fhp: file handle to be updated
*
*/
__be32 fh_fill_post_attrs(struct svc_fh *fhp)
{
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
struct inode *inode = d_inode(fhp->fh_dentry);
__be32 err;
if (fhp->fh_no_wcc)
return nfs_ok;
if (fhp->fh_post_saved)
printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr);
if (err)
return err;
fhp->fh_post_saved = true;
if (v4)
fhp->fh_post_change =
nfsd4_change_attribute(&fhp->fh_post_attr, inode);
return nfs_ok;
}
/**
* fh_fill_both_attrs - Fill pre-op and post-op attributes
* @fhp: file handle to be updated
*
* This is used when the directory wasn't changed, but wcc attributes
* are needed anyway.
*/
__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp)
{
__be32 err;
err = fh_fill_post_attrs(fhp);
if (err)
return err;
fhp->fh_pre_change = fhp->fh_post_change;
fhp->fh_pre_mtime = fhp->fh_post_attr.mtime;
fhp->fh_pre_ctime = fhp->fh_post_attr.ctime;
fhp->fh_pre_size = fhp->fh_post_attr.size;
fhp->fh_pre_saved = true;
return nfs_ok;
}
/*
* Release a file handle.
*/
void
fh_put(struct svc_fh *fhp)
{
struct dentry * dentry = fhp->fh_dentry;
struct svc_export * exp = fhp->fh_export;
if (dentry) {
fhp->fh_dentry = NULL;
dput(dentry);
fh_clear_pre_post_attrs(fhp);
}
fh_drop_write(fhp);
if (exp) {
exp_put(exp);
fhp->fh_export = NULL;
}
fhp->fh_no_wcc = false;
return;
}
/*
* Shorthand for dprintk()'s
*/
char * SVCFH_fmt(struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
static char buf[2+1+1+64*3+1];
	if (fh->fh_size < 0 || fh->fh_size > 64)
return "bad-fh";
sprintf(buf, "%d: %*ph", fh->fh_size, fh->fh_size, fh->fh_raw);
return buf;
}
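/*
 * Note: SVCFH_fmt() formats into a single static buffer, so it is
 * suitable only for dprintk()-style debugging, not for concurrent or
 * long-lived use.
 */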
enum fsid_source fsid_source(const struct svc_fh *fhp)
{
if (fhp->fh_handle.fh_version != 1)
return FSIDSOURCE_DEV;
switch(fhp->fh_handle.fh_fsid_type) {
case FSID_DEV:
case FSID_ENCODE_DEV:
case FSID_MAJOR_MINOR:
if (exp_sb(fhp->fh_export)->s_type->fs_flags & FS_REQUIRES_DEV)
return FSIDSOURCE_DEV;
break;
case FSID_NUM:
if (fhp->fh_export->ex_flags & NFSEXP_FSID)
return FSIDSOURCE_FSID;
break;
default:
break;
}
/* either a UUID type filehandle, or the filehandle doesn't
* match the export.
*/
if (fhp->fh_export->ex_flags & NFSEXP_FSID)
return FSIDSOURCE_FSID;
if (fhp->fh_export->ex_uuid)
return FSIDSOURCE_UUID;
return FSIDSOURCE_DEV;
}
/*
* We could use i_version alone as the change attribute. However, i_version
* can go backwards on a regular file after an unclean shutdown. On its own
* that doesn't necessarily cause a problem, but if i_version goes backwards
* and then is incremented again it could reuse a value that was previously
* used before boot, and a client who queried the two values might incorrectly
* assume nothing changed.
*
* By using both ctime and the i_version counter we guarantee that as long as
* time doesn't go backwards we never reuse an old value. If the filesystem
* advertises STATX_ATTR_CHANGE_MONOTONIC, then this mitigation is not
* needed.
*
 * We need to do this only for regular files. For directories, we
* assume that the new change attr is always logged to stable storage in some
* fashion before the results can be seen.
*/
u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode)
{
u64 chattr;
if (stat->result_mask & STATX_CHANGE_COOKIE) {
chattr = stat->change_cookie;
if (S_ISREG(inode->i_mode) &&
!(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
chattr += (u64)stat->ctime.tv_sec << 30;
chattr += stat->ctime.tv_nsec;
}
} else {
chattr = time_to_chattr(&stat->ctime);
}
return chattr;
}
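/*
 * Illustrative example of the composition above: for a regular file
 * without STATX_ATTR_CHANGE_MONOTONIC, change_cookie == 5 and
 * ctime == {1000s, 42ns} yield 5 + (1000 << 30) + 42. As long as
 * ctime never moves backwards, the result can never repeat a
 * previously used value.
 */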
| linux-master | fs/nfsd/nfsfh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XDR support for nfsd/protocol version 3.
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <[email protected]>
*
* 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()!
*/
#include <linux/namei.h>
#include <linux/sunrpc/svc_xprt.h>
#include "xdr3.h"
#include "auth.h"
#include "netns.h"
#include "vfs.h"
/*
* Force construction of an empty post-op attr
*/
static const struct svc_fh nfs3svc_null_fh = {
.fh_no_wcc = true,
};
/*
* time_delta. {1, 0} means the server is accurate only
* to the nearest second.
*/
static const struct timespec64 nfs3svc_time_delta = {
.tv_sec = 1,
.tv_nsec = 0,
};
/*
* Mapping of S_IF* types to NFS file types
*/
static const u32 nfs3_ftypes[] = {
NF3NON, NF3FIFO, NF3CHR, NF3BAD,
NF3DIR, NF3BAD, NF3BLK, NF3BAD,
NF3REG, NF3BAD, NF3LNK, NF3BAD,
NF3SOCK, NF3BAD, NF3LNK, NF3BAD,
};
/*
* Basic NFSv3 data types (RFC 1813 Sections 2.5 and 2.6)
*/
static __be32 *
encode_nfstime3(__be32 *p, const struct timespec64 *time)
{
*p++ = cpu_to_be32((u32)time->tv_sec);
*p++ = cpu_to_be32(time->tv_nsec);
return p;
}
static bool
svcxdr_decode_nfstime3(struct xdr_stream *xdr, struct timespec64 *timep)
{
__be32 *p;
p = xdr_inline_decode(xdr, XDR_UNIT * 2);
if (!p)
return false;
timep->tv_sec = be32_to_cpup(p++);
timep->tv_nsec = be32_to_cpup(p);
return true;
}
/**
* svcxdr_decode_nfs_fh3 - Decode an NFSv3 file handle
* @xdr: XDR stream positioned at an undecoded NFSv3 FH
* @fhp: OUT: filled-in server file handle
*
* Return values:
* %false: The encoded file handle was not valid
* %true: @fhp has been initialized
*/
bool
svcxdr_decode_nfs_fh3(struct xdr_stream *xdr, struct svc_fh *fhp)
{
__be32 *p;
u32 size;
if (xdr_stream_decode_u32(xdr, &size) < 0)
return false;
if (size == 0 || size > NFS3_FHSIZE)
return false;
p = xdr_inline_decode(xdr, size);
if (!p)
return false;
fh_init(fhp, NFS3_FHSIZE);
fhp->fh_handle.fh_size = size;
memcpy(&fhp->fh_handle.fh_raw, p, size);
return true;
}
/**
* svcxdr_encode_nfsstat3 - Encode an NFSv3 status code
* @xdr: XDR stream
* @status: status value to encode
*
* Return values:
* %false: Send buffer space was exhausted
* %true: Success
*/
bool
svcxdr_encode_nfsstat3(struct xdr_stream *xdr, __be32 status)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(status));
if (!p)
return false;
*p = status;
return true;
}
static bool
svcxdr_encode_nfs_fh3(struct xdr_stream *xdr, const struct svc_fh *fhp)
{
u32 size = fhp->fh_handle.fh_size;
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT + size);
if (!p)
return false;
*p++ = cpu_to_be32(size);
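	/* Pre-zero the last XDR word so pad bytes beyond the handle are clean. */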
if (size)
p[XDR_QUADLEN(size) - 1] = 0;
memcpy(p, &fhp->fh_handle.fh_raw, size);
return true;
}
static bool
svcxdr_encode_post_op_fh3(struct xdr_stream *xdr, const struct svc_fh *fhp)
{
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
if (!svcxdr_encode_nfs_fh3(xdr, fhp))
return false;
return true;
}
static bool
svcxdr_encode_cookieverf3(struct xdr_stream *xdr, const __be32 *verf)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS3_COOKIEVERFSIZE);
if (!p)
return false;
memcpy(p, verf, NFS3_COOKIEVERFSIZE);
return true;
}
static bool
svcxdr_encode_writeverf3(struct xdr_stream *xdr, const __be32 *verf)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS3_WRITEVERFSIZE);
if (!p)
return false;
memcpy(p, verf, NFS3_WRITEVERFSIZE);
return true;
}
static bool
svcxdr_decode_filename3(struct xdr_stream *xdr, char **name, unsigned int *len)
{
u32 size, i;
__be32 *p;
char *c;
if (xdr_stream_decode_u32(xdr, &size) < 0)
return false;
if (size == 0 || size > NFS3_MAXNAMLEN)
return false;
p = xdr_inline_decode(xdr, size);
if (!p)
return false;
*len = size;
*name = (char *)p;
for (i = 0, c = *name; i < size; i++, c++) {
if (*c == '\0' || *c == '/')
return false;
}
return true;
}
static bool
svcxdr_decode_diropargs3(struct xdr_stream *xdr, struct svc_fh *fhp,
char **name, unsigned int *len)
{
return svcxdr_decode_nfs_fh3(xdr, fhp) &&
svcxdr_decode_filename3(xdr, name, len);
}
static bool
svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct iattr *iap)
{
u32 set_it;
iap->ia_valid = 0;
if (xdr_stream_decode_bool(xdr, &set_it) < 0)
return false;
if (set_it) {
u32 mode;
if (xdr_stream_decode_u32(xdr, &mode) < 0)
return false;
iap->ia_valid |= ATTR_MODE;
iap->ia_mode = mode;
}
if (xdr_stream_decode_bool(xdr, &set_it) < 0)
return false;
if (set_it) {
u32 uid;
if (xdr_stream_decode_u32(xdr, &uid) < 0)
return false;
iap->ia_uid = make_kuid(nfsd_user_namespace(rqstp), uid);
if (uid_valid(iap->ia_uid))
iap->ia_valid |= ATTR_UID;
}
if (xdr_stream_decode_bool(xdr, &set_it) < 0)
return false;
if (set_it) {
u32 gid;
if (xdr_stream_decode_u32(xdr, &gid) < 0)
return false;
iap->ia_gid = make_kgid(nfsd_user_namespace(rqstp), gid);
if (gid_valid(iap->ia_gid))
iap->ia_valid |= ATTR_GID;
}
if (xdr_stream_decode_bool(xdr, &set_it) < 0)
return false;
if (set_it) {
u64 newsize;
if (xdr_stream_decode_u64(xdr, &newsize) < 0)
return false;
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = newsize;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
switch (set_it) {
case DONT_CHANGE:
break;
case SET_TO_SERVER_TIME:
iap->ia_valid |= ATTR_ATIME;
break;
case SET_TO_CLIENT_TIME:
if (!svcxdr_decode_nfstime3(xdr, &iap->ia_atime))
return false;
iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
break;
default:
return false;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
switch (set_it) {
case DONT_CHANGE:
break;
case SET_TO_SERVER_TIME:
iap->ia_valid |= ATTR_MTIME;
break;
case SET_TO_CLIENT_TIME:
if (!svcxdr_decode_nfstime3(xdr, &iap->ia_mtime))
return false;
iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
break;
default:
return false;
}
return true;
}
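/*
 * Worked example of an sattr3 stream as decoded above, with
 * illustrative values: the client sets only the mode (0644) and asks
 * for the mtime to be stamped with the server's clock.  The XDR
 * words, one setter per line, are:
 *
 *	1, 0644		set_it = TRUE, then the mode
 *	0		uid not set
 *	0		gid not set
 *	0		size not set
 *	0		atime: DONT_CHANGE
 *	1		mtime: SET_TO_SERVER_TIME
 *
 * which leaves iap->ia_valid == (ATTR_MODE | ATTR_MTIME).
 */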
static bool
svcxdr_decode_sattrguard3(struct xdr_stream *xdr, struct nfsd3_sattrargs *args)
{
__be32 *p;
u32 check;
if (xdr_stream_decode_bool(xdr, &check) < 0)
return false;
if (check) {
p = xdr_inline_decode(xdr, XDR_UNIT * 2);
if (!p)
return false;
args->check_guard = 1;
args->guardtime = be32_to_cpup(p);
} else
args->check_guard = 0;
return true;
}
static bool
svcxdr_decode_specdata3(struct xdr_stream *xdr, struct nfsd3_mknodargs *args)
{
__be32 *p;
p = xdr_inline_decode(xdr, XDR_UNIT * 2);
if (!p)
return false;
args->major = be32_to_cpup(p++);
args->minor = be32_to_cpup(p);
return true;
}
static bool
svcxdr_decode_devicedata3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct nfsd3_mknodargs *args)
{
return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs) &&
svcxdr_decode_specdata3(xdr, args);
}
static bool
svcxdr_encode_fattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
const struct svc_fh *fhp, const struct kstat *stat)
{
struct user_namespace *userns = nfsd_user_namespace(rqstp);
__be32 *p;
u64 fsid;
p = xdr_reserve_space(xdr, XDR_UNIT * 21);
if (!p)
return false;
*p++ = cpu_to_be32(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
*p++ = cpu_to_be32((u32)(stat->mode & S_IALLUGO));
*p++ = cpu_to_be32((u32)stat->nlink);
*p++ = cpu_to_be32((u32)from_kuid_munged(userns, stat->uid));
*p++ = cpu_to_be32((u32)from_kgid_munged(userns, stat->gid));
if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN)
p = xdr_encode_hyper(p, (u64)NFS3_MAXPATHLEN);
else
p = xdr_encode_hyper(p, (u64)stat->size);
/* used */
p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9);
/* rdev */
*p++ = cpu_to_be32((u32)MAJOR(stat->rdev));
*p++ = cpu_to_be32((u32)MINOR(stat->rdev));
	switch (fsid_source(fhp)) {
case FSIDSOURCE_FSID:
fsid = (u64)fhp->fh_export->ex_fsid;
break;
case FSIDSOURCE_UUID:
fsid = ((u64 *)fhp->fh_export->ex_uuid)[0];
fsid ^= ((u64 *)fhp->fh_export->ex_uuid)[1];
break;
default:
fsid = (u64)huge_encode_dev(fhp->fh_dentry->d_sb->s_dev);
}
p = xdr_encode_hyper(p, fsid);
/* fileid */
p = xdr_encode_hyper(p, stat->ino);
p = encode_nfstime3(p, &stat->atime);
p = encode_nfstime3(p, &stat->mtime);
encode_nfstime3(p, &stat->ctime);
return true;
}
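/*
 * The XDR_UNIT * 21 reservation above covers the whole fattr3:
 * type, mode, nlink, uid and gid take one word each (5); size,
 * used, fsid and fileid are hypers (8); rdev is a major/minor pair
 * (2); and atime, mtime and ctime are nfstime3s (6).  5 + 8 + 2 + 6
 * == 21.
 */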
static bool
svcxdr_encode_wcc_attr(struct xdr_stream *xdr, const struct svc_fh *fhp)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 6);
if (!p)
return false;
p = xdr_encode_hyper(p, (u64)fhp->fh_pre_size);
p = encode_nfstime3(p, &fhp->fh_pre_mtime);
encode_nfstime3(p, &fhp->fh_pre_ctime);
return true;
}
static bool
svcxdr_encode_pre_op_attr(struct xdr_stream *xdr, const struct svc_fh *fhp)
{
if (!fhp->fh_pre_saved) {
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
return true;
}
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
return svcxdr_encode_wcc_attr(xdr, fhp);
}
/**
* svcxdr_encode_post_op_attr - Encode NFSv3 post-op attributes
* @rqstp: Context of a completed RPC transaction
* @xdr: XDR stream
* @fhp: File handle to encode
*
* Return values:
* %false: Send buffer space was exhausted
* %true: Success
*/
bool
svcxdr_encode_post_op_attr(struct svc_rqst *rqstp, struct xdr_stream *xdr,
const struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
struct kstat stat;
/*
* The inode may be NULL if the call failed because of a
* stale file handle. In this case, no attributes are
* returned.
*/
if (fhp->fh_no_wcc || !dentry || !d_really_is_positive(dentry))
goto no_post_op_attrs;
if (fh_getattr(fhp, &stat) != nfs_ok)
goto no_post_op_attrs;
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
lease_get_mtime(d_inode(dentry), &stat.mtime);
if (!svcxdr_encode_fattr3(rqstp, xdr, fhp, &stat))
return false;
return true;
no_post_op_attrs:
return xdr_stream_encode_item_absent(xdr) > 0;
}
/*
* Encode weak cache consistency data
*/
static bool
svcxdr_encode_wcc_data(struct svc_rqst *rqstp, struct xdr_stream *xdr,
const struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
if (!dentry || !d_really_is_positive(dentry) || !fhp->fh_post_saved)
goto neither;
/* before */
if (!svcxdr_encode_pre_op_attr(xdr, fhp))
return false;
/* after */
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
if (!svcxdr_encode_fattr3(rqstp, xdr, fhp, &fhp->fh_post_attr))
return false;
return true;
neither:
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
if (!svcxdr_encode_post_op_attr(rqstp, xdr, fhp))
return false;
return true;
}
/*
* XDR decode functions
*/
bool
nfs3svc_decode_fhandleargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd_fhandle *args = rqstp->rq_argp;
return svcxdr_decode_nfs_fh3(xdr, &args->fh);
}
bool
nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_sattrargs *args = rqstp->rq_argp;
return svcxdr_decode_nfs_fh3(xdr, &args->fh) &&
svcxdr_decode_sattr3(rqstp, xdr, &args->attrs) &&
svcxdr_decode_sattrguard3(xdr, args);
}
bool
nfs3svc_decode_diropargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_diropargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len);
}
bool
nfs3svc_decode_accessargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_accessargs *args = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u32(xdr, &args->access) < 0)
return false;
return true;
}
bool
nfs3svc_decode_readargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readargs *args = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
return true;
}
bool
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_writeargs *args = rqstp->rq_argp;
u32 max_blocksize = svc_max_payload(rqstp);
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->stable) < 0)
return false;
/* opaque data */
if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return false;
/* request sanity */
if (args->count != args->len)
return false;
if (args->count > max_blocksize) {
args->count = max_blocksize;
args->len = max_blocksize;
}
return xdr_stream_subsegment(xdr, &args->payload, args->count);
}
bool
nfs3svc_decode_createargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_createargs *args = rqstp->rq_argp;
if (!svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len))
return false;
if (xdr_stream_decode_u32(xdr, &args->createmode) < 0)
return false;
switch (args->createmode) {
case NFS3_CREATE_UNCHECKED:
case NFS3_CREATE_GUARDED:
return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
case NFS3_CREATE_EXCLUSIVE:
args->verf = xdr_inline_decode(xdr, NFS3_CREATEVERFSIZE);
if (!args->verf)
return false;
break;
default:
return false;
}
return true;
}
bool
nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_createargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs3(xdr, &args->fh,
&args->name, &args->len) &&
svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
}
bool
nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_symlinkargs *args = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
if (!svcxdr_decode_diropargs3(xdr, &args->ffh, &args->fname, &args->flen))
return false;
if (!svcxdr_decode_sattr3(rqstp, xdr, &args->attrs))
return false;
if (xdr_stream_decode_u32(xdr, &args->tlen) < 0)
return false;
/* symlink_data */
args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
args->first.iov_base = xdr_inline_decode(xdr, args->tlen);
return args->first.iov_base != NULL;
}
bool
nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_mknodargs *args = rqstp->rq_argp;
if (!svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len))
return false;
if (xdr_stream_decode_u32(xdr, &args->ftype) < 0)
return false;
switch (args->ftype) {
case NF3CHR:
case NF3BLK:
return svcxdr_decode_devicedata3(rqstp, xdr, args);
case NF3SOCK:
case NF3FIFO:
return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
case NF3REG:
case NF3DIR:
case NF3LNK:
/* Valid XDR but illegal file types */
break;
default:
return false;
}
return true;
}
bool
nfs3svc_decode_renameargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_renameargs *args = rqstp->rq_argp;
return svcxdr_decode_diropargs3(xdr, &args->ffh,
&args->fname, &args->flen) &&
svcxdr_decode_diropargs3(xdr, &args->tfh,
&args->tname, &args->tlen);
}
bool
nfs3svc_decode_linkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_linkargs *args = rqstp->rq_argp;
return svcxdr_decode_nfs_fh3(xdr, &args->ffh) &&
svcxdr_decode_diropargs3(xdr, &args->tfh,
&args->tname, &args->tlen);
}
bool
nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readdirargs *args = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u64(xdr, &args->cookie) < 0)
return false;
args->verf = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
if (!args->verf)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
return true;
}
bool
nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readdirargs *args = rqstp->rq_argp;
u32 dircount;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u64(xdr, &args->cookie) < 0)
return false;
args->verf = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
if (!args->verf)
return false;
/* dircount is ignored */
if (xdr_stream_decode_u32(xdr, &dircount) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
return true;
}
bool
nfs3svc_decode_commitargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_commitargs *args = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return false;
return true;
}
/*
* XDR encode functions
*/
/* GETATTR */
bool
nfs3svc_encode_getattrres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_attrstat *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
lease_get_mtime(d_inode(resp->fh.fh_dentry), &resp->stat.mtime);
if (!svcxdr_encode_fattr3(rqstp, xdr, &resp->fh, &resp->stat))
return false;
break;
}
return true;
}
/* SETATTR, REMOVE, RMDIR */
bool
nfs3svc_encode_wccstat(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_attrstat *resp = rqstp->rq_resp;
return svcxdr_encode_nfsstat3(xdr, resp->status) &&
svcxdr_encode_wcc_data(rqstp, xdr, &resp->fh);
}
/* LOOKUP */
bool
nfs3svc_encode_lookupres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_diropres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_nfs_fh3(xdr, &resp->fh))
return false;
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->dirfh))
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->dirfh))
return false;
}
return true;
}
/* ACCESS */
bool
nfs3svc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_accessres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (xdr_stream_encode_u32(xdr, resp->access) < 0)
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/* READLINK */
bool
nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readlinkres *resp = rqstp->rq_resp;
struct kvec *head = rqstp->rq_res.head;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (xdr_stream_encode_u32(xdr, resp->len) < 0)
return false;
svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages, 0,
resp->len);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->len) < 0)
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/* READ */
bool
nfs3svc_encode_readres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readres *resp = rqstp->rq_resp;
struct kvec *head = rqstp->rq_res.head;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
if (xdr_stream_encode_bool(xdr, resp->eof) < 0)
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages,
rqstp->rq_res.page_base,
resp->count);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->count) < 0)
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/* WRITE */
bool
nfs3svc_encode_writeres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_writeres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->fh))
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
if (xdr_stream_encode_u32(xdr, resp->committed) < 0)
return false;
if (!svcxdr_encode_writeverf3(xdr, resp->verf))
return false;
break;
default:
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/* CREATE, MKDIR, SYMLINK, MKNOD */
bool
nfs3svc_encode_createres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_diropres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_fh3(xdr, &resp->fh))
return false;
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->dirfh))
return false;
break;
default:
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->dirfh))
return false;
}
return true;
}
/* RENAME */
bool
nfs3svc_encode_renameres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_renameres *resp = rqstp->rq_resp;
return svcxdr_encode_nfsstat3(xdr, resp->status) &&
svcxdr_encode_wcc_data(rqstp, xdr, &resp->ffh) &&
svcxdr_encode_wcc_data(rqstp, xdr, &resp->tfh);
}
/* LINK */
bool
nfs3svc_encode_linkres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_linkres *resp = rqstp->rq_resp;
return svcxdr_encode_nfsstat3(xdr, resp->status) &&
svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh) &&
svcxdr_encode_wcc_data(rqstp, xdr, &resp->tfh);
}
/* READDIR */
bool
nfs3svc_encode_readdirres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_readdirres *resp = rqstp->rq_resp;
struct xdr_buf *dirlist = &resp->dirlist;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (!svcxdr_encode_cookieverf3(xdr, resp->verf))
return false;
svcxdr_encode_opaque_pages(rqstp, xdr, dirlist->pages, 0,
dirlist->len);
/* no more entries */
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
if (xdr_stream_encode_bool(xdr, resp->common.err == nfserr_eof) < 0)
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
static __be32
compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
const char *name, int namlen, u64 ino)
{
struct svc_export *exp;
struct dentry *dparent, *dchild;
__be32 rv = nfserr_noent;
dparent = cd->fh.fh_dentry;
exp = cd->fh.fh_export;
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
/*
* Don't return filehandle for ".." if we're at
* the filesystem or export root:
*/
if (dchild == dparent)
goto out;
if (dparent == exp->ex_path.dentry)
goto out;
} else
dchild = dget(dparent);
} else
dchild = lookup_positive_unlocked(name, dparent, namlen);
if (IS_ERR(dchild))
return rv;
if (d_mountpoint(dchild))
goto out;
if (dchild->d_inode->i_ino != ino)
goto out;
rv = fh_compose(fhp, exp, dchild, &cd->fh);
out:
dput(dchild);
return rv;
}
/**
* nfs3svc_encode_cookie3 - Encode a directory offset cookie
* @resp: readdir result context
* @offset: offset cookie to encode
*
* The buffer space for the offset cookie has already been reserved
* by svcxdr_encode_entry3_common().
*/
void nfs3svc_encode_cookie3(struct nfsd3_readdirres *resp, u64 offset)
{
__be64 cookie = cpu_to_be64(offset);
if (!resp->cookie_offset)
return;
write_bytes_to_xdr_buf(&resp->dirlist, resp->cookie_offset, &cookie,
sizeof(cookie));
resp->cookie_offset = 0;
}
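/*
 * In outline, the cookie back-patching works like this: each entry
 * reserves an 8-byte cookie slot but encodes the placeholder
 * OFFSET_MAX (see svcxdr_encode_entry3_common() below), because an
 * entry's cookie is not known until the following entry supplies it
 * as @offset:
 *
 *	entry "a":	cookie_offset == 0, nothing to patch; slot A reserved
 *	entry "b":	slot A patched with @offset (a's cookie); slot B reserved
 *	end of listing:	one final nfs3svc_encode_cookie3() call patches
 *			slot B with the last cookie
 */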
static bool
svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name,
int namlen, loff_t offset, u64 ino)
{
struct xdr_buf *dirlist = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
if (xdr_stream_encode_item_present(xdr) < 0)
return false;
/* fileid */
if (xdr_stream_encode_u64(xdr, ino) < 0)
return false;
/* name */
if (xdr_stream_encode_opaque(xdr, name, min(namlen, NFS3_MAXNAMLEN)) < 0)
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
return false;
return true;
}
/**
* nfs3svc_encode_entry3 - encode one NFSv3 READDIR entry
* @data: directory context
* @name: name of the object to be encoded
* @namlen: length of that name, in bytes
* @offset: the offset of the previous entry
* @ino: the fileid of this entry
* @d_type: unused
*
* Return values:
* %0: Entry was successfully encoded.
 * %-EINVAL: An encoding problem occurred; secondary status code in resp->common.err
*
* On exit, the following fields are updated:
* - resp->xdr
* - resp->common.err
* - resp->cookie_offset
*/
int nfs3svc_encode_entry3(void *data, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = data;
struct nfsd3_readdirres *resp = container_of(ccd,
struct nfsd3_readdirres,
common);
unsigned int starting_length = resp->dirlist.len;
/* The offset cookie for the previous entry */
nfs3svc_encode_cookie3(resp, offset);
if (!svcxdr_encode_entry3_common(resp, name, namlen, offset, ino))
goto out_toosmall;
xdr_commit_encode(&resp->xdr);
resp->common.err = nfs_ok;
return 0;
out_toosmall:
resp->cookie_offset = 0;
resp->common.err = nfserr_toosmall;
resp->dirlist.len = starting_length;
return -EINVAL;
}
static bool
svcxdr_encode_entry3_plus(struct nfsd3_readdirres *resp, const char *name,
int namlen, u64 ino)
{
struct xdr_stream *xdr = &resp->xdr;
struct svc_fh *fhp = &resp->scratch;
bool result;
result = false;
fh_init(fhp, NFS3_FHSIZE);
if (compose_entry_fh(resp, fhp, name, namlen, ino) != nfs_ok)
goto out_noattrs;
if (!svcxdr_encode_post_op_attr(resp->rqstp, xdr, fhp))
goto out;
if (!svcxdr_encode_post_op_fh3(xdr, fhp))
goto out;
result = true;
out:
fh_put(fhp);
return result;
out_noattrs:
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
return true;
}
/**
* nfs3svc_encode_entryplus3 - encode one NFSv3 READDIRPLUS entry
* @data: directory context
* @name: name of the object to be encoded
* @namlen: length of that name, in bytes
* @offset: the offset of the previous entry
* @ino: the fileid of this entry
* @d_type: unused
*
* Return values:
* %0: Entry was successfully encoded.
 * %-EINVAL: An encoding problem occurred; secondary status code in resp->common.err
*
* On exit, the following fields are updated:
* - resp->xdr
* - resp->common.err
* - resp->cookie_offset
*/
int nfs3svc_encode_entryplus3(void *data, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = data;
struct nfsd3_readdirres *resp = container_of(ccd,
struct nfsd3_readdirres,
common);
unsigned int starting_length = resp->dirlist.len;
/* The offset cookie for the previous entry */
nfs3svc_encode_cookie3(resp, offset);
if (!svcxdr_encode_entry3_common(resp, name, namlen, offset, ino))
goto out_toosmall;
if (!svcxdr_encode_entry3_plus(resp, name, namlen, ino))
goto out_toosmall;
xdr_commit_encode(&resp->xdr);
resp->common.err = nfs_ok;
return 0;
out_toosmall:
resp->cookie_offset = 0;
resp->common.err = nfserr_toosmall;
resp->dirlist.len = starting_length;
return -EINVAL;
}
static bool
svcxdr_encode_fsstat3resok(struct xdr_stream *xdr,
const struct nfsd3_fsstatres *resp)
{
const struct kstatfs *s = &resp->stats;
u64 bs = s->f_bsize;
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 13);
if (!p)
return false;
p = xdr_encode_hyper(p, bs * s->f_blocks); /* total bytes */
p = xdr_encode_hyper(p, bs * s->f_bfree); /* free bytes */
p = xdr_encode_hyper(p, bs * s->f_bavail); /* user available bytes */
p = xdr_encode_hyper(p, s->f_files); /* total inodes */
p = xdr_encode_hyper(p, s->f_ffree); /* free inodes */
p = xdr_encode_hyper(p, s->f_ffree); /* user available inodes */
*p = cpu_to_be32(resp->invarsec); /* mean unchanged time */
return true;
}
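/*
 * FSSTAT3 reports space in bytes while statfs counts f_bsize units,
 * hence the multiplications above.  For example, f_bsize == 4096 and
 * f_blocks == 262144 encode as tbytes == 4096 * 262144 == 2^30, a
 * 1 GiB filesystem.  f_ffree is reused for both ffiles and afiles
 * because struct kstatfs carries no separate f_favail-style count of
 * inodes available to unprivileged users.
 */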
/* FSSTAT */
bool
nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_fsstatres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
if (!svcxdr_encode_fsstat3resok(xdr, resp))
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
}
return true;
}
static bool
svcxdr_encode_fsinfo3resok(struct xdr_stream *xdr,
const struct nfsd3_fsinfores *resp)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 12);
if (!p)
return false;
*p++ = cpu_to_be32(resp->f_rtmax);
*p++ = cpu_to_be32(resp->f_rtpref);
*p++ = cpu_to_be32(resp->f_rtmult);
*p++ = cpu_to_be32(resp->f_wtmax);
*p++ = cpu_to_be32(resp->f_wtpref);
*p++ = cpu_to_be32(resp->f_wtmult);
*p++ = cpu_to_be32(resp->f_dtpref);
p = xdr_encode_hyper(p, resp->f_maxfilesize);
p = encode_nfstime3(p, &nfs3svc_time_delta);
*p = cpu_to_be32(resp->f_properties);
return true;
}
/* FSINFO */
bool
nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_fsinfores *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
if (!svcxdr_encode_fsinfo3resok(xdr, resp))
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
}
return true;
}
static bool
svcxdr_encode_pathconf3resok(struct xdr_stream *xdr,
const struct nfsd3_pathconfres *resp)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 6);
if (!p)
return false;
*p++ = cpu_to_be32(resp->p_link_max);
*p++ = cpu_to_be32(resp->p_name_max);
p = xdr_encode_bool(p, resp->p_no_trunc);
p = xdr_encode_bool(p, resp->p_chown_restricted);
p = xdr_encode_bool(p, resp->p_case_insensitive);
xdr_encode_bool(p, resp->p_case_preserving);
return true;
}
/* PATHCONF */
bool
nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_pathconfres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
if (!svcxdr_encode_pathconf3resok(xdr, resp))
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &nfs3svc_null_fh))
return false;
}
return true;
}
/* COMMIT */
bool
nfs3svc_encode_commitres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_commitres *resp = rqstp->rq_resp;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->fh))
return false;
if (!svcxdr_encode_writeverf3(xdr, resp->verf))
return false;
break;
default:
if (!svcxdr_encode_wcc_data(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/*
* XDR release functions
*/
void
nfs3svc_release_fhandle(struct svc_rqst *rqstp)
{
struct nfsd3_attrstat *resp = rqstp->rq_resp;
fh_put(&resp->fh);
}
void
nfs3svc_release_fhandle2(struct svc_rqst *rqstp)
{
struct nfsd3_fhandle_pair *resp = rqstp->rq_resp;
fh_put(&resp->fh1);
fh_put(&resp->fh2);
}
| linux-master | fs/nfsd/nfs3xdr.c |
/*
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <[email protected]>
* Andy Adamson <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
#include "netns.h"
#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
/* Index of predefined Linux callback client operations */
struct nfs4_cb_compound_hdr {
/* args */
u32 ident; /* minorversion 0 only */
u32 nops;
__be32 *nops_p;
u32 minorversion;
/* res */
int status;
};
static __be32 *xdr_encode_empty_array(__be32 *p)
{
*p++ = xdr_zero;
return p;
}
/*
* Encode/decode NFSv4 CB basic data types
*
* Basic NFSv4 callback data types are defined in section 15 of RFC
* 3530: "Network File System (NFS) version 4 Protocol" and section
* 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
* 1 Protocol"
*/
static void encode_uint32(struct xdr_stream *xdr, u32 n)
{
WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0);
}
static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
size_t len)
{
WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
}
/*
* nfs_cb_opnum4
*
* enum nfs_cb_opnum4 {
* OP_CB_GETATTR = 3,
* ...
* };
*/
enum nfs_cb_opnum4 {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
OP_CB_LAYOUTRECALL = 5,
OP_CB_NOTIFY = 6,
OP_CB_PUSH_DELEG = 7,
OP_CB_RECALL_ANY = 8,
OP_CB_RECALLABLE_OBJ_AVAIL = 9,
OP_CB_RECALL_SLOT = 10,
OP_CB_SEQUENCE = 11,
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_OFFLOAD = 15,
OP_CB_ILLEGAL = 10044
};
static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(op);
}
/*
* nfs_fh4
*
* typedef opaque nfs_fh4<NFS4_FHSIZE>;
*/
static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
{
u32 length = fh->fh_size;
__be32 *p;
BUG_ON(length > NFS4_FHSIZE);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, &fh->fh_raw, length);
}
/*
* stateid4
*
* struct stateid4 {
* uint32_t seqid;
* opaque other[12];
* };
*/
static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(sid->si_generation);
xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
}
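/*
 * On the wire a stateid4 is therefore a fixed 16 bytes
 * (NFS4_STATEID_SIZE): one word of seqid followed by the 12-byte
 * opaque (NFS4_STATEID_OTHER_SIZE).
 */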
/*
* sessionid4
*
* typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
*/
static void encode_sessionid4(struct xdr_stream *xdr,
const struct nfsd4_session *session)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
xdr_encode_opaque_fixed(p, session->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
}
/*
* nfsstat4
*/
static const struct {
int stat;
int errno;
} nfs_cb_errtbl[] = {
{ NFS4_OK, 0 },
{ NFS4ERR_PERM, -EPERM },
{ NFS4ERR_NOENT, -ENOENT },
{ NFS4ERR_IO, -EIO },
{ NFS4ERR_NXIO, -ENXIO },
{ NFS4ERR_ACCESS, -EACCES },
{ NFS4ERR_EXIST, -EEXIST },
{ NFS4ERR_XDEV, -EXDEV },
{ NFS4ERR_NOTDIR, -ENOTDIR },
{ NFS4ERR_ISDIR, -EISDIR },
{ NFS4ERR_INVAL, -EINVAL },
{ NFS4ERR_FBIG, -EFBIG },
{ NFS4ERR_NOSPC, -ENOSPC },
{ NFS4ERR_ROFS, -EROFS },
{ NFS4ERR_MLINK, -EMLINK },
{ NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
{ NFS4ERR_NOTEMPTY, -ENOTEMPTY },
{ NFS4ERR_DQUOT, -EDQUOT },
{ NFS4ERR_STALE, -ESTALE },
{ NFS4ERR_BADHANDLE, -EBADHANDLE },
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
{ NFS4ERR_SERVERFAULT, -ESERVERFAULT },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_RESOURCE, -EREMOTEIO },
{ NFS4ERR_SYMLINK, -ELOOP },
{ NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
{ -1, -EIO }
};
/*
* If we cannot translate the error, the recovery routines should
* handle it.
*
* Note: remaining NFSv4 error codes have values > 10000, so should
* not conflict with native Linux error codes.
*/
static int nfs_cb_stat_to_errno(int status)
{
int i;
for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
if (nfs_cb_errtbl[i].stat == status)
return nfs_cb_errtbl[i].errno;
}
dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
return -status;
}
static int decode_cb_op_status(struct xdr_stream *xdr,
enum nfs_cb_opnum4 expected, int *status)
{
__be32 *p;
u32 op;
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
op = be32_to_cpup(p++);
if (unlikely(op != expected))
goto out_unexpected;
*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
return 0;
out_overflow:
return -EIO;
out_unexpected:
dprintk("NFSD: Callback server returned operation %d but "
"we issued a request for %d\n", op, expected);
return -EIO;
}
/*
* CB_COMPOUND4args
*
* struct CB_COMPOUND4args {
* utf8str_cs tag;
* uint32_t minorversion;
* uint32_t callback_ident;
* nfs_cb_argop4 argarray<>;
* };
*/
static void encode_cb_compound4args(struct xdr_stream *xdr,
struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
p = xdr_encode_empty_array(p); /* empty tag */
*p++ = cpu_to_be32(hdr->minorversion);
*p++ = cpu_to_be32(hdr->ident);
hdr->nops_p = p;
*p = cpu_to_be32(hdr->nops); /* argarray element count */
}
/*
* Update argarray element count
*/
static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
*hdr->nops_p = cpu_to_be32(hdr->nops);
}
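/*
 * Sketch of the two-pass operation count fixup: the compound header
 * is encoded with a provisional count of zero, each encode_cb_*4args()
 * helper bumps hdr->nops as it appends an operation, and
 * encode_cb_nops() rewrites the count through hdr->nops_p once the
 * argarray is complete.  For a v4.1 CB_RECALL, for instance:
 *
 *	encode_cb_compound4args(xdr, &hdr);	nops slot written as 0
 *	encode_cb_sequence4args(xdr, cb, &hdr);	hdr.nops == 1
 *	encode_cb_recall4args(xdr, dp, &hdr);	hdr.nops == 2
 *	encode_cb_nops(&hdr);			slot patched to 2
 *
 * (On v4.0 the sequence op is skipped and the final count is 1.)
 */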
/*
* CB_COMPOUND4res
*
* struct CB_COMPOUND4res {
* nfsstat4 status;
* utf8str_cs tag;
* nfs_cb_resop4 resarray<>;
* };
*/
static int decode_cb_compound4res(struct xdr_stream *xdr,
struct nfs4_cb_compound_hdr *hdr)
{
u32 length;
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
hdr->status = be32_to_cpup(p++);
/* Ignore the tag */
length = be32_to_cpup(p++);
p = xdr_inline_decode(xdr, length + 4);
if (unlikely(p == NULL))
goto out_overflow;
p += XDR_QUADLEN(length);
hdr->nops = be32_to_cpup(p);
return 0;
out_overflow:
return -EIO;
}
/*
* CB_RECALL4args
*
* struct CB_RECALL4args {
* stateid4 stateid;
* bool truncate;
* nfs_fh4 fh;
* };
*/
static void encode_cb_recall4args(struct xdr_stream *xdr,
const struct nfs4_delegation *dp,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
p = xdr_reserve_space(xdr, 4);
*p++ = xdr_zero; /* truncate */
encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);
hdr->nops++;
}
/*
* CB_RECALLANY4args
*
* struct CB_RECALLANY4args {
* uint32_t craa_objects_to_keep;
* bitmap4 craa_type_mask;
* };
*/
static void
encode_cb_recallany4args(struct xdr_stream *xdr,
struct nfs4_cb_compound_hdr *hdr, struct nfsd4_cb_recall_any *ra)
{
encode_nfs_cb_opnum4(xdr, OP_CB_RECALL_ANY);
encode_uint32(xdr, ra->ra_keep);
encode_bitmap4(xdr, ra->ra_bmval, ARRAY_SIZE(ra->ra_bmval));
hdr->nops++;
}
/*
* CB_SEQUENCE4args
*
* struct CB_SEQUENCE4args {
* sessionid4 csa_sessionid;
* sequenceid4 csa_sequenceid;
* slotid4 csa_slotid;
* slotid4 csa_highest_slotid;
* bool csa_cachethis;
* referring_call_list4 csa_referring_call_lists<>;
* };
*/
static void encode_cb_sequence4args(struct xdr_stream *xdr,
const struct nfsd4_callback *cb,
struct nfs4_cb_compound_hdr *hdr)
{
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
__be32 *p;
if (hdr->minorversion == 0)
return;
encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
encode_sessionid4(xdr, session);
p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
*p++ = cpu_to_be32(session->se_cb_seq_nr); /* csa_sequenceid */
*p++ = xdr_zero; /* csa_slotid */
*p++ = xdr_zero; /* csa_highest_slotid */
*p++ = xdr_zero; /* csa_cachethis */
xdr_encode_empty_array(p); /* csa_referring_call_lists */
hdr->nops++;
}
/*
* CB_SEQUENCE4resok
*
* struct CB_SEQUENCE4resok {
* sessionid4 csr_sessionid;
* sequenceid4 csr_sequenceid;
* slotid4 csr_slotid;
* slotid4 csr_highest_slotid;
* slotid4 csr_target_highest_slotid;
* };
*
* union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
* case NFS4_OK:
* CB_SEQUENCE4resok csr_resok4;
* default:
* void;
* };
*
 * Our current backchannel implementation supports a single backchannel
 * with a single slot.
*/
static int decode_cb_sequence4resok(struct xdr_stream *xdr,
struct nfsd4_callback *cb)
{
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
int status = -ESERVERFAULT;
__be32 *p;
u32 dummy;
/*
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
dprintk("NFS: %s Invalid session id\n", __func__);
goto out;
}
p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
dummy = be32_to_cpup(p++);
if (dummy != session->se_cb_seq_nr) {
dprintk("NFS: %s Invalid sequence number\n", __func__);
goto out;
}
dummy = be32_to_cpup(p++);
if (dummy != 0) {
dprintk("NFS: %s Invalid slotid\n", __func__);
goto out;
}
/*
* FIXME: process highest slotid and target highest slotid
*/
status = 0;
out:
cb->cb_seq_status = status;
return status;
out_overflow:
status = -EIO;
goto out;
}
static int decode_cb_sequence4res(struct xdr_stream *xdr,
struct nfsd4_callback *cb)
{
int status;
if (cb->cb_clp->cl_minorversion == 0)
return 0;
status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
if (unlikely(status || cb->cb_seq_status))
return status;
return decode_cb_sequence4resok(xdr, cb);
}
/*
* NFSv4.0 and NFSv4.1 XDR encode functions
*
* NFSv4.0 callback argument types are defined in section 15 of RFC
* 3530: "Network File System (NFS) version 4 Protocol" and section 20
* of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
* Protocol".
*/
/*
* NB: Without this zero space reservation, callbacks over krb5p fail
*/
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
const void *__unused)
{
xdr_reserve_space(xdr, 0);
}
/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
const void *data)
{
const struct nfsd4_callback *cb = data;
const struct nfs4_delegation *dp = cb_to_delegation(cb);
struct nfs4_cb_compound_hdr hdr = {
.ident = cb->cb_clp->cl_cb_ident,
.minorversion = cb->cb_clp->cl_minorversion,
};
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
encode_cb_recall4args(xdr, dp, &hdr);
encode_cb_nops(&hdr);
}
/*
* 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
*/
static void
nfs4_xdr_enc_cb_recall_any(struct rpc_rqst *req,
struct xdr_stream *xdr, const void *data)
{
const struct nfsd4_callback *cb = data;
struct nfsd4_cb_recall_any *ra;
struct nfs4_cb_compound_hdr hdr = {
.ident = cb->cb_clp->cl_cb_ident,
.minorversion = cb->cb_clp->cl_minorversion,
};
ra = container_of(cb, struct nfsd4_cb_recall_any, ra_cb);
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
encode_cb_recallany4args(xdr, &hdr, ra);
encode_cb_nops(&hdr);
}
/*
* NFSv4.0 and NFSv4.1 XDR decode functions
*
* NFSv4.0 callback result types are defined in section 15 of RFC
* 3530: "Network File System (NFS) version 4 Protocol" and section 20
* of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
* Protocol".
*/
static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
void *__unused)
{
return 0;
}
/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
return status;
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status || cb->cb_seq_status))
return status;
return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}
/*
* 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
*/
static int
nfs4_xdr_dec_cb_recall_any(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
return status;
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status || cb->cb_seq_status))
return status;
status = decode_cb_op_status(xdr, OP_CB_RECALL_ANY, &cb->cb_status);
return status;
}
#ifdef CONFIG_NFSD_PNFS
/*
* CB_LAYOUTRECALL4args
*
* struct layoutrecall_file4 {
* nfs_fh4 lor_fh;
* offset4 lor_offset;
* length4 lor_length;
* stateid4 lor_stateid;
* };
*
* union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
* case LAYOUTRECALL4_FILE:
* layoutrecall_file4 lor_layout;
* case LAYOUTRECALL4_FSID:
* fsid4 lor_fsid;
* case LAYOUTRECALL4_ALL:
* void;
* };
*
* struct CB_LAYOUTRECALL4args {
* layouttype4 clora_type;
* layoutiomode4 clora_iomode;
* bool clora_changed;
* layoutrecall4 clora_recall;
* };
*/
static void encode_cb_layout4args(struct xdr_stream *xdr,
const struct nfs4_layout_stateid *ls,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
BUG_ON(hdr->minorversion == 0);
p = xdr_reserve_space(xdr, 5 * 4);
*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
*p++ = cpu_to_be32(ls->ls_layout_type);
*p++ = cpu_to_be32(IOMODE_ANY);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(RETURN_FILE);
encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
p = xdr_reserve_space(xdr, 2 * 8);
p = xdr_encode_hyper(p, 0);
xdr_encode_hyper(p, NFS4_MAX_UINT64);
encode_stateid4(xdr, &ls->ls_recall_sid);
hdr->nops++;
}
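/*
 * Note that the range encoded above is always offset 0 through
 * NFS4_MAX_UINT64: this implementation recalls the entire layout
 * rather than a partial byte range.
 */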
static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
struct xdr_stream *xdr,
const void *data)
{
const struct nfsd4_callback *cb = data;
const struct nfs4_layout_stateid *ls =
container_of(cb, struct nfs4_layout_stateid, ls_recall);
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
};
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
encode_cb_layout4args(xdr, ls, &hdr);
encode_cb_nops(&hdr);
}
static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
return status;
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status || cb->cb_seq_status))
return status;
return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */
static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
{
__be32 *p;
p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
}
static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
struct xdr_stream *xdr,
const void *data)
{
const struct nfsd4_callback *cb = data;
const struct nfsd4_blocked_lock *nbl =
container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
};
__be32 *p;
BUG_ON(hdr.minorversion == 0);
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
encode_nfs_fh4(xdr, &nbl->nbl_fh);
encode_stateowner(xdr, &lo->lo_owner);
hdr.nops++;
encode_cb_nops(&hdr);
}
static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
return status;
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status || cb->cb_seq_status))
return status;
return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
}
/*
* struct write_response4 {
* stateid4 wr_callback_id<1>;
* length4 wr_count;
* stable_how4 wr_committed;
* verifier4 wr_writeverf;
* };
* union offload_info4 switch (nfsstat4 coa_status) {
* case NFS4_OK:
* write_response4 coa_resok4;
* default:
* length4 coa_bytes_copied;
* };
* struct CB_OFFLOAD4args {
* nfs_fh4 coa_fh;
* stateid4 coa_stateid;
* offload_info4 coa_offload_info;
* };
*/
static void encode_offload_info4(struct xdr_stream *xdr,
const struct nfsd4_cb_offload *cbo)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
*p = cbo->co_nfserr;
switch (cbo->co_nfserr) {
case nfs_ok:
p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
p = xdr_encode_empty_array(p);
p = xdr_encode_hyper(p, cbo->co_res.wr_bytes_written);
*p++ = cpu_to_be32(cbo->co_res.wr_stable_how);
p = xdr_encode_opaque_fixed(p, cbo->co_res.wr_verifier.data,
NFS4_VERIFIER_SIZE);
break;
default:
p = xdr_reserve_space(xdr, 8);
/* We always return success if bytes were written */
p = xdr_encode_hyper(p, 0);
}
}
static void encode_cb_offload4args(struct xdr_stream *xdr,
const struct nfsd4_cb_offload *cbo,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(OP_CB_OFFLOAD);
encode_nfs_fh4(xdr, &cbo->co_fh);
encode_stateid4(xdr, &cbo->co_res.cb_stateid);
encode_offload_info4(xdr, cbo);
hdr->nops++;
}
static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
struct xdr_stream *xdr,
const void *data)
{
const struct nfsd4_callback *cb = data;
const struct nfsd4_cb_offload *cbo =
container_of(cb, struct nfsd4_cb_offload, co_cb);
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
};
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
encode_cb_offload4args(xdr, cbo, &hdr);
encode_cb_nops(&hdr);
}
static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfsd4_callback *cb = data;
struct nfs4_cb_compound_hdr hdr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
return status;
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status || cb->cb_seq_status))
return status;
return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
}
/*
* RPC procedure tables
*/
#define PROC(proc, call, argtype, restype) \
[NFSPROC4_CLNT_##proc] = { \
.p_proc = NFSPROC4_CB_##call, \
.p_encode = nfs4_xdr_enc_##argtype, \
.p_decode = nfs4_xdr_dec_##restype, \
.p_arglen = NFS4_enc_##argtype##_sz, \
.p_replen = NFS4_dec_##restype##_sz, \
.p_statidx = NFSPROC4_CB_##call, \
.p_name = #proc, \
}
static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NULL, NULL, cb_null, cb_null),
PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
#ifdef CONFIG_NFSD_PNFS
PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
#endif
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
static const struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback RPC program version number: despite language in
 * RFC 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official XDR descriptions for both 4.0 and 4.1 specify version 1, and
* in practice that appears to be what implementations use. The section
* 18.36.3 language is expected to be fixed in an erratum.
*/
.number = 1,
.nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
.procs = nfs4_cb_procedures,
.counts = nfs4_cb_counts,
};
static const struct rpc_version *nfs_cb_version[2] = {
[1] = &nfs_cb_version4,
};
static const struct rpc_program cb_program;
static struct rpc_stat cb_stats = {
.program = &cb_program
};
#define NFS4_CALLBACK 0x40000000
static const struct rpc_program cb_program = {
.name = "nfs4_cb",
.number = NFS4_CALLBACK,
.nrvers = ARRAY_SIZE(nfs_cb_version),
.version = nfs_cb_version,
.stats = &cb_stats,
.pipe_dir_name = "nfsd4_cb",
};
static int max_cb_time(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
/*
* nfsd4_lease is set to at most one hour in __nfsd4_write_time,
* so we can use 32-bit math on it. Warn if that assumption
* ever stops being true.
*/
if (WARN_ON_ONCE(nn->nfsd4_lease > 3600))
return 360 * HZ;
return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
}
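/*
 * For example, with the default 90-second lease this gives each
 * callback RPC a 90 / 10 == 9 second timeout (9 * HZ jiffies); a
 * lease under 10 seconds still gets the 1-second minimum.
 */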
static struct workqueue_struct *callback_wq;
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
return queue_work(callback_wq, &cb->cb_work);
}
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
{
atomic_inc(&clp->cl_cb_inflight);
}
static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
{
if (atomic_dec_and_test(&clp->cl_cb_inflight))
wake_up_var(&clp->cl_cb_inflight);
}
static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
{
wait_var_event(&clp->cl_cb_inflight,
!atomic_read(&clp->cl_cb_inflight));
}
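/*
 * Sketch of the inflight accounting: a reference is taken whenever a
 * callback is queued (e.g. from nfsd4_run_cb()), dropped on each
 * release path (nfsd4_cb_probe_release(), nfsd41_destroy_cb()), and
 * client teardown blocks until the count drains to zero:
 *
 *	nfsd41_cb_inflight_begin(clp);		callback queued
 *	...					RPC runs asynchronously
 *	nfsd41_cb_inflight_end(clp);		rpc_release path
 *	nfsd41_cb_inflight_wait_complete(clp);	nfsd4_shutdown_callback()
 */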
static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
client->cl_principal = clp->cl_cred.cr_targ_princ ?
clp->cl_cred.cr_targ_princ : "nfs";
return get_cred(rpc_machine_cred());
} else {
struct cred *kcred;
kcred = prepare_kernel_cred(&init_task);
if (!kcred)
return NULL;
kcred->fsuid = ses->se_cb_sec.uid;
kcred->fsgid = ses->se_cb_sec.gid;
return kcred;
}
}
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
int maxtime = max_cb_time(clp->net);
struct rpc_timeout timeparms = {
.to_initval = maxtime,
.to_retries = 0,
.to_maxval = maxtime,
};
struct rpc_create_args args = {
.net = clp->net,
.address = (struct sockaddr *) &conn->cb_addr,
.addrsize = conn->cb_addrlen,
.saddress = (struct sockaddr *) &conn->cb_saddr,
.timeout = &timeparms,
.program = &cb_program,
.version = 1,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
.cred = current_cred(),
};
struct rpc_clnt *client;
const struct cred *cred;
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
}
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog;
args.protocol = XPRT_TRANSPORT_TCP;
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
if (!conn->cb_xprt)
return -EINVAL;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
XPRT_TRANSPORT_BC;
args.authflavor = ses->se_cb_sec.flavor;
}
/* Create RPC client */
client = rpc_create(&args);
if (IS_ERR(client)) {
trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
return PTR_ERR(client);
}
cred = get_backchannel_cred(clp, client, ses);
if (!cred) {
trace_nfsd_cb_setup_err(clp, -ENOMEM);
rpc_shutdown_client(client);
return -ENOMEM;
}
if (clp->cl_minorversion != 0)
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
rcu_read_lock();
trace_nfsd_cb_setup(clp, rpc_peeraddr2str(client, RPC_DISPLAY_NETID),
args.authflavor);
rcu_read_unlock();
return 0;
}
static void nfsd4_mark_cb_state(struct nfs4_client *clp, int newstate)
{
if (clp->cl_cb_state != newstate) {
clp->cl_cb_state = newstate;
trace_nfsd_cb_state(clp);
}
}
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
nfsd4_mark_cb_state(clp, NFSD4_CB_DOWN);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
nfsd4_mark_cb_state(clp, NFSD4_CB_FAULT);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
if (task->tk_status)
nfsd4_mark_cb_down(clp, task->tk_status);
else
nfsd4_mark_cb_state(clp, NFSD4_CB_UP);
}
static void nfsd4_cb_probe_release(void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
nfsd41_cb_inflight_end(clp);
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
/* XXX: release method to ensure we set the cb channel down if
* necessary on early failure? */
.rpc_call_done = nfsd4_cb_probe_done,
.rpc_release = nfsd4_cb_probe_release,
};
/*
* Poke the callback thread to process any updates to the callback
* parameters, and send a null probe.
*/
void nfsd4_probe_callback(struct nfs4_client *clp)
{
trace_nfsd_cb_probe(clp);
nfsd4_mark_cb_state(clp, NFSD4_CB_UNKNOWN);
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
nfsd4_run_cb(&clp->cl_cb_null);
}
void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
nfsd4_probe_callback(clp);
flush_workqueue(callback_wq);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
nfsd4_mark_cb_state(clp, NFSD4_CB_UNKNOWN);
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
}
/*
* There's currently a single callback channel slot.
 * If the slot is available, mark it busy.  Otherwise, put the task to
 * sleep on the callback RPC wait queue.
*/
static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
{
struct nfs4_client *clp = cb->cb_clp;
if (!cb->cb_holds_slot &&
test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
/* Race breaker */
if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
dprintk("%s slot is busy\n", __func__);
return false;
}
rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
}
cb->cb_holds_slot = true;
return true;
}
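/*
 * The "race breaker" above closes the window where the slot holder
 * releases the slot (and calls rpc_wake_up_next()) after our failed
 * test_and_set_bit() but before rpc_sleep_on() has queued this task:
 * re-testing the bit afterwards either wins the slot, in which case
 * the task is woken straight back up, or leaves it safely asleep,
 * since any later release is guaranteed to find it on the wait queue.
 */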
static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
if (cb->cb_holds_slot) {
cb->cb_holds_slot = false;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
}
}
static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
nfsd41_cb_release_slot(cb);
if (cb->cb_ops && cb->cb_ops->release)
cb->cb_ops->release(cb);
nfsd41_cb_inflight_end(clp);
}
/*
* TODO: cb_sequence should support referring call lists, cachethis, multiple
* slots, and mark callback channel down on communication errors.
*/
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
u32 minorversion = clp->cl_minorversion;
/*
* cb_seq_status is only set in decode_cb_sequence4res,
	 * and so will remain 1 if an RPC-level failure occurs.
*/
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion && !nfsd41_cb_get_slot(cb, task))
return;
rpc_call_start(task);
}
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
struct nfsd4_session *session = clp->cl_cb_session;
bool ret = true;
if (!clp->cl_minorversion) {
/*
* If the backchannel connection was shut down while this
* task was queued, we need to resubmit it after setting up
* a new backchannel connection.
*
* Note that if we lost our callback connection permanently
* the submission code will error out, so we don't need to
* handle that case here.
*/
if (RPC_SIGNALLED(task))
goto need_restart;
return true;
}
if (!cb->cb_holds_slot)
goto need_restart;
switch (cb->cb_seq_status) {
case 0:
/*
* No need for lock, access serialized in nfsd4_cb_prepare
*
* RFC5661 20.9.3
* If CB_SEQUENCE returns an error, then the state of the slot
* (sequence ID, cached reply) MUST NOT change.
*/
++session->se_cb_seq_nr;
break;
case -ESERVERFAULT:
++session->se_cb_seq_nr;
fallthrough;
case 1:
case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
ret = false;
break;
case -NFS4ERR_DELAY:
if (!rpc_restart_call(task))
goto out;
rpc_delay(task, 2 * HZ);
return false;
case -NFS4ERR_BADSLOT:
goto retry_nowait;
case -NFS4ERR_SEQ_MISORDERED:
if (session->se_cb_seq_nr != 1) {
session->se_cb_seq_nr = 1;
goto retry_nowait;
}
break;
default:
nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
dprintk("%s: unprocessed error %d\n", __func__,
cb->cb_seq_status);
}
nfsd41_cb_release_slot(cb);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
clp->cl_cb_session->se_cb_seq_nr);
if (RPC_SIGNALLED(task))
goto need_restart;
out:
return ret;
retry_nowait:
if (rpc_restart_call_prepare(task))
ret = false;
goto out;
need_restart:
if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
task->tk_status = 0;
cb->cb_need_restart = true;
}
return false;
}
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
if (!nfsd4_cb_sequence_done(task, cb))
return;
if (cb->cb_status) {
WARN_ON_ONCE(task->tk_status);
task->tk_status = cb->cb_status;
}
switch (cb->cb_ops->done(cb, task)) {
case 0:
task->tk_status = 0;
rpc_restart_call_prepare(task);
return;
case 1:
switch (task->tk_status) {
case -EIO:
case -ETIMEDOUT:
case -EACCES:
nfsd4_mark_cb_down(clp, task->tk_status);
}
break;
default:
BUG();
}
}
static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
if (cb->cb_need_restart)
nfsd4_queue_cb(cb);
else
nfsd41_destroy_cb(cb);
}
static const struct rpc_call_ops nfsd4_cb_ops = {
.rpc_call_prepare = nfsd4_cb_prepare,
.rpc_call_done = nfsd4_cb_done,
.rpc_release = nfsd4_cb_release,
};
int nfsd4_create_callback_queue(void)
{
callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
if (!callback_wq)
return -ENOMEM;
return 0;
}
void nfsd4_destroy_callback_queue(void)
{
destroy_workqueue(callback_wq);
}
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
if (clp->cl_cb_state != NFSD4_CB_UNKNOWN)
trace_nfsd_cb_shutdown(clp);
set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
/*
* Note this won't actually result in a null callback;
* instead, nfsd4_run_cb_null() will detect the killed
* client, destroy the rpc client, and stop:
*/
nfsd4_run_cb(&clp->cl_cb_null);
flush_workqueue(callback_wq);
nfsd41_cb_inflight_wait_complete(clp);
}
/* requires cl_lock: */
static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
{
struct nfsd4_session *s;
struct nfsd4_conn *c;
list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
list_for_each_entry(c, &s->se_conns, cn_persession) {
if (c->cn_flags & NFS4_CDFC4_BACK)
return c;
}
}
return NULL;
}
/*
* Note there isn't a lot of locking in this code; instead we depend on
* the fact that it is run from the callback_wq, which won't run two
* work items at once. So, for example, callback_wq handles all access
* of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
*/
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
struct nfs4_cb_conn conn;
struct nfs4_client *clp = cb->cb_clp;
struct nfsd4_session *ses = NULL;
struct nfsd4_conn *c;
int err;
/*
* This is either an update, or the client dying; in either case,
* kill the old client:
*/
if (clp->cl_cb_client) {
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
clp->cl_cb_cred = NULL;
}
if (clp->cl_cb_conn.cb_xprt) {
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
clp->cl_cb_conn.cb_xprt = NULL;
}
if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
return;
spin_lock(&clp->cl_lock);
/*
* Only serialized callback code is allowed to clear these
* flags; main nfsd code can only set them:
*/
BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp);
if (c) {
svc_xprt_get(c->cn_xprt);
conn.cb_xprt = c->cn_xprt;
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
err = setup_callback_client(clp, &conn, ses);
if (err) {
nfsd4_mark_cb_down(clp, err);
if (c)
svc_xprt_put(c->cn_xprt);
return;
}
}
static void
nfsd4_run_cb_work(struct work_struct *work)
{
struct nfsd4_callback *cb =
container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
int flags;
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
if (cb->cb_ops && cb->cb_ops->prepare)
cb->cb_ops->prepare(cb);
}
if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client;
if (!clnt) {
/* Callback channel broken, or client killed; give up: */
nfsd41_destroy_cb(cb);
return;
}
/*
* Don't send probe messages for 4.1 or later.
*/
if (!cb->cb_ops && clp->cl_minorversion) {
nfsd4_mark_cb_state(clp, NFSD4_CB_UP);
nfsd41_destroy_cb(cb);
return;
}
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
}
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
{
cb->cb_clp = clp;
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
cb->cb_seq_status = 1;
cb->cb_status = 0;
cb->cb_need_restart = false;
cb->cb_holds_slot = false;
}
/**
* nfsd4_run_cb - queue up a callback job to run
* @cb: callback to queue
*
 * Kick off a callback to do its thing.
 *
 * Return: %false if the callback was already queued, %true otherwise.
*/
bool nfsd4_run_cb(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
bool queued;
nfsd41_cb_inflight_begin(clp);
queued = nfsd4_queue_cb(cb);
if (!queued)
nfsd41_cb_inflight_end(clp);
return queued;
}
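/*
 * Usage sketch (hypothetical wrapper): callers embed a struct
 * nfsd4_callback in their own object, initialize it once with
 * nfsd4_init_cb(), and queue it with nfsd4_run_cb() whenever an event
 * must reach the client. The names below (nfsd4_cb_recall_ops,
 * NFSPROC4_CLNT_CB_RECALL, dl_recall) are the ones the delegation code
 * in nfs4state.c uses with this API; the wrapper function itself is
 * invented for illustration.
 *
 *	static void example_recall_delegation(struct nfs4_delegation *dp)
 *	{
 *		nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
 *			      &nfsd4_cb_recall_ops,
 *			      NFSPROC4_CLNT_CB_RECALL);
 *		nfsd4_run_cb(&dp->dl_recall);
 *	}
 */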
| linux-master | fs/nfsd/nfs4callback.c |
/*
* Server-side XDR for NFSv4
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <[email protected]>
* Andy Adamson <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <uapi/linux/xattr.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
#endif
#define NFSDDBG_FACILITY NFSDDBG_XDR
const u32 nfsd_suppattrs[3][3] = {
{NFSD4_SUPPORTED_ATTRS_WORD0,
NFSD4_SUPPORTED_ATTRS_WORD1,
NFSD4_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_1_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_2_SUPPORTED_ATTRS_WORD2},
};
/*
 * As per the referral draft, the fsid for a referral MUST be different
 * from the fsid of the containing directory, in order to indicate to
 * the client that a filesystem boundary is present. We use a fixed
 * fsid for a referral.
*/
#define NFS4_REFERRAL_FSID_MAJOR 0x8000000ULL
#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
static __be32
check_filename(char *str, int len)
{
int i;
if (len == 0)
return nfserr_inval;
if (len > NFS4_MAXNAMLEN)
return nfserr_nametoolong;
if (isdotent(str, len))
return nfserr_badname;
for (i = 0; i < len; i++)
if (str[i] == '/')
return nfserr_badname;
return 0;
}
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
}
/**
* svcxdr_tmpalloc - allocate memory to be freed after compound processing
* @argp: NFSv4 compound argument structure
* @len: length of buffer to allocate
*
* Allocates a buffer of size @len to be freed when processing the compound
 * operation described in @argp finishes.
 *
 * Return: a pointer to the allocated buffer, or %NULL on failure.
 */
static void *
svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
{
struct svcxdr_tmpbuf *tb;
tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
if (!tb)
return NULL;
tb->next = argp->to_free;
argp->to_free = tb;
return tb->buf;
}
/*
 * For XDR strings that need to be passed to other kernel APIs
* as null-terminated strings.
*
* Note null-terminating in place usually isn't safe since the
* buffer might end on a page boundary.
*/
static char *
svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
{
char *p = svcxdr_tmpalloc(argp, len + 1);
if (!p)
return NULL;
memcpy(p, buf, len);
p[len] = '\0';
return p;
}
static void *
svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
{
__be32 *tmp;
	/*
	 * If @p does not point into the xdr_stream's scratch buffer,
	 * the decoded item lies wholly within the receive buffer and
	 * its location is stable, so @p is OK to use as-is. This is
	 * the common case. Otherwise the item straddled a page
	 * boundary and was assembled in the scratch buffer, which the
	 * next decode will reuse, so a private copy must be saved.
	 */
if (p != argp->xdr->scratch.iov_base)
return p;
tmp = svcxdr_tmpalloc(argp, len);
if (!tmp)
return NULL;
memcpy(tmp, p, len);
return tmp;
}
/*
* NFSv4 basic data type decoders
*/
/*
* This helper handles variable-length opaques which belong to protocol
* elements that this implementation does not support.
*/
static __be32
nfsd4_decode_ignored_string(struct nfsd4_compoundargs *argp, u32 maxlen)
{
u32 len;
if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
return nfserr_bad_xdr;
if (maxlen && len > maxlen)
return nfserr_bad_xdr;
if (!xdr_inline_decode(argp->xdr, len))
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
__be32 *p;
u32 len;
if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
return nfserr_bad_xdr;
if (len == 0 || len > NFS4_OPAQUE_LIMIT)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, len);
if (!p)
return nfserr_bad_xdr;
o->data = svcxdr_savemem(argp, p, len);
if (!o->data)
return nfserr_jukebox;
o->len = len;
return nfs_ok;
}
static __be32
nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
{
__be32 *p, status;
if (xdr_stream_decode_u32(argp->xdr, lenp) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, *lenp);
if (!p)
return nfserr_bad_xdr;
status = check_filename((char *)p, *lenp);
if (status)
return status;
*namp = svcxdr_savemem(argp, p, *lenp);
if (!*namp)
return nfserr_jukebox;
return nfs_ok;
}
static __be32
nfsd4_decode_nfstime4(struct nfsd4_compoundargs *argp, struct timespec64 *tv)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, XDR_UNIT * 3);
if (!p)
return nfserr_bad_xdr;
p = xdr_decode_hyper(p, &tv->tv_sec);
tv->tv_nsec = be32_to_cpup(p++);
if (tv->tv_nsec >= (u32)1000000000)
return nfserr_inval;
return nfs_ok;
}
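/*
 * Wire-format sketch: an nfstime4 occupies three XDR words, a signed
 * 64-bit seconds count followed by an unsigned 32-bit nanoseconds
 * field that must be less than one billion (RFC 8881):
 *
 *	struct nfstime4 {
 *		int64_t		seconds;
 *		uint32_t	nseconds;
 *	};
 */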
static __be32
nfsd4_decode_verifier4(struct nfsd4_compoundargs *argp, nfs4_verifier *verf)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_bad_xdr;
memcpy(verf->data, p, sizeof(verf->data));
return nfs_ok;
}
/**
* nfsd4_decode_bitmap4 - Decode an NFSv4 bitmap4
* @argp: NFSv4 compound argument structure
* @bmval: pointer to an array of u32's to decode into
* @bmlen: size of the @bmval array
*
* The server needs to return nfs_ok rather than nfserr_bad_xdr when
* encountering bitmaps containing bits it does not recognize. This
* includes bits in bitmap words past WORDn, where WORDn is the last
* bitmap WORD the implementation currently supports. Thus we are
* careful here to simply ignore bits in bitmap words that this
* implementation has yet to support explicitly.
*
* Return values:
* %nfs_ok: @bmval populated successfully
* %nfserr_bad_xdr: the encoded bitmap was invalid
*/
static __be32
nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
{
ssize_t status;
status = xdr_stream_decode_uint32_array(argp->xdr, bmval, bmlen);
return status == -EBADMSG ? nfserr_bad_xdr : nfs_ok;
}
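/*
 * Worked example (values hypothetical): a client asking for just the
 * size and mode attributes encodes the bitmap4 as three XDR words:
 *
 *	2		count of mask words that follow
 *	0x00000010	word 0: bit 4, FATTR4_WORD0_SIZE
 *	0x00000002	word 1: bit 33 - 32 = 1, FATTR4_WORD1_MODE
 *
 * Note that only -EBADMSG is treated as a decoding error above, so a
 * bitmap carrying more words than @bmlen is tolerated rather than
 * rejected, per the kernel-doc comment.
 */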
static __be32
nfsd4_decode_nfsace4(struct nfsd4_compoundargs *argp, struct nfs4_ace *ace)
{
__be32 *p, status;
u32 length;
if (xdr_stream_decode_u32(argp->xdr, &ace->type) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &ace->flag) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &ace->access_mask) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &length) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, length);
if (!p)
return nfserr_bad_xdr;
ace->whotype = nfs4_acl_get_whotype((char *)p, length);
if (ace->whotype != NFS4_ACL_WHO_NAMED)
status = nfs_ok;
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
status = nfsd_map_name_to_gid(argp->rqstp,
(char *)p, length, &ace->who_gid);
else
status = nfsd_map_name_to_uid(argp->rqstp,
(char *)p, length, &ace->who_uid);
return status;
}
/* A counted array of nfsace4's */
static noinline __be32
nfsd4_decode_acl(struct nfsd4_compoundargs *argp, struct nfs4_acl **acl)
{
struct nfs4_ace *ace;
__be32 status;
u32 count;
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
if (count > xdr_stream_remaining(argp->xdr) / 20)
		/*
		 * An nfsace4 with a 4-byte who name consumes 20 XDR
		 * bytes: three 4-byte fixed fields, a 4-byte name
		 * length, and 4 name bytes. Even with names that
		 * short there wouldn't be space for that many aces;
		 * something fishy is going on:
		 */
return nfserr_fbig;
*acl = svcxdr_tmpalloc(argp, nfs4_acl_bytes(count));
if (*acl == NULL)
return nfserr_jukebox;
(*acl)->naces = count;
for (ace = (*acl)->aces; ace < (*acl)->aces + count; ace++) {
status = nfsd4_decode_nfsace4(argp, ace);
if (status)
return status;
}
return nfs_ok;
}
static noinline __be32
nfsd4_decode_security_label(struct nfsd4_compoundargs *argp,
struct xdr_netobj *label)
{
u32 lfs, pi, length;
__be32 *p;
if (xdr_stream_decode_u32(argp->xdr, &lfs) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &pi) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &length) < 0)
return nfserr_bad_xdr;
if (length > NFS4_MAXLABELLEN)
return nfserr_badlabel;
p = xdr_inline_decode(argp->xdr, length);
if (!p)
return nfserr_bad_xdr;
label->len = length;
label->data = svcxdr_dupstr(argp, p, length);
if (!label->data)
return nfserr_jukebox;
return nfs_ok;
}
static __be32
nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
struct iattr *iattr, struct nfs4_acl **acl,
struct xdr_netobj *label, int *umask)
{
unsigned int starting_pos;
u32 attrlist4_count;
__be32 *p, status;
iattr->ia_valid = 0;
status = nfsd4_decode_bitmap4(argp, bmval, bmlen);
if (status)
return nfserr_bad_xdr;
if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
|| bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
|| bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2) {
if (nfsd_attrs_supported(argp->minorversion, bmval))
return nfserr_inval;
return nfserr_attrnotsupp;
}
if (xdr_stream_decode_u32(argp->xdr, &attrlist4_count) < 0)
return nfserr_bad_xdr;
starting_pos = xdr_stream_pos(argp->xdr);
if (bmval[0] & FATTR4_WORD0_SIZE) {
u64 size;
if (xdr_stream_decode_u64(argp->xdr, &size) < 0)
return nfserr_bad_xdr;
iattr->ia_size = size;
iattr->ia_valid |= ATTR_SIZE;
}
if (bmval[0] & FATTR4_WORD0_ACL) {
status = nfsd4_decode_acl(argp, acl);
if (status)
return status;
} else
*acl = NULL;
if (bmval[1] & FATTR4_WORD1_MODE) {
u32 mode;
if (xdr_stream_decode_u32(argp->xdr, &mode) < 0)
return nfserr_bad_xdr;
iattr->ia_mode = mode;
iattr->ia_mode &= (S_IFMT | S_IALLUGO);
iattr->ia_valid |= ATTR_MODE;
}
if (bmval[1] & FATTR4_WORD1_OWNER) {
u32 length;
if (xdr_stream_decode_u32(argp->xdr, &length) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, length);
if (!p)
return nfserr_bad_xdr;
status = nfsd_map_name_to_uid(argp->rqstp, (char *)p, length,
&iattr->ia_uid);
if (status)
return status;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
u32 length;
if (xdr_stream_decode_u32(argp->xdr, &length) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, length);
if (!p)
return nfserr_bad_xdr;
status = nfsd_map_name_to_gid(argp->rqstp, (char *)p, length,
&iattr->ia_gid);
if (status)
return status;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
u32 set_it;
if (xdr_stream_decode_u32(argp->xdr, &set_it) < 0)
return nfserr_bad_xdr;
switch (set_it) {
case NFS4_SET_TO_CLIENT_TIME:
status = nfsd4_decode_nfstime4(argp, &iattr->ia_atime);
if (status)
return status;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_ATIME;
break;
default:
return nfserr_bad_xdr;
}
}
if (bmval[1] & FATTR4_WORD1_TIME_CREATE) {
struct timespec64 ts;
/* No Linux filesystem supports setting this attribute. */
bmval[1] &= ~FATTR4_WORD1_TIME_CREATE;
status = nfsd4_decode_nfstime4(argp, &ts);
if (status)
return status;
}
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
u32 set_it;
if (xdr_stream_decode_u32(argp->xdr, &set_it) < 0)
return nfserr_bad_xdr;
switch (set_it) {
case NFS4_SET_TO_CLIENT_TIME:
status = nfsd4_decode_nfstime4(argp, &iattr->ia_mtime);
if (status)
return status;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_MTIME;
break;
default:
return nfserr_bad_xdr;
}
}
label->len = 0;
if (IS_ENABLED(CONFIG_NFSD_V4_SECURITY_LABEL) &&
bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_decode_security_label(argp, label);
if (status)
return status;
}
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
u32 mode, mask;
if (!umask)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &mode) < 0)
return nfserr_bad_xdr;
iattr->ia_mode = mode & (S_IFMT | S_IALLUGO);
if (xdr_stream_decode_u32(argp->xdr, &mask) < 0)
return nfserr_bad_xdr;
*umask = mask & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
	/* request sanity: did attrlist4 contain the expected number of bytes? */
if (attrlist4_count != xdr_stream_pos(argp->xdr) - starting_pos)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_stateid4(struct nfsd4_compoundargs *argp, stateid_t *sid)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, NFS4_STATEID_SIZE);
if (!p)
return nfserr_bad_xdr;
sid->si_generation = be32_to_cpup(p++);
memcpy(&sid->si_opaque, p, sizeof(sid->si_opaque));
return nfs_ok;
}
static __be32
nfsd4_decode_clientid4(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, sizeof(__be64));
if (!p)
return nfserr_bad_xdr;
memcpy(clientid, p, sizeof(*clientid));
return nfs_ok;
}
static __be32
nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
clientid_t *clientid, struct xdr_netobj *owner)
{
__be32 status;
status = nfsd4_decode_clientid4(argp, clientid);
if (status)
return status;
return nfsd4_decode_opaque(argp, owner);
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
struct nfsd4_deviceid *devid)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
if (!p)
return nfserr_bad_xdr;
memcpy(devid, p, sizeof(*devid));
return nfs_ok;
}
static __be32
nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0)
return nfserr_bad_xdr;
if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES)
return nfserr_bad_xdr;
if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0)
return nfserr_bad_xdr;
if (lcp->lc_up_len > 0) {
lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len);
if (!lcp->lc_up_layout)
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_layoutreturn4(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutreturn *lrp)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &lrp->lr_return_type) < 0)
return nfserr_bad_xdr;
switch (lrp->lr_return_type) {
case RETURN_FILE:
if (xdr_stream_decode_u64(argp->xdr, &lrp->lr_seg.offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lrp->lr_seg.length) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &lrp->lr_sid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &lrp->lrf_body_len) < 0)
return nfserr_bad_xdr;
if (lrp->lrf_body_len > 0) {
lrp->lrf_body = xdr_inline_decode(argp->xdr, lrp->lrf_body_len);
if (!lrp->lrf_body)
return nfserr_bad_xdr;
}
break;
case RETURN_FSID:
case RETURN_ALL:
lrp->lr_seg.offset = 0;
lrp->lr_seg.length = NFS4_MAX_UINT64;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd4_decode_sessionid4(struct nfsd4_compoundargs *argp,
struct nfs4_sessionid *sessionid)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, NFS4_MAX_SESSIONID_LEN);
if (!p)
return nfserr_bad_xdr;
memcpy(sessionid->data, p, sizeof(sessionid->data));
return nfs_ok;
}
/* Defined in Appendix A of RFC 5531 */
static __be32
nfsd4_decode_authsys_parms(struct nfsd4_compoundargs *argp,
struct nfsd4_cb_sec *cbs)
{
u32 stamp, gidcount, uid, gid;
__be32 *p, status;
if (xdr_stream_decode_u32(argp->xdr, &stamp) < 0)
return nfserr_bad_xdr;
/* machine name */
status = nfsd4_decode_ignored_string(argp, 255);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &uid) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &gid) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &gidcount) < 0)
return nfserr_bad_xdr;
if (gidcount > 16)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, gidcount << 2);
if (!p)
return nfserr_bad_xdr;
if (cbs->flavor == (u32)(-1)) {
struct user_namespace *userns = nfsd_user_namespace(argp->rqstp);
kuid_t kuid = make_kuid(userns, uid);
kgid_t kgid = make_kgid(userns, gid);
if (uid_valid(kuid) && gid_valid(kgid)) {
cbs->uid = kuid;
cbs->gid = kgid;
cbs->flavor = RPC_AUTH_UNIX;
} else {
dprintk("RPC_AUTH_UNIX with invalid uid or gid, ignoring!\n");
}
}
return nfs_ok;
}
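/*
 * For reference, the structure decoded above is authsys_parms from
 * RFC 5531, Appendix A:
 *
 *	struct authsys_parms {
 *		unsigned int	stamp;
 *		string		machinename<255>;
 *		unsigned int	uid;
 *		unsigned int	gid;
 *		unsigned int	gids<16>;
 *	};
 */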
static __be32
nfsd4_decode_gss_cb_handles4(struct nfsd4_compoundargs *argp,
struct nfsd4_cb_sec *cbs)
{
__be32 status;
u32 service;
dprintk("RPC_AUTH_GSS callback secflavor not supported!\n");
if (xdr_stream_decode_u32(argp->xdr, &service) < 0)
return nfserr_bad_xdr;
if (service < RPC_GSS_SVC_NONE || service > RPC_GSS_SVC_PRIVACY)
return nfserr_bad_xdr;
/* gcbp_handle_from_server */
status = nfsd4_decode_ignored_string(argp, 0);
if (status)
return status;
/* gcbp_handle_from_client */
status = nfsd4_decode_ignored_string(argp, 0);
if (status)
return status;
return nfs_ok;
}
/* a counted array of callback_sec_parms4 items */
static __be32
nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
{
u32 i, secflavor, nr_secflavs;
__be32 status;
	/* callback_sec_parms4 */
if (xdr_stream_decode_u32(argp->xdr, &nr_secflavs) < 0)
return nfserr_bad_xdr;
if (nr_secflavs)
cbs->flavor = (u32)(-1);
else
/* Is this legal? Be generous, take it to mean AUTH_NONE: */
cbs->flavor = 0;
for (i = 0; i < nr_secflavs; ++i) {
if (xdr_stream_decode_u32(argp->xdr, &secflavor) < 0)
return nfserr_bad_xdr;
switch (secflavor) {
case RPC_AUTH_NULL:
/* void */
if (cbs->flavor == (u32)(-1))
cbs->flavor = RPC_AUTH_NULL;
break;
case RPC_AUTH_UNIX:
status = nfsd4_decode_authsys_parms(argp, cbs);
if (status)
return status;
break;
case RPC_AUTH_GSS:
status = nfsd4_decode_gss_cb_handles4(argp, cbs);
if (status)
return status;
break;
default:
return nfserr_inval;
}
}
return nfs_ok;
}
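/*
 * For reference, each array element decoded above is a
 * callback_sec_parms4 from RFC 8881:
 *
 *	union callback_sec_parms4 switch (uint32_t cb_secflavor) {
 *	case AUTH_NONE:
 *		void;
 *	case AUTH_SYS:
 *		authsys_parms	cbsp_sys_cred;
 *	case RPCSEC_GSS:
 *		gss_cb_handles4	cbsp_gss_handles;
 *	};
 */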
/*
* NFSv4 operation argument decoders
*/
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_access *access = &u->access;
if (xdr_stream_decode_u32(argp->xdr, &access->ac_req_access) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_close(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_close *close = &u->close;
if (xdr_stream_decode_u32(argp->xdr, &close->cl_seqid) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_stateid4(argp, &close->cl_stateid);
}
static __be32
nfsd4_decode_commit(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_commit *commit = &u->commit;
if (xdr_stream_decode_u64(argp->xdr, &commit->co_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &commit->co_count) < 0)
return nfserr_bad_xdr;
memset(&commit->co_verf, 0, sizeof(commit->co_verf));
return nfs_ok;
}
static __be32
nfsd4_decode_create(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_create *create = &u->create;
__be32 *p, status;
memset(create, 0, sizeof(*create));
if (xdr_stream_decode_u32(argp->xdr, &create->cr_type) < 0)
return nfserr_bad_xdr;
switch (create->cr_type) {
case NF4LNK:
if (xdr_stream_decode_u32(argp->xdr, &create->cr_datalen) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, create->cr_datalen);
if (!p)
return nfserr_bad_xdr;
create->cr_data = svcxdr_dupstr(argp, p, create->cr_datalen);
if (!create->cr_data)
return nfserr_jukebox;
break;
case NF4BLK:
case NF4CHR:
if (xdr_stream_decode_u32(argp->xdr, &create->cr_specdata1) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &create->cr_specdata2) < 0)
return nfserr_bad_xdr;
break;
case NF4SOCK:
case NF4FIFO:
case NF4DIR:
default:
break;
}
status = nfsd4_decode_component4(argp, &create->cr_name,
&create->cr_namelen);
if (status)
return status;
status = nfsd4_decode_fattr4(argp, create->cr_bmval,
ARRAY_SIZE(create->cr_bmval),
&create->cr_iattr, &create->cr_acl,
&create->cr_label, &create->cr_umask);
if (status)
return status;
return nfs_ok;
}
static inline __be32
nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_delegreturn *dr = &u->delegreturn;
return nfsd4_decode_stateid4(argp, &dr->dr_stateid);
}
static inline __be32
nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_getattr *getattr = &u->getattr;
memset(getattr, 0, sizeof(*getattr));
return nfsd4_decode_bitmap4(argp, getattr->ga_bmval,
ARRAY_SIZE(getattr->ga_bmval));
}
static __be32
nfsd4_decode_link(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_link *link = &u->link;
memset(link, 0, sizeof(*link));
return nfsd4_decode_component4(argp, &link->li_name, &link->li_namelen);
}
static __be32
nfsd4_decode_open_to_lock_owner4(struct nfsd4_compoundargs *argp,
struct nfsd4_lock *lock)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &lock->lk_new_open_seqid) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &lock->lk_new_open_stateid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &lock->lk_new_lock_seqid) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_state_owner4(argp, &lock->lk_new_clientid,
&lock->lk_new_owner);
}
static __be32
nfsd4_decode_exist_lock_owner4(struct nfsd4_compoundargs *argp,
struct nfsd4_lock *lock)
{
__be32 status;
status = nfsd4_decode_stateid4(argp, &lock->lk_old_lock_stateid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &lock->lk_old_lock_seqid) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_locker4(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
if (xdr_stream_decode_bool(argp->xdr, &lock->lk_is_new) < 0)
return nfserr_bad_xdr;
if (lock->lk_is_new)
return nfsd4_decode_open_to_lock_owner4(argp, lock);
return nfsd4_decode_exist_lock_owner4(argp, lock);
}
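/*
 * For reference, the union decoded above is locker4 from RFC 7530,
 * a switched union discriminated by new_lock_owner:
 *
 *	union locker4 switch (bool new_lock_owner) {
 *	case TRUE:
 *		open_to_lock_owner4	open_owner;
 *	case FALSE:
 *		exist_lock_owner4	lock_owner;
 *	};
 */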
static __be32
nfsd4_decode_lock(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_lock *lock = &u->lock;
memset(lock, 0, sizeof(*lock));
if (xdr_stream_decode_u32(argp->xdr, &lock->lk_type) < 0)
return nfserr_bad_xdr;
if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
return nfserr_bad_xdr;
if (xdr_stream_decode_bool(argp->xdr, &lock->lk_reclaim) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lock->lk_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lock->lk_length) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_locker4(argp, lock);
}
static __be32
nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_lockt *lockt = &u->lockt;
memset(lockt, 0, sizeof(*lockt));
if (xdr_stream_decode_u32(argp->xdr, &lockt->lt_type) < 0)
return nfserr_bad_xdr;
if ((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lockt->lt_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lockt->lt_length) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_state_owner4(argp, &lockt->lt_clientid,
&lockt->lt_owner);
}
static __be32
nfsd4_decode_locku(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_locku *locku = &u->locku;
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &locku->lu_type) < 0)
return nfserr_bad_xdr;
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &locku->lu_seqid) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &locku->lu_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &locku->lu_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &locku->lu_length) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_lookup *lookup = &u->lookup;
return nfsd4_decode_component4(argp, &lookup->lo_name, &lookup->lo_len);
}
static __be32
nfsd4_decode_createhow4(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &open->op_createmode) < 0)
return nfserr_bad_xdr;
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr4(argp, open->op_bmval,
ARRAY_SIZE(open->op_bmval),
&open->op_iattr, &open->op_acl,
&open->op_label, &open->op_umask);
if (status)
return status;
break;
case NFS4_CREATE_EXCLUSIVE:
status = nfsd4_decode_verifier4(argp, &open->op_verf);
if (status)
return status;
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1)
return nfserr_bad_xdr;
status = nfsd4_decode_verifier4(argp, &open->op_verf);
if (status)
return status;
status = nfsd4_decode_fattr4(argp, open->op_bmval,
ARRAY_SIZE(open->op_bmval),
&open->op_iattr, &open->op_acl,
&open->op_label, &open->op_umask);
if (status)
return status;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_openflag4(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &open->op_create) < 0)
return nfserr_bad_xdr;
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
break;
case NFS4_OPEN_CREATE:
status = nfsd4_decode_createhow4(argp, open);
if (status)
return status;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access,
			  u32 *deleg_want, u32 *deleg_when)
{
u32 w;
if (xdr_stream_decode_u32(argp->xdr, &w) < 0)
return nfserr_bad_xdr;
*share_access = w & NFS4_SHARE_ACCESS_MASK;
*deleg_want = w & NFS4_SHARE_WANT_MASK;
if (deleg_when)
*deleg_when = w & NFS4_SHARE_WHEN_MASK;
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_ACCESS_MASK;
if (!w)
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
switch (w & NFS4_SHARE_WANT_MASK) {
case NFS4_SHARE_WANT_NO_PREFERENCE:
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
case NFS4_SHARE_WANT_NO_DELEG:
case NFS4_SHARE_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
if (!deleg_when) /* open_downgrade */
return nfserr_inval;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
return nfs_ok;
}
return nfserr_bad_xdr;
}
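/*
 * Worked example (values hypothetical): an NFSv4.1 OPEN requesting
 * read access plus a read delegation sends the single word
 *
 *	NFS4_SHARE_ACCESS_READ | NFS4_SHARE_WANT_READ_DELEG
 *
 * which the helper above splits into *share_access =
 * NFS4_SHARE_ACCESS_READ and *deleg_want = NFS4_SHARE_WANT_READ_DELEG,
 * leaving the when bits zero.
 */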
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
if (xdr_stream_decode_u32(argp->xdr, x) < 0)
return nfserr_bad_xdr;
/* Note: unlike access bits, deny bits may be zero. */
if (*x & ~NFS4_SHARE_DENY_BOTH)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_open_claim4(struct nfsd4_compoundargs *argp,
struct nfsd4_open *open)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &open->op_claim_type) < 0)
return nfserr_bad_xdr;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
status = nfsd4_decode_component4(argp, &open->op_fname,
&open->op_fnamelen);
if (status)
return status;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
if (xdr_stream_decode_u32(argp->xdr, &open->op_delegate_type) < 0)
return nfserr_bad_xdr;
break;
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
status = nfsd4_decode_stateid4(argp, &open->op_delegate_stateid);
if (status)
return status;
status = nfsd4_decode_component4(argp, &open->op_fname,
&open->op_fnamelen);
if (status)
return status;
break;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
if (argp->minorversion < 1)
return nfserr_bad_xdr;
/* void */
break;
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
if (argp->minorversion < 1)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &open->op_delegate_stateid);
if (status)
return status;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_open *open = &u->open;
__be32 status;
u32 dummy;
memset(open, 0, sizeof(*open));
if (xdr_stream_decode_u32(argp->xdr, &open->op_seqid) < 0)
return nfserr_bad_xdr;
/* deleg_want is ignored */
status = nfsd4_decode_share_access(argp, &open->op_share_access,
&open->op_deleg_want, &dummy);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
if (status)
return status;
status = nfsd4_decode_state_owner4(argp, &open->op_clientid,
&open->op_owner);
if (status)
return status;
status = nfsd4_decode_openflag4(argp, open);
if (status)
return status;
return nfsd4_decode_open_claim4(argp, open);
}
static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_open_confirm *open_conf = &u->open_confirm;
__be32 status;
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_stateid4(argp, &open_conf->oc_req_stateid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &open_conf->oc_seqid) < 0)
return nfserr_bad_xdr;
memset(&open_conf->oc_resp_stateid, 0,
sizeof(open_conf->oc_resp_stateid));
return nfs_ok;
}
static __be32
nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_open_downgrade *open_down = &u->open_downgrade;
__be32 status;
memset(open_down, 0, sizeof(*open_down));
status = nfsd4_decode_stateid4(argp, &open_down->od_stateid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &open_down->od_seqid) < 0)
return nfserr_bad_xdr;
/* deleg_want is ignored */
status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
&open_down->od_deleg_want, NULL);
if (status)
return status;
return nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
}
static __be32
nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_putfh *putfh = &u->putfh;
__be32 *p;
if (xdr_stream_decode_u32(argp->xdr, &putfh->pf_fhlen) < 0)
return nfserr_bad_xdr;
if (putfh->pf_fhlen > NFS4_FHSIZE)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
if (!p)
return nfserr_bad_xdr;
putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
if (!putfh->pf_fhval)
return nfserr_jukebox;
putfh->no_verify = false;
return nfs_ok;
}
static __be32
nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
{
if (argp->minorversion == 0)
return nfs_ok;
return nfserr_notsupp;
}
static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
__be32 status;
memset(read, 0, sizeof(*read));
status = nfsd4_decode_stateid4(argp, &read->rd_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &read->rd_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &read->rd_length) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_readdir *readdir = &u->readdir;
__be32 status;
memset(readdir, 0, sizeof(*readdir));
if (xdr_stream_decode_u64(argp->xdr, &readdir->rd_cookie) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_verifier4(argp, &readdir->rd_verf);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &readdir->rd_dircount) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &readdir->rd_maxcount) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_uint32_array(argp->xdr, readdir->rd_bmval,
ARRAY_SIZE(readdir->rd_bmval)) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_remove(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_remove *remove = &u->remove;
memset(&remove->rm_cinfo, 0, sizeof(remove->rm_cinfo));
return nfsd4_decode_component4(argp, &remove->rm_name, &remove->rm_namelen);
}
static __be32
nfsd4_decode_rename(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_rename *rename = &u->rename;
__be32 status;
memset(rename, 0, sizeof(*rename));
status = nfsd4_decode_component4(argp, &rename->rn_sname, &rename->rn_snamelen);
if (status)
return status;
return nfsd4_decode_component4(argp, &rename->rn_tname, &rename->rn_tnamelen);
}
static __be32
nfsd4_decode_renew(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
clientid_t *clientid = &u->renew;
return nfsd4_decode_clientid4(argp, clientid);
}
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_secinfo *secinfo = &u->secinfo;
secinfo->si_exp = NULL;
return nfsd4_decode_component4(argp, &secinfo->si_name, &secinfo->si_namelen);
}
static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
__be32 status;
memset(setattr, 0, sizeof(*setattr));
status = nfsd4_decode_stateid4(argp, &setattr->sa_stateid);
if (status)
return status;
return nfsd4_decode_fattr4(argp, setattr->sa_bmval,
ARRAY_SIZE(setattr->sa_bmval),
&setattr->sa_iattr, &setattr->sa_acl,
&setattr->sa_label, NULL);
}
static __be32
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_setclientid *setclientid = &u->setclientid;
__be32 *p, status;
memset(setclientid, 0, sizeof(*setclientid));
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_verifier4(argp, &setclientid->se_verf);
if (status)
return status;
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_prog) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_netid_len) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
if (!p)
return nfserr_bad_xdr;
setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
setclientid->se_callback_netid_len);
if (!setclientid->se_callback_netid_val)
return nfserr_jukebox;
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
if (!p)
return nfserr_bad_xdr;
setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
setclientid->se_callback_addr_len);
if (!setclientid->se_callback_addr_val)
return nfserr_jukebox;
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_setclientid_confirm *scd_c = &u->setclientid_confirm;
__be32 status;
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_clientid4(argp, &scd_c->sc_clientid);
if (status)
return status;
return nfsd4_decode_verifier4(argp, &scd_c->sc_confirm);
}
/* Also used for NVERIFY */
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_verify *verify = &u->verify;
__be32 *p, status;
memset(verify, 0, sizeof(*verify));
status = nfsd4_decode_bitmap4(argp, verify->ve_bmval,
ARRAY_SIZE(verify->ve_bmval));
if (status)
return status;
/* For convenience's sake, we compare raw xdr'd attributes in
* nfsd4_proc_verify */
if (xdr_stream_decode_u32(argp->xdr, &verify->ve_attrlen) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
if (!p)
return nfserr_bad_xdr;
verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
if (!verify->ve_attrval)
return nfserr_jukebox;
return nfs_ok;
}
static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
__be32 status;
status = nfsd4_decode_stateid4(argp, &write->wr_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &write->wr_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &write->wr_stable_how) < 0)
return nfserr_bad_xdr;
if (write->wr_stable_how > NFS_FILE_SYNC)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &write->wr_buflen) < 0)
return nfserr_bad_xdr;
if (!xdr_stream_subsegment(argp->xdr, &write->wr_payload, write->wr_buflen))
return nfserr_bad_xdr;
write->wr_bytes_written = 0;
write->wr_how_written = 0;
memset(&write->wr_verifier, 0, sizeof(write->wr_verifier));
return nfs_ok;
}
static __be32
nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
__be32 status;
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_state_owner4(argp, &rlockowner->rl_clientid,
&rlockowner->rl_owner);
if (status)
return status;
if (argp->minorversion && !zero_clientid(&rlockowner->rl_clientid))
return nfserr_inval;
return nfs_ok;
}
static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
memset(bc, 0, sizeof(*bc));
if (xdr_stream_decode_u32(argp->xdr, &bc->bc_cb_program) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
}
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
u32 use_conn_in_rdma_mode;
__be32 status;
memset(bcts, 0, sizeof(*bcts));
status = nfsd4_decode_sessionid4(argp, &bcts->sessionid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &bcts->dir) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &use_conn_in_rdma_mode) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_state_protect_ops(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
__be32 status;
status = nfsd4_decode_bitmap4(argp, exid->spo_must_enforce,
ARRAY_SIZE(exid->spo_must_enforce));
if (status)
return nfserr_bad_xdr;
status = nfsd4_decode_bitmap4(argp, exid->spo_must_allow,
ARRAY_SIZE(exid->spo_must_allow));
if (status)
return nfserr_bad_xdr;
return nfs_ok;
}
/*
* This implementation currently does not support SP4_SSV.
* This decoder simply skips over these arguments.
*/
static noinline __be32
nfsd4_decode_ssv_sp_parms(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
u32 count, window, num_gss_handles;
__be32 status;
/* ssp_ops */
status = nfsd4_decode_state_protect_ops(argp, exid);
if (status)
return status;
/* ssp_hash_algs<> */
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
while (count--) {
status = nfsd4_decode_ignored_string(argp, 0);
if (status)
return status;
}
/* ssp_encr_algs<> */
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
while (count--) {
status = nfsd4_decode_ignored_string(argp, 0);
if (status)
return status;
}
if (xdr_stream_decode_u32(argp->xdr, &window) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &num_gss_handles) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_state_protect4_a(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &exid->spa_how) < 0)
return nfserr_bad_xdr;
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
status = nfsd4_decode_state_protect_ops(argp, exid);
if (status)
return status;
break;
case SP4_SSV:
status = nfsd4_decode_ssv_sp_parms(argp, exid);
if (status)
return status;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_nfs_impl_id4(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
__be32 status;
u32 count;
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
switch (count) {
case 0:
break;
case 1:
/* Note that RFC 8881 places no length limit on
* nii_domain, but this implementation permits no
* more than NFS4_OPAQUE_LIMIT bytes */
status = nfsd4_decode_opaque(argp, &exid->nii_domain);
if (status)
return status;
/* Note that RFC 8881 places no length limit on
* nii_name, but this implementation permits no
* more than NFS4_OPAQUE_LIMIT bytes */
status = nfsd4_decode_opaque(argp, &exid->nii_name);
if (status)
return status;
status = nfsd4_decode_nfstime4(argp, &exid->nii_time);
if (status)
return status;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_exchange_id *exid = &u->exchange_id;
__be32 status;
memset(exid, 0, sizeof(*exid));
status = nfsd4_decode_verifier4(argp, &exid->verifier);
if (status)
return status;
status = nfsd4_decode_opaque(argp, &exid->clname);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &exid->flags) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_state_protect4_a(argp, exid);
if (status)
return status;
return nfsd4_decode_nfs_impl_id4(argp, exid);
}
static __be32
nfsd4_decode_channel_attrs4(struct nfsd4_compoundargs *argp,
struct nfsd4_channel_attrs *ca)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, XDR_UNIT * 7);
if (!p)
return nfserr_bad_xdr;
/* headerpadsz is ignored */
p++;
ca->maxreq_sz = be32_to_cpup(p++);
ca->maxresp_sz = be32_to_cpup(p++);
ca->maxresp_cached = be32_to_cpup(p++);
ca->maxops = be32_to_cpup(p++);
ca->maxreqs = be32_to_cpup(p++);
ca->nr_rdma_attrs = be32_to_cpup(p);
switch (ca->nr_rdma_attrs) {
case 0:
break;
case 1:
if (xdr_stream_decode_u32(argp->xdr, &ca->rdma_attrs) < 0)
return nfserr_bad_xdr;
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
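/*
 * For reference, the seven words decoded above map onto channel_attrs4
 * from RFC 8881:
 *
 *	struct channel_attrs4 {
 *		count4		ca_headerpadsize;
 *		count4		ca_maxrequestsize;
 *		count4		ca_maxresponsesize;
 *		count4		ca_maxresponsesize_cached;
 *		count4		ca_maxoperations;
 *		count4		ca_maxrequests;
 *		uint32_t	ca_rdma_ird<1>;
 *	};
 *
 * The seventh word is the element count of ca_rdma_ird, which may
 * therefore be only 0 or 1.
 */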
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_create_session *sess = &u->create_session;
__be32 status;
memset(sess, 0, sizeof(*sess));
status = nfsd4_decode_clientid4(argp, &sess->clientid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &sess->seqid) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &sess->flags) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_channel_attrs4(argp, &sess->fore_channel);
if (status)
return status;
status = nfsd4_decode_channel_attrs4(argp, &sess->back_channel);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &sess->callback_prog) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_cb_sec(argp, &sess->cb_sec);
}
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_destroy_session *destroy_session = &u->destroy_session;
return nfsd4_decode_sessionid4(argp, &destroy_session->sessionid);
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
return nfsd4_decode_stateid4(argp, &free_stateid->fr_stateid);
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
__be32 status;
memset(gdev, 0, sizeof(*gdev));
status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_maxcount) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_uint32_array(argp->xdr,
&gdev->gd_notify_types, 1) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
__be32 *p, status;
memset(lcp, 0, sizeof(*lcp));
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_seg.offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_seg.length) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_bool(argp->xdr, &lcp->lc_reclaim) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &lcp->lc_sid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_newoffset) < 0)
return nfserr_bad_xdr;
if (lcp->lc_newoffset) {
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_last_wr) < 0)
return nfserr_bad_xdr;
} else
lcp->lc_last_wr = 0;
p = xdr_inline_decode(argp->xdr, XDR_UNIT);
if (!p)
return nfserr_bad_xdr;
if (xdr_item_is_present(p)) {
status = nfsd4_decode_nfstime4(argp, &lcp->lc_mtime);
if (status)
return status;
} else {
lcp->lc_mtime.tv_nsec = UTIME_NOW;
}
return nfsd4_decode_layoutupdate4(argp, lcp);
}
static __be32
nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_layoutget *lgp = &u->layoutget;
__be32 status;
memset(lgp, 0, sizeof(*lgp));
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_signal) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_layout_type) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_seg.iomode) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lgp->lg_seg.offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lgp->lg_seg.length) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lgp->lg_minlength) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_stateid4(argp, &lgp->lg_sid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_maxcount) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
memset(lrp, 0, sizeof(*lrp));
if (xdr_stream_decode_bool(argp->xdr, &lrp->lr_reclaim) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lrp->lr_layout_type) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lrp->lr_seg.iomode) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_layoutreturn4(argp, lrp);
}
#endif /* CONFIG_NFSD_PNFS */
static __be32 nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_secinfo_no_name *sin = &u->secinfo_no_name;
if (xdr_stream_decode_u32(argp->xdr, &sin->sin_style) < 0)
return nfserr_bad_xdr;
sin->sin_exp = NULL;
return nfs_ok;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_sequence *seq = &u->sequence;
__be32 *p, status;
status = nfsd4_decode_sessionid4(argp, &seq->sessionid);
if (status)
return status;
p = xdr_inline_decode(argp->xdr, XDR_UNIT * 4);
if (!p)
return nfserr_bad_xdr;
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
seq->maxslots = be32_to_cpup(p++);
seq->cachethis = be32_to_cpup(p);
seq->status_flags = 0;
return nfs_ok;
}
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
__be32 status;
u32 i;
memset(test_stateid, 0, sizeof(*test_stateid));
if (xdr_stream_decode_u32(argp->xdr, &test_stateid->ts_num_ids) < 0)
return nfserr_bad_xdr;
INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = svcxdr_tmpalloc(argp, sizeof(*stateid));
if (!stateid)
return nfserr_jukebox;
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid4(argp, &stateid->ts_id_stateid);
if (status)
return status;
}
return nfs_ok;
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
return nfsd4_decode_clientid4(argp, &dc->clientid);
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
if (xdr_stream_decode_bool(argp->xdr, &rc->rca_one_fs) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_fallocate *fallocate = &u->allocate;
__be32 status;
status = nfsd4_decode_stateid4(argp, &fallocate->falloc_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &fallocate->falloc_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &fallocate->falloc_length) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
struct nl4_server *ns)
{
struct nfs42_netaddr *naddr;
__be32 *p;
if (xdr_stream_decode_u32(argp->xdr, &ns->nl4_type) < 0)
return nfserr_bad_xdr;
	/* currently only one inter-server source server is supported */
switch (ns->nl4_type) {
case NL4_NETADDR:
naddr = &ns->u.nl4_addr;
if (xdr_stream_decode_u32(argp->xdr, &naddr->netid_len) < 0)
return nfserr_bad_xdr;
if (naddr->netid_len > RPCBIND_MAXNETIDLEN)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, naddr->netid_len);
if (!p)
return nfserr_bad_xdr;
memcpy(naddr->netid, p, naddr->netid_len);
if (xdr_stream_decode_u32(argp->xdr, &naddr->addr_len) < 0)
return nfserr_bad_xdr;
if (naddr->addr_len > RPCBIND_MAXUADDRLEN)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, naddr->addr_len);
if (!p)
return nfserr_bad_xdr;
memcpy(naddr->addr, p, naddr->addr_len);
break;
default:
return nfserr_bad_xdr;
}
return nfs_ok;
}
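/*
 * For reference, the structure decoded above is netloc4 from RFC 7862;
 * only the NL4_NETADDR arm is accepted by this server:
 *
 *	union netloc4 switch (netloc_type4 nl_type) {
 *	case NL4_NAME:		utf8str_cis nl_name;
 *	case NL4_URL:		utf8str_cis nl_url;
 *	case NL4_NETADDR:	netaddr4 nl_addr;
 *	};
 */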
static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_copy *copy = &u->copy;
u32 consecutive, i, count, sync;
struct nl4_server *ns_dummy;
__be32 status;
memset(copy, 0, sizeof(*copy));
	status = nfsd4_decode_stateid4(argp, &copy->cp_src_stateid);
if (status)
return status;
	status = nfsd4_decode_stateid4(argp, &copy->cp_dst_stateid);
if (status)
return status;
	if (xdr_stream_decode_u64(argp->xdr, &copy->cp_src_pos) < 0)
		return nfserr_bad_xdr;
	if (xdr_stream_decode_u64(argp->xdr, &copy->cp_dst_pos) < 0)
		return nfserr_bad_xdr;
	if (xdr_stream_decode_u64(argp->xdr, &copy->cp_count) < 0)
return nfserr_bad_xdr;
/* ca_consecutive: we always do consecutive copies */
if (xdr_stream_decode_u32(argp->xdr, &consecutive) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_bool(argp->xdr, &sync) < 0)
return nfserr_bad_xdr;
nfsd4_copy_set_sync(copy, sync);
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
copy->cp_src = svcxdr_tmpalloc(argp, sizeof(*copy->cp_src));
if (copy->cp_src == NULL)
return nfserr_jukebox;
if (count == 0) { /* intra-server copy */
		__set_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
return nfs_ok;
}
/* decode all the supplied server addresses but use only the first */
status = nfsd4_decode_nl4_server(argp, copy->cp_src);
if (status)
return status;
ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL);
if (ns_dummy == NULL)
return nfserr_jukebox;
for (i = 0; i < count - 1; i++) {
status = nfsd4_decode_nl4_server(argp, ns_dummy);
if (status) {
kfree(ns_dummy);
return status;
}
}
kfree(ns_dummy);
return nfs_ok;
}
static __be32
nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
memset(cn, 0, sizeof(*cn));
cn->cpn_src = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_src));
if (cn->cpn_src == NULL)
return nfserr_jukebox;
cn->cpn_dst = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_dst));
if (cn->cpn_dst == NULL)
return nfserr_jukebox;
status = nfsd4_decode_stateid4(argp, &cn->cpn_src_stateid);
if (status)
return status;
return nfsd4_decode_nl4_server(argp, cn->cpn_dst);
}
static __be32
nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
os->count = 0;
os->status = 0;
return nfsd4_decode_stateid4(argp, &os->stateid);
}
static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_seek *seek = &u->seek;
__be32 status;
status = nfsd4_decode_stateid4(argp, &seek->seek_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &seek->seek_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &seek->seek_whence) < 0)
return nfserr_bad_xdr;
seek->seek_eof = 0;
seek->seek_pos = 0;
return nfs_ok;
}
static __be32
nfsd4_decode_clone(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_clone *clone = &u->clone;
__be32 status;
status = nfsd4_decode_stateid4(argp, &clone->cl_src_stateid);
if (status)
return status;
status = nfsd4_decode_stateid4(argp, &clone->cl_dst_stateid);
if (status)
return status;
if (xdr_stream_decode_u64(argp->xdr, &clone->cl_src_pos) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &clone->cl_dst_pos) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &clone->cl_count) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
/*
* XDR data that is more than PAGE_SIZE in size is normally part of a
* read or write. However, the size of extended attributes is limited
* by the maximum request size, and then further limited by the underlying
* filesystem limits. This can exceed PAGE_SIZE (currently, XATTR_SIZE_MAX
* is 64k). Since there is no kvec- or page-based interface to xattrs,
* and we're not dealing with contiguous pages, we need to do some copying.
*/
/*
* Decode data into buffer.
*/
static __be32
nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct xdr_buf *xdr,
char **bufp, u32 buflen)
{
struct page **pages = xdr->pages;
struct kvec *head = xdr->head;
char *tmp, *dp;
u32 len;
if (buflen <= head->iov_len) {
/*
* We're in luck, the head has enough space. Just return
* the head, no need for copying.
*/
*bufp = head->iov_base;
return 0;
}
tmp = svcxdr_tmpalloc(argp, buflen);
if (tmp == NULL)
return nfserr_jukebox;
dp = tmp;
memcpy(dp, head->iov_base, head->iov_len);
buflen -= head->iov_len;
dp += head->iov_len;
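/* Copy the rest of the data a page at a time from the page array. */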
while (buflen > 0) {
len = min_t(u32, buflen, PAGE_SIZE);
memcpy(dp, page_address(*pages), len);
buflen -= len;
dp += len;
pages++;
}
*bufp = tmp;
return 0;
}
/*
* Get a user extended attribute name from the XDR buffer.
* It will not have the "user." prefix, so prepend it.
* Lastly, check for nul characters in the name.
*/
static __be32
nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep)
{
char *name, *sp, *dp;
u32 namelen, cnt;
__be32 *p;
if (xdr_stream_decode_u32(argp->xdr, &namelen) < 0)
return nfserr_bad_xdr;
if (namelen > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN))
return nfserr_nametoolong;
if (namelen == 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, namelen);
if (!p)
return nfserr_bad_xdr;
name = svcxdr_tmpalloc(argp, namelen + XATTR_USER_PREFIX_LEN + 1);
if (!name)
return nfserr_jukebox;
memcpy(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
/*
* Copy the extended attribute name over while checking for 0
* characters.
*/
sp = (char *)p;
dp = name + XATTR_USER_PREFIX_LEN;
cnt = namelen;
while (cnt-- > 0) {
if (*sp == '\0')
return nfserr_bad_xdr;
*dp++ = *sp++;
}
*dp = '\0';
*namep = name;
return nfs_ok;
}
/*
* A GETXATTR op request comes without a length specifier. We just set the
* maximum length for the reply based on XATTR_SIZE_MAX and the maximum
* channel reply size. nfsd_getxattr will probe the length of the xattr,
* check it against getxa_len, and allocate + return the value.
*/
static __be32
nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_getxattr *getxattr = &u->getxattr;
__be32 status;
u32 maxcount;
memset(getxattr, 0, sizeof(*getxattr));
status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name);
if (status)
return status;
maxcount = svc_max_payload(argp->rqstp);
maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount);
getxattr->getxa_len = maxcount;
return nfs_ok;
}
static __be32
nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_setxattr *setxattr = &u->setxattr;
u32 flags, maxcount, size;
__be32 status;
memset(setxattr, 0, sizeof(*setxattr));
if (xdr_stream_decode_u32(argp->xdr, &flags) < 0)
return nfserr_bad_xdr;
if (flags > SETXATTR4_REPLACE)
return nfserr_inval;
setxattr->setxa_flags = flags;
status = nfsd4_decode_xattr_name(argp, &setxattr->setxa_name);
if (status)
return status;
maxcount = svc_max_payload(argp->rqstp);
maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount);
if (xdr_stream_decode_u32(argp->xdr, &size) < 0)
return nfserr_bad_xdr;
if (size > maxcount)
return nfserr_xattr2big;
setxattr->setxa_len = size;
if (size > 0) {
struct xdr_buf payload;
if (!xdr_stream_subsegment(argp->xdr, &payload, size))
return nfserr_bad_xdr;
status = nfsd4_vbuf_from_vector(argp, &payload,
&setxattr->setxa_buf, size);
if (status)
return status;
}
return nfs_ok;
}
static __be32
nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
u32 maxcount;
memset(listxattrs, 0, sizeof(*listxattrs));
if (xdr_stream_decode_u64(argp->xdr, &listxattrs->lsxa_cookie) < 0)
return nfserr_bad_xdr;
/*
* If the cookie exceeds the number of minimal "user.x" entries
* (each needing a trailing '\0') that could fit in a maximum-size
* buffer, it's invalid.
*/
if (listxattrs->lsxa_cookie >=
(XATTR_LIST_MAX / (XATTR_USER_PREFIX_LEN + 2)))
return nfserr_badcookie;
if (xdr_stream_decode_u32(argp->xdr, &maxcount) < 0)
return nfserr_bad_xdr;
if (maxcount < 8)
/* Always need at least 2 words (length and one character) */
return nfserr_inval;
maxcount = min(maxcount, svc_max_payload(argp->rqstp));
listxattrs->lsxa_maxcount = maxcount;
return nfs_ok;
}
static __be32
nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_removexattr *removexattr = &u->removexattr;
memset(removexattr, 0, sizeof(*removexattr));
return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name);
}
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
{
return nfs_ok;
}
static __be32
nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
{
return nfserr_notsupp;
}
typedef __be32 (*nfsd4_dec)(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u);
static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_ACCESS] = nfsd4_decode_access,
[OP_CLOSE] = nfsd4_decode_close,
[OP_COMMIT] = nfsd4_decode_commit,
[OP_CREATE] = nfsd4_decode_create,
[OP_DELEGPURGE] = nfsd4_decode_notsupp,
[OP_DELEGRETURN] = nfsd4_decode_delegreturn,
[OP_GETATTR] = nfsd4_decode_getattr,
[OP_GETFH] = nfsd4_decode_noop,
[OP_LINK] = nfsd4_decode_link,
[OP_LOCK] = nfsd4_decode_lock,
[OP_LOCKT] = nfsd4_decode_lockt,
[OP_LOCKU] = nfsd4_decode_locku,
[OP_LOOKUP] = nfsd4_decode_lookup,
[OP_LOOKUPP] = nfsd4_decode_noop,
[OP_NVERIFY] = nfsd4_decode_verify,
[OP_OPEN] = nfsd4_decode_open,
[OP_OPENATTR] = nfsd4_decode_notsupp,
[OP_OPEN_CONFIRM] = nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = nfsd4_decode_open_downgrade,
[OP_PUTFH] = nfsd4_decode_putfh,
[OP_PUTPUBFH] = nfsd4_decode_putpubfh,
[OP_PUTROOTFH] = nfsd4_decode_noop,
[OP_READ] = nfsd4_decode_read,
[OP_READDIR] = nfsd4_decode_readdir,
[OP_READLINK] = nfsd4_decode_noop,
[OP_REMOVE] = nfsd4_decode_remove,
[OP_RENAME] = nfsd4_decode_rename,
[OP_RENEW] = nfsd4_decode_renew,
[OP_RESTOREFH] = nfsd4_decode_noop,
[OP_SAVEFH] = nfsd4_decode_noop,
[OP_SECINFO] = nfsd4_decode_secinfo,
[OP_SETATTR] = nfsd4_decode_setattr,
[OP_SETCLIENTID] = nfsd4_decode_setclientid,
[OP_SETCLIENTID_CONFIRM] = nfsd4_decode_setclientid_confirm,
[OP_VERIFY] = nfsd4_decode_verify,
[OP_WRITE] = nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = nfsd4_decode_release_lockowner,
/* new operations for NFSv4.1 */
[OP_BACKCHANNEL_CTL] = nfsd4_decode_backchannel_ctl,
[OP_BIND_CONN_TO_SESSION] = nfsd4_decode_bind_conn_to_session,
[OP_EXCHANGE_ID] = nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = nfsd4_decode_notsupp,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_decode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = nfsd4_decode_layoutcommit,
[OP_LAYOUTGET] = nfsd4_decode_layoutget,
[OP_LAYOUTRETURN] = nfsd4_decode_layoutreturn,
#else
[OP_GETDEVICEINFO] = nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = nfsd4_decode_notsupp,
[OP_LAYOUTGET] = nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = nfsd4_decode_notsupp,
#endif
[OP_SECINFO_NO_NAME] = nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = nfsd4_decode_sequence,
[OP_SET_SSV] = nfsd4_decode_notsupp,
[OP_TEST_STATEID] = nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
[OP_ALLOCATE] = nfsd4_decode_fallocate,
[OP_COPY] = nfsd4_decode_copy,
[OP_COPY_NOTIFY] = nfsd4_decode_copy_notify,
[OP_DEALLOCATE] = nfsd4_decode_fallocate,
[OP_IO_ADVISE] = nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = nfsd4_decode_notsupp,
[OP_OFFLOAD_CANCEL] = nfsd4_decode_offload_status,
[OP_OFFLOAD_STATUS] = nfsd4_decode_offload_status,
[OP_READ_PLUS] = nfsd4_decode_read,
[OP_SEEK] = nfsd4_decode_seek,
[OP_WRITE_SAME] = nfsd4_decode_notsupp,
[OP_CLONE] = nfsd4_decode_clone,
/* RFC 8276 extended attribute operations */
[OP_GETXATTR] = nfsd4_decode_getxattr,
[OP_SETXATTR] = nfsd4_decode_setxattr,
[OP_LISTXATTRS] = nfsd4_decode_listxattrs,
[OP_REMOVEXATTR] = nfsd4_decode_removexattr,
};
static inline bool
nfsd4_opnum_in_range(struct nfsd4_compoundargs *argp, struct nfsd4_op *op)
{
if (op->opnum < FIRST_NFS4_OP)
return false;
else if (argp->minorversion == 0 && op->opnum > LAST_NFS40_OP)
return false;
else if (argp->minorversion == 1 && op->opnum > LAST_NFS41_OP)
return false;
else if (argp->minorversion == 2 && op->opnum > LAST_NFS42_OP)
return false;
return true;
}
static bool
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
struct nfsd4_op *op;
bool cachethis = false;
int auth_slack = argp->rqstp->rq_auth_slack;
int max_reply = auth_slack + 8; /* opcnt, status */
int readcount = 0;
int readbytes = 0;
__be32 *p;
int i;
if (xdr_stream_decode_u32(argp->xdr, &argp->taglen) < 0)
return false;
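/* The reply echoes the tag; account for its length word here. */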
max_reply += XDR_UNIT;
argp->tag = NULL;
if (unlikely(argp->taglen)) {
if (argp->taglen > NFSD4_MAX_TAGLEN)
return false;
p = xdr_inline_decode(argp->xdr, argp->taglen);
if (!p)
return false;
argp->tag = svcxdr_savemem(argp, p, argp->taglen);
if (!argp->tag)
return false;
max_reply += xdr_align_size(argp->taglen);
}
if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0)
return false;
if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0)
return false;
argp->opcnt = min_t(u32, argp->client_opcnt,
NFSD_MAX_OPS_PER_COMPOUND);
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops));
if (!argp->ops) {
argp->ops = argp->iops;
return false;
}
}
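/*
 * For an unsupported minorversion, decode no operations; the
 * COMPOUND will fail with NFS4ERR_MINOR_VERS_MISMATCH.
 */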
if (argp->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
argp->opcnt = 0;
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
op->opdesc = NULL;
if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
return false;
if (nfsd4_opnum_in_range(argp, op)) {
op->opdesc = OPDESC(op);
op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
if (op->status != nfs_ok)
trace_nfsd_compound_decode_err(argp->rqstp,
argp->opcnt, i,
op->opnum,
op->status);
} else {
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
}
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
*/
cachethis |= nfsd4_cache_this_op(op);
if (op->opnum == OP_READ || op->opnum == OP_READ_PLUS) {
readcount++;
readbytes += nfsd4_max_reply(argp->rqstp, op);
} else
max_reply += nfsd4_max_reply(argp->rqstp, op);
/*
* OP_LOCK and OP_LOCKT may return a conflicting lock.
* (Special case because it will just skip encoding this
* if it runs out of xdr buffer space, and it is the only
* operation that behaves this way.)
*/
if (op->opnum == OP_LOCK || op->opnum == OP_LOCKT)
max_reply += NFS4_OPAQUE_LIMIT;
if (op->status) {
argp->opcnt = i+1;
break;
}
}
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
svc_reserve(argp->rqstp, max_reply + readbytes);
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
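/*
 * Splice reads are possible only when the READ payload will be
 * the only data occupying the reply's page array.
 */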
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
return true;
}
static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
struct svc_export *exp)
{
if (exp->ex_flags & NFSEXP_V4ROOT) {
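/*
 * The pseudo-root's contents change only when the export table
 * is flushed, so use the flush time as the change attribute.
 */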
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0;
} else
p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
return p;
}
static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
struct timespec64 *tv)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, (s64)tv->tv_sec);
*p = cpu_to_be32(tv->tv_nsec);
return nfs_ok;
}
/*
* ctime (in NFSv4, time_metadata) is not writeable, and the client
* doesn't really care what resolution could theoretically be stored by
* the filesystem.
*
* The client cares how close together changes can be while still
* guaranteeing ctime changes. For most filesystems (which have
* timestamps with nanosecond fields) that is limited by the resolution
* of the time returned from current_time() (which I'm assuming to be
* 1/HZ).
*/
static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
{
struct timespec64 ts;
u32 ns;
ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
ts = ns_to_timespec64(ns);
p = xdr_encode_hyper(p, ts.tv_sec);
*p++ = cpu_to_be32(ts.tv_nsec);
return p;
}
static __be32
nfsd4_encode_change_info4(struct xdr_stream *xdr, struct nfsd4_change_info *c)
{
if (xdr_stream_encode_bool(xdr, c->atomic) < 0)
return nfserr_resource;
if (xdr_stream_encode_u64(xdr, c->before_change) < 0)
return nfserr_resource;
if (xdr_stream_encode_u64(xdr, c->after_change) < 0)
return nfserr_resource;
return nfs_ok;
}
/* Encode as an array of strings the string given with components
* separated by @sep, escaped with esc_enter and esc_exit.
*/
static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
char *components, char esc_enter,
char esc_exit)
{
__be32 *p;
__be32 pathlen;
int pathlen_offset;
int strlen, count = 0;
char *str, *end, *next;
dprintk("nfsd4_encode_components(%s)\n", components);
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
p++; /* We will fill this in with @count later */
end = str = components;
while (*end) {
bool found_esc = false;
/* try to parse as esc_enter, ..., esc_exit, sep */
if (*str == esc_enter) {
for (; *end && (*end != esc_exit); end++)
/* find esc_exit or end of string */;
next = end + 1;
if (*end && (!*next || *next == sep)) {
str++;
found_esc = true;
}
}
if (!found_esc)
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
strlen = end - str;
if (strlen) {
p = xdr_reserve_space(xdr, strlen + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, str, strlen);
count++;
} else
end++;
if (found_esc)
end = next;
str = end;
}
pathlen = htonl(count);
write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4);
return 0;
}
/* Encode as an array of strings the string given with components
* separated by @sep.
*/
static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
char *components)
{
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
/*
* encode a location element of a fs_locations structure
*/
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
__be32 status;
status = nfsd4_encode_components_esc(xdr, ':', location->hosts,
'[', ']');
if (status)
return status;
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
return 0;
}
/*
* Encode a path in RFC3530 'pathname4' format
*/
static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
const struct path *root,
const struct path *path)
{
struct path cur = *path;
__be32 *p;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
dprintk("nfsd4_encode_components(");
path_get(&cur);
/* First walk the path up to the nfsd root, and store the
* dentries/path components in an array.
*/
for (;;) {
if (path_equal(&cur, root))
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
continue;
goto out_free;
}
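/* Grow the component array in chunks of 16 entries. */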
if ((ncomponents & 15) == 0) {
struct dentry **new;
new = krealloc(components,
sizeof(*new) * (ncomponents + 16),
GFP_KERNEL);
if (!new)
goto out_free;
components = new;
}
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
err = nfserr_resource;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_free;
*p++ = cpu_to_be32(ncomponents);
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
unsigned int len;
spin_lock(&dentry->d_lock);
len = dentry->d_name.len;
p = xdr_reserve_space(xdr, len + 4);
if (!p) {
spin_unlock(&dentry->d_lock);
goto out_free;
}
p = xdr_encode_opaque(p, dentry->d_name.name, len);
dprintk("/%pd", dentry);
spin_unlock(&dentry->d_lock);
dput(dentry);
ncomponents--;
}
err = 0;
out_free:
dprintk(")\n");
while (ncomponents)
dput(components[--ncomponents]);
kfree(components);
path_put(&cur);
return err;
}
static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
struct svc_rqst *rqstp, const struct path *path)
{
struct svc_export *exp_ps;
__be32 res;
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
exp_put(exp_ps);
return res;
}
/*
* encode a fs_locations structure
*/
static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
struct svc_rqst *rqstp, struct svc_export *exp)
{
__be32 status;
int i;
__be32 *p;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
if (status)
return status;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(fslocs->locations_count);
for (i = 0; i < fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
if (status)
return status;
}
return 0;
}
static u32 nfs4_file_type(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFIFO: return NF4FIFO;
case S_IFCHR: return NF4CHR;
case S_IFDIR: return NF4DIR;
case S_IFBLK: return NF4BLK;
case S_IFLNK: return NF4LNK;
case S_IFREG: return NF4REG;
case S_IFSOCK: return NF4SOCK;
default: return NF4BAD;
}
}
static inline __be32
nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
struct nfs4_ace *ace)
{
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
else
return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
static inline __be32
nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
{
__be32 *p;
unsigned long i = hweight_long(layout_types);
p = xdr_reserve_space(xdr, 4 + 4 * i);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(i);
for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
if (layout_types & (1 << i))
*p++ = cpu_to_be32(i);
return 0;
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
#define WORD2_ABSENT_FS_ATTRS 0
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{
__be32 *p;
p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
if (!p)
return nfserr_resource;
/*
* For now we use a 0 here to indicate the null translation; in
* the future we may place a call to translation code here.
*/
*p++ = cpu_to_be32(0); /* lfs */
*p++ = cpu_to_be32(0); /* pi */
p = xdr_encode_opaque(p, context, len);
return 0;
}
#else
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{ return 0; }
#endif
static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
*bmval1 & ~WORD1_ABSENT_FS_ATTRS) {
if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR ||
*bmval0 & FATTR4_WORD0_FS_LOCATIONS)
*rdattr_err = NFSERR_MOVED;
else
return nfserr_moved;
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
*bmval2 &= WORD2_ABSENT_FS_ATTRS;
return 0;
}
static int nfsd4_get_mounted_on_ino(struct svc_export *exp, u64 *pino)
{
struct path path = exp->ex_path;
struct kstat stat;
int err;
path_get(&path);
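/* Walk up the mount stack to the mountpoint in the parent filesystem. */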
while (follow_up(&path)) {
if (path.dentry != path.mnt->mnt_root)
break;
}
err = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
path_put(&path);
if (!err)
*pino = stat.ino;
return err;
}
static __be32
nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
return 0;
out_resource:
return nfserr_resource;
}
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
u32 bmval0 = bmval[0];
u32 bmval1 = bmval[1];
u32 bmval2 = bmval[2];
struct kstat stat;
struct svc_fh *tempfh = NULL;
struct kstatfs statfs;
__be32 *p, *attrlen_p;
int starting_len = xdr->buf->len;
int attrlen_offset;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
__be32 status;
int err;
struct nfs4_acl *acl = NULL;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
void *context = NULL;
int contextlen;
#endif
bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
if (exp->ex_fslocs.migrated) {
status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
if (status)
goto out;
}
if (bmval0 & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
if (status)
goto out;
}
err = vfs_getattr(&path, &stat,
STATX_BASIC_STATS | STATX_BTIME | STATX_CHANGE_COOKIE,
AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
if (!(stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
bmval1 &= ~FATTR4_WORD1_TIME_CREATE;
if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
if (err)
goto out_nfserr;
}
if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
goto out;
fh_init(tempfh, NFS4_FHSIZE);
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
fhp = tempfh;
}
if (bmval0 & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
if (err == -EOPNOTSUPP)
bmval0 &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
} else if (err != 0)
goto out_nfserr;
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
else
err = -EOPNOTSUPP;
contextsupport = (err == 0);
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
if (status)
goto out;
attrlen_offset = xdr->buf->len;
attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!attrlen_p)
goto out_resource;
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
u32 supp[3];
memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
if (!IS_POSIXACL(dentry->d_inode))
supp[0] &= ~FATTR4_WORD0_ACL;
if (!contextsupport)
supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (!supp[2]) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
} else {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
*p++ = cpu_to_be32(supp[2]);
}
}
if (bmval0 & FATTR4_WORD0_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD) {
status = nfserr_serverfault;
goto out;
}
*p++ = cpu_to_be32(dummy);
}
if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
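/*
 * With subtree checking, the filehandle encodes parent
 * directory information, so a rename can invalidate it.
 */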
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
else
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = encode_change(p, &stat, d_inode(dentry), exp);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.size);
}
if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_FSID) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
if (exp->ex_fslocs.migrated) {
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
} else switch (fsid_source(fhp)) {
case FSIDSOURCE_FSID:
p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
p = xdr_encode_hyper(p, (u64)0);
break;
case FSIDSOURCE_DEV:
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MAJOR(stat.dev));
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MINOR(stat.dev));
break;
case FSIDSOURCE_UUID:
p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
EX_UUID_LEN);
break;
}
}
if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(nn->nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(rdattr_err);
}
if (bmval0 & FATTR4_WORD0_ACL) {
struct nfs4_ace *ace;
if (acl == NULL) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
goto out_acl;
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(acl->naces);
for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
p = xdr_reserve_space(xdr, 4*3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(ace->type);
*p++ = cpu_to_be32(ace->flag);
*p++ = cpu_to_be32(ace->access_mask &
NFS4_ACE_MASK_ALL);
status = nfsd4_encode_aclname(xdr, rqstp, ace);
if (status)
goto out;
}
}
out_acl:
if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
}
if (bmval0 & FATTR4_WORD0_CANSETTIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
if (!p)
goto out_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw,
fhp->fh_handle.fh_size);
}
if (bmval0 & FATTR4_WORD0_FILEID) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.ino);
}
if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_files);
}
if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
if (status)
goto out;
}
if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
}
if (bmval0 & FATTR4_WORD0_MAXLINK) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(255);
}
if (bmval0 & FATTR4_WORD0_MAXNAME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(statfs.f_namelen);
}
if (bmval0 & FATTR4_WORD0_MAXREAD) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval0 & FATTR4_WORD0_MAXWRITE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval1 & FATTR4_WORD1_MODE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.mode & S_IALLUGO);
}
if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval1 & FATTR4_WORD1_NUMLINKS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
status = nfsd4_encode_user(xdr, rqstp, stat.uid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
status = nfsd4_encode_group(xdr, rqstp, stat.gid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_RAWDEV) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
*p++ = cpu_to_be32((u32) MINOR(stat.rdev));
}
if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_USED) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
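/* stat.blocks counts 512-byte sectors */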
dummy64 = (u64)stat.blocks << 9;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
status = nfsd4_encode_nfstime4(xdr, &stat.atime);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
status = nfsd4_encode_nfstime4(xdr, &stat.btime);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = encode_time_delta(p, d_inode(dentry));
}
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
status = nfsd4_encode_nfstime4(xdr, &stat.ctime);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
status = nfsd4_encode_nfstime4(xdr, &stat.mtime);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
u64 ino = stat.ino;
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
/*
* Get ino of mountpoint in parent filesystem, if not ignoring
* crossmount and this is the root of a cross-mounted
* filesystem.
*/
if (ignore_crossmnt == 0 &&
dentry == exp->ex_path.mnt->mnt_root) {
err = nfsd4_get_mounted_on_ino(exp, &ino);
if (err)
goto out_nfserr;
}
p = xdr_encode_hyper(p, ino);
}
#ifdef CONFIG_NFSD_PNFS
if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.blksize);
}
#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
u32 supp[3];
memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
if (status)
goto out;
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
if (status)
goto out;
}
#endif
if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
err = xattr_supports_user_prefix(d_inode(dentry));
*p++ = cpu_to_be32(err == 0);
}
*attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (context)
security_release_secctx(context, contextlen);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
kfree(acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
}
if (status)
xdr_truncate_encode(xdr, starting_len);
return status;
out_nfserr:
status = nfserrno(err);
goto out;
out_resource:
status = nfserr_resource;
goto out;
}
static void svcxdr_init_encode_from_buffer(struct xdr_stream *xdr,
struct xdr_buf *buf, __be32 *p, int bytes)
{
xdr->scratch.iov_len = 0;
memset(buf, 0, sizeof(struct xdr_buf));
buf->head[0].iov_base = p;
buf->head[0].iov_len = 0;
buf->len = 0;
xdr->buf = buf;
xdr->iov = buf->head;
xdr->p = p;
xdr->end = (void *)p + bytes;
buf->buflen = bytes;
}
__be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
struct xdr_buf dummy;
struct xdr_stream xdr;
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
ignore_crossmnt);
*p = xdr.p;
return ret;
}
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
return 1;
if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
return 1;
return 0;
}
static __be32
nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
const char *name, int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
__be32 nfserr;
int ignore_crossmnt = 0;
dentry = lookup_positive_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
exp_get(exp);
/*
* In the case of a mountpoint, the client may be asking for
* attributes that are only properties of the underlying filesystem
* as opposed to the cross-mounted file system. In such a case,
* we will not follow the cross mount and will fill the attribtutes
* directly from the mountpoint dentry.
*/
if (nfsd_mountpoint(dentry, exp)) {
int err;
if (!(exp->ex_flags & NFSEXP_V4ROOT)
&& !attributes_need_mount(cd->rd_bmval)) {
ignore_crossmnt = 1;
goto out_encode;
}
/*
* Why the heck aren't we just using nfsd_lookup??
* Different "."/".." handling? Something else?
* At least, add a comment here to explain....
*/
err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
if (err) {
nfserr = nfserrno(err);
goto out_put;
}
nfserr = check_nfsd_access(exp, cd->rd_rqstp);
if (nfserr)
goto out_put;
}
out_encode:
nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
cd->rd_rqstp, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
static __be32 *
nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 20);
if (!p)
return NULL;
*p++ = htonl(2);
*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
*p++ = htonl(0); /* bmval1 */
*p++ = htonl(4); /* attribute length */
*p++ = nfserr; /* no htonl */
return p;
}
static int
nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common);
struct xdr_stream *xdr = cd->xdr;
int start_offset = xdr->buf->len;
int cookie_offset;
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
__be64 wire_offset;
__be32 *p;
/* In NFSv4, "." and ".." never make it onto the wire. */
if (name && isdotent(name, namlen)) {
cd->common.err = nfs_ok;
return 0;
}
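/* Backfill the previous entry's cookie with this entry's offset. */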
if (cd->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto fail;
*p++ = xdr_one; /* mark entry present */
cookie_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
case nfserr_resource:
nfserr = nfserr_toosmall;
goto fail;
case nfserr_noent:
xdr_truncate_encode(xdr, start_offset);
goto skip_entry;
case nfserr_jukebox:
/*
* The pseudoroot should only display dentries that lead to
* exports. If we get EJUKEBOX here, then we can't tell whether
* this entry should be included. Just fail the whole READDIR
* with NFS4ERR_DELAY in that case, and hope that the situation
* will resolve itself by the client's next attempt.
*/
if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
goto fail;
fallthrough;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
* we stuff the error code into this attribute
* and continue. If this attribute was not requested,
* then in accordance with the spec, we fail the
* entire READDIR operation(!)
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
p = nfsd4_encode_rdattr_error(xdr, nfserr);
if (p == NULL) {
nfserr = nfserr_toosmall;
goto fail;
}
}
nfserr = nfserr_toosmall;
entry_bytes = xdr->buf->len - start_offset;
if (entry_bytes > cd->rd_maxcount)
goto fail;
cd->rd_maxcount -= entry_bytes;
/*
* RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
* notes that it could be zero. If it is zero, then the server
* should enforce only the rd_maxcount value.
*/
if (cd->rd_dircount) {
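/*
 * Each entry charges dircount for the XDR length word, the
 * padded name, and the 8-byte cookie.
 */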
name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
goto fail;
cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
if (!cd->rd_dircount)
cd->rd_maxcount = 0;
}
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
return 0;
fail:
xdr_truncate_encode(xdr, start_offset);
cd->common.err = nfserr;
return -EINVAL;
}
static __be32
nfsd4_encode_verifier4(struct xdr_stream *xdr, const nfs4_verifier *verf)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
memcpy(p, verf->data, sizeof(verf->data));
return nfs_ok;
}
static __be32
nfsd4_encode_clientid4(struct xdr_stream *xdr, const clientid_t *clientid)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(__be64));
if (!p)
return nfserr_resource;
memcpy(p, clientid, sizeof(*clientid));
return nfs_ok;
}
static __be32
nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(stateid_t));
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sid->si_generation);
p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
sizeof(stateid_opaque_t));
return 0;
}
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_access *access = &u->access;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(access->ac_supported);
*p++ = cpu_to_be32(access->ac_resp_access);
return 0;
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(bcts->dir);
/* Upshifting from TCP to RDMA is not supported */
*p++ = cpu_to_be32(0);
return 0;
}
static __be32
nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_close *close = &u->close;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_stateid(xdr, &close->cl_stateid);
}
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_commit *commit = &u->commit;
return nfsd4_encode_verifier4(resp->xdr, &commit->co_verf);
}
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_create *create = &u->create;
struct xdr_stream *xdr = resp->xdr;
nfserr = nfsd4_encode_change_info4(xdr, &create->cr_cinfo);
if (nfserr)
return nfserr;
return nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
create->cr_bmval[1], create->cr_bmval[2]);
}
static __be32
nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_getattr *getattr = &u->getattr;
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
getattr->ga_bmval, resp->rqstp, 0);
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct svc_fh **fhpp = &u->getfh;
struct xdr_stream *xdr = resp->xdr;
struct svc_fh *fhp = *fhpp;
unsigned int len;
__be32 *p;
len = fhp->fh_handle.fh_size;
p = xdr_reserve_space(xdr, len + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw, len);
return 0;
}
/*
* Including all fields other than the name, a LOCK4denied structure requires
* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
*/
static __be32
nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
{
struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
again:
p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
if (!p) {
/*
* Don't fail to return the result just because we can't
* return the conflicting open:
*/
if (conf->len) {
kfree(conf->data);
conf->len = 0;
conf->data = NULL;
goto again;
}
return nfserr_resource;
}
p = xdr_encode_hyper(p, ld->ld_start);
p = xdr_encode_hyper(p, ld->ld_length);
*p++ = cpu_to_be32(ld->ld_type);
if (conf->len) {
p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p = xdr_encode_opaque(p, conf->data, conf->len);
kfree(conf->data);
} else { /* non-NFSv4 lock in conflict; no clientid or owner */
p = xdr_encode_hyper(p, (u64)0); /* clientid */
*p++ = cpu_to_be32(0); /* length of owner name */
}
return nfserr_denied;
}
static __be32
nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_lock *lock = &u->lock;
struct xdr_stream *xdr = resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
return nfserr;
}
static __be32
nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_lockt *lockt = &u->lockt;
struct xdr_stream *xdr = resp->xdr;
if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
return nfserr;
}
static __be32
nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_locku *locku = &u->locku;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
}
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_link *link = &u->link;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_change_info4(xdr, &link->li_cinfo);
}
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_open *open = &u->open;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
if (nfserr)
return nfserr;
nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
if (nfserr)
return nfserr;
if (xdr_stream_encode_u32(xdr, open->op_rflags) < 0)
return nfserr_resource;
nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
open->op_bmval[2]);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_delegate_type);
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
break;
case NFS4_OPEN_DELEGATE_READ:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_recall);
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_WRITE:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, XDR_UNIT * 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_recall);
/*
* Always flush on close
*
* TODO: space_limit's in delegations
*/
*p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
*p++ = xdr_zero;
*p++ = xdr_zero;
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
switch (open->op_why_no_deleg) {
case WND4_CONTENTION:
case WND4_RESOURCE:
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
/* deleg signaling not supported yet: */
*p++ = cpu_to_be32(0);
break;
default:
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
}
break;
default:
BUG();
}
/* XXX save filehandle here */
return 0;
}
static __be32
nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_open_confirm *oc = &u->open_confirm;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
}
static __be32
nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_open_downgrade *od = &u->open_downgrade;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_stateid(xdr, &od->od_stateid);
}
/*
* The operation of this function assumes that this is the only
* READ operation in the COMPOUND. If there are multiple READs,
* we use nfsd4_encode_readv().
*/
static __be32 nfsd4_encode_splice_read(
struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
struct xdr_buf *buf = xdr->buf;
int status, space_left;
__be32 nfserr;
/*
* Make sure there is room at the end of buf->head for
* svcxdr_encode_opaque_pages() to create a tail buffer
* to XDR-pad the payload.
*/
if (xdr->iov != xdr->buf->head || xdr->end - xdr->p < 1)
return nfserr_resource;
nfserr = nfsd_splice_read(read->rd_rqstp, read->rd_fhp,
file, read->rd_offset, &maxcount,
&read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
goto out_err;
svcxdr_encode_opaque_pages(read->rd_rqstp, xdr, buf->pages,
buf->page_base, maxcount);
status = svc_encode_result_payload(read->rd_rqstp,
buf->head[0].iov_len, maxcount);
if (status) {
nfserr = nfserrno(status);
goto out_err;
}
/*
* Prepare to encode subsequent operations.
*
* xdr_truncate_encode() is not safe to use after a successful
* splice read has been done, so the following stream
* manipulations are open-coded.
*/
space_left = min_t(int, (void *)xdr->end - (void *)xdr->p,
buf->buflen - buf->len);
buf->buflen = buf->len + space_left;
xdr->end = (__be32 *)((void *)xdr->end + space_left);
return nfs_ok;
out_err:
/*
* nfsd_splice_actor may have already messed with the
* page length; reset it so as not to confuse
* xdr_truncate_encode in our caller.
*/
buf->page_len = 0;
return nfserr;
}
static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
unsigned int starting_len = xdr->buf->len;
__be32 zero = xdr_zero;
__be32 nfserr;
if (xdr_reserve_space_vec(xdr, maxcount) < 0)
return nfserr_resource;
nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file,
read->rd_offset, &maxcount,
xdr->buf->page_len & ~PAGE_MASK,
&read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
if (svc_encode_result_payload(resp->rqstp, starting_len, maxcount))
return nfserr_io;
xdr_truncate_encode(xdr, starting_len + xdr_align_size(maxcount));
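/* Zero-fill the XDR padding after the read payload. */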
write_bytes_to_xdr_buf(xdr->buf, starting_len + maxcount, &zero,
xdr_pad_size(maxcount));
return nfs_ok;
}
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
unsigned long maxcount;
struct xdr_stream *xdr = resp->xdr;
struct file *file;
int starting_len = xdr->buf->len;
__be32 *p;
if (nfserr)
return nfserr;
file = read->rd_nf->nf_file;
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
WARN_ON_ONCE(splice_ok);
return nfserr_resource;
}
if (resp->xdr->buf->page_len && splice_ok) {
WARN_ON_ONCE(1);
return nfserr_serverfault;
}
xdr_commit_encode(xdr);
maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
if (nfserr) {
xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
p = xdr_encode_bool(p, read->rd_eof);
*p = cpu_to_be32(read->rd_length);
return nfs_ok;
}
static __be32
nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_readlink *readlink = &u->readlink;
__be32 *p, *maxcount_p, zero = xdr_zero;
struct xdr_stream *xdr = resp->xdr;
int length_offset = xdr->buf->len;
int maxcount, status;
maxcount_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!maxcount_p)
return nfserr_resource;
maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
/*
* XXX: By default, vfs_readlink() will truncate symlinks if they
* would overflow the buffer. Is this kosher in NFSv4? If not, one
* easy fix is: if vfs_readlink() precisely fills the buffer, assume
* that truncation occurred, and return NFS4ERR_RESOURCE.
*/
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
nfserr = nfserr_inval;
if (nfserr)
goto out_err;
status = svc_encode_result_payload(readlink->rl_rqstp, length_offset,
maxcount);
if (status) {
nfserr = nfserrno(status);
goto out_err;
}
*maxcount_p = cpu_to_be32(maxcount);
xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount));
write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero,
xdr_pad_size(maxcount));
return nfs_ok;
out_err:
xdr_truncate_encode(xdr, length_offset);
return nfserr;
}
static __be32
nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_readdir *readdir = &u->readdir;
int maxcount;
int bytes_left;
loff_t offset;
__be64 wire_offset;
struct xdr_stream *xdr = resp->xdr;
int starting_len = xdr->buf->len;
__be32 *p;
nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
if (nfserr != nfs_ok)
return nfserr;
/*
* Number of bytes left for directory entries allowing for the
* final 8 bytes of the readdir and a following failed op:
*/
bytes_left = xdr->buf->buflen - xdr->buf->len
- COMPOUND_ERR_SLACK_SPACE - 8;
if (bytes_left < 0) {
nfserr = nfserr_resource;
goto err_no_verf;
}
maxcount = svc_max_payload(resp->rqstp);
maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
/*
* Note the rfc defines rd_maxcount as the size of the
* READDIR4resok structure, which includes the verifier above
* and the 8 bytes encoded at the end of this function:
*/
if (maxcount < 16) {
nfserr = nfserr_toosmall;
goto err_no_verf;
}
maxcount = min_t(int, maxcount-16, bytes_left);
/* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
if (!readdir->rd_dircount)
readdir->rd_dircount = svc_max_payload(resp->rqstp);
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
offset = readdir->rd_cookie;
nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
&offset,
&readdir->common, nfsd4_encode_dirent);
if (nfserr == nfs_ok &&
readdir->common.err == nfserr_toosmall &&
xdr->buf->len == starting_len + 8) {
/* nothing encoded; which limit did we hit?: */
if (maxcount - 16 < bytes_left)
/* It was the fault of rd_maxcount: */
nfserr = nfserr_toosmall;
else
/* We ran out of buffer space: */
nfserr = nfserr_resource;
}
if (nfserr)
goto err_no_verf;
if (readdir->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 8);
if (!p) {
WARN_ON_ONCE(1);
goto err_no_verf;
}
*p++ = 0; /* no more entries */
*p++ = htonl(readdir->common.err == nfserr_eof);
return 0;
err_no_verf:
xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_remove *remove = &u->remove;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_change_info4(xdr, &remove->rm_cinfo);
}
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_rename *rename = &u->rename;
struct xdr_stream *xdr = resp->xdr;
nfserr = nfsd4_encode_change_info4(xdr, &rename->rn_sinfo);
if (nfserr)
return nfserr;
return nfsd4_encode_change_info4(xdr, &rename->rn_tinfo);
}
static __be32
nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
__be32 *p, *flavorsp;
static bool report = true;
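/* Warn about unsupported flavors, but only on the first occurrence. */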
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
nflavs = exp->ex_nflavors;
} else { /* Handling of some defaults in absence of real secinfo: */
flavs = def_flavs;
if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
nflavs = 2;
flavs[0].pseudoflavor = RPC_AUTH_UNIX;
flavs[1].pseudoflavor = RPC_AUTH_NULL;
} else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
nflavs = 1;
flavs[0].pseudoflavor
= svcauth_gss_flavor(exp->ex_client);
} else {
nflavs = 1;
flavs[0].pseudoflavor
= exp->ex_client->flavour->flavour;
}
}
supported = 0;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
flavorsp = p++; /* to be backfilled later */
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
struct rpcsec_gss_info info;
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
p = xdr_reserve_space(xdr, 4 + 4 +
XDR_LEN(info.oid.len) + 4 + 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(RPC_AUTH_GSS);
p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
*p++ = cpu_to_be32(info.qop);
*p++ = cpu_to_be32(info.service);
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(pf);
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
"is not supported\n", pf);
}
}
if (nflavs != supported)
report = false;
*flavorsp = htonl(supported);
return 0;
}
static __be32
nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_secinfo *secinfo = &u->secinfo;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
}
static __be32
nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
}
/*
* The SETATTR encode routine is special -- it always encodes a bitmap,
* regardless of the error status.
*/
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 16);
if (!p)
return nfserr_resource;
if (nfserr) {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
}
else {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(setattr->sa_bmval[0]);
*p++ = cpu_to_be32(setattr->sa_bmval[1]);
*p++ = cpu_to_be32(setattr->sa_bmval[2]);
}
return nfserr;
}
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_setclientid *scd = &u->setclientid;
struct xdr_stream *xdr = resp->xdr;
if (!nfserr) {
nfserr = nfsd4_encode_clientid4(xdr, &scd->se_clientid);
if (nfserr != nfs_ok)
goto out;
nfserr = nfsd4_encode_verifier4(xdr, &scd->se_confirm);
} else if (nfserr == nfserr_clid_inuse) {
/* empty network id */
if (xdr_stream_encode_u32(xdr, 0) < 0) {
nfserr = nfserr_resource;
goto out;
}
/* empty universal address */
if (xdr_stream_encode_u32(xdr, 0) < 0) {
nfserr = nfserr_resource;
goto out;
}
}
out:
return nfserr;
}
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
if (xdr_stream_encode_u32(resp->xdr, write->wr_bytes_written) < 0)
return nfserr_resource;
if (xdr_stream_encode_u32(resp->xdr, write->wr_how_written) < 0)
return nfserr_resource;
return nfsd4_encode_verifier4(resp->xdr, &write->wr_verifier);
}
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_exchange_id *exid = &u->exchange_id;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
int server_scope_sz;
uint64_t minor_id = 0;
struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
major_id = nn->nfsd_name;
major_id_sz = strlen(nn->nfsd_name);
server_scope = nn->nfsd_name;
server_scope_sz = strlen(nn->nfsd_name);
if (nfsd4_encode_clientid4(xdr, &exid->clientid) != nfs_ok)
return nfserr_resource;
if (xdr_stream_encode_u32(xdr, exid->seqid) < 0)
return nfserr_resource;
if (xdr_stream_encode_u32(xdr, exid->flags) < 0)
return nfserr_resource;
if (xdr_stream_encode_u32(xdr, exid->spa_how) < 0)
return nfserr_resource;
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce bitmap: */
nfserr = nfsd4_encode_bitmap(xdr,
exid->spo_must_enforce[0],
exid->spo_must_enforce[1],
exid->spo_must_enforce[2]);
if (nfserr)
return nfserr;
/* spo_must_allow bitmap: */
nfserr = nfsd4_encode_bitmap(xdr,
exid->spo_must_allow[0],
exid->spo_must_allow[1],
exid->spo_must_allow[2]);
if (nfserr)
return nfserr;
break;
default:
WARN_ON_ONCE(1);
}
p = xdr_reserve_space(xdr,
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
4 /* eir_server_scope.len */ +
(XDR_QUADLEN(server_scope_sz) * 4) +
4 /* eir_server_impl_id.count (0) */);
if (!p)
return nfserr_resource;
/* The server_owner struct */
p = xdr_encode_hyper(p, minor_id); /* Minor id */
/* major id */
p = xdr_encode_opaque(p, major_id, major_id_sz);
/* Server scope */
p = xdr_encode_opaque(p, server_scope, server_scope_sz);
/* Implementation id */
*p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
return 0;
}
static __be32
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_create_session *sess = &u->create_session;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 24);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(sess->seqid);
*p++ = cpu_to_be32(sess->flags);
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->fore_channel.maxops);
*p++ = cpu_to_be32(sess->fore_channel.maxreqs);
*p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
if (sess->fore_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
}
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->back_channel.maxops);
*p++ = cpu_to_be32(sess->back_channel.maxreqs);
*p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
if (sess->back_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
}
return 0;
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_sequence *seq = &u->sequence;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(seq->seqid);
*p++ = cpu_to_be32(seq->slotid);
/* Note slotid's are numbered from zero: */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
*p++ = cpu_to_be32(seq->status_flags);
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
return 0;
}
static __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct xdr_stream *xdr = resp->xdr;
struct nfsd4_test_stateid_id *stateid, *next;
__be32 *p;
p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
if (!p)
return nfserr_resource;
*p++ = htonl(test_stateid->ts_num_ids);
list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
*p++ = stateid->ts_id_status;
}
return 0;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
struct xdr_stream *xdr = resp->xdr;
const struct nfsd4_layout_ops *ops;
u32 starting_len = xdr->buf->len, needed_len;
__be32 *p;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(gdev->gd_layout_type);
ops = nfsd4_layout_ops[gdev->gd_layout_type];
nfserr = ops->encode_getdeviceinfo(xdr, gdev);
if (nfserr) {
/*
* We don't bother to burden the layout drivers with
* enforcing gd_maxcount, just tell the client to
* come back with a bigger buffer if it's not enough.
*/
if (xdr->buf->len + 4 > gdev->gd_maxcount)
goto toosmall;
return nfserr;
}
if (gdev->gd_notify_types) {
p = xdr_reserve_space(xdr, 4 + 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(1); /* bitmap length */
*p++ = cpu_to_be32(gdev->gd_notify_types);
} else {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = 0;
}
return 0;
toosmall:
dprintk("%s: maxcount too small\n", __func__);
needed_len = xdr->buf->len + 4 /* notifications */;
xdr_truncate_encode(xdr, starting_len);
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(needed_len);
return nfserr_toosmall;
}
static __be32
nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_layoutget *lgp = &u->layoutget;
struct xdr_stream *xdr = resp->xdr;
const struct nfsd4_layout_ops *ops;
__be32 *p;
p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(1); /* we always set return-on-close */
*p++ = cpu_to_be32(lgp->lg_sid.si_generation);
p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
sizeof(stateid_opaque_t));
*p++ = cpu_to_be32(1); /* we always return a single layout */
p = xdr_encode_hyper(p, lgp->lg_seg.offset);
p = xdr_encode_hyper(p, lgp->lg_seg.length);
*p++ = cpu_to_be32(lgp->lg_seg.iomode);
*p++ = cpu_to_be32(lgp->lg_layout_type);
ops = nfsd4_layout_ops[lgp->lg_layout_type];
return ops->encode_layoutget(xdr, lgp);
}
static __be32
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lcp->lc_size_chg);
if (lcp->lc_size_chg) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, lcp->lc_newsize);
}
return 0;
}
static __be32
nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lrp->lrs_present);
if (lrp->lrs_present)
return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
return 0;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
struct nfsd42_write_res *write, bool sync)
{
__be32 *p;
p = xdr_reserve_space(resp->xdr, 4);
if (!p)
return nfserr_resource;
if (sync)
*p++ = cpu_to_be32(0);
else {
__be32 nfserr;
*p++ = cpu_to_be32(1);
nfserr = nfsd4_encode_stateid(resp->xdr, &write->cb_stateid);
if (nfserr)
return nfserr;
}
p = xdr_reserve_space(resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_stable_how);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
NFS4_VERIFIER_SIZE);
return nfs_ok;
}
static __be32
nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
{
struct xdr_stream *xdr = resp->xdr;
struct nfs42_netaddr *addr;
__be32 *p;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(ns->nl4_type);
switch (ns->nl4_type) {
case NL4_NETADDR:
addr = &ns->u.nl4_addr;
/* netid_len, netid, uaddr_len, uaddr (port included
* in RPCBIND_MAXUADDRLEN)
*/
p = xdr_reserve_space(xdr,
4 /* netid len */ +
(XDR_QUADLEN(addr->netid_len) * 4) +
4 /* uaddr len */ +
(XDR_QUADLEN(addr->addr_len) * 4));
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(addr->netid_len);
p = xdr_encode_opaque_fixed(p, addr->netid,
addr->netid_len);
*p++ = cpu_to_be32(addr->addr_len);
p = xdr_encode_opaque_fixed(p, addr->addr,
addr->addr_len);
break;
default:
WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
return nfserr_inval;
}
return 0;
}
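/*
 * Editor's sketch (not part of the original file): the reservation above
 * sizes each XDR opaque as a 4-byte length word plus the payload rounded
 * up to the next 4-byte boundary, which is what XDR_QUADLEN(len) * 4
 * yields. A minimal stand-alone check of that identity, assuming only
 * the C standard library:
 */
#if 0	/* build separately: cc -o xdr_pad xdr_pad.c && ./xdr_pad */
#include <assert.h>
#include <stddef.h>

#define DEMO_XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* mirrors XDR_QUADLEN */

static size_t xdr_opaque_size(size_t len)
{
	/* 4-byte length word + payload padded to a 4-byte boundary */
	return 4 + DEMO_XDR_QUADLEN(len) * 4;
}

int main(void)
{
	assert(xdr_opaque_size(0) == 4);	/* empty opaque: length only */
	assert(xdr_opaque_size(1) == 8);	/* one byte pads out to four */
	assert(xdr_opaque_size(4) == 8);	/* already aligned */
	assert(xdr_opaque_size(5) == 12);
	return 0;
}
#endif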
static __be32
nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_copy *copy = &u->copy;
__be32 *p;
nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
nfsd4_copy_is_sync(copy));
if (nfserr)
return nfserr;
p = xdr_reserve_space(resp->xdr, 4 + 4);
if (!p)
return nfserr_resource;
*p++ = xdr_one; /* cr_consecutive */
*p = nfsd4_copy_is_sync(copy) ? xdr_one : xdr_zero;
return 0;
}
static __be32
nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 8 + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, os->count);
*p++ = cpu_to_be32(0);
return nfserr;
}
static __be32
nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
struct nfsd4_read *read)
{
bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
unsigned long maxcount;
__be32 nfserr, *p;
/* Content type, offset, byte count */
p = xdr_reserve_space(xdr, 4 + 8 + 4);
if (!p)
return nfserr_io;
if (resp->xdr->buf->page_len && splice_ok) {
WARN_ON_ONCE(splice_ok);
return nfserr_serverfault;
}
maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
if (nfserr)
return nfserr;
*p++ = cpu_to_be32(NFS4_CONTENT_DATA);
p = xdr_encode_hyper(p, read->rd_offset);
*p = cpu_to_be32(read->rd_length);
return nfs_ok;
}
static __be32
nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
int starting_len = xdr->buf->len;
u32 segments = 0;
__be32 *p;
if (nfserr)
return nfserr;
/* eof flag, segment count */
p = xdr_reserve_space(xdr, 4 + 4);
if (!p)
return nfserr_io;
xdr_commit_encode(xdr);
read->rd_eof = read->rd_offset >= i_size_read(file_inode(file));
if (read->rd_eof)
goto out;
nfserr = nfsd4_encode_read_plus_data(resp, read);
if (nfserr) {
xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
segments++;
out:
p = xdr_encode_bool(p, read->rd_eof);
*p = cpu_to_be32(segments);
return nfserr;
}
static __be32
nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_copy_notify *cn = &u->copy_notify;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
/* 8 sec, 4 nsec */
p = xdr_reserve_space(xdr, 12);
if (!p)
return nfserr_resource;
/* cnr_lease_time */
p = xdr_encode_hyper(p, cn->cpn_sec);
*p++ = cpu_to_be32(cn->cpn_nsec);
/* cnr_stateid */
nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
if (nfserr)
return nfserr;
/* cnr_src.nl_nsvr */
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(1);
nfserr = nfsd42_encode_nl4_server(resp, cn->cpn_src);
return nfserr;
}
static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_seek *seek = &u->seek;
__be32 *p;
p = xdr_reserve_space(resp->xdr, 4 + 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(seek->seek_eof);
p = xdr_encode_hyper(p, seek->seek_pos);
return 0;
}
static __be32
nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *p)
{
return nfserr;
}
/*
* Encode a kmalloc-ed buffer into the XDR stream.
*/
static __be32
nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen)
{
u32 cplen;
__be32 *p;
cplen = min_t(unsigned long, buflen,
((void *)xdr->end - (void *)xdr->p));
p = xdr_reserve_space(xdr, cplen);
if (!p)
return nfserr_resource;
memcpy(p, buf, cplen);
buf += cplen;
buflen -= cplen;
while (buflen) {
cplen = min_t(u32, buflen, PAGE_SIZE);
p = xdr_reserve_space(xdr, cplen);
if (!p)
return nfserr_resource;
memcpy(p, buf, cplen);
if (cplen < PAGE_SIZE) {
/*
* We're done, with a length that wasn't page
* aligned, so possibly not word aligned. Pad
* any trailing bytes with 0.
*/
xdr_encode_opaque_fixed(p, NULL, cplen);
break;
}
buflen -= PAGE_SIZE;
buf += PAGE_SIZE;
}
return 0;
}
static __be32
nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_getxattr *getxattr = &u->getxattr;
struct xdr_stream *xdr = resp->xdr;
__be32 *p, err;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p = cpu_to_be32(getxattr->getxa_len);
if (getxattr->getxa_len == 0)
return 0;
err = nfsd4_vbuf_to_stream(xdr, getxattr->getxa_buf,
getxattr->getxa_len);
kvfree(getxattr->getxa_buf);
return err;
}
static __be32
nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_setxattr *setxattr = &u->setxattr;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_change_info4(xdr, &setxattr->setxa_cinfo);
}
/*
* See if there are cookie values that can be rejected outright.
*/
static __be32
nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs,
u32 *offsetp)
{
u64 cookie = listxattrs->lsxa_cookie;
/*
* If the cookie is larger than the maximum number we can fit
* in either the buffer we just got back from vfs_listxattr, or,
* XDR-encoded, in the return buffer, it's invalid.
*/
if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2))
return nfserr_badcookie;
if (cookie > (listxattrs->lsxa_maxcount /
(XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4)))
return nfserr_badcookie;
*offsetp = (u32)cookie;
return 0;
}
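/*
 * Editor's sketch (not part of the original file): the smallest entry
 * vfs_listxattr can return for a user xattr is "user.x\0", i.e.
 * XATTR_USER_PREFIX_LEN + 2 bytes, so lsxa_len divided by that is an
 * upper bound on how many names the buffer can hold, and therefore on
 * any valid cookie. A stand-alone illustration, assuming the usual
 * five-byte "user." prefix and a hypothetical buffer size:
 */
#if 0	/* build separately: cc -o cookie_bound cookie_bound.c */
#include <assert.h>

#define DEMO_XATTR_USER_PREFIX_LEN 5	/* strlen("user.") */

int main(void)
{
	unsigned int lsxa_len = 70;	/* hypothetical listxattr buffer size */

	/* at most 70 / 7 = 10 minimal "user.x\0" entries fit, so any
	 * cookie greater than 10 must be bogus */
	assert(lsxa_len / (DEMO_XATTR_USER_PREFIX_LEN + 2) == 10);
	return 0;
}
#endif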
static __be32
nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
struct xdr_stream *xdr = resp->xdr;
u32 cookie_offset, count_offset, eof;
u32 left, xdrleft, slen, count;
u32 xdrlen, offset;
__be64 wire_cookie;
char *sp;
__be32 status, tmp;
__be32 *p;
u32 nuser;
eof = 1;
status = nfsd4_listxattr_validate_cookie(listxattrs, &offset);
if (status)
goto out;
/*
* Reserve space for the cookie and the name array count. Record
* the offsets to save them later.
*/
cookie_offset = xdr->buf->len;
count_offset = cookie_offset + 8;
p = xdr_reserve_space(xdr, 12);
if (!p) {
status = nfserr_resource;
goto out;
}
count = 0;
left = listxattrs->lsxa_len;
sp = listxattrs->lsxa_buf;
nuser = 0;
xdrleft = listxattrs->lsxa_maxcount;
while (left > 0 && xdrleft > 0) {
slen = strlen(sp);
/*
* Check if this is a "user." attribute, skip it if not.
*/
if (strncmp(sp, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
goto contloop;
slen -= XATTR_USER_PREFIX_LEN;
xdrlen = 4 + ((slen + 3) & ~3);
if (xdrlen > xdrleft) {
if (count == 0) {
/*
* Can't even fit the first attribute name.
*/
status = nfserr_toosmall;
goto out;
}
eof = 0;
goto wreof;
}
left -= XATTR_USER_PREFIX_LEN;
sp += XATTR_USER_PREFIX_LEN;
if (nuser++ < offset)
goto contloop;
p = xdr_reserve_space(xdr, xdrlen);
if (!p) {
status = nfserr_resource;
goto out;
}
xdr_encode_opaque(p, sp, slen);
xdrleft -= xdrlen;
count++;
contloop:
sp += slen + 1;
left -= slen + 1;
}
/*
* If there were user attributes to copy, but we didn't copy
* any, the offset was too large (e.g. the cookie was invalid).
*/
if (nuser > 0 && count == 0) {
status = nfserr_badcookie;
goto out;
}
wreof:
p = xdr_reserve_space(xdr, 4);
if (!p) {
status = nfserr_resource;
goto out;
}
*p = cpu_to_be32(eof);
wire_cookie = cpu_to_be64(offset + count);
write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &wire_cookie, 8);
tmp = cpu_to_be32(count);
write_bytes_to_xdr_buf(xdr->buf, count_offset, &tmp, 4);
out:
if (listxattrs->lsxa_len)
kvfree(listxattrs->lsxa_buf);
return status;
}
static __be32
nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_removexattr *removexattr = &u->removexattr;
struct xdr_stream *xdr = resp->xdr;
return nfsd4_encode_change_info4(xdr, &removexattr->rmxa_cinfo);
}
typedef __be32(*nfsd4_enc)(struct nfsd4_compoundres *, __be32, union nfsd4_op_u *u);
/*
* Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
* since we don't need to filter out obsolete ops as this is
* done in the decoding phase.
*/
static const nfsd4_enc nfsd4_enc_ops[] = {
[OP_ACCESS] = nfsd4_encode_access,
[OP_CLOSE] = nfsd4_encode_close,
[OP_COMMIT] = nfsd4_encode_commit,
[OP_CREATE] = nfsd4_encode_create,
[OP_DELEGPURGE] = nfsd4_encode_noop,
[OP_DELEGRETURN] = nfsd4_encode_noop,
[OP_GETATTR] = nfsd4_encode_getattr,
[OP_GETFH] = nfsd4_encode_getfh,
[OP_LINK] = nfsd4_encode_link,
[OP_LOCK] = nfsd4_encode_lock,
[OP_LOCKT] = nfsd4_encode_lockt,
[OP_LOCKU] = nfsd4_encode_locku,
[OP_LOOKUP] = nfsd4_encode_noop,
[OP_LOOKUPP] = nfsd4_encode_noop,
[OP_NVERIFY] = nfsd4_encode_noop,
[OP_OPEN] = nfsd4_encode_open,
[OP_OPENATTR] = nfsd4_encode_noop,
[OP_OPEN_CONFIRM] = nfsd4_encode_open_confirm,
[OP_OPEN_DOWNGRADE] = nfsd4_encode_open_downgrade,
[OP_PUTFH] = nfsd4_encode_noop,
[OP_PUTPUBFH] = nfsd4_encode_noop,
[OP_PUTROOTFH] = nfsd4_encode_noop,
[OP_READ] = nfsd4_encode_read,
[OP_READDIR] = nfsd4_encode_readdir,
[OP_READLINK] = nfsd4_encode_readlink,
[OP_REMOVE] = nfsd4_encode_remove,
[OP_RENAME] = nfsd4_encode_rename,
[OP_RENEW] = nfsd4_encode_noop,
[OP_RESTOREFH] = nfsd4_encode_noop,
[OP_SAVEFH] = nfsd4_encode_noop,
[OP_SECINFO] = nfsd4_encode_secinfo,
[OP_SETATTR] = nfsd4_encode_setattr,
[OP_SETCLIENTID] = nfsd4_encode_setclientid,
[OP_SETCLIENTID_CONFIRM] = nfsd4_encode_noop,
[OP_VERIFY] = nfsd4_encode_noop,
[OP_WRITE] = nfsd4_encode_write,
[OP_RELEASE_LOCKOWNER] = nfsd4_encode_noop,
/* NFSv4.1 operations */
[OP_BACKCHANNEL_CTL] = nfsd4_encode_noop,
[OP_BIND_CONN_TO_SESSION] = nfsd4_encode_bind_conn_to_session,
[OP_EXCHANGE_ID] = nfsd4_encode_exchange_id,
[OP_CREATE_SESSION] = nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = nfsd4_encode_noop,
[OP_FREE_STATEID] = nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = nfsd4_encode_noop,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_encode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = nfsd4_encode_layoutcommit,
[OP_LAYOUTGET] = nfsd4_encode_layoutget,
[OP_LAYOUTRETURN] = nfsd4_encode_layoutreturn,
#else
[OP_GETDEVICEINFO] = nfsd4_encode_noop,
[OP_GETDEVICELIST] = nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = nfsd4_encode_noop,
[OP_LAYOUTGET] = nfsd4_encode_noop,
[OP_LAYOUTRETURN] = nfsd4_encode_noop,
#endif
[OP_SECINFO_NO_NAME] = nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = nfsd4_encode_sequence,
[OP_SET_SSV] = nfsd4_encode_noop,
[OP_TEST_STATEID] = nfsd4_encode_test_stateid,
[OP_WANT_DELEGATION] = nfsd4_encode_noop,
[OP_DESTROY_CLIENTID] = nfsd4_encode_noop,
[OP_RECLAIM_COMPLETE] = nfsd4_encode_noop,
/* NFSv4.2 operations */
[OP_ALLOCATE] = nfsd4_encode_noop,
[OP_COPY] = nfsd4_encode_copy,
[OP_COPY_NOTIFY] = nfsd4_encode_copy_notify,
[OP_DEALLOCATE] = nfsd4_encode_noop,
[OP_IO_ADVISE] = nfsd4_encode_noop,
[OP_LAYOUTERROR] = nfsd4_encode_noop,
[OP_LAYOUTSTATS] = nfsd4_encode_noop,
[OP_OFFLOAD_CANCEL] = nfsd4_encode_noop,
[OP_OFFLOAD_STATUS] = nfsd4_encode_offload_status,
[OP_READ_PLUS] = nfsd4_encode_read_plus,
[OP_SEEK] = nfsd4_encode_seek,
[OP_WRITE_SAME] = nfsd4_encode_noop,
[OP_CLONE] = nfsd4_encode_noop,
/* RFC 8276 extended attributes operations */
[OP_GETXATTR] = nfsd4_encode_getxattr,
[OP_SETXATTR] = nfsd4_encode_setxattr,
[OP_LISTXATTRS] = nfsd4_encode_listxattrs,
[OP_REMOVEXATTR] = nfsd4_encode_removexattr,
};
/*
* Calculate whether we still have space to encode respsize bytes.
* There are two considerations:
* - For NFS versions >=4.1, the size of the reply must stay within
* session limits
* - For all NFS versions, we must stay within limited preallocated
* buffer space.
*
* This is called before the operation is processed, so it can only
* provide an upper estimate. For idempotent operations (such as
* getattr), a wrong estimate is not necessarily a problem: we can
* safely fail the operation after processing it, without significant
* side effects.
*/
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
{
struct xdr_buf *buf = &resp->rqstp->rq_res;
struct nfsd4_slot *slot = resp->cstate.slot;
if (buf->len + respsize <= buf->buflen)
return nfs_ok;
if (!nfsd4_has_session(&resp->cstate))
return nfserr_resource;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS) {
WARN_ON_ONCE(1);
return nfserr_rep_too_big_to_cache;
}
return nfserr_rep_too_big;
}
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
struct xdr_stream *xdr = resp->xdr;
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
const struct nfsd4_operation *opdesc = op->opdesc;
int post_err_offset;
nfsd4_enc encoder;
__be32 *p;
p = xdr_reserve_space(xdr, 8);
if (!p)
goto release;
*p++ = cpu_to_be32(op->opnum);
post_err_offset = xdr->buf->len;
if (op->opnum == OP_ILLEGAL)
goto status;
if (op->status && opdesc &&
!(opdesc->op_flags & OP_NONTRIVIAL_ERROR_ENCODE))
goto status;
BUG_ON(op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) ||
!nfsd4_enc_ops[op->opnum]);
encoder = nfsd4_enc_ops[op->opnum];
op->status = encoder(resp, op->status, &op->u);
if (op->status)
trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
xdr_commit_encode(xdr);
/* nfsd4_check_resp_size guarantees enough room for error status */
if (!op->status) {
int space_needed = 0;
if (!nfsd4_last_compound_op(rqstp))
space_needed = COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, space_needed);
}
if (op->status == nfserr_resource && nfsd4_has_session(&resp->cstate)) {
struct nfsd4_slot *slot = resp->cstate.slot;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS)
op->status = nfserr_rep_too_big_to_cache;
else
op->status = nfserr_rep_too_big;
}
if (op->status == nfserr_resource ||
op->status == nfserr_rep_too_big ||
op->status == nfserr_rep_too_big_to_cache) {
/*
* The operation may have already been encoded or
* partially encoded. No op returns anything additional
* in the case of one of these three errors, so we can
* just truncate back to after the status. But it's a
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
xdr_truncate_encode(xdr, post_err_offset);
}
if (so) {
int len = xdr->buf->len - post_err_offset;
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
so->so_replay.rp_buf, len);
}
status:
*p = op->status;
release:
if (opdesc && opdesc->op_release)
opdesc->op_release(&op->u);
/*
* Account for pages consumed while encoding this operation.
* The xdr_stream primitives don't manage rq_next_page.
*/
rqstp->rq_next_page = xdr->page_ptr + 1;
}
/*
* Encode the reply stored in the stateowner reply cache
*
* XDR note: do not encode rp->rp_buflen: the buffer contains the
* previously sent already encoded operation.
*/
void
nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
{
__be32 *p;
struct nfs4_replay *rp = op->replay;
p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
if (!p) {
WARN_ON_ONCE(1);
return;
}
*p++ = cpu_to_be32(op->opnum);
*p++ = rp->rp_status; /* already xdr'ed */
p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
}
void nfsd4_release_compoundargs(struct svc_rqst *rqstp)
{
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
vfree(args->ops);
args->ops = args->iops;
}
while (args->to_free) {
struct svcxdr_tmpbuf *tb = args->to_free;
args->to_free = tb->next;
kfree(tb);
}
}
bool
nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd4_compoundargs *args = rqstp->rq_argp;
/* svcxdr_tmp_alloc */
args->to_free = NULL;
args->xdr = xdr;
args->ops = args->iops;
args->rqstp = rqstp;
return nfsd4_decode_compound(args);
}
bool
nfs4svc_encode_compoundres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
__be32 *p;
/*
* Send buffer space for the following items is reserved
* at the top of nfsd4_proc_compound().
*/
p = resp->statusp;
*p++ = resp->cstate.status;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
p += XDR_QUADLEN(resp->taglen);
*p++ = htonl(resp->opcnt);
nfsd4_sequence_done(resp);
return true;
}
| linux-master | fs/nfsd/nfs4xdr.c |
/*
* Mapping of UID/GIDs to name and vice versa.
*
* Copyright (c) 2002, 2003 The Regents of the University of
* Michigan. All rights reserved.
*
* Marius Aamodt Eriksen <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sunrpc/svc_xprt.h>
#include <net/net_namespace.h>
#include "idmap.h"
#include "nfsd.h"
#include "netns.h"
#include "vfs.h"
/*
* Turn off idmapping when using AUTH_SYS.
*/
static bool nfs4_disable_idmapping = true;
module_param(nfs4_disable_idmapping, bool, 0644);
MODULE_PARM_DESC(nfs4_disable_idmapping,
"Turn off server's NFSv4 idmapping when using 'sec=sys'");
/*
* Cache entry
*/
/*
* XXX we know that IDMAP_NAMESZ < PAGE_SIZE, but it's ugly to rely on
* that.
*/
struct ent {
struct cache_head h;
int type; /* User / Group */
u32 id;
char name[IDMAP_NAMESZ];
char authname[IDMAP_NAMESZ];
struct rcu_head rcu_head;
};
/* Common entry handling */
#define ENT_HASHBITS 8
#define ENT_HASHMAX (1 << ENT_HASHBITS)
static void
ent_init(struct cache_head *cnew, struct cache_head *citm)
{
struct ent *new = container_of(cnew, struct ent, h);
struct ent *itm = container_of(citm, struct ent, h);
new->id = itm->id;
new->type = itm->type;
strscpy(new->name, itm->name, sizeof(new->name));
strscpy(new->authname, itm->authname, sizeof(new->authname));
}
static void
ent_put(struct kref *ref)
{
struct ent *map = container_of(ref, struct ent, h.ref);
kfree_rcu(map, rcu_head);
}
static struct cache_head *
ent_alloc(void)
{
struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL);
if (e)
return &e->h;
else
return NULL;
}
/*
* ID -> Name cache
*/
static uint32_t
idtoname_hash(struct ent *ent)
{
uint32_t hash;
hash = hash_str(ent->authname, ENT_HASHBITS);
hash = hash_long(hash ^ ent->id, ENT_HASHBITS);
/* Flip LSB for user/group */
if (ent->type == IDMAP_TYPE_GROUP)
hash ^= 1;
return hash;
}
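/*
 * Editor's sketch (not part of the original file): the final XOR only
 * flips the low bit, so the user and group mappings for the same
 * (authname, id) pair always hash to two adjacent buckets. A toy
 * stand-alone illustration with an arbitrary stand-in hash value:
 */
#if 0	/* build separately: cc -o lsb_demo lsb_demo.c */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t user_bucket = 0x2a;	/* hypothetical hash result */
	uint32_t group_bucket = user_bucket ^ 1;

	assert(user_bucket != group_bucket);		   /* never collide */
	assert((user_bucket >> 1) == (group_bucket >> 1)); /* adjacent */
	return 0;
}
#endif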
static int
idtoname_upcall(struct cache_detail *cd, struct cache_head *h)
{
return sunrpc_cache_pipe_upcall_timeout(cd, h);
}
static void
idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
int *blen)
{
struct ent *ent = container_of(ch, struct ent, h);
char idstr[11];
qword_add(bpp, blen, ent->authname);
snprintf(idstr, sizeof(idstr), "%u", ent->id);
qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
qword_add(bpp, blen, idstr);
(*bpp)[-1] = '\n';
}
static int
idtoname_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
struct ent *b = container_of(cb, struct ent, h);
return (a->id == b->id && a->type == b->type &&
strcmp(a->authname, b->authname) == 0);
}
static int
idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
{
struct ent *ent;
if (h == NULL) {
seq_puts(m, "#domain type id [name]\n");
return 0;
}
ent = container_of(h, struct ent, h);
seq_printf(m, "%s %s %u", ent->authname,
ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
ent->id);
if (test_bit(CACHE_VALID, &h->flags))
seq_printf(m, " %s", ent->name);
seq_putc(m, '\n');
return 0;
}
static void
warn_no_idmapd(struct cache_detail *detail, int has_died)
{
printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n",
has_died ? "died" : "not been started");
}
static int idtoname_parse(struct cache_detail *, char *, int);
static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
static struct ent *idtoname_update(struct cache_detail *, struct ent *,
struct ent *);
static const struct cache_detail idtoname_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
.cache_put = ent_put,
.cache_upcall = idtoname_upcall,
.cache_request = idtoname_request,
.cache_parse = idtoname_parse,
.cache_show = idtoname_show,
.warn_no_listener = warn_no_idmapd,
.match = idtoname_match,
.init = ent_init,
.update = ent_init,
.alloc = ent_alloc,
};
static int
idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
{
struct ent ent, *res;
char *buf1, *bp;
int len;
int error = -EINVAL;
if (buf[buflen - 1] != '\n')
return -EINVAL;
buf[buflen - 1] = '\0';
buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buf1 == NULL)
return -ENOMEM;
memset(&ent, 0, sizeof(ent));
/* Authentication name */
len = qword_get(&buf, buf1, PAGE_SIZE);
if (len <= 0 || len >= IDMAP_NAMESZ)
goto out;
memcpy(ent.authname, buf1, sizeof(ent.authname));
/* Type */
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
goto out;
ent.type = strcmp(buf1, "user") == 0 ?
IDMAP_TYPE_USER : IDMAP_TYPE_GROUP;
/* ID */
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
goto out;
ent.id = simple_strtoul(buf1, &bp, 10);
if (bp == buf1)
goto out;
/* expiry */
error = get_expiry(&buf, &ent.h.expiry_time);
if (error)
goto out;
error = -ENOMEM;
res = idtoname_lookup(cd, &ent);
if (!res)
goto out;
/* Name */
error = -EINVAL;
len = qword_get(&buf, buf1, PAGE_SIZE);
if (len < 0 || len >= IDMAP_NAMESZ)
goto out;
if (len == 0)
set_bit(CACHE_NEGATIVE, &ent.h.flags);
else
memcpy(ent.name, buf1, sizeof(ent.name));
error = -ENOMEM;
res = idtoname_update(cd, &ent, res);
if (res == NULL)
goto out;
cache_put(&res->h, cd);
error = 0;
out:
kfree(buf1);
return error;
}
static struct ent *
idtoname_lookup(struct cache_detail *cd, struct ent *item)
{
struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
idtoname_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
return NULL;
}
static struct ent *
idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
{
struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
idtoname_hash(new));
if (ch)
return container_of(ch, struct ent, h);
else
return NULL;
}
/*
* Name -> ID cache
*/
static inline int
nametoid_hash(struct ent *ent)
{
return hash_str(ent->name, ENT_HASHBITS);
}
static int
nametoid_upcall(struct cache_detail *cd, struct cache_head *h)
{
return sunrpc_cache_pipe_upcall_timeout(cd, h);
}
static void
nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
int *blen)
{
struct ent *ent = container_of(ch, struct ent, h);
qword_add(bpp, blen, ent->authname);
qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
qword_add(bpp, blen, ent->name);
(*bpp)[-1] = '\n';
}
static int
nametoid_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
struct ent *b = container_of(cb, struct ent, h);
return (a->type == b->type && strcmp(a->name, b->name) == 0 &&
strcmp(a->authname, b->authname) == 0);
}
static int
nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
{
struct ent *ent;
if (h == NULL) {
seq_puts(m, "#domain type name [id]\n");
return 0;
}
ent = container_of(h, struct ent, h);
seq_printf(m, "%s %s %s", ent->authname,
ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
ent->name);
if (test_bit(CACHE_VALID, &h->flags))
seq_printf(m, " %u", ent->id);
seq_putc(m, '\n');
return 0;
}
static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
static struct ent *nametoid_update(struct cache_detail *, struct ent *,
struct ent *);
static int nametoid_parse(struct cache_detail *, char *, int);
static const struct cache_detail nametoid_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
.cache_put = ent_put,
.cache_upcall = nametoid_upcall,
.cache_request = nametoid_request,
.cache_parse = nametoid_parse,
.cache_show = nametoid_show,
.warn_no_listener = warn_no_idmapd,
.match = nametoid_match,
.init = ent_init,
.update = ent_init,
.alloc = ent_alloc,
};
static int
nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
{
struct ent ent, *res;
char *buf1;
int len, error = -EINVAL;
if (buf[buflen - 1] != '\n')
return -EINVAL;
buf[buflen - 1] = '\0';
buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buf1 == NULL)
return -ENOMEM;
memset(&ent, 0, sizeof(ent));
/* Authentication name */
len = qword_get(&buf, buf1, PAGE_SIZE);
if (len <= 0 || len >= IDMAP_NAMESZ)
goto out;
memcpy(ent.authname, buf1, sizeof(ent.authname));
/* Type */
if (qword_get(&buf, buf1, PAGE_SIZE) <= 0)
goto out;
ent.type = strcmp(buf1, "user") == 0 ?
IDMAP_TYPE_USER : IDMAP_TYPE_GROUP;
/* Name */
len = qword_get(&buf, buf1, PAGE_SIZE);
if (len <= 0 || len >= IDMAP_NAMESZ)
goto out;
memcpy(ent.name, buf1, sizeof(ent.name));
/* expiry */
error = get_expiry(&buf, &ent.h.expiry_time);
if (error)
goto out;
/* ID */
error = get_int(&buf, &ent.id);
if (error == -EINVAL)
goto out;
if (error == -ENOENT)
set_bit(CACHE_NEGATIVE, &ent.h.flags);
error = -ENOMEM;
res = nametoid_lookup(cd, &ent);
if (res == NULL)
goto out;
res = nametoid_update(cd, &ent, res);
if (res == NULL)
goto out;
cache_put(&res->h, cd);
error = 0;
out:
kfree(buf1);
return error;
}
static struct ent *
nametoid_lookup(struct cache_detail *cd, struct ent *item)
{
struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
nametoid_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
return NULL;
}
static struct ent *
nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
{
struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
nametoid_hash(new));
if (ch)
return container_of(ch, struct ent, h);
else
return NULL;
}
/*
* Exported API
*/
int
nfsd_idmap_init(struct net *net)
{
int rv;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
if (IS_ERR(nn->idtoname_cache))
return PTR_ERR(nn->idtoname_cache);
rv = cache_register_net(nn->idtoname_cache, net);
if (rv)
goto destroy_idtoname_cache;
nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
if (IS_ERR(nn->nametoid_cache)) {
rv = PTR_ERR(nn->nametoid_cache);
goto unregister_idtoname_cache;
}
rv = cache_register_net(nn->nametoid_cache, net);
if (rv)
goto destroy_nametoid_cache;
return 0;
destroy_nametoid_cache:
cache_destroy_net(nn->nametoid_cache, net);
unregister_idtoname_cache:
cache_unregister_net(nn->idtoname_cache, net);
destroy_idtoname_cache:
cache_destroy_net(nn->idtoname_cache, net);
return rv;
}
void
nfsd_idmap_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
cache_unregister_net(nn->idtoname_cache, net);
cache_unregister_net(nn->nametoid_cache, net);
cache_destroy_net(nn->idtoname_cache, net);
cache_destroy_net(nn->nametoid_cache, net);
}
static int
idmap_lookup(struct svc_rqst *rqstp,
struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
struct ent *key, struct cache_detail *detail, struct ent **item)
{
int ret;
*item = lookup_fn(detail, key);
if (!*item)
return -ENOMEM;
retry:
ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
if (ret == -ETIMEDOUT) {
struct ent *prev_item = *item;
*item = lookup_fn(detail, key);
if (*item != prev_item)
goto retry;
cache_put(&(*item)->h, detail);
}
return ret;
}
static char *
rqst_authname(struct svc_rqst *rqstp)
{
struct auth_domain *clp;
clp = rqstp->rq_gssclient ? rqstp->rq_gssclient : rqstp->rq_client;
return clp->name;
}
static __be32
idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
u32 *id)
{
struct ent *item, key = {
.type = type,
};
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (namelen + 1 > sizeof(key.name))
return nfserr_badowner;
memcpy(key.name, name, namelen);
key.name[namelen] = '\0';
strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
if (ret == -ENOENT)
return nfserr_badowner;
if (ret)
return nfserrno(ret);
*id = item->id;
cache_put(&item->h, nn->nametoid_cache);
return 0;
}
static __be32 encode_ascii_id(struct xdr_stream *xdr, u32 id)
{
char buf[11];
int len;
__be32 *p;
len = sprintf(buf, "%u", id);
p = xdr_reserve_space(xdr, len + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, buf, len);
return 0;
}
static __be32 idmap_id_to_name(struct xdr_stream *xdr,
struct svc_rqst *rqstp, int type, u32 id)
{
struct ent *item, key = {
.id = id,
.type = type,
};
__be32 *p;
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
if (ret == -ENOENT)
return encode_ascii_id(xdr, id);
if (ret)
return nfserrno(ret);
ret = strlen(item->name);
WARN_ON_ONCE(ret > IDMAP_NAMESZ);
p = xdr_reserve_space(xdr, ret + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, item->name, ret);
cache_put(&item->h, nn->idtoname_cache);
return 0;
}
static bool
numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id)
{
int ret;
char buf[11];
if (namelen + 1 > sizeof(buf))
/* too long to represent a 32-bit id: */
return false;
/* Just to make sure it's null-terminated: */
memcpy(buf, name, namelen);
buf[namelen] = '\0';
ret = kstrtouint(buf, 10, id);
return ret == 0;
}
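/*
 * Editor's sketch (not part of the original file): the 11-byte bound
 * above works because the largest 32-bit id, 4294967295, is ten digits
 * plus a NUL terminator. A stand-alone check:
 */
#if 0	/* build separately: cc -o id_len id_len.c */
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int id = 4294967295u;	/* UINT32_MAX */
	char buf[11];

	snprintf(buf, sizeof(buf), "%u", id);
	assert(strlen(buf) == 10);	/* "4294967295" just fits */
	return 0;
}
#endif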
static __be32
do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id)
{
if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
if (numeric_name_to_id(rqstp, type, name, namelen, id))
return 0;
/*
* otherwise, fall through and try idmapping, for
* backwards compatibility with clients sending names:
*/
return idmap_name_to_id(rqstp, type, name, namelen, id);
}
static __be32 encode_name_from_id(struct xdr_stream *xdr,
struct svc_rqst *rqstp, int type, u32 id)
{
if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
return encode_ascii_id(xdr, id);
return idmap_id_to_name(xdr, rqstp, type, id);
}
__be32
nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
kuid_t *uid)
{
__be32 status;
u32 id = -1;
if (name == NULL || namelen == 0)
return nfserr_inval;
status = do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, &id);
*uid = make_kuid(nfsd_user_namespace(rqstp), id);
if (!uid_valid(*uid))
status = nfserr_badowner;
return status;
}
__be32
nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
kgid_t *gid)
{
__be32 status;
u32 id = -1;
if (name == NULL || namelen == 0)
return nfserr_inval;
status = do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, &id);
*gid = make_kgid(nfsd_user_namespace(rqstp), id);
if (!gid_valid(*gid))
status = nfserr_badowner;
return status;
}
__be32 nfsd4_encode_user(struct xdr_stream *xdr, struct svc_rqst *rqstp,
kuid_t uid)
{
u32 id = from_kuid_munged(nfsd_user_namespace(rqstp), uid);
return encode_name_from_id(xdr, rqstp, IDMAP_TYPE_USER, id);
}
__be32 nfsd4_encode_group(struct xdr_stream *xdr, struct svc_rqst *rqstp,
kgid_t gid)
{
u32 id = from_kgid_munged(nfsd_user_namespace(rqstp), gid);
return encode_name_from_id(xdr, rqstp, IDMAP_TYPE_GROUP, id);
}
| linux-master | fs/nfsd/nfs4idmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Request reply cache. This is currently a global cache, but this may
* change in the future and be a per-client cache.
*
* This code is heavily inspired by the 44BSD implementation, although
* it does things a bit differently.
*
* Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
*/
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>
#include "nfsd.h"
#include "cache.h"
#include "trace.h"
/*
* We use this value to determine the number of hash buckets from the max
* cache size, the idea being that when the cache is at its maximum number
* of entries, then this should be the average number of entries per bucket.
*/
#define TARGET_BUCKET_SIZE 64
struct nfsd_drc_bucket {
struct rb_root rb_head;
struct list_head lru_head;
spinlock_t cache_lock;
};
static struct kmem_cache *drc_slab;
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
struct shrink_control *sc);
/*
* Put a cap on the size of the DRC based on the amount of available
* low memory in the machine.
*
* 64MB: 8192
* 128MB: 11585
* 256MB: 16384
* 512MB: 23170
* 1GB: 32768
* 2GB: 46340
* 4GB: 65536
* 8GB: 92681
* 16GB: 131072
*
* ...with a hard cap of 256k entries. In the worst case, each entry will be
* ~1k, so the above numbers should give a rough max of the amount of memory
* used in k.
*
* XXX: these limits are per-container, so memory used will increase
* linearly with number of containers. Maybe that's OK.
*/
static unsigned int
nfsd_cache_size_limit(void)
{
unsigned int limit;
unsigned long low_pages = totalram_pages() - totalhigh_pages();
limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
return min_t(unsigned int, limit, 256*1024);
}
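/*
 * Editor's sketch (not part of the original file): a stand-alone
 * version of the sizing formula, assuming 4K pages, that reproduces the
 * table above (64MB of low memory -> 8192 entries, 1GB -> 32768):
 */
#if 0	/* build separately: cc -o drc_limit drc_limit.c -lm */
#include <assert.h>
#include <math.h>

#define DEMO_PAGE_SHIFT 12	/* assumes 4K pages */

static unsigned int demo_cache_size_limit(unsigned long low_bytes)
{
	unsigned long low_pages = low_bytes >> DEMO_PAGE_SHIFT;
	unsigned int limit;

	/* sqrt() stands in for the kernel's integer int_sqrt(); the
	 * test values below are perfect squares, so they agree */
	limit = (16 * (unsigned int)sqrt((double)low_pages)) <<
		(DEMO_PAGE_SHIFT - 10);
	return limit < 256 * 1024 ? limit : 256 * 1024;
}

int main(void)
{
	assert(demo_cache_size_limit(64UL << 20) == 8192);
	assert(demo_cache_size_limit(1UL << 30) == 32768);
	return 0;
}
#endif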
/*
* Compute the number of hash buckets we need. Divide the max cachesize by
* the "target" max bucket size, and round up to next power of two.
*/
static unsigned int
nfsd_hashsize(unsigned int limit)
{
return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
struct nfsd_net *nn)
{
struct nfsd_cacherep *rp;
rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
RB_CLEAR_NODE(&rp->c_node);
INIT_LIST_HEAD(&rp->c_lru);
memset(&rp->c_key, 0, sizeof(rp->c_key));
rp->c_key.k_xid = rqstp->rq_xid;
rp->c_key.k_proc = rqstp->rq_proc;
rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
rp->c_key.k_prot = rqstp->rq_prot;
rp->c_key.k_vers = rqstp->rq_vers;
rp->c_key.k_len = rqstp->rq_arg.len;
rp->c_key.k_csum = csum;
}
return rp;
}
static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF)
kfree(rp->c_replvec.iov_base);
kmem_cache_free(drc_slab, rp);
}
static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
struct nfsd_cacherep *rp;
unsigned long freed = 0;
while (!list_empty(dispose)) {
rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
list_del(&rp->c_lru);
nfsd_cacherep_free(rp);
freed++;
}
return freed;
}
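/*
 * Editor's note (sketch, not part of the original file): disposal
 * follows the unlink-under-lock, free-outside-lock idiom used
 * throughout this file: entries are unlinked from a bucket while its
 * spinlock is held, collected on a private list, and freed only after
 * the lock is dropped, keeping kfree()/kfree_rcu() out of the critical
 * section. Shape of a typical call site (cf. nfsd_reply_cache_scan):
 *
 *	LIST_HEAD(dispose);
 *
 *	spin_lock(&b->cache_lock);
 *	nfsd_prune_bucket_locked(nn, b, 0, &dispose);
 *	spin_unlock(&b->cache_lock);
 *	freed = nfsd_cacherep_dispose(&dispose);
 */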
static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
struct nfsd_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
if (rp->c_state != RC_UNUSED) {
rb_erase(&rp->c_node, &b->rb_head);
list_del(&rp->c_lru);
atomic_dec(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
}
}
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
struct nfsd_net *nn)
{
nfsd_cacherep_unlink_locked(nn, b, rp);
nfsd_cacherep_free(rp);
}
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
struct nfsd_net *nn)
{
spin_lock(&b->cache_lock);
nfsd_cacherep_unlink_locked(nn, b, rp);
spin_unlock(&b->cache_lock);
nfsd_cacherep_free(rp);
}
int nfsd_drc_slab_create(void)
{
drc_slab = kmem_cache_create("nfsd_drc",
sizeof(struct nfsd_cacherep), 0, 0, NULL);
return drc_slab ? 0 : -ENOMEM;
}
void nfsd_drc_slab_free(void)
{
kmem_cache_destroy(drc_slab);
}
/**
* nfsd_net_reply_cache_init - per net namespace reply cache set-up
* @nn: nfsd_net being initialized
*
* Returns zero on success; otherwise a negative errno is returned.
*/
int nfsd_net_reply_cache_init(struct nfsd_net *nn)
{
return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}
/**
* nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
* @nn: nfsd_net being freed
*
*/
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
{
nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
unsigned int i;
int status = 0;
nn->max_drc_entries = nfsd_cache_size_limit();
atomic_set(&nn->num_drc_entries, 0);
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);
nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
nn->nfsd_reply_cache_shrinker.seeks = 1;
status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
"nfsd-reply:%s", nn->nfsd_name);
if (status)
return status;
nn->drc_hashtbl = kvzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
if (!nn->drc_hashtbl)
goto out_shrinker;
for (i = 0; i < hashsize; i++) {
INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
}
nn->drc_hashsize = hashsize;
return 0;
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
return -ENOMEM;
}
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
struct nfsd_cacherep *rp;
unsigned int i;
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
for (i = 0; i < nn->drc_hashsize; i++) {
struct list_head *head = &nn->drc_hashtbl[i].lru_head;
while (!list_empty(head)) {
rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
rp, nn);
}
}
kvfree(nn->drc_hashtbl);
nn->drc_hashtbl = NULL;
nn->drc_hashsize = 0;
}
/*
* Move a cache entry to the end of its bucket's LRU list.
*/
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
rp->c_timestamp = jiffies;
list_move_tail(&rp->c_lru, &b->lru_head);
}
static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
unsigned int hash = hash_32((__force u32)xid, nn->maskbits);
return &nn->drc_hashtbl[hash];
}
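/*
 * Editor's sketch (not part of the original file): hash_32() multiplies
 * by the 32-bit golden-ratio constant and keeps the top @bits bits, so
 * the result is always a valid index into the 2^maskbits buckets. A
 * stand-alone mirror of that computation:
 */
#if 0	/* build separately: cc -o bucket_demo bucket_demo.c */
#include <assert.h>
#include <stdint.h>

#define DEMO_GOLDEN_RATIO_32 0x61C88647u	/* as in linux/hash.h */

static uint32_t demo_hash_32(uint32_t val, unsigned int bits)
{
	return (val * DEMO_GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	unsigned int maskbits = 7;	/* hypothetical: 128 buckets */

	assert(demo_hash_32(0xdeadbeef, maskbits) < (1u << maskbits));
	return 0;
}
#endif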
/*
* Unlink no more than @max expired entries in bucket @b and move them to
* @dispose for the caller to free. If @max is zero, do not limit the
* number of removed entries.
*/
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
unsigned int max, struct list_head *dispose)
{
unsigned long expiry = jiffies - RC_EXPIRE;
struct nfsd_cacherep *rp, *tmp;
unsigned int freed = 0;
lockdep_assert_held(&b->cache_lock);
/* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
/*
* Don't free entries attached to calls that are still
* in-progress, but do keep scanning the list.
*/
if (rp->c_state == RC_INPROG)
continue;
if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
time_before(expiry, rp->c_timestamp))
break;
nfsd_cacherep_unlink_locked(nn, b, rp);
list_add(&rp->c_lru, dispose);
if (max && ++freed > max)
break;
}
}
/**
* nfsd_reply_cache_count - count_objects method for the DRC shrinker
* @shrink: our registered shrinker context
* @sc: garbage collection parameters
*
* Returns the total number of entries in the duplicate reply cache. To
* keep things simple and quick, this is not the number of expired entries
* in the cache (i.e., the number that would be removed by a call to
* nfsd_reply_cache_scan).
*/
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct nfsd_net *nn = container_of(shrink,
struct nfsd_net, nfsd_reply_cache_shrinker);
return atomic_read(&nn->num_drc_entries);
}
/**
* nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
* @shrink: our registered shrinker context
* @sc: garbage collection parameters
*
* Free expired entries on each bucket's LRU list until we've released
* nr_to_scan freed objects. Nothing will be released if the cache
* has not exceeded its max_drc_entries limit.
*
* Returns the number of entries released by this call.
*/
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct nfsd_net *nn = container_of(shrink,
struct nfsd_net, nfsd_reply_cache_shrinker);
unsigned long freed = 0;
LIST_HEAD(dispose);
unsigned int i;
for (i = 0; i < nn->drc_hashsize; i++) {
struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
if (list_empty(&b->lru_head))
continue;
spin_lock(&b->cache_lock);
nfsd_prune_bucket_locked(nn, b, 0, &dispose);
spin_unlock(&b->cache_lock);
freed += nfsd_cacherep_dispose(&dispose);
if (freed > sc->nr_to_scan)
break;
}
trace_nfsd_drc_gc(nn, freed);
return freed;
}
/*
* Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
*/
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
int idx;
unsigned int base;
__wsum csum;
struct xdr_buf *buf = &rqstp->rq_arg;
const unsigned char *p = buf->head[0].iov_base;
size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
RC_CSUMLEN);
size_t len = min(buf->head[0].iov_len, csum_len);
/* rq_arg.head first */
csum = csum_partial(p, len, 0);
csum_len -= len;
/* Continue into page array */
idx = buf->page_base / PAGE_SIZE;
base = buf->page_base & ~PAGE_MASK;
while (csum_len) {
p = page_address(buf->pages[idx]) + base;
len = min_t(size_t, PAGE_SIZE - base, csum_len);
csum = csum_partial(p, len, csum);
csum_len -= len;
base = 0;
++idx;
}
return csum;
}
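/*
 * Editor's sketch (not part of the original file): the walk above caps
 * the checksum at RC_CSUMLEN bytes and clamps each step to what the
 * current segment holds. The same bounded two-segment walk in
 * stand-alone form, with a trivial additive sum standing in for
 * csum_partial():
 */
#if 0	/* build separately: cc -o csum_walk csum_walk.c */
#include <assert.h>
#include <stddef.h>

#define DEMO_CSUMLEN 8	/* stand-in for RC_CSUMLEN */

static unsigned int demo_csum(const char *head, size_t head_len,
			      const char *pages, size_t page_len)
{
	size_t csum_len = head_len + page_len;
	size_t len, i;
	unsigned int csum = 0;

	if (csum_len > DEMO_CSUMLEN)
		csum_len = DEMO_CSUMLEN;

	/* head first, then continue into the page data */
	len = head_len < csum_len ? head_len : csum_len;
	for (i = 0; i < len; i++)
		csum += (unsigned char)head[i];
	csum_len -= len;
	for (i = 0; i < csum_len; i++)
		csum += (unsigned char)pages[i];
	return csum;
}

int main(void)
{
	/* 5 head bytes plus the first 3 page bytes are summed */
	assert(demo_csum("AAAAA", 5, "BBBBBBBB", 8) == 5 * 'A' + 3 * 'B');
	return 0;
}
#endif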
static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
key->c_key.k_csum != rp->c_key.k_csum) {
nfsd_stats_payload_misses_inc(nn);
trace_nfsd_drc_mismatch(nn, key, rp);
}
return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
/*
* Search the request hash for an entry that matches the given rqstp.
* Must be called with cache_lock held. Returns the found entry or
* inserts an empty key on failure.
*/
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
struct nfsd_net *nn)
{
struct nfsd_cacherep *rp, *ret = key;
struct rb_node **p = &b->rb_head.rb_node,
*parent = NULL;
unsigned int entries = 0;
int cmp;
while (*p != NULL) {
++entries;
parent = *p;
rp = rb_entry(parent, struct nfsd_cacherep, c_node);
cmp = nfsd_cache_key_cmp(key, rp, nn);
if (cmp < 0)
p = &parent->rb_left;
else if (cmp > 0)
p = &parent->rb_right;
else {
ret = rp;
goto out;
}
}
rb_link_node(&key->c_node, parent, p);
rb_insert_color(&key->c_node, &b->rb_head);
out:
/* tally hash chain length stats */
if (entries > nn->longest_chain) {
nn->longest_chain = entries;
nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
} else if (entries == nn->longest_chain) {
/* prefer to keep the smallest cachesize possible here */
nn->longest_chain_cachesize = min_t(unsigned int,
nn->longest_chain_cachesize,
atomic_read(&nn->num_drc_entries));
}
lru_put_end(b, ret);
return ret;
}
/**
* nfsd_cache_lookup - Find an entry in the duplicate reply cache
* @rqstp: Incoming Call to find
* @cacherep: OUT: DRC entry for this request
*
* Try to find an entry matching the current call in the cache. Since
* the common case is a cache miss followed by an insert, an entry is
* preallocated and inserted speculatively; if a matching entry is
* already present, the preallocated one is freed and the cached entry's
* state determines how to respond.
*
* Return values:
* %RC_DOIT: Process the request normally
* %RC_REPLY: Reply from cache
* %RC_DROPIT: Do not process the request further
*/
int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
{
struct nfsd_net *nn;
struct nfsd_cacherep *rp, *found;
__wsum csum;
struct nfsd_drc_bucket *b;
int type = rqstp->rq_cachetype;
unsigned long freed;
LIST_HEAD(dispose);
int rtn = RC_DOIT;
if (type == RC_NOCACHE) {
nfsd_stats_rc_nocache_inc();
goto out;
}
csum = nfsd_cache_csum(rqstp);
/*
* Since the common case is a cache miss followed by an insert,
* preallocate an entry.
*/
nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
rp = nfsd_cacherep_alloc(rqstp, csum, nn);
if (!rp)
goto out;
b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
if (found != rp)
goto found_entry;
*cacherep = rp;
rp->c_state = RC_INPROG;
nfsd_prune_bucket_locked(nn, b, 3, &dispose);
spin_unlock(&b->cache_lock);
freed = nfsd_cacherep_dispose(&dispose);
trace_nfsd_drc_gc(nn, freed);
nfsd_stats_rc_misses_inc();
atomic_inc(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
goto out;
found_entry:
/* We found a matching entry which is either in progress or done. */
nfsd_reply_cache_free_locked(NULL, rp, nn);
nfsd_stats_rc_hits_inc();
rtn = RC_DROPIT;
rp = found;
/* Request being processed */
if (rp->c_state == RC_INPROG)
goto out_trace;
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
goto out_trace;
/* Compose RPC reply header */
switch (rp->c_type) {
case RC_NOCACHE:
break;
case RC_REPLSTAT:
xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
rtn = RC_REPLY;
break;
case RC_REPLBUFF:
if (!nfsd_cache_append(rqstp, &rp->c_replvec))
goto out_unlock; /* should not happen */
rtn = RC_REPLY;
break;
default:
WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}
out_trace:
trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
spin_unlock(&b->cache_lock);
out:
return rtn;
}
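/*
 * Illustrative summary (not from the original source) of the DRC state
 * flow implemented in nfsd_cache_lookup() above and nfsd_cache_update()
 * below:
 *
 *   request arrives -> nfsd_cache_lookup()
 *     miss           -> RC_DOIT; a new entry is inserted, marked RC_INPROG
 *     hit, RC_INPROG -> RC_DROPIT; a twin request is already in flight
 *     hit, RC_DONE   -> RC_REPLY; the cached reply is resent
 *   reply built -> nfsd_cache_update() stores it and marks the entry RC_DONE
 */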
/**
* nfsd_cache_update - Update an entry in the duplicate reply cache.
* @rqstp: svc_rqst with a finished Reply
* @rp: IN: DRC entry for this request
* @cachetype: which cache to update
* @statp: pointer to Reply's NFS status code, or NULL
*
* This is called from nfsd_dispatch when the procedure has been
* executed and the complete reply is in rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
* of memory for a cache with a max reply size of 100 bytes (diropokres).
*
* If we should start to use different types of cache entries tailored
* specifically for attrstat and fh's, we may save even more space.
*
* Also note that a cachetype of RC_NOCACHE can legally be passed when
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
struct nfsd_drc_bucket *b;
int len;
size_t bufsize = 0;
if (!rp)
return;
b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
len >>= 2;
	/* Don't cache excessive amounts of data or XDR failures */
if (!statp || len > (256 >> 2)) {
nfsd_reply_cache_free(b, rp, nn);
return;
}
switch (cachetype) {
case RC_REPLSTAT:
if (len != 1)
printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
rp->c_replstat = *statp;
break;
case RC_REPLBUFF:
cachv = &rp->c_replvec;
bufsize = len << 2;
cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
if (!cachv->iov_base) {
nfsd_reply_cache_free(b, rp, nn);
return;
}
cachv->iov_len = bufsize;
memcpy(cachv->iov_base, statp, bufsize);
break;
case RC_NOCACHE:
nfsd_reply_cache_free(b, rp, nn);
return;
}
spin_lock(&b->cache_lock);
nfsd_stats_drc_mem_usage_add(nn, bufsize);
lru_put_end(b, rp);
rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
rp->c_state = RC_DONE;
spin_unlock(&b->cache_lock);
return;
}
/*
* Copy cached reply to current reply buffer. Should always fit.
* FIXME as reply is in a page, we should just attach the page, and
* keep a refcount....
*/
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
struct kvec *vec = &rqstp->rq_res.head[0];
if (vec->iov_len + data->iov_len > PAGE_SIZE) {
printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
data->iov_len);
return 0;
}
memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
vec->iov_len += data->iov_len;
return 1;
}
/*
* Note that fields may be added, removed or reordered in the future. Programs
* scraping this file for info should test the labels to ensure they're
* getting the correct field.
*/
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
nfsd_net_id);
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
atomic_read(&nn->num_drc_entries));
seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
seq_printf(m, "mem usage: %lld\n",
percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
seq_printf(m, "cache hits: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
seq_printf(m, "cache misses: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
seq_printf(m, "not cached: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
seq_printf(m, "payload misses: %lld\n",
percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
return 0;
}
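/*
 * Illustrative only, and not part of this file: a minimal userspace
 * reader that follows the warning above by matching on labels rather
 * than on line position. The proc path below is an assumption about
 * where nfsdfs exposes this file on a typical system.
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	unsigned long long hits;
	FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* test the label, not the position, per the comment above */
		if (sscanf(line, "cache hits: %llu", &hits) == 1)
			printf("DRC hits: %llu\n", hits);
	}
	fclose(f);
	return 0;
}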
| linux-master | fs/nfsd/nfscache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* procfs-based user access to knfsd statistics
*
* /proc/net/rpc/nfsd
*
* Format:
* rc <hits> <misses> <nocache>
 * Statistics for the reply cache
* fh <stale> <deprecated filehandle cache stats>
* statistics for filehandle lookup
* io <bytes-read> <bytes-written>
* statistics for IO throughput
* th <threads> <deprecated thread usage histogram stats>
* number of threads
* ra <deprecated ra-cache stats>
*
* plus generic RPC stats (see net/sunrpc/stats.c)
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <[email protected]>
*/
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/sunrpc/stats.h>
#include <net/net_namespace.h>
#include "nfsd.h"
struct nfsd_stats nfsdstats;
struct svc_stat nfsd_svcstats = {
.program = &nfsd_program,
};
static int nfsd_show(struct seq_file *seq, void *v)
{
int i;
seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
/* thread usage: */
seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
/* deprecated thread usage histogram stats */
for (i = 0; i < 10; i++)
seq_puts(seq, " 0.000");
/* deprecated ra-cache stats */
seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
/* show my rpc info */
svc_seq_show(seq, &nfsd_svcstats);
#ifdef CONFIG_NFSD_V4
	/* Show counts for individual nfsv4 operations */
	/* Operation numbers 0-2 are emitted as well, to keep the output uniform */
	seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1);
for (i = 0; i <= LAST_NFS4_OP; i++) {
seq_printf(seq, " %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
}
seq_printf(seq, "\nwdeleg_getattr %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]));
seq_putc(seq, '\n');
#endif
return 0;
}
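/*
 * Illustration (values invented): the fixed-position output produced by
 * nfsd_show() above begins roughly like
 *
 *   rc 10 200 3
 *   fh 0 0 0 0 0
 *   io 4096 8192
 *   th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
 *   ra 0 0 0 0 0 0 0 0 0 0 0 0
 *
 * followed by the generic RPC statistics from svc_seq_show().
 */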
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
{
int i, err = 0;
for (i = 0; !err && i < num; i++)
err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
if (!err)
return 0;
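	/* unwind: destroy only the counters that were initialized */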
for (; i > 0; i--)
percpu_counter_destroy(&counters[i-1]);
return err;
}
void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_set(&counters[i], 0);
}
void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_destroy(&counters[i]);
}
static int nfsd_stat_counters_init(void)
{
return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
}
static void nfsd_stat_counters_destroy(void)
{
nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
}
int nfsd_stat_init(void)
{
int err;
err = nfsd_stat_counters_init();
if (err)
return err;
svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
return 0;
}
void nfsd_stat_shutdown(void)
{
nfsd_stat_counters_destroy();
svc_proc_unregister(&init_net, "nfsd");
}
| linux-master | fs/nfsd/stats.c |
/*
* Server-side procedures for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <[email protected]>
* Andy Adamson <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/namei.h>
#include <linux/sunrpc/addr.h>
#include <linux/nfs_ssc.h>
#include "idmap.h"
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "acl.h"
#include "pnfs.h"
#include "trace.h"
static bool inter_copy_offload_enable;
module_param(inter_copy_offload_enable, bool, 0644);
MODULE_PARM_DESC(inter_copy_offload_enable,
"Enable inter server to server copy offload. Default: false");
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
static int nfsd4_ssc_umount_timeout = 900000; /* default to 15 mins */
module_param(nfsd4_ssc_umount_timeout, int, 0644);
MODULE_PARM_DESC(nfsd4_ssc_umount_timeout,
"idle msecs before unmount export from source server");
#endif
#define NFSDDBG_FACILITY NFSDDBG_PROC
static u32 nfsd_attrmask[] = {
NFSD_WRITEABLE_ATTRS_WORD0,
NFSD_WRITEABLE_ATTRS_WORD1,
NFSD_WRITEABLE_ATTRS_WORD2
};
static u32 nfsd41_ex_attrmask[] = {
NFSD_SUPPATTR_EXCLCREAT_WORD0,
NFSD_SUPPATTR_EXCLCREAT_WORD1,
NFSD_SUPPATTR_EXCLCREAT_WORD2
};
static __be32
check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u32 *bmval, u32 *writable)
{
struct dentry *dentry = cstate->current_fh.fh_dentry;
struct svc_export *exp = cstate->current_fh.fh_export;
if (!nfsd_attrs_supported(cstate->minorversion, bmval))
return nfserr_attrnotsupp;
if ((bmval[0] & FATTR4_WORD0_ACL) && !IS_POSIXACL(d_inode(dentry)))
return nfserr_attrnotsupp;
if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) &&
!(exp->ex_flags & NFSEXP_SECURITY_LABEL))
return nfserr_attrnotsupp;
if (writable && !bmval_is_subset(bmval, writable))
return nfserr_inval;
if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) &&
(bmval[1] & FATTR4_WORD1_MODE))
return nfserr_inval;
return nfs_ok;
}
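/*
 * For reference, a sketch (assumed, not the canonical definition, which
 * lives in nfsd's headers) of the subset test that check_attr_support()
 * relies on: @bm1 is a subset of @bm2 iff no word of @bm1 has a bit set
 * that is clear in the corresponding word of @bm2.
 */
static inline bool bmval_is_subset_sketch(const u32 *bm1, const u32 *bm2)
{
	return !((bm1[0] & ~bm2[0]) ||
		 (bm1[1] & ~bm2[1]) ||
		 (bm1[2] & ~bm2[2]));
}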
static __be32
nfsd4_check_open_attributes(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
__be32 status = nfs_ok;
if (open->op_create == NFS4_OPEN_CREATE) {
if (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd_attrmask);
else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd41_ex_attrmask);
}
return status;
}
static int
is_create_with_attrs(struct nfsd4_open *open)
{
return open->op_create == NFS4_OPEN_CREATE
&& (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED
|| open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
}
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
fh_put(dst);
dget(src->fh_dentry);
if (src->fh_export)
exp_get(src->fh_export);
*dst = *src;
}
static __be32
do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, int accmode)
{
if (open->op_truncate &&
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
accmode |= NFSD_MAY_READ_IF_EXEC;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC);
if (open->op_share_deny & NFS4_SHARE_DENY_READ)
accmode |= NFSD_MAY_WRITE;
return fh_verify(rqstp, current_fh, S_IFREG, accmode);
}
static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
if (S_ISREG(mode))
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
/*
* Using err_symlink as our catch-all case may look odd; but
* there's no other obvious error for this case in 4.0, and we
* happen to know that it will cause the linux v4 client to do
* the right thing on attempts to open something other than a
* regular file.
*/
return nfserr_symlink;
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
{
if (nfsd4_has_session(cstate))
return;
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh->fh_handle);
}
static inline bool nfsd4_create_is_exclusive(int createmode)
{
return createmode == NFS4_CREATE_EXCLUSIVE ||
createmode == NFS4_CREATE_EXCLUSIVE4_1;
}
static __be32
nfsd4_vfs_create(struct svc_fh *fhp, struct dentry *child,
struct nfsd4_open *open)
{
struct file *filp;
struct path path;
int oflags;
oflags = O_CREAT | O_LARGEFILE;
switch (open->op_share_access & NFS4_SHARE_ACCESS_BOTH) {
case NFS4_SHARE_ACCESS_WRITE:
oflags |= O_WRONLY;
break;
case NFS4_SHARE_ACCESS_BOTH:
oflags |= O_RDWR;
break;
default:
oflags |= O_RDONLY;
}
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = child;
filp = dentry_create(&path, oflags, open->op_iattr.ia_mode,
current_cred());
if (IS_ERR(filp))
return nfserrno(PTR_ERR(filp));
open->op_filp = filp;
return nfs_ok;
}
/*
* Implement NFSv4's unchecked, guarded, and exclusive create
* semantics for regular files. Open state for this new file is
* subsequently fabricated in nfsd4_process_open2().
*
* Upon return, caller must release @fhp and @resfhp.
*/
static __be32
nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct svc_fh *resfhp, struct nfsd4_open *open)
{
struct iattr *iap = &open->op_iattr;
struct nfsd_attrs attrs = {
.na_iattr = iap,
.na_seclabel = &open->op_label,
};
struct dentry *parent, *child;
__u32 v_mtime, v_atime;
struct inode *inode;
__be32 status;
int host_err;
if (isdotent(open->op_fname, open->op_fnamelen))
return nfserr_exist;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (status != nfs_ok)
return status;
parent = fhp->fh_dentry;
inode = d_inode(parent);
host_err = fh_want_write(fhp);
if (host_err)
return nfserrno(host_err);
if (is_create_with_attrs(open))
nfsd4_acl_to_attr(NF4REG, open->op_acl, &attrs);
inode_lock_nested(inode, I_MUTEX_PARENT);
child = lookup_one_len(open->op_fname, parent, open->op_fnamelen);
if (IS_ERR(child)) {
status = nfserrno(PTR_ERR(child));
goto out;
}
if (d_really_is_negative(child)) {
status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (status != nfs_ok)
goto out;
}
status = fh_compose(resfhp, fhp->fh_export, child, fhp);
if (status != nfs_ok)
goto out;
v_mtime = 0;
v_atime = 0;
if (nfsd4_create_is_exclusive(open->op_createmode)) {
u32 *verifier = (u32 *)open->op_verf.data;
/*
* Solaris 7 gets confused (bugid 4218508) if these have
* the high bit set, as do xfs filesystems without the
* "bigtime" feature. So just clear the high bits. If this
* is ever changed to use different attrs for storing the
* verifier, then do_open_lookup() will also need to be
* fixed accordingly.
*/
v_mtime = verifier[0] & 0x7fffffff;
v_atime = verifier[1] & 0x7fffffff;
}
if (d_really_is_positive(child)) {
/* NFSv4 protocol requires change attributes even though
* no change happened.
*/
status = fh_fill_both_attrs(fhp);
if (status != nfs_ok)
goto out;
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
if (!d_is_reg(child))
break;
/*
* In NFSv4, we don't want to truncate the file
* now. This would be wrong if the OPEN fails for
* some other reason. Furthermore, if the size is
* nonzero, we should ignore it according to spec!
*/
open->op_truncate = (iap->ia_valid & ATTR_SIZE) &&
!iap->ia_size;
break;
case NFS4_CREATE_GUARDED:
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE:
if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
d_inode(child)->i_atime.tv_sec == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
break; /* subtle */
}
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
d_inode(child)->i_atime.tv_sec == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
goto set_attr; /* subtle */
}
status = nfserr_exist;
}
goto out;
}
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
status = fh_fill_pre_attrs(fhp);
if (status != nfs_ok)
goto out;
status = nfsd4_vfs_create(fhp, child, open);
if (status != nfs_ok)
goto out;
open->op_created = true;
fh_fill_post_attrs(fhp);
/* A newly created file already has a file size of zero. */
if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
iap->ia_valid &= ~ATTR_SIZE;
if (nfsd4_create_is_exclusive(open->op_createmode)) {
iap->ia_valid = ATTR_MTIME | ATTR_ATIME |
ATTR_MTIME_SET|ATTR_ATIME_SET;
iap->ia_mtime.tv_sec = v_mtime;
iap->ia_atime.tv_sec = v_atime;
iap->ia_mtime.tv_nsec = 0;
iap->ia_atime.tv_nsec = 0;
}
set_attr:
status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
if (attrs.na_labelerr)
open->op_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (attrs.na_aclerr)
open->op_bmval[0] &= ~FATTR4_WORD0_ACL;
out:
inode_unlock(inode);
nfsd_attrs_free(&attrs);
if (child && !IS_ERR(child))
dput(child);
fh_drop_write(fhp);
return status;
}
/**
* set_change_info - set up the change_info4 for a reply
* @cinfo: pointer to nfsd4_change_info to be populated
* @fhp: pointer to svc_fh to use as source
*
* Many operations in NFSv4 require change_info4 in the reply. This function
* populates that from the info that we (should!) have already collected. In
* the event that we didn't get any pre-attrs, just zero out both.
*/
static void
set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
cinfo->atomic = (u32)(fhp->fh_pre_saved && fhp->fh_post_saved && !fhp->fh_no_atomic_attr);
cinfo->before_change = fhp->fh_pre_change;
cinfo->after_change = fhp->fh_post_change;
/*
* If fetching the pre-change attributes failed, then we should
* have already failed the whole operation. We could have still
* failed to fetch post-change attributes however.
*
* If we didn't get post-op attrs, just zero-out the after
* field since we don't know what it should be. If the pre_saved
* field isn't set for some reason, throw warning and just copy
* whatever is in the after field.
*/
if (WARN_ON_ONCE(!fhp->fh_pre_saved))
cinfo->before_change = 0;
if (!fhp->fh_post_saved)
cinfo->after_change = cinfo->before_change + 1;
}
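/*
 * Illustration (values invented): if a directory's change attribute was
 * 100 before a CREATE and 101 afterwards, and both snapshots were taken
 * atomically with the operation, the client receives
 * { atomic = 1, before_change = 100, after_change = 101 } and can update
 * its cached directory data without a full revalidation.
 */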
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
{
struct svc_fh *current_fh = &cstate->current_fh;
int accmode;
__be32 status;
*resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
if (!*resfh)
return nfserr_jukebox;
fh_init(*resfh, NFS4_FHSIZE);
open->op_truncate = false;
if (open->op_create) {
/* FIXME: check session persistence and pnfs flags.
* The nfsv4.1 spec requires the following semantics:
*
* Persistent | pNFS | Server REQUIRED | Client Allowed
* Reply Cache | server | |
* -------------+--------+-----------------+--------------------
* no | no | EXCLUSIVE4_1 | EXCLUSIVE4_1
* | | | (SHOULD)
* | | and EXCLUSIVE4 | or EXCLUSIVE4
* | | | (SHOULD NOT)
* no | yes | EXCLUSIVE4_1 | EXCLUSIVE4_1
* yes | no | GUARDED4 | GUARDED4
* yes | yes | GUARDED4 | GUARDED4
*/
current->fs->umask = open->op_umask;
status = nfsd4_create_file(rqstp, current_fh, *resfh, open);
current->fs->umask = 0;
/*
* Following rfc 3530 14.2.16, and rfc 5661 18.16.4
* use the returned bitmask to indicate which attributes
* we used to store the verifier:
*/
if (nfsd4_create_is_exclusive(open->op_createmode) && status == 0)
open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
} else {
status = nfsd_lookup(rqstp, current_fh,
open->op_fname, open->op_fnamelen, *resfh);
if (status == nfs_ok)
/* NFSv4 protocol requires change attributes even though
* no change happened.
*/
status = fh_fill_both_attrs(current_fh);
}
if (status)
goto out;
status = nfsd_check_obj_isreg(*resfh);
if (status)
goto out;
nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
accmode = NFSD_MAY_NOP;
if (open->op_created ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
accmode |= NFSD_MAY_OWNER_OVERRIDE;
status = do_open_permission(rqstp, *resfh, open, accmode);
set_change_info(&open->op_cinfo, current_fh);
out:
return status;
}
static __be32
do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
struct svc_fh *current_fh = &cstate->current_fh;
int accmode = 0;
	/* We don't know the target directory, and therefore cannot
	 * set the change info.
	 */
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
nfsd4_set_open_owner_reply_cache(cstate, open, current_fh);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
(open->op_iattr.ia_size == 0);
/*
* In the delegation case, the client is telling us about an
* open that it *already* performed locally, some time ago. We
* should let it succeed now if possible.
*
* In the case of a CLAIM_FH open, on the other hand, the client
* may be counting on us to enforce permissions (the Linux 4.1
* client uses this for normal opens, for example).
*/
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
accmode = NFSD_MAY_OWNER_OVERRIDE;
return do_open_permission(rqstp, current_fh, open, accmode);
}
static void
copy_clientid(clientid_t *clid, struct nfsd4_session *session)
{
struct nfsd4_sessionid *sid =
(struct nfsd4_sessionid *)session->se_sessionid.data;
clid->cl_boot = sid->clientid.cl_boot;
clid->cl_id = sid->clientid.cl_id;
}
static __be32
nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_open *open = &u->open;
__be32 status;
struct svc_fh *resfh = NULL;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
bool reclaim = false;
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fnamelen, open->op_fname,
open->op_openowner);
open->op_filp = NULL;
open->op_rqstp = rqstp;
	/* This check is required by the spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
open->op_created = false;
	/*
	 * RFC 5661 section 18.51.3:
	 * Before RECLAIM_COMPLETE is done, the server should deny new,
	 * non-reclaim lock and open state.
	 */
if (nfsd4_has_session(cstate) &&
!test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
if (nfsd4_has_session(cstate))
copy_clientid(&open->op_clientid, cstate->session);
/* check seqid for replay. set nfs4_owner */
status = nfsd4_process_open1(cstate, open, nn);
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
fh_copy_shallow(&cstate->current_fh.fh_handle,
&rp->rp_openfh);
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
dprintk("nfsd4_open: replay failed"
" restoring previous filehandle\n");
else
status = nfserr_replay_me;
}
if (status)
goto out;
if (open->op_xdr_error) {
status = open->op_xdr_error;
goto out;
}
status = nfsd4_check_open_attributes(rqstp, cstate, open);
if (status)
goto out;
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
if (opens_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
status = nfserr_no_grace;
if (!opens_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_NULL:
status = do_open_lookup(rqstp, cstate, open, &resfh);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
status = nfs4_check_open_reclaim(cstate->clp);
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
reclaim = true;
fallthrough;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, cstate, open);
if (status)
goto out;
resfh = &cstate->current_fh;
break;
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
status = nfserr_notsupp;
goto out;
default:
status = nfserr_inval;
goto out;
}
status = nfsd4_process_open2(rqstp, resfh, open);
if (status && open->op_created)
pr_warn("nfsd4_process_open2 failed to open newly-created file: status=%u\n",
be32_to_cpu(status));
if (reclaim && !status)
nn->somebody_reclaimed = true;
out:
if (open->op_filp) {
fput(open->op_filp);
open->op_filp = NULL;
}
if (resfh && resfh != &cstate->current_fh) {
fh_dup2(&cstate->current_fh, resfh);
fh_put(resfh);
kfree(resfh);
}
nfsd4_cleanup_open_state(cstate, open);
nfsd4_bump_seqid(cstate, status);
return status;
}
/*
* OPEN is the only seqid-mutating operation whose decoding can fail
* with a seqid-mutating error (specifically, decoding of user names in
* the attributes). Therefore we have to do some processing to look up
* the stateowner so that we can bump the seqid.
*/
static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op)
{
struct nfsd4_open *open = &op->u.open;
if (!seqid_mutating_err(ntohl(op->status)))
return op->status;
if (nfsd4_has_session(cstate))
return op->status;
open->op_xdr_error = op->status;
return nfsd4_open(rqstp, cstate, &op->u);
}
/*
* filehandle-manipulating ops.
*/
static __be32
nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
u->getfh = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_putfh *putfh = &u->putfh;
__be32 ret;
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_raw, putfh->pf_fhval,
putfh->pf_fhlen);
ret = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
if (ret == nfserr_stale && putfh->no_verify) {
SET_FH_FLAG(&cstate->current_fh, NFSD4_FH_FOREIGN);
ret = 0;
}
#endif
return ret;
}
static __be32
nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
fh_put(&cstate->current_fh);
return exp_pseudoroot(rqstp, &cstate->current_fh);
}
static __be32
nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
if (!cstate->save_fh.fh_dentry)
return nfserr_restorefh;
fh_dup2(&cstate->current_fh, &cstate->save_fh);
if (HAS_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG)) {
memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
return nfs_ok;
}
static __be32
nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
fh_dup2(&cstate->save_fh, &cstate->current_fh);
if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG)) {
memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
SET_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG);
}
return nfs_ok;
}
/*
* misc nfsv4 ops
*/
static __be32
nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_access *access = &u->access;
u32 access_full;
access_full = NFS3_ACCESS_FULL;
if (cstate->minorversion >= 2)
access_full |= NFS4_ACCESS_XALIST | NFS4_ACCESS_XAREAD |
NFS4_ACCESS_XAWRITE;
if (access->ac_req_access & ~access_full)
return nfserr_inval;
access->ac_resp_access = access->ac_req_access;
return nfsd_access(rqstp, &cstate->current_fh, &access->ac_resp_access,
&access->ac_supported);
}
static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
{
__be32 *verf = (__be32 *)verifier->data;
BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
}
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_commit *commit = &u->commit;
struct nfsd_file *nf;
__be32 status;
status = nfsd_file_acquire(rqstp, &cstate->current_fh, NFSD_MAY_WRITE |
NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (status != nfs_ok)
return status;
status = nfsd_commit(rqstp, &cstate->current_fh, nf, commit->co_offset,
commit->co_count,
(__be32 *)commit->co_verf.data);
nfsd_file_put(nf);
return status;
}
static __be32
nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_create *create = &u->create;
struct nfsd_attrs attrs = {
.na_iattr = &create->cr_iattr,
.na_seclabel = &create->cr_label,
};
struct svc_fh resfh;
__be32 status;
dev_t rdev;
fh_init(&resfh, NFS4_FHSIZE);
status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, create->cr_bmval,
nfsd_attrmask);
if (status)
return status;
status = nfsd4_acl_to_attr(create->cr_type, create->cr_acl, &attrs);
current->fs->umask = create->cr_umask;
switch (create->cr_type) {
case NF4LNK:
status = nfsd_symlink(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
create->cr_data, &attrs, &resfh);
break;
case NF4BLK:
status = nfserr_inval;
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&attrs, S_IFBLK, rdev, &resfh);
break;
case NF4CHR:
status = nfserr_inval;
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&attrs, S_IFCHR, rdev, &resfh);
break;
case NF4SOCK:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&attrs, S_IFSOCK, 0, &resfh);
break;
case NF4FIFO:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&attrs, S_IFIFO, 0, &resfh);
break;
case NF4DIR:
create->cr_iattr.ia_valid &= ~ATTR_SIZE;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&attrs, S_IFDIR, 0, &resfh);
break;
default:
status = nfserr_badtype;
}
if (status)
goto out;
if (attrs.na_labelerr)
create->cr_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (attrs.na_aclerr)
create->cr_bmval[0] &= ~FATTR4_WORD0_ACL;
set_change_info(&create->cr_cinfo, &cstate->current_fh);
fh_dup2(&cstate->current_fh, &resfh);
out:
fh_put(&resfh);
out_umask:
current->fs->umask = 0;
nfsd_attrs_free(&attrs);
return status;
}
static __be32
nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_getattr *getattr = &u->getattr;
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
if (getattr->ga_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
getattr->ga_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
getattr->ga_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
getattr->ga_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
getattr->ga_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_link *link = &u->link;
__be32 status;
status = nfsd_link(rqstp, &cstate->current_fh,
link->li_name, link->li_namelen, &cstate->save_fh);
if (!status)
set_change_info(&link->li_cinfo, &cstate->current_fh);
return status;
}
static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
{
struct svc_fh tmp_fh;
__be32 ret;
fh_init(&tmp_fh, NFS4_FHSIZE);
ret = exp_pseudoroot(rqstp, &tmp_fh);
if (ret)
return ret;
if (tmp_fh.fh_dentry == fh->fh_dentry) {
fh_put(&tmp_fh);
return nfserr_noent;
}
fh_put(&tmp_fh);
return nfsd_lookup(rqstp, fh, "..", 2, fh);
}
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
}
static __be32
nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
return nfsd_lookup(rqstp, &cstate->current_fh,
u->lookup.lo_name, u->lookup.lo_len,
&cstate->current_fh);
}
static __be32
nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
__be32 status;
read->rd_nf = NULL;
trace_nfsd_read_start(rqstp, &cstate->current_fh,
read->rd_offset, read->rd_length);
read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
if (read->rd_offset > (u64)OFFSET_MAX)
read->rd_offset = (u64)OFFSET_MAX;
if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
* following compound.
*
* To ensure proper ordering, we therefore turn off zero copy if
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
&read->rd_nf, NULL);
read->rd_rqstp = rqstp;
read->rd_fhp = &cstate->current_fh;
return status;
}
static void
nfsd4_read_release(union nfsd4_op_u *u)
{
if (u->read.rd_nf)
nfsd_file_put(u->read.rd_nf);
trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
u->read.rd_offset, u->read.rd_length);
}
static __be32
nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_readdir *readdir = &u->readdir;
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
readdir->rd_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
readdir->rd_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
readdir->rd_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
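	/*
	 * Cookie values 1 and 2 are reserved (historically for "." and
	 * ".."), and a cookie of 0 must be accompanied by an all-zero
	 * verifier; anything else is a bad cookie.
	 */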
if ((cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
readdir->rd_rqstp = rqstp;
readdir->rd_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
u->readlink.rl_rqstp = rqstp;
u->readlink.rl_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_remove *remove = &u->remove;
__be32 status;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
if (!status)
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
return status;
}
static __be32
nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_rename *rename = &u->rename;
__be32 status;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
rename->rn_snamelen, &cstate->current_fh,
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
set_change_info(&rename->rn_sinfo, &cstate->save_fh);
set_change_info(&rename->rn_tinfo, &cstate->current_fh);
return nfs_ok;
}
static __be32
nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_secinfo *secinfo = &u->secinfo;
struct svc_export *exp;
struct dentry *dentry;
__be32 err;
err = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_EXEC);
if (err)
return err;
err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
secinfo->si_name, secinfo->si_namelen,
&exp, &dentry);
if (err)
return err;
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
} else
secinfo->si_exp = exp;
dput(dentry);
if (cstate->minorversion)
/* See rfc 5661 section 2.6.3.1.1.8 */
fh_put(&cstate->current_fh);
return err;
}
static __be32
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
__be32 err;
switch (u->secinfo_no_name.sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
if (err)
return err;
break;
default:
return nfserr_inval;
}
u->secinfo_no_name.sin_exp = exp_get(cstate->current_fh.fh_export);
fh_put(&cstate->current_fh);
return nfs_ok;
}
static void
nfsd4_secinfo_release(union nfsd4_op_u *u)
{
if (u->secinfo.si_exp)
exp_put(u->secinfo.si_exp);
}
static void
nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
{
if (u->secinfo_no_name.sin_exp)
exp_put(u->secinfo_no_name.sin_exp);
}
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
struct nfsd_attrs attrs = {
.na_iattr = &setattr->sa_iattr,
.na_seclabel = &setattr->sa_label,
};
struct inode *inode;
__be32 status = nfs_ok;
int err;
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
WR_STATE, NULL, NULL);
if (status)
return status;
}
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
status = nfs_ok;
status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
nfsd_attrmask);
if (status)
goto out;
inode = cstate->current_fh.fh_dentry->d_inode;
status = nfsd4_acl_to_attr(S_ISDIR(inode->i_mode) ? NF4DIR : NF4REG,
setattr->sa_acl, &attrs);
if (status)
goto out;
status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs,
0, (time64_t)0);
if (!status)
status = nfserrno(attrs.na_labelerr);
if (!status)
status = nfserrno(attrs.na_aclerr);
out:
nfsd_attrs_free(&attrs);
fh_drop_write(&cstate->current_fh);
return status;
}
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
struct nfsd_file *nf = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
int nvecs;
if (write->wr_offset > (u64)OFFSET_MAX ||
write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
return nfserr_fbig;
cnt = write->wr_buflen;
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
stateid, WR_STATE, &nf, NULL);
if (status)
return status;
write->wr_how_written = write->wr_stable_how;
nvecs = svc_fill_write_vector(rqstp, &write->wr_payload);
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
write->wr_how_written,
(__be32 *)write->wr_verifier.data);
nfsd_file_put(nf);
write->wr_bytes_written = cnt;
trace_nfsd_write_done(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
return status;
}
static __be32
nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *src_stateid, struct nfsd_file **src,
stateid_t *dst_stateid, struct nfsd_file **dst)
{
__be32 status;
if (!cstate->save_fh.fh_dentry)
return nfserr_nofilehandle;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
src_stateid, RD_STATE, src, NULL);
if (status)
goto out;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
dst_stateid, WR_STATE, dst, NULL);
if (status)
goto out_put_src;
/* fix up for NFS-specific error code */
if (!S_ISREG(file_inode((*src)->nf_file)->i_mode) ||
!S_ISREG(file_inode((*dst)->nf_file)->i_mode)) {
status = nfserr_wrong_type;
goto out_put_dst;
}
out:
return status;
out_put_dst:
nfsd_file_put(*dst);
*dst = NULL;
out_put_src:
nfsd_file_put(*src);
*src = NULL;
goto out;
}
static __be32
nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_clone *clone = &u->clone;
struct nfsd_file *src, *dst;
__be32 status;
status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
&clone->cl_dst_stateid, &dst);
if (status)
goto out;
status = nfsd4_clone_file_range(rqstp, src, clone->cl_src_pos,
dst, clone->cl_dst_pos, clone->cl_count,
EX_ISSYNC(cstate->current_fh.fh_export));
nfsd_file_put(dst);
nfsd_file_put(src);
out:
return status;
}
static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(©->refcount))
return;
kfree(copy->cp_src);
kfree(copy);
}
static void nfsd4_stop_copy(struct nfsd4_copy *copy)
{
if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, ©->cp_flags))
kthread_stop(copy->copy_task);
nfs4_put_copy(copy);
}
static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
{
struct nfsd4_copy *copy = NULL;
spin_lock(&clp->async_lock);
if (!list_empty(&clp->async_copies)) {
copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
copies);
refcount_inc(©->refcount);
}
spin_unlock(&clp->async_lock);
return copy;
}
void nfsd4_shutdown_copy(struct nfs4_client *clp)
{
struct nfsd4_copy *copy;
while ((copy = nfsd4_get_copy(clp)) != NULL)
nfsd4_stop_copy(copy);
}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
extern struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
struct nfs_fh *src_fh,
nfs4_stateid *stateid);
extern void nfs42_ssc_close(struct file *filep);
extern void nfs_sb_deactive(struct super_block *sb);
#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
/*
 * Set up a work entry in the ssc delayed unmount list.
 */
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
struct nfsd4_ssc_umount_item **nsui)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL;
struct nfsd4_ssc_umount_item *tmp;
DEFINE_WAIT(wait);
__be32 status = 0;
*nsui = NULL;
work = kzalloc(sizeof(*work), GFP_KERNEL);
try_again:
spin_lock(&nn->nfsd_ssc_lock);
list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
if (strncmp(ni->nsui_ipaddr, ipaddr, sizeof(ni->nsui_ipaddr)))
continue;
/* found a match */
if (ni->nsui_busy) {
/* wait - and try again */
prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE);
spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */
if (kthread_should_stop() ||
(schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
return nfserr_eagain;
}
finish_wait(&nn->nfsd_ssc_waitq, &wait);
goto try_again;
}
*nsui = ni;
refcount_inc(&ni->nsui_refcnt);
spin_unlock(&nn->nfsd_ssc_lock);
kfree(work);
/* return vfsmount in (*nsui)->nsui_vfsmount */
return 0;
}
if (work) {
strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
*nsui = work;
} else
status = nfserr_resource;
spin_unlock(&nn->nfsd_ssc_lock);
return status;
}
static void nfsd4_ssc_update_dul(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *nsui,
struct vfsmount *ss_mnt)
{
spin_lock(&nn->nfsd_ssc_lock);
nsui->nsui_vfsmount = ss_mnt;
nsui->nsui_busy = false;
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
}
static void nfsd4_ssc_cancel_dul(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *nsui)
{
spin_lock(&nn->nfsd_ssc_lock);
list_del(&nsui->nsui_list);
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
kfree(nsui);
}
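/*
 * Rough lifecycle of a delayed-unmount entry (a summary of the helpers
 * above, not from the original source):
 *
 *   nfsd4_ssc_setup_dul()  - find an existing entry for the source
 *                            server, waiting if another thread is
 *                            mid-mount, or queue a new busy entry
 *   nfsd4_ssc_update_dul() - mount succeeded: publish the vfsmount,
 *                            clear busy, wake any waiters
 *   nfsd4_ssc_cancel_dul() - mount failed: unlink and free the entry
 *
 * Idle entries are finally unmounted by the laundromat once their
 * nsui_expire time passes (see nfsd4_cleanup_inter_ssc()).
 */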
/*
* Support one copy source server for now.
*/
static __be32
nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
struct nfsd4_ssc_umount_item **nsui)
{
struct file_system_type *type;
struct vfsmount *ss_mnt;
struct nfs42_netaddr *naddr;
struct sockaddr_storage tmp_addr;
size_t tmp_addrlen, match_netid_len = 3;
char *startsep = "", *endsep = "", *match_netid = "tcp";
char *ipaddr, *dev_name, *raw_data;
int len, raw_len;
__be32 status = nfserr_inval;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
naddr = &nss->u.nl4_addr;
tmp_addrlen = rpc_uaddr2sockaddr(SVC_NET(rqstp), naddr->addr,
naddr->addr_len,
(struct sockaddr *)&tmp_addr,
sizeof(tmp_addr));
*nsui = NULL;
if (tmp_addrlen == 0)
goto out_err;
if (tmp_addr.ss_family == AF_INET6) {
startsep = "[";
endsep = "]";
match_netid = "tcp6";
match_netid_len = 4;
}
if (naddr->netid_len != match_netid_len ||
strncmp(naddr->netid, match_netid, naddr->netid_len))
goto out_err;
/* Construct the raw data for the vfs_kern_mount call */
len = RPC_MAX_ADDRBUFLEN + 1;
ipaddr = kzalloc(len, GFP_KERNEL);
if (!ipaddr)
goto out_err;
rpc_ntop((struct sockaddr *)&tmp_addr, ipaddr, len);
	/* 2 for ipv6 endsep and startsep. 3 for ":/" and trailing '\0' */
raw_len = strlen(NFSD42_INTERSSC_MOUNTOPS) + strlen(ipaddr);
raw_data = kzalloc(raw_len, GFP_KERNEL);
if (!raw_data)
goto out_free_ipaddr;
snprintf(raw_data, raw_len, NFSD42_INTERSSC_MOUNTOPS, ipaddr);
status = nfserr_nodev;
type = get_fs_type("nfs");
if (!type)
goto out_free_rawdata;
/* Set the server:<export> for the vfs_kern_mount call */
dev_name = kzalloc(len + 5, GFP_KERNEL);
if (!dev_name)
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
if (status)
goto out_free_devname;
if ((*nsui)->nsui_vfsmount)
goto out_done;
/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
ss_mnt = vfs_kern_mount(type, SB_KERNMOUNT, dev_name, raw_data);
module_put(type->owner);
if (IS_ERR(ss_mnt)) {
status = nfserr_nodev;
nfsd4_ssc_cancel_dul(nn, *nsui);
goto out_free_devname;
}
nfsd4_ssc_update_dul(nn, *nsui, ss_mnt);
out_done:
status = 0;
out_free_devname:
kfree(dev_name);
out_free_rawdata:
kfree(raw_data);
out_free_ipaddr:
kfree(ipaddr);
out_err:
return status;
}
/*
* Verify COPY destination stateid.
*
* Connect to the source server with NFSv4.1.
* Create the source struct file for nfsd_copy_range.
* Called with COPY cstate:
* SAVED_FH: source filehandle
* CURRENT_FH: destination filehandle
*/
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy)
{
struct svc_fh *s_fh = NULL;
stateid_t *s_stid = ©->cp_src_stateid;
__be32 status = nfserr_inval;
/* Verify the destination stateid and set dst struct file*/
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
©->cp_dst_stateid,
WR_STATE, ©->nf_dst, NULL);
if (status)
goto out;
status = nfsd4_interssc_connect(copy->cp_src, rqstp, ©->ss_nsui);
if (status)
goto out;
s_fh = &cstate->save_fh;
copy->c_fh.size = s_fh->fh_handle.fh_size;
memcpy(copy->c_fh.data, &s_fh->fh_handle.fh_raw, copy->c_fh.size);
copy->stateid.seqid = cpu_to_be32(s_stid->si_generation);
memcpy(copy->stateid.other, (void *)&s_stid->si_opaque,
sizeof(stateid_opaque_t));
status = 0;
out:
return status;
}
static void
nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
long timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
nfs42_ssc_close(filp);
fput(filp);
spin_lock(&nn->nfsd_ssc_lock);
list_del(&nsui->nsui_list);
	/*
	 * The vfsmount can be shared by multiple exports; drop our
	 * reference here. Once the count falls to 1, the mount will be
	 * released by the laundromat after nsui_expire passes.
	 */
refcount_dec(&nsui->nsui_refcnt);
nsui->nsui_expire = jiffies + timeout;
list_add_tail(&nsui->nsui_list, &nn->nfsd_ssc_mount_list);
spin_unlock(&nn->nfsd_ssc_lock);
}
#else /* CONFIG_NFSD_V4_2_INTER_SSC */
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy)
{
return nfserr_inval;
}
static void
nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
}
static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
struct nfs_fh *src_fh,
nfs4_stateid *stateid)
{
return NULL;
}
#endif /* CONFIG_NFSD_V4_2_INTER_SSC */
static __be32
nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy)
{
return nfsd4_verify_copy(rqstp, cstate, ©->cp_src_stateid,
©->nf_src, ©->cp_dst_stateid,
©->nf_dst);
}
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
struct nfsd4_cb_offload *cbo =
container_of(cb, struct nfsd4_cb_offload, co_cb);
kfree(cbo);
}
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
struct rpc_task *task)
{
struct nfsd4_cb_offload *cbo =
container_of(cb, struct nfsd4_cb_offload, co_cb);
trace_nfsd_cb_offload_done(&cbo->co_res.cb_stateid, task);
return 1;
}
static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
.release = nfsd4_cb_offload_release,
.done = nfsd4_cb_offload_done
};
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
{
copy->cp_res.wr_stable_how =
test_bit(NFSD4_COPY_F_COMMITTED, ©->cp_flags) ?
NFS_FILE_SYNC : NFS_UNSTABLE;
nfsd4_copy_set_sync(copy, sync);
gen_boot_verifier(©->cp_res.wr_verifier, copy->cp_clp->net);
}
static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
struct file *dst,
struct file *src)
{
errseq_t since;
ssize_t bytes_copied = 0;
u64 bytes_total = copy->cp_count;
u64 src_pos = copy->cp_src_pos;
u64 dst_pos = copy->cp_dst_pos;
int status;
loff_t end;
/* See RFC 7862 p.67: */
if (bytes_total == 0)
bytes_total = ULLONG_MAX;
do {
if (kthread_should_stop())
break;
bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
bytes_total);
if (bytes_copied <= 0)
break;
bytes_total -= bytes_copied;
copy->cp_res.wr_bytes_written += bytes_copied;
src_pos += bytes_copied;
dst_pos += bytes_copied;
} while (bytes_total > 0 && nfsd4_copy_is_async(copy));
/* for a non-zero asynchronous copy do a commit of data */
if (nfsd4_copy_is_async(copy) && copy->cp_res.wr_bytes_written > 0) {
since = READ_ONCE(dst->f_wb_err);
end = copy->cp_dst_pos + copy->cp_res.wr_bytes_written - 1;
status = vfs_fsync_range(dst, copy->cp_dst_pos, end, 0);
if (!status)
status = filemap_check_wb_err(dst->f_mapping, since);
if (!status)
set_bit(NFSD4_COPY_F_COMMITTED, ©->cp_flags);
}
return bytes_copied;
}
static __be32 nfsd4_do_copy(struct nfsd4_copy *copy,
struct file *src, struct file *dst,
bool sync)
{
__be32 status;
ssize_t bytes;
bytes = _nfsd_copy_file_range(copy, dst, src);
	/* For an async copy we ignore the error; the client can always
	 * retry to get the error.
	 */
if (bytes < 0 && !copy->cp_res.wr_bytes_written)
status = nfserrno(bytes);
else {
nfsd4_init_copy_res(copy, sync);
status = nfs_ok;
}
return status;
}
static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
{
dst->cp_src_pos = src->cp_src_pos;
dst->cp_dst_pos = src->cp_dst_pos;
dst->cp_count = src->cp_count;
dst->cp_flags = src->cp_flags;
memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
dst->cp_clp = src->cp_clp;
dst->nf_dst = nfsd_file_get(src->nf_dst);
/* for inter, nf_src doesn't exist yet */
if (!nfsd4_ssc_is_inter(src))
dst->nf_src = nfsd_file_get(src->nf_src);
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
dst->ss_nsui = src->ss_nsui;
}
static void release_copy_files(struct nfsd4_copy *copy)
{
if (copy->nf_src)
nfsd_file_put(copy->nf_src);
if (copy->nf_dst)
nfsd_file_put(copy->nf_dst);
}
static void cleanup_async_copy(struct nfsd4_copy *copy)
{
nfs4_free_copy_state(copy);
release_copy_files(copy);
if (copy->cp_clp) {
spin_lock(©->cp_clp->async_lock);
if (!list_empty(©->copies))
list_del_init(©->copies);
spin_unlock(©->cp_clp->async_lock);
}
nfs4_put_copy(copy);
}
static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
{
struct nfsd4_cb_offload *cbo;
cbo = kzalloc(sizeof(*cbo), GFP_KERNEL);
if (!cbo)
return;
memcpy(&cbo->co_res, ©->cp_res, sizeof(copy->cp_res));
memcpy(&cbo->co_fh, ©->fh, sizeof(copy->fh));
cbo->co_nfserr = nfserr;
nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
NFSPROC4_CLNT_CB_OFFLOAD);
trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
&cbo->co_fh, copy->cp_count, nfserr);
nfsd4_run_cb(&cbo->co_cb);
}
/**
* nfsd4_do_async_copy - kthread function for background server-side COPY
* @data: arguments for COPY operation
*
* Return values:
* %0: Copy operation is done.
*/
static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
__be32 nfserr;
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
filp = nfs42_ssc_open(copy->ss_nsui->nsui_vfsmount,
©->c_fh, ©->stateid);
if (IS_ERR(filp)) {
switch (PTR_ERR(filp)) {
case -EBADF:
nfserr = nfserr_wrong_type;
break;
default:
nfserr = nfserr_offload_denied;
}
/* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
false);
nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
} else {
nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, false);
}
do_callback:
nfsd4_send_cb_offload(copy, nfserr);
cleanup_async_copy(copy);
return 0;
}
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_copy *copy = &u->copy;
__be32 status;
struct nfsd4_copy *async_copy = NULL;
if (nfsd4_ssc_is_inter(copy)) {
if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
status = nfserr_notsupp;
goto out;
}
status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
if (status)
return nfserr_offload_denied;
} else {
status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
if (status)
return status;
}
copy->cp_clp = cstate->clp;
memcpy(©->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
if (nfsd4_copy_is_async(copy)) {
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
status = nfserrno(-ENOMEM);
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
INIT_LIST_HEAD(&async_copy->copies);
refcount_set(&async_copy->refcount, 1);
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
if (!async_copy->cp_src)
goto out_err;
if (!nfs4_init_copy_state(nn, copy))
goto out_err;
memcpy(©->cp_res.cb_stateid, ©->cp_stateid.cs_stid,
sizeof(copy->cp_res.cb_stateid));
dup_copy_fields(copy, async_copy);
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
goto out_err;
spin_lock(&async_copy->cp_clp->async_lock);
list_add(&async_copy->copies,
&async_copy->cp_clp->async_copies);
spin_unlock(&async_copy->cp_clp->async_lock);
wake_up_process(async_copy->copy_task);
status = nfs_ok;
} else {
status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, true);
}
out:
release_copy_files(copy);
return status;
out_err:
if (nfsd4_ssc_is_inter(copy)) {
/*
* Source's vfsmount of inter-copy will be unmounted
* by the laundromat. Use copy instead of async_copy
* since async_copy->ss_nsui might not be set yet.
*/
refcount_dec(©->ss_nsui->nsui_refcnt);
}
if (async_copy)
cleanup_async_copy(async_copy);
status = nfserrno(-ENOMEM);
goto out;
}
static struct nfsd4_copy *
find_async_copy_locked(struct nfs4_client *clp, stateid_t *stateid)
{
struct nfsd4_copy *copy;
lockdep_assert_held(&clp->async_lock);
list_for_each_entry(copy, &clp->async_copies, copies) {
if (memcmp(©->cp_stateid.cs_stid, stateid, NFS4_STATEID_SIZE))
continue;
return copy;
}
return NULL;
}
static struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
{
struct nfsd4_copy *copy;
spin_lock(&clp->async_lock);
copy = find_async_copy_locked(clp, stateid);
if (copy)
refcount_inc(©->refcount);
spin_unlock(&clp->async_lock);
return copy;
}
static __be32
nfsd4_offload_cancel(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
copy = find_async_copy(clp, &os->stateid);
if (!copy) {
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
return manage_cpntf_state(nn, &os->stateid, clp, NULL);
} else
nfsd4_stop_copy(copy);
return nfs_ok;
}
static __be32
nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct nfs4_stid *stid;
struct nfs4_cpntf_state *cps;
struct nfs4_client *clp = cstate->clp;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&cn->cpn_src_stateid, RD_STATE, NULL,
&stid);
if (status)
return status;
cn->cpn_sec = nn->nfsd4_lease;
cn->cpn_nsec = 0;
status = nfserrno(-ENOMEM);
cps = nfs4_alloc_init_cpntf_state(nn, stid);
if (!cps)
goto out;
memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.cs_stid, sizeof(stateid_t));
memcpy(&cps->cp_p_stateid, &stid->sc_stateid, sizeof(stateid_t));
memcpy(&cps->cp_p_clid, &clp->cl_clientid, sizeof(clientid_t));
/* For now, only return one server address in cpn_src, the
* address used by the client to connect to this server.
*/
cn->cpn_src->nl4_type = NL4_NETADDR;
status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr,
&cn->cpn_src->u.nl4_addr);
WARN_ON_ONCE(status);
if (status) {
nfs4_put_cpntf_state(nn, cps);
goto out;
}
out:
nfs4_put_stid(stid);
return status;
}
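/*
 * Common helper for ALLOCATE and DEALLOCATE: resolve a stateid granting
 * write access, then apply the requested fallocate flags to the range.
 */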
static __be32
nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate, int flags)
{
__be32 status;
struct nfsd_file *nf;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
WR_STATE, &nf, NULL);
if (status != nfs_ok)
return status;
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, nf->nf_file,
fallocate->falloc_offset,
fallocate->falloc_length,
flags);
nfsd_file_put(nf);
return status;
}
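/*
 * OFFLOAD_STATUS: report the number of bytes written so far by the
 * async copy named by the stateid; osr_complete is not yet returned.
 */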
static __be32
nfsd4_offload_status(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
__be32 status = nfs_ok;
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
spin_lock(&clp->async_lock);
copy = find_async_copy_locked(clp, &os->stateid);
if (copy)
os->count = copy->cp_res.wr_bytes_written;
else
status = nfserr_bad_stateid;
spin_unlock(&clp->async_lock);
return status;
}
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
return nfsd4_fallocate(rqstp, cstate, &u->allocate, 0);
}
static __be32
nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
return nfsd4_fallocate(rqstp, cstate, &u->deallocate,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
}
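/*
 * SEEK: map NFS4_CONTENT_DATA/NFS4_CONTENT_HOLE onto SEEK_DATA and
 * SEEK_HOLE and let vfs_llseek() locate the next boundary.
 */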
static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_seek *seek = &u->seek;
int whence;
__be32 status;
struct nfsd_file *nf;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
RD_STATE, &nf, NULL);
if (status)
return status;
switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
whence = SEEK_DATA;
break;
case NFS4_CONTENT_HOLE:
whence = SEEK_HOLE;
break;
default:
status = nfserr_union_notsupp;
goto out;
}
/*
 * Note: This call does change file->f_pos, but nothing in NFSD
 * should ever use file->f_pos.
 */
seek->seek_pos = vfs_llseek(nf->nf_file, seek->seek_offset, whence);
if (seek->seek_pos < 0)
status = nfserrno(seek->seek_pos);
else if (seek->seek_pos >= i_size_read(file_inode(nf->nf_file)))
seek->seek_eof = true;
out:
nfsd_file_put(nf);
return status;
}
/* This routine never returns NFS_OK! If there are no other errors, it
* will return NFSERR_SAME or NFSERR_NOT_SAME depending on whether the
* attributes matched. VERIFY is implemented by mapping NFSERR_SAME
* to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
*/
static __be32
_nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 *buf, *p;
int count;
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
if (status)
return status;
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
if (verify->ve_attrlen & 3)
return nfserr_inval;
/* count in words:
* bitmap_len(1) + bitmap(2) + attr_len(1) = 4
*/
count = 4 + (verify->ve_attrlen >> 2);
buf = kmalloc(count << 2, GFP_KERNEL);
if (!buf)
return nfserr_jukebox;
p = buf;
status = nfsd4_encode_fattr_to_buf(&p, count, &cstate->current_fh,
cstate->current_fh.fh_export,
cstate->current_fh.fh_dentry,
verify->ve_bmval,
rqstp, 0);
/*
* If nfsd4_encode_fattr() ran out of space, assume that's because
* the attributes are longer (hence different) than those given:
*/
if (status == nfserr_resource)
status = nfserr_not_same;
if (status)
goto out_kfree;
/* skip bitmap */
p = buf + 1 + ntohl(buf[0]);
status = nfserr_not_same;
if (ntohl(*p++) != verify->ve_attrlen)
goto out_kfree;
if (!memcmp(p, verify->ve_attrval, verify->ve_attrlen))
status = nfserr_same;
out_kfree:
kfree(buf);
return status;
}
static __be32
nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, &u->verify);
return status == nfserr_not_same ? nfs_ok : status;
}
static __be32
nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, &u->nverify);
return status == nfserr_same ? nfs_ok : status;
}
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
if (layout_type >= LAYOUT_TYPE_MAX ||
!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
}
return nfsd4_layout_ops[layout_type];
}
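/*
 * GETDEVICEINFO: map the device ID back to its export, check that the
 * requested layout type is supported there, and have the layout driver
 * encode the device address.
 */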
static __be32
nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_getdeviceinfo *gdp = &u->getdeviceinfo;
const struct nfsd4_layout_ops *ops;
struct nfsd4_deviceid_map *map;
struct svc_export *exp;
__be32 nfserr;
dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n",
__func__,
gdp->gd_layout_type,
gdp->gd_devid.fsid_idx, gdp->gd_devid.generation,
gdp->gd_maxcount);
map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
if (!map) {
dprintk("%s: couldn't find device ID to export mapping!\n",
__func__);
return nfserr_noent;
}
exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
}
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(exp, gdp->gd_layout_type);
if (!ops)
goto out;
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
rqstp, cstate->clp, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
out:
exp_put(exp);
return nfserr;
}
static void
nfsd4_getdeviceinfo_release(union nfsd4_op_u *u)
{
kfree(u->getdeviceinfo.gd_device);
}
static __be32
nfsd4_layoutget(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_layoutget *lgp = &u->layoutget;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
int accmode = NFSD_MAY_READ_IF_EXEC;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
accmode |= NFSD_MAY_READ;
break;
case IOMODE_RW:
accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
break;
default:
dprintk("%s: invalid iomode %d\n",
__func__, lgp->lg_seg.iomode);
nfserr = nfserr_badiomode;
goto out;
}
nfserr = fh_verify(rqstp, current_fh, 0, accmode);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type);
if (!ops)
goto out;
/*
* Verify minlength and range as per RFC5661:
* o If loga_length is less than loga_minlength,
* the metadata server MUST return NFS4ERR_INVAL.
* o If the sum of loga_offset and loga_minlength exceeds
* NFS4_UINT64_MAX, and loga_minlength is not
* NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result.
* o If the sum of loga_offset and loga_length exceeds
* NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX,
* the error NFS4ERR_INVAL MUST result.
*/
nfserr = nfserr_inval;
if (lgp->lg_seg.length < lgp->lg_minlength ||
(lgp->lg_minlength != NFS4_MAX_UINT64 &&
lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) ||
(lgp->lg_seg.length != NFS4_MAX_UINT64 &&
lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset))
goto out;
if (lgp->lg_seg.length == 0)
goto out;
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid,
true, lgp->lg_layout_type, &ls);
if (nfserr) {
trace_nfsd_layout_get_lookup_fail(&lgp->lg_sid);
goto out;
}
nfserr = nfserr_recallconflict;
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
nfserr = nfsd4_insert_layout(lgp, ls);
out_put_stid:
mutex_unlock(&ls->ls_mutex);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static void
nfsd4_layoutget_release(union nfsd4_op_u *u)
{
kfree(u->layoutget.lg_content);
}
static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
if (!ops)
goto out;
inode = d_inode(current_fh->fh_dentry);
nfserr = nfserr_inval;
if (new_size <= seg->offset) {
dprintk("pnfsd: last write before layout segment\n");
goto out;
}
if (new_size > seg->offset + seg->length) {
dprintk("pnfsd: last write beyond layout segment\n");
goto out;
}
if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
dprintk("pnfsd: layoutcommit beyond EOF\n");
goto out;
}
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
false, lcp->lc_layout_type,
&ls);
if (nfserr) {
trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
/* fixup error code as per RFC5661 */
if (nfserr == nfserr_bad_stateid)
nfserr = nfserr_badlayout;
goto out;
}
/* LAYOUTCOMMIT does not require any serialization */
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
lcp->lc_size_chg = 1;
lcp->lc_newsize = new_size;
} else {
lcp->lc_size_chg = 0;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static __be32
nfsd4_layoutreturn(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct svc_fh *current_fh = &cstate->current_fh;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type))
goto out;
switch (lrp->lr_seg.iomode) {
case IOMODE_READ:
case IOMODE_RW:
case IOMODE_ANY:
break;
default:
dprintk("%s: invalid iomode %d\n", __func__,
lrp->lr_seg.iomode);
nfserr = nfserr_inval;
goto out;
}
switch (lrp->lr_return_type) {
case RETURN_FILE:
nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp);
break;
case RETURN_FSID:
case RETURN_ALL:
nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp);
break;
default:
dprintk("%s: invalid return_type %d\n", __func__,
lrp->lr_return_type);
nfserr = nfserr_inval;
break;
}
out:
return nfserr;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd4_getxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_getxattr *getxattr = &u->getxattr;
return nfsd_getxattr(rqstp, &cstate->current_fh,
getxattr->getxa_name, &getxattr->getxa_buf,
&getxattr->getxa_len);
}
static __be32
nfsd4_setxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setxattr *setxattr = &u->setxattr;
__be32 ret;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
ret = nfsd_setxattr(rqstp, &cstate->current_fh, setxattr->setxa_name,
setxattr->setxa_buf, setxattr->setxa_len,
setxattr->setxa_flags);
if (!ret)
set_change_info(&setxattr->setxa_cinfo, &cstate->current_fh);
return ret;
}
static __be32
nfsd4_listxattrs(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
/*
* Get the entire list, then copy out only the user attributes
* in the encode function.
*/
return nfsd_listxattr(rqstp, &cstate->current_fh,
&u->listxattrs.lsxa_buf, &u->listxattrs.lsxa_len);
}
static __be32
nfsd4_removexattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_removexattr *removexattr = &u->removexattr;
__be32 ret;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
ret = nfsd_removexattr(rqstp, &cstate->current_fh,
removexattr->rmxa_name);
if (!ret)
set_change_info(&removexattr->rmxa_cinfo, &cstate->current_fh);
return ret;
}
/*
* NULL call.
*/
static __be32
nfsd4_proc_null(struct svc_rqst *rqstp)
{
return rpc_success;
}
static inline void nfsd4_increment_op_stats(u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
}
static const struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
* Enforce NFSv4.1 COMPOUND ordering rules:
*
* Also note, enforced elsewhere:
* - SEQUENCE other than as first op results in
* NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
* - BIND_CONN_TO_SESSION must be the only op in its compound.
* (Enforced in nfsd4_bind_conn_to_session().)
* - DESTROY_SESSION must be the final operation in a compound, if
* sessionid's in SEQUENCE and DESTROY_SESSION are the same.
* (Enforced in nfsd4_destroy_session().)
*/
static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
struct nfsd4_op *first_op = &args->ops[0];
/* These ordering requirements don't apply to NFSv4.0: */
if (args->minorversion == 0)
return nfs_ok;
/* This is weird, but OK, not our problem: */
if (args->opcnt == 0)
return nfs_ok;
if (first_op->status == nfserr_op_illegal)
return nfs_ok;
if (!(nfsd4_ops[first_op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
return nfserr_op_not_in_session;
if (first_op->opnum == OP_SEQUENCE)
return nfs_ok;
/*
* So first_op is something allowed outside a session, like
* EXCHANGE_ID; but then it has to be the only op in the
* compound:
*/
if (args->opcnt != 1)
return nfserr_not_only_op;
return nfs_ok;
}
const struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
{
return &nfsd4_ops[op->opnum];
}
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL)
return false;
return OPDESC(op)->op_flags & OP_CACHEME;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_op *next = &argp->ops[resp->opcnt];
const struct nfsd4_operation *thisd = OPDESC(this);
const struct nfsd4_operation *nextd;
/*
 * Most ops check wrongsec on their own; only the putfh-like ops
 * have special rules.
 */
if (!(thisd->op_flags & OP_IS_PUTFH_LIKE))
return false;
/*
* rfc 5661 2.6.3.1.1.6: don't bother erroring out a
* put-filehandle operation if we're not going to use the
* result:
*/
if (argp->opcnt == resp->opcnt)
return false;
if (next->opnum == OP_ILLEGAL)
return false;
nextd = OPDESC(next);
/*
* Rest of 2.6.3.1.1: certain operations will return WRONGSEC
* errors themselves as necessary; others should check for them
* now:
*/
return !(nextd->op_flags & OP_HANDLES_WRONGSEC);
}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
static void
check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
{
struct nfsd4_op *op, *current_op = NULL, *saved_op = NULL;
struct nfsd4_copy *copy;
struct nfsd4_putfh *putfh;
int i;
/* traverse all operations; if the compound contains an inter-server
 * COPY, mark the source filehandle to skip verification
 */
for (i = 0; i < args->opcnt; i++) {
op = &args->ops[i];
if (op->opnum == OP_PUTFH)
current_op = op;
else if (op->opnum == OP_SAVEFH)
saved_op = current_op;
else if (op->opnum == OP_RESTOREFH)
current_op = saved_op;
else if (op->opnum == OP_COPY) {
copy = (struct nfsd4_copy *)&op->u;
if (!saved_op) {
op->status = nfserr_nofilehandle;
return;
}
putfh = (struct nfsd4_putfh *)&saved_op->u;
if (nfsd4_ssc_is_inter(copy))
putfh->no_verify = true;
}
}
}
#else
static void
check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
{
}
#endif
/*
* COMPOUND call.
*/
static __be32
nfsd4_proc_compound(struct svc_rqst *rqstp)
{
struct nfsd4_compoundargs *args = rqstp->rq_argp;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_op *op;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct svc_fh *current_fh = &cstate->current_fh;
struct svc_fh *save_fh = &cstate->save_fh;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
__be32 status;
resp->xdr = &rqstp->rq_res_stream;
resp->statusp = resp->xdr->p;
/* reserve space for: NFS status code */
xdr_reserve_space(resp->xdr, XDR_UNIT);
/* reserve space for: taglen, tag, and opcnt */
xdr_reserve_space(resp->xdr, XDR_UNIT * 2 + args->taglen);
resp->taglen = args->taglen;
resp->tag = args->tag;
resp->rqstp = rqstp;
cstate->minorversion = args->minorversion;
fh_init(current_fh, NFS4_FHSIZE);
fh_init(save_fh, NFS4_FHSIZE);
/*
* Don't use the deferral mechanism for NFSv4; compounds make it
* too hard to avoid non-idempotency problems.
*/
clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
/*
* According to RFC3010, this takes precedence over all other errors.
*/
status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(nn, args->minorversion, NFSD_TEST) <= 0)
goto out;
status = nfs41_check_op_ordering(args);
if (status) {
op = &args->ops[0];
op->status = status;
resp->opcnt = 1;
goto encode_op;
}
check_if_stalefh_allowed(args);
rqstp->rq_lease_breaker = (void **)&cstate->clp;
trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) {
/* If there are still more operations to process,
* stop here and report NFS4ERR_RESOURCE. */
if (cstate->minorversion == 0 &&
args->client_opcnt > resp->opcnt) {
op->status = nfserr_resource;
goto encode_op;
}
}
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
* it will be set to nfserr_bad_xdr.
*/
if (op->status) {
if (op->opnum == OP_OPEN)
op->status = nfsd4_open_omfg(rqstp, cstate, op);
goto encode_op;
}
if (!current_fh->fh_dentry &&
!HAS_FH_FLAG(current_fh, NFSD4_FH_FOREIGN)) {
if (!(op->opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
op->status = nfserr_nofilehandle;
goto encode_op;
}
} else if (current_fh->fh_export &&
current_fh->fh_export->ex_fslocs.migrated &&
!(op->opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
op->status = nfserr_moved;
goto encode_op;
}
fh_clear_pre_post_attrs(current_fh);
/* If op is non-idempotent */
if (op->opdesc->op_flags & OP_MODIFIES_SOMETHING) {
/*
* Don't execute this op if we couldn't encode a
* successful reply:
*/
u32 plen = op->opdesc->op_rsize_bop(rqstp, op);
/*
* Plus if there's another operation, make sure
* we'll have space to at least encode an error:
*/
if (resp->opcnt < args->opcnt)
plen += COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, plen);
}
if (op->status)
goto encode_op;
if (op->opdesc->op_get_currentstateid)
op->opdesc->op_get_currentstateid(cstate, &op->u);
op->status = op->opdesc->op_func(rqstp, cstate, &op->u);
/* Only from SEQUENCE */
if (cstate->status == nfserr_replay_cache) {
dprintk("%s NFS4.1 replay from cache\n", __func__);
status = op->status;
goto out;
}
if (!op->status) {
if (op->opdesc->op_set_currentstateid)
op->opdesc->op_set_currentstateid(cstate, &op->u);
if (op->opdesc->op_flags & OP_CLEAR_STATEID)
clear_current_stateid(cstate);
if (current_fh->fh_export &&
need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
encode_op:
if (op->status == nfserr_replay_me) {
op->replay = &cstate->replay_owner->so_replay;
nfsd4_encode_replay(resp->xdr, op);
status = op->status = op->replay->rp_status;
} else {
nfsd4_encode_operation(resp, op);
status = op->status;
}
trace_nfsd_compound_status(args->client_opcnt, resp->opcnt,
status, nfsd4_op_name(op->opnum));
nfsd4_cstate_clear_replay(cstate);
nfsd4_increment_op_stats(op->opnum);
}
fh_put(current_fh);
fh_put(save_fh);
BUG_ON(cstate->replay_owner);
out:
cstate->status = status;
/* Reset deferral mechanism for RPC deferrals */
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
return rpc_success;
}
#define op_encode_hdr_size (2)
#define op_encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define op_encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define op_encode_change_info_maxsz (5)
#define nfs4_fattr_bitmap_maxsz (4)
/* We'll fall back on returning no lockowner if we run out of space: */
#define op_encode_lockowner_maxsz (0)
#define op_encode_lock_denied_maxsz (8 + op_encode_lockowner_maxsz)
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define op_encode_ace_maxsz (3 + nfs4_owner_maxsz)
#define op_encode_delegation_maxsz (1 + op_encode_stateid_maxsz + 1 + \
op_encode_ace_maxsz)
#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
/*
* The _rsize() helpers are invoked by the NFSv4 COMPOUND decoder, which
* is called before sunrpc sets rq_res.buflen. Thus we have to compute
* the maximum payload size here, based on transport limits and the size
* of the remaining space in the rq_pages array.
*/
static u32 nfsd4_max_payload(const struct svc_rqst *rqstp)
{
u32 buflen;
buflen = (rqstp->rq_page_end - rqstp->rq_next_page) * PAGE_SIZE;
buflen -= rqstp->rq_auth_slack;
buflen -= rqstp->rq_res.head[0].iov_len;
return min_t(u32, buflen, svc_max_payload(rqstp));
}
static u32 nfsd4_only_status_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size) * sizeof(__be32);
}
static u32 nfsd4_status_stateid_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32);
}
static u32 nfsd4_access_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
/* ac_supported, ac_resp_access */
return (op_encode_hdr_size + 2)* sizeof(__be32);
}
static u32 nfsd4_commit_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
static u32 nfsd4_create_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
/*
 * Note: since this is an idempotent operation, we won't insist on
 * failing the op prematurely if the estimate is too large. We may
 * turn off splice reads unnecessarily.
 */
static u32 nfsd4_getattr_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
const u32 *bmap = op->u.getattr.ga_bmval;
u32 bmap0 = bmap[0], bmap1 = bmap[1], bmap2 = bmap[2];
u32 ret = 0;
if (bmap0 & FATTR4_WORD0_ACL)
return nfsd4_max_payload(rqstp);
if (bmap0 & FATTR4_WORD0_FS_LOCATIONS)
return nfsd4_max_payload(rqstp);
if (bmap1 & FATTR4_WORD1_OWNER) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER;
}
if (bmap1 & FATTR4_WORD1_OWNER_GROUP) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER_GROUP;
}
if (bmap0 & FATTR4_WORD0_FILEHANDLE) {
ret += NFS4_FHSIZE + 4;
bmap0 &= ~FATTR4_WORD0_FILEHANDLE;
}
if (bmap2 & FATTR4_WORD2_SECURITY_LABEL) {
ret += NFS4_MAXLABELLEN + 12;
bmap2 &= ~FATTR4_WORD2_SECURITY_LABEL;
}
/*
 * The largest of the remaining attributes is 16 bytes (e.g.,
 * supported_attributes)
 */
ret += 16 * (hweight32(bmap0) + hweight32(bmap1) + hweight32(bmap2));
/* bitmask, length */
ret += 20;
return ret;
}
static u32 nfsd4_getfh_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + NFS4_FHSIZE;
}
static u32 nfsd4_link_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static u32 nfsd4_lock_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
* sizeof(__be32);
}
static u32 nfsd4_open_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz
+ op_encode_change_info_maxsz + 1
+ nfs4_fattr_bitmap_maxsz
+ op_encode_delegation_maxsz) * sizeof(__be32);
}
static u32 nfsd4_read_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min(op->u.read.rd_length, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static u32 nfsd4_read_plus_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min(op->u.read.rd_length, nfsd4_max_payload(rqstp));
/*
* If we detect that the file changed during hole encoding, then we
* recover by encoding the remaining reply as data. This means we need
* to set aside enough room to encode two data segments.
*/
u32 seg_len = 2 * (1 + 2 + 1);
return (op_encode_hdr_size + 2 + seg_len + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static u32 nfsd4_readdir_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min(op->u.readdir.rd_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + op_encode_verifier_maxsz +
XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static u32 nfsd4_readlink_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE;
}
static u32 nfsd4_remove_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static u32 nfsd4_rename_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ op_encode_change_info_maxsz) * sizeof(__be32);
}
static u32 nfsd4_sequence_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
}
static u32 nfsd4_test_stateid_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1 + op->u.test_stateid.ts_num_ids)
* sizeof(__be32);
}
static u32 nfsd4_setattr_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
static u32 nfsd4_secinfo_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + RPC_AUTH_MAXFLAVOR *
(4 + XDR_QUADLEN(GSS_OID_MAX_LEN))) * sizeof(__be32);
}
static u32 nfsd4_setclientid_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
sizeof(__be32);
}
static u32 nfsd4_write_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + op_encode_verifier_maxsz) * sizeof(__be32);
}
static u32 nfsd4_exchange_id_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
1 + 1 + /* eir_flags, spr_how */\
4 + /* spo_must_enforce & _allow with bitmap */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
/* eir_server_scope<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
1 + /* eir_server_impl_id array length */\
0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
}
static u32 nfsd4_bind_conn_to_session_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
}
static u32 nfsd4_create_session_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
2 + /* csr_sequence, csr_flags */\
op_encode_channel_attrs_maxsz + \
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
static u32 nfsd4_copy_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* wr_callback */ +
op_encode_stateid_maxsz /* wr_callback */ +
2 /* wr_count */ +
1 /* wr_committed */ +
op_encode_verifier_maxsz +
1 /* cr_consecutive */ +
1 /* cr_synchronous */) * sizeof(__be32);
}
static u32 nfsd4_offload_status_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
2 /* osr_count */ +
1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
}
static u32 nfsd4_copy_notify_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
3 /* cnr_lease_time */ +
1 /* We support one cnr_source_server */ +
1 /* cnr_stateid seq */ +
op_encode_stateid_maxsz /* cnr_stateid */ +
1 /* num cnr_source_server*/ +
1 /* nl4_type */ +
1 /* nl4 size */ +
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) /*nl4_loc + nl4_loc_sz */)
* sizeof(__be32);
}
#ifdef CONFIG_NFSD_PNFS
static u32 nfsd4_getdeviceinfo_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min(op->u.getdeviceinfo.gd_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size +
1 /* gd_layout_type*/ +
XDR_QUADLEN(rlen) +
2 /* gd_notify_types */) * sizeof(__be32);
}
/*
* At this stage we don't really know what layout driver will handle the request,
* so we need to define an arbitrary upper bound here.
*/
#define MAX_LAYOUT_SIZE 128
static u32 nfsd4_layoutget_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* logr_return_on_close */ +
op_encode_stateid_maxsz +
1 /* nr of layouts */ +
MAX_LAYOUT_SIZE) * sizeof(__be32);
}
static u32 nfsd4_layoutcommit_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* locr_newsize */ +
2 /* ns_size */) * sizeof(__be32);
}
static u32 nfsd4_layoutreturn_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* lrs_stateid */ +
op_encode_stateid_maxsz) * sizeof(__be32);
}
#endif /* CONFIG_NFSD_PNFS */
static u32 nfsd4_seek_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 3) * sizeof(__be32);
}
static u32 nfsd4_getxattr_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min_t(u32, XATTR_SIZE_MAX, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static u32 nfsd4_setxattr_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static u32 nfsd4_listxattrs_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
u32 rlen = min(op->u.listxattrs.lsxa_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static u32 nfsd4_removexattr_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static const struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = nfsd4_access,
.op_name = "OP_ACCESS",
.op_rsize_bop = nfsd4_access_rsize,
},
[OP_CLOSE] = {
.op_func = nfsd4_close,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
.op_rsize_bop = nfsd4_status_stateid_rsize,
.op_get_currentstateid = nfsd4_get_closestateid,
.op_set_currentstateid = nfsd4_set_closestateid,
},
[OP_COMMIT] = {
.op_func = nfsd4_commit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
.op_rsize_bop = nfsd4_commit_rsize,
},
[OP_CREATE] = {
.op_func = nfsd4_create,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
.op_name = "OP_CREATE",
.op_rsize_bop = nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
.op_func = nfsd4_delegreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
.op_get_currentstateid = nfsd4_get_delegreturnstateid,
},
[OP_GETATTR] = {
.op_func = nfsd4_getattr,
.op_flags = ALLOWED_ON_ABSENT_FS,
.op_rsize_bop = nfsd4_getattr_rsize,
.op_name = "OP_GETATTR",
},
[OP_GETFH] = {
.op_func = nfsd4_getfh,
.op_name = "OP_GETFH",
.op_rsize_bop = nfsd4_getfh_rsize,
},
[OP_LINK] = {
.op_func = nfsd4_link,
.op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
| OP_CACHEME,
.op_name = "OP_LINK",
.op_rsize_bop = nfsd4_link_rsize,
},
[OP_LOCK] = {
.op_func = nfsd4_lock,
.op_flags = OP_MODIFIES_SOMETHING |
OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCK",
.op_rsize_bop = nfsd4_lock_rsize,
.op_set_currentstateid = nfsd4_set_lockstateid,
},
[OP_LOCKT] = {
.op_func = nfsd4_lockt,
.op_flags = OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCKT",
.op_rsize_bop = nfsd4_lock_rsize,
},
[OP_LOCKU] = {
.op_func = nfsd4_locku,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
.op_rsize_bop = nfsd4_status_stateid_rsize,
.op_get_currentstateid = nfsd4_get_lockustateid,
},
[OP_LOOKUP] = {
.op_func = nfsd4_lookup,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUP",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_LOOKUPP] = {
.op_func = nfsd4_lookupp,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUPP",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_NVERIFY] = {
.op_func = nfsd4_nverify,
.op_name = "OP_NVERIFY",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_OPEN] = {
.op_func = nfsd4_open,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
.op_rsize_bop = nfsd4_open_rsize,
.op_set_currentstateid = nfsd4_set_openstateid,
},
[OP_OPEN_CONFIRM] = {
.op_func = nfsd4_open_confirm,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
.op_rsize_bop = nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
.op_func = nfsd4_open_downgrade,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
.op_rsize_bop = nfsd4_status_stateid_rsize,
.op_get_currentstateid = nfsd4_get_opendowngradestateid,
.op_set_currentstateid = nfsd4_set_opendowngradestateid,
},
[OP_PUTFH] = {
.op_func = nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTFH",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTPUBFH",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTROOTFH",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_READ] = {
.op_func = nfsd4_read,
.op_release = nfsd4_read_release,
.op_name = "OP_READ",
.op_rsize_bop = nfsd4_read_rsize,
.op_get_currentstateid = nfsd4_get_readstateid,
},
[OP_READDIR] = {
.op_func = nfsd4_readdir,
.op_name = "OP_READDIR",
.op_rsize_bop = nfsd4_readdir_rsize,
},
[OP_READLINK] = {
.op_func = nfsd4_readlink,
.op_name = "OP_READLINK",
.op_rsize_bop = nfsd4_readlink_rsize,
},
[OP_REMOVE] = {
.op_func = nfsd4_remove,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
.op_rsize_bop = nfsd4_remove_rsize,
},
[OP_RENAME] = {
.op_func = nfsd4_rename,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_RENAME",
.op_rsize_bop = nfsd4_rename_rsize,
},
[OP_RENEW] = {
.op_func = nfsd4_renew,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_RESTOREFH] = {
.op_func = nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
.op_func = nfsd4_savefh,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
.op_func = nfsd4_secinfo,
.op_release = nfsd4_secinfo_release,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO",
.op_rsize_bop = nfsd4_secinfo_rsize,
},
[OP_SETATTR] = {
.op_func = nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME
| OP_NONTRIVIAL_ERROR_ENCODE,
.op_rsize_bop = nfsd4_setattr_rsize,
.op_get_currentstateid = nfsd4_get_setattrstateid,
},
[OP_SETCLIENTID] = {
.op_func = nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME
| OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_SETCLIENTID",
.op_rsize_bop = nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
.op_func = nfsd4_verify,
.op_name = "OP_VERIFY",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_WRITE] = {
.op_func = nfsd4_write,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
.op_rsize_bop = nfsd4_write_rsize,
.op_get_currentstateid = nfsd4_get_writestateid,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = nfsd4_release_lockowner,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
.op_rsize_bop = nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = nfsd4_exchange_id,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
.op_rsize_bop = nfsd4_exchange_id_rsize,
},
[OP_BACKCHANNEL_CTL] = {
.op_func = nfsd4_backchannel_ctl,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_BACKCHANNEL_CTL",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
.op_rsize_bop = nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
.op_func = nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
.op_rsize_bop = nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
.op_func = nfsd4_destroy_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
.op_func = nfsd4_sequence,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
.op_rsize_bop = nfsd4_sequence_rsize,
},
[OP_DESTROY_CLIENTID] = {
.op_func = nfsd4_destroy_clientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
.op_func = nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
.op_func = nfsd4_secinfo_no_name,
.op_release = nfsd4_secinfo_no_name_release,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
.op_rsize_bop = nfsd4_secinfo_rsize,
},
[OP_TEST_STATEID] = {
.op_func = nfsd4_test_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_TEST_STATEID",
.op_rsize_bop = nfsd4_test_stateid_rsize,
},
[OP_FREE_STATEID] = {
.op_func = nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
.op_get_currentstateid = nfsd4_get_freestateid,
.op_rsize_bop = nfsd4_only_status_rsize,
},
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = nfsd4_getdeviceinfo,
.op_release = nfsd4_getdeviceinfo_release,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_GETDEVICEINFO",
.op_rsize_bop = nfsd4_getdeviceinfo_rsize,
},
[OP_LAYOUTGET] = {
.op_func = nfsd4_layoutget,
.op_release = nfsd4_layoutget_release,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTGET",
.op_rsize_bop = nfsd4_layoutget_rsize,
},
[OP_LAYOUTCOMMIT] = {
.op_func = nfsd4_layoutcommit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTCOMMIT",
.op_rsize_bop = nfsd4_layoutcommit_rsize,
},
[OP_LAYOUTRETURN] = {
.op_func = nfsd4_layoutreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTRETURN",
.op_rsize_bop = nfsd4_layoutreturn_rsize,
},
#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
.op_func = nfsd4_allocate,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_ALLOCATE",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_DEALLOCATE] = {
.op_func = nfsd4_deallocate,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DEALLOCATE",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_CLONE] = {
.op_func = nfsd4_clone,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLONE",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_COPY] = {
.op_func = nfsd4_copy,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COPY",
.op_rsize_bop = nfsd4_copy_rsize,
},
[OP_READ_PLUS] = {
.op_func = nfsd4_read,
.op_release = nfsd4_read_release,
.op_name = "OP_READ_PLUS",
.op_rsize_bop = nfsd4_read_plus_rsize,
.op_get_currentstateid = nfsd4_get_readstateid,
},
[OP_SEEK] = {
.op_func = nfsd4_seek,
.op_name = "OP_SEEK",
.op_rsize_bop = nfsd4_seek_rsize,
},
[OP_OFFLOAD_STATUS] = {
.op_func = nfsd4_offload_status,
.op_name = "OP_OFFLOAD_STATUS",
.op_rsize_bop = nfsd4_offload_status_rsize,
},
[OP_OFFLOAD_CANCEL] = {
.op_func = nfsd4_offload_cancel,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OFFLOAD_CANCEL",
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_COPY_NOTIFY] = {
.op_func = nfsd4_copy_notify,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COPY_NOTIFY",
.op_rsize_bop = nfsd4_copy_notify_rsize,
},
[OP_GETXATTR] = {
.op_func = nfsd4_getxattr,
.op_name = "OP_GETXATTR",
.op_rsize_bop = nfsd4_getxattr_rsize,
},
[OP_SETXATTR] = {
.op_func = nfsd4_setxattr,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETXATTR",
.op_rsize_bop = nfsd4_setxattr_rsize,
},
[OP_LISTXATTRS] = {
.op_func = nfsd4_listxattrs,
.op_name = "OP_LISTXATTRS",
.op_rsize_bop = nfsd4_listxattrs_rsize,
},
[OP_REMOVEXATTR] = {
.op_func = nfsd4_removexattr,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVEXATTR",
.op_rsize_bop = nfsd4_removexattr_rsize,
},
};
/**
* nfsd4_spo_must_allow - Determine if the compound op contains an
* operation that is allowed to be sent with machine credentials
*
* @rqstp: a pointer to the struct svc_rqst
*
* Checks to see if the compound contains a spo_must_allow op
* and confirms that it was sent with the proper machine creds.
*/
bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
if (!cstate->minorversion)
return false;
if (cstate->spo_must_allowed)
return true;
opiter = resp->opcnt;
while (opiter < argp->opcnt) {
this = &argp->ops[opiter++];
if (test_bit(this->opnum, allow->u.longs) &&
cstate->clp->cl_mach_cred &&
nfsd4_mach_creds_match(cstate->clp, rqstp)) {
cstate->spo_must_allowed = true;
return true;
}
}
cstate->spo_must_allowed = false;
return false;
}
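/*
 * Return a worst-case estimate of the encoded reply size for @op, in
 * bytes.
 */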
int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
return op_encode_hdr_size * sizeof(__be32);
BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
return OPDESC(op)->op_rsize_bop(rqstp, op);
}
void warn_on_nonidempotent_op(struct nfsd4_op *op)
{
if (OPDESC(op)->op_flags & OP_MODIFIES_SOMETHING) {
pr_err("unable to encode reply to nonidempotent op %u (%s)\n",
op->opnum, nfsd4_op_name(op->opnum));
WARN_ON_ONCE(1);
}
}
static const char *nfsd4_op_name(unsigned opnum)
{
if (opnum < ARRAY_SIZE(nfsd4_ops))
return nfsd4_ops[opnum].op_name;
return "unknown_operation";
}
static const struct svc_procedure nfsd_procedures4[2] = {
[NFSPROC4_NULL] = {
.pc_func = nfsd4_proc_null,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
.pc_name = "NULL",
},
[NFSPROC4_COMPOUND] = {
.pc_func = nfsd4_proc_compound,
.pc_decode = nfs4svc_decode_compoundargs,
.pc_encode = nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
.pc_argzero = offsetof(struct nfsd4_compoundargs, iops),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
.pc_name = "COMPOUND",
},
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count4[ARRAY_SIZE(nfsd_procedures4)]);
const struct svc_version nfsd_version4 = {
.vs_vers = 4,
.vs_nproc = ARRAY_SIZE(nfsd_procedures4),
.vs_proc = nfsd_procedures4,
.vs_count = nfsd_count4,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
.vs_need_cong_ctrl = true,
};
| linux-master | fs/nfsd/nfs4proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2016 Christoph Hellwig.
*/
#include <linux/sunrpc/svc.h>
#include <linux/exportfs.h>
#include <linux/iomap.h>
#include <linux/nfs4.h>
#include "nfsd.h"
#include "blocklayoutxdr.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
__be32
nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
struct nfsd4_layoutget *lgp)
{
struct pnfs_block_extent *b = lgp->lg_content;
int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(__be32) + len);
if (!p)
return nfserr_toosmall;
*p++ = cpu_to_be32(len);
*p++ = cpu_to_be32(1); /* we always return a single extent */
p = xdr_encode_opaque_fixed(p, &b->vol_id,
sizeof(struct nfsd4_deviceid));
p = xdr_encode_hyper(p, b->foff);
p = xdr_encode_hyper(p, b->len);
p = xdr_encode_hyper(p, b->soff);
*p++ = cpu_to_be32(b->es);
return 0;
}
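/*
 * Encode a single volume of the device address. Only simple and SCSI
 * volumes are supported. Returns the encoded length in bytes, or a
 * negative errno.
 */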
static int
nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
{
__be32 *p;
int len;
switch (b->type) {
case PNFS_BLOCK_VOLUME_SIMPLE:
len = 4 + 4 + 8 + 4 + (XDR_QUADLEN(b->simple.sig_len) << 2);
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
*p++ = cpu_to_be32(b->type);
*p++ = cpu_to_be32(1); /* single signature */
p = xdr_encode_hyper(p, b->simple.offset);
p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
break;
case PNFS_BLOCK_VOLUME_SCSI:
len = 4 + 4 + 4 + 4 + (XDR_QUADLEN(b->scsi.designator_len) << 2) + 8;
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
*p++ = cpu_to_be32(b->type);
*p++ = cpu_to_be32(b->scsi.code_set);
*p++ = cpu_to_be32(b->scsi.designator_type);
p = xdr_encode_opaque(p, b->scsi.designator, b->scsi.designator_len);
p = xdr_encode_hyper(p, b->scsi.pr_key);
break;
default:
return -ENOTSUPP;
}
return len;
}
__be32
nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_block_deviceaddr *dev = gdp->gd_device;
int len = sizeof(__be32), ret, i;
__be32 *p;
/*
* See paragraph 5 of RFC 8881 S18.40.3.
*/
if (!gdp->gd_maxcount) {
if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
return nfs_ok;
}
p = xdr_reserve_space(xdr, len + sizeof(__be32));
if (!p)
return nfserr_resource;
for (i = 0; i < dev->nr_volumes; i++) {
ret = nfsd4_block_encode_volume(xdr, &dev->volumes[i]);
if (ret < 0)
return nfserrno(ret);
len += ret;
}
/*
 * Fill in the overall length and number of volumes at the beginning
 * of the device address.
 */
*p++ = cpu_to_be32(len);
*p++ = cpu_to_be32(dev->nr_volumes);
return 0;
}
int
nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
u32 block_size)
{
struct iomap *iomaps;
u32 nr_iomaps, i;
if (len < sizeof(u32)) {
dprintk("%s: extent array too small: %u\n", __func__, len);
return -EINVAL;
}
len -= sizeof(u32);
if (len % PNFS_BLOCK_EXTENT_SIZE) {
dprintk("%s: extent array invalid: %u\n", __func__, len);
return -EINVAL;
}
nr_iomaps = be32_to_cpup(p++);
if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) {
dprintk("%s: extent array size mismatch: %u/%u\n",
__func__, len, nr_iomaps);
return -EINVAL;
}
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
if (!iomaps) {
dprintk("%s: failed to allocate extent array\n", __func__);
return -ENOMEM;
}
for (i = 0; i < nr_iomaps; i++) {
struct pnfs_block_extent bex;
memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
p = xdr_decode_hyper(p, &bex.foff);
if (bex.foff & (block_size - 1)) {
dprintk("%s: unaligned offset 0x%llx\n",
__func__, bex.foff);
goto fail;
}
p = xdr_decode_hyper(p, &bex.len);
if (bex.len & (block_size - 1)) {
dprintk("%s: unaligned length 0x%llx\n",
__func__, bex.len);
goto fail;
}
p = xdr_decode_hyper(p, &bex.soff);
if (bex.soff & (block_size - 1)) {
dprintk("%s: unaligned disk offset 0x%llx\n",
__func__, bex.soff);
goto fail;
}
bex.es = be32_to_cpup(p++);
if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
dprintk("%s: incorrect extent state %d\n",
__func__, bex.es);
goto fail;
}
iomaps[i].offset = bex.foff;
iomaps[i].length = bex.len;
}
*iomapp = iomaps;
return nr_iomaps;
fail:
kfree(iomaps);
return -EINVAL;
}
int
nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
u32 block_size)
{
struct iomap *iomaps;
u32 nr_iomaps, expected, i;
if (len < sizeof(u32)) {
dprintk("%s: extent array too small: %u\n", __func__, len);
return -EINVAL;
}
nr_iomaps = be32_to_cpup(p++);
expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE;
if (len != expected) {
dprintk("%s: extent array size mismatch: %u/%u\n",
__func__, len, expected);
return -EINVAL;
}
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
if (!iomaps) {
dprintk("%s: failed to allocate extent array\n", __func__);
return -ENOMEM;
}
for (i = 0; i < nr_iomaps; i++) {
u64 val;
p = xdr_decode_hyper(p, &val);
if (val & (block_size - 1)) {
dprintk("%s: unaligned offset 0x%llx\n", __func__, val);
goto fail;
}
iomaps[i].offset = val;
p = xdr_decode_hyper(p, &val);
if (val & (block_size - 1)) {
dprintk("%s: unaligned length 0x%llx\n", __func__, val);
goto fail;
}
iomaps[i].length = val;
}
*iomapp = iomaps;
return nr_iomaps;
fail:
kfree(iomaps);
return -EINVAL;
}
| linux-master | fs/nfsd/blocklayoutxdr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Christoph Hellwig.
*/
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>
#include "pnfs.h"
#include "netns.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
struct nfs4_layout {
struct list_head lo_perstate;
struct nfs4_layout_stateid *lo_state;
struct nfsd4_layout_seg lo_seg;
};
static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;
static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
[LAYOUT_FLEX_FILES] = &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
[LAYOUT_BLOCK_VOLUME] = &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
[LAYOUT_SCSI] = &scsi_layout_ops,
#endif
};
/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS 8
#define DEVID_HASH_SIZE (1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);
static inline u32 devid_hashfn(u64 idx)
{
return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}
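/*
 * Allocate a device ID to export fsid mapping for @fhp, or reuse an
 * existing mapping with the same fsid. Mappings live in a global hash
 * protected by nfsd_devid_lock.
 */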
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
const struct knfsd_fh *fh = &fhp->fh_handle;
size_t fsid_len = key_len(fh->fh_fsid_type);
struct nfsd4_deviceid_map *map, *old;
int i;
map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
if (!map)
return;
map->fsid_type = fh->fh_fsid_type;
memcpy(&map->fsid, fh->fh_fsid, fsid_len);
spin_lock(&nfsd_devid_lock);
if (fhp->fh_export->ex_devid_map)
goto out_unlock;
for (i = 0; i < DEVID_HASH_SIZE; i++) {
list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
if (old->fsid_type != fh->fh_fsid_type)
continue;
if (memcmp(old->fsid, fh->fh_fsid,
key_len(old->fsid_type)))
continue;
fhp->fh_export->ex_devid_map = old;
goto out_unlock;
}
}
map->idx = nfsd_devid_seq++;
list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
fhp->fh_export->ex_devid_map = map;
map = NULL;
out_unlock:
spin_unlock(&nfsd_devid_lock);
kfree(map);
}
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
struct nfsd4_deviceid_map *map, *ret = NULL;
rcu_read_lock();
list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
if (map->idx == idx)
ret = map;
rcu_read_unlock();
return ret;
}
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
u32 device_generation)
{
if (!fhp->fh_export->ex_devid_map) {
nfsd4_alloc_devid_map(fhp);
if (!fhp->fh_export->ex_devid_map)
return -ENOMEM;
}
id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
id->generation = device_generation;
id->pad = 0;
return 0;
}
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif
if (!(exp->ex_flags & NFSEXP_PNFS))
return;
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
if (sb->s_export_op->get_uuid &&
sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks)
exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
if (sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks &&
sb->s_bdev &&
sb->s_bdev->bd_disk->fops->pr_ops &&
sb->s_bdev->bd_disk->fops->get_unique_id)
exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
struct nfs4_layout_stateid *ls = layoutstateid(stid);
struct nfs4_client *clp = ls->ls_stid.sc_client;
struct nfs4_file *fp = ls->ls_stid.sc_file;
trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);
spin_lock(&clp->cl_lock);
list_del_init(&ls->ls_perclnt);
spin_unlock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
list_del_init(&ls->ls_perfile);
spin_unlock(&fp->fi_lock);
if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
nfsd_file_put(ls->ls_file);
if (ls->ls_recalled)
atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
kmem_cache_free(nfs4_layout_stateid_cache, ls);
}
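/*
 * Take an FL_LAYOUT lease on the file so that conflicting access
 * triggers a layout recall, unless the layout driver has recalls
 * disabled.
 */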
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
struct file_lock *fl;
int status;
if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
return 0;
fl = locks_alloc_lock();
if (!fl)
return -ENOMEM;
locks_init_lock(fl);
fl->fl_lmops = &nfsd4_layouts_lm_ops;
fl->fl_flags = FL_LAYOUT;
fl->fl_type = F_RDLCK;
fl->fl_end = OFFSET_MAX;
fl->fl_owner = ls;
fl->fl_pid = current->tgid;
fl->fl_file = ls->ls_file->nf_file;
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
if (status) {
locks_free_lock(fl);
return status;
}
BUG_ON(fl != NULL);
return 0;
}
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
struct nfs4_stid *parent, u32 layout_type)
{
struct nfs4_client *clp = cstate->clp;
struct nfs4_file *fp = parent->sc_file;
struct nfs4_layout_stateid *ls;
struct nfs4_stid *stp;
stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
nfsd4_free_layout_stateid);
if (!stp)
return NULL;
get_nfs4_file(fp);
stp->sc_file = fp;
ls = layoutstateid(stp);
INIT_LIST_HEAD(&ls->ls_perclnt);
INIT_LIST_HEAD(&ls->ls_perfile);
spin_lock_init(&ls->ls_lock);
INIT_LIST_HEAD(&ls->ls_layouts);
mutex_init(&ls->ls_mutex);
ls->ls_layout_type = layout_type;
nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
NFSPROC4_CLNT_CB_LAYOUT);
if (parent->sc_type == NFS4_DELEG_STID)
ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
else
ls->ls_file = find_any_file(fp);
BUG_ON(!ls->ls_file);
if (nfsd4_layout_setlease(ls)) {
nfsd_file_put(ls->ls_file);
put_nfs4_file(fp);
kmem_cache_free(nfs4_layout_stateid_cache, ls);
return NULL;
}
spin_lock(&clp->cl_lock);
stp->sc_type = NFS4_LAYOUT_STID;
list_add(&ls->ls_perclnt, &clp->cl_lo_states);
spin_unlock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
list_add(&ls->ls_perfile, &fp->fi_lo_states);
spin_unlock(&fp->fi_lock);
trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
return ls;
}
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, stateid_t *stateid,
bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
struct nfs4_layout_stateid *ls;
struct nfs4_stid *stid;
unsigned char typemask = NFS4_LAYOUT_STID;
__be32 status;
if (create)
typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
net_generic(SVC_NET(rqstp), nfsd_net_id));
if (status)
goto out;
if (!fh_match(&cstate->current_fh.fh_handle,
&stid->sc_file->fi_fhandle)) {
status = nfserr_bad_stateid;
goto out_put_stid;
}
if (stid->sc_type != NFS4_LAYOUT_STID) {
ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
nfs4_put_stid(stid);
status = nfserr_jukebox;
if (!ls)
goto out;
mutex_lock(&ls->ls_mutex);
} else {
ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
status = nfserr_bad_stateid;
mutex_lock(&ls->ls_mutex);
if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
goto out_unlock_stid;
if (layout_type != ls->ls_layout_type)
goto out_unlock_stid;
}
*lsp = ls;
return 0;
out_unlock_stid:
mutex_unlock(&ls->ls_mutex);
out_put_stid:
nfs4_put_stid(stid);
out:
return status;
}
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
spin_lock(&ls->ls_lock);
if (ls->ls_recalled)
goto out_unlock;
if (list_empty(&ls->ls_layouts))
goto out_unlock;
ls->ls_recalled = true;
atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
refcount_inc(&ls->ls_stid.sc_count);
nfsd4_run_cb(&ls->ls_recall);
out_unlock:
spin_unlock(&ls->ls_lock);
}
static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
u64 end = seg->offset + seg->length;
return end >= seg->offset ? end : NFS4_MAX_UINT64;
}
static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
if (end == NFS4_MAX_UINT64)
lo->length = NFS4_MAX_UINT64;
else
lo->length = end - lo->offset;
}
static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
return false;
if (layout_end(&lo->lo_seg) <= s->offset)
return false;
if (layout_end(s) <= lo->lo_seg.offset)
return false;
return true;
}
static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
if (lo->iomode != new->iomode)
return false;
if (layout_end(new) < lo->offset)
return false;
if (layout_end(lo) < new->offset)
return false;
lo->offset = min(lo->offset, new->offset);
layout_update_len(lo, max(layout_end(lo), layout_end(new)));
return true;
}
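/*
 * Editor's illustrative sketch (not part of the original source): a worked
 * example of the overflow clamp in layout_end() and of segment merging.
 * The function and its values are hypothetical.
 */
#if 0
static void layout_merge_example(void)
{
	struct nfsd4_layout_seg a = {
		.iomode = IOMODE_RW, .offset = 0, .length = 100,
	};
	struct nfsd4_layout_seg b = {
		.iomode = IOMODE_RW, .offset = 100, .length = NFS4_MAX_UINT64,
	};

	/*
	 * 100 + NFS4_MAX_UINT64 wraps below b.offset, so layout_end()
	 * clamps the result to NFS4_MAX_UINT64 ("to end of file").
	 */
	WARN_ON(layout_end(&b) != NFS4_MAX_UINT64);

	/* The segments abut (end of 'a' == start of 'b'), so they merge. */
	WARN_ON(!layouts_try_merge(&a, &b));
	WARN_ON(a.offset != 0 || a.length != NFS4_MAX_UINT64);
}
#endif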
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
struct nfs4_file *fp = ls->ls_stid.sc_file;
struct nfs4_layout_stateid *l, *n;
__be32 nfserr = nfs_ok;
assert_spin_locked(&fp->fi_lock);
list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
if (l != ls) {
nfsd4_recall_file_layout(l);
nfserr = nfserr_recallconflict;
}
}
return nfserr;
}
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
struct nfsd4_layout_seg *seg = &lgp->lg_seg;
struct nfs4_file *fp = ls->ls_stid.sc_file;
struct nfs4_layout *lp, *new = NULL;
__be32 nfserr;
spin_lock(&fp->fi_lock);
nfserr = nfsd4_recall_conflict(ls);
if (nfserr)
goto out;
spin_lock(&ls->ls_lock);
list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
if (layouts_try_merge(&lp->lo_seg, seg))
goto done;
}
spin_unlock(&ls->ls_lock);
spin_unlock(&fp->fi_lock);
new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
if (!new)
return nfserr_jukebox;
memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
new->lo_state = ls;
spin_lock(&fp->fi_lock);
nfserr = nfsd4_recall_conflict(ls);
if (nfserr)
goto out;
spin_lock(&ls->ls_lock);
list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
if (layouts_try_merge(&lp->lo_seg, seg))
goto done;
}
refcount_inc(&ls->ls_stid.sc_count);
list_add_tail(&new->lo_perstate, &ls->ls_layouts);
new = NULL;
done:
nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
spin_unlock(&ls->ls_lock);
out:
spin_unlock(&fp->fi_lock);
if (new)
kmem_cache_free(nfs4_layout_cache, new);
return nfserr;
}
static void
nfsd4_free_layouts(struct list_head *reaplist)
{
while (!list_empty(reaplist)) {
struct nfs4_layout *lp = list_first_entry(reaplist,
struct nfs4_layout, lo_perstate);
list_del(&lp->lo_perstate);
nfs4_put_stid(&lp->lo_state->ls_stid);
kmem_cache_free(nfs4_layout_cache, lp);
}
}
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
struct list_head *reaplist)
{
struct nfsd4_layout_seg *lo = &lp->lo_seg;
u64 end = layout_end(lo);
if (seg->offset <= lo->offset) {
if (layout_end(seg) >= end) {
list_move_tail(&lp->lo_perstate, reaplist);
return;
}
lo->offset = layout_end(seg);
} else {
		/* retain the whole layout segment on an unsupported split */
if (layout_end(seg) < end) {
dprintk("%s: split not supported\n", __func__);
return;
}
end = seg->offset;
}
layout_update_len(lo, end);
}
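/*
 * Editor's illustrative sketch (not part of the original source): the trim
 * cases above, with hypothetical ranges written as [offset, end):
 *
 *	held lo = [0, 100)
 *	return seg [0, 40)   -> head trim, lo becomes [40, 100)
 *	return seg [60, 100) -> tail trim, lo becomes [0, 60)
 *	return seg [0, 100)  -> fully covered, lo moves to the reaplist
 *	return seg [30, 70)  -> would split lo in two; splits are not
 *	                        supported, so lo is retained whole
 */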
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutreturn *lrp)
{
struct nfs4_layout_stateid *ls;
struct nfs4_layout *lp, *n;
LIST_HEAD(reaplist);
__be32 nfserr;
int found = 0;
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
false, lrp->lr_layout_type,
&ls);
if (nfserr) {
trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
return nfserr;
}
spin_lock(&ls->ls_lock);
list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
if (layouts_overlapping(lp, &lrp->lr_seg)) {
nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
found++;
}
}
if (!list_empty(&ls->ls_layouts)) {
if (found)
nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
lrp->lrs_present = 1;
} else {
trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
nfs4_unhash_stid(&ls->ls_stid);
lrp->lrs_present = 0;
}
spin_unlock(&ls->ls_lock);
mutex_unlock(&ls->ls_mutex);
nfs4_put_stid(&ls->ls_stid);
nfsd4_free_layouts(&reaplist);
return nfs_ok;
}
__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutreturn *lrp)
{
struct nfs4_layout_stateid *ls, *n;
struct nfs4_client *clp = cstate->clp;
struct nfs4_layout *lp, *t;
LIST_HEAD(reaplist);
lrp->lrs_present = 0;
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
if (ls->ls_layout_type != lrp->lr_layout_type)
continue;
if (lrp->lr_return_type == RETURN_FSID &&
!fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
&cstate->current_fh.fh_handle))
continue;
spin_lock(&ls->ls_lock);
list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
if (lrp->lr_seg.iomode == IOMODE_ANY ||
lrp->lr_seg.iomode == lp->lo_seg.iomode)
list_move_tail(&lp->lo_perstate, &reaplist);
}
spin_unlock(&ls->ls_lock);
}
spin_unlock(&clp->cl_lock);
nfsd4_free_layouts(&reaplist);
return 0;
}
static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
struct list_head *reaplist)
{
spin_lock(&ls->ls_lock);
list_splice_init(&ls->ls_layouts, reaplist);
spin_unlock(&ls->ls_lock);
}
void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
struct nfs4_layout_stateid *ls, *n;
LIST_HEAD(reaplist);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
nfsd4_return_all_layouts(ls, &reaplist);
spin_unlock(&clp->cl_lock);
nfsd4_free_layouts(&reaplist);
}
void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
struct nfs4_layout_stateid *ls, *n;
LIST_HEAD(reaplist);
spin_lock(&fp->fi_lock);
list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
if (ls->ls_stid.sc_client == clp)
nfsd4_return_all_layouts(ls, &reaplist);
}
spin_unlock(&fp->fi_lock);
nfsd4_free_layouts(&reaplist);
}
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
struct nfs4_client *clp = ls->ls_stid.sc_client;
char addr_str[INET6_ADDRSTRLEN];
static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
static char *envp[] = {
"HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL
};
char *argv[8];
int error;
rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
printk(KERN_WARNING
"nfsd: client %s failed to respond to layout recall. "
" Fencing..\n", addr_str);
argv[0] = (char *)nfsd_recall_failed;
argv[1] = addr_str;
argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
argv[3] = NULL;
error = call_usermodehelper(nfsd_recall_failed, argv, envp,
UMH_WAIT_PROC);
if (error) {
printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
addr_str, error);
}
}
static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
struct nfs4_layout_stateid *ls =
container_of(cb, struct nfs4_layout_stateid, ls_recall);
mutex_lock(&ls->ls_mutex);
nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
mutex_unlock(&ls->ls_mutex);
}
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
struct nfs4_layout_stateid *ls =
container_of(cb, struct nfs4_layout_stateid, ls_recall);
struct nfsd_net *nn;
ktime_t now, cutoff;
const struct nfsd4_layout_ops *ops;
trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
switch (task->tk_status) {
case 0:
case -NFS4ERR_DELAY:
/*
* Anything left? If not, then call it done. Note that we don't
* take the spinlock since this is an optimization and nothing
* should get added until the cb counter goes to zero.
*/
if (list_empty(&ls->ls_layouts))
return 1;
/* Poll the client until it's done with the layout */
now = ktime_get();
nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);
/* Client gets 2 lease periods to return it */
cutoff = ktime_add_ns(task->tk_start,
(u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);
if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
return 0;
}
fallthrough;
default:
/*
* Unknown error or non-responding client, we'll need to fence.
*/
trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);
ops = nfsd4_layout_ops[ls->ls_layout_type];
if (ops->fence_client)
ops->fence_client(ls);
else
nfsd4_cb_layout_fail(ls);
return 1;
case -NFS4ERR_NOMATCHING_LAYOUT:
trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
task->tk_status = 0;
return 1;
}
}
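/*
 * Editor's illustrative sketch (not part of the original source): the
 * recall-poll deadline computed above, in isolation. With a hypothetical
 * 90 second lease the client is re-polled every 10ms until 180 seconds
 * after the callback first started, after which it is fenced.
 */
#if 0
static bool recall_deadline_passed(ktime_t start, time64_t lease_seconds)
{
	/* Two full lease periods, converted to nanoseconds. */
	ktime_t cutoff = ktime_add_ns(start,
				      (u64)lease_seconds * NSEC_PER_SEC * 2);

	return !ktime_before(ktime_get(), cutoff);
}
#endif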
static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
struct nfs4_layout_stateid *ls =
container_of(cb, struct nfs4_layout_stateid, ls_recall);
LIST_HEAD(reaplist);
trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);
nfsd4_return_all_layouts(ls, &reaplist);
nfsd4_free_layouts(&reaplist);
nfs4_put_stid(&ls->ls_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
.prepare = nfsd4_cb_layout_prepare,
.done = nfsd4_cb_layout_done,
.release = nfsd4_cb_layout_release,
};
static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a layout isn't returned
* in time:
*/
fl->fl_break_time = 0;
nfsd4_recall_file_layout(fl->fl_owner);
return false;
}
static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
struct list_head *dispose)
{
BUG_ON(!(arg & F_UNLCK));
return lease_modify(onlist, arg, dispose);
}
static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
};
int
nfsd4_init_pnfs(void)
{
int i;
for (i = 0; i < DEVID_HASH_SIZE; i++)
INIT_LIST_HEAD(&nfsd_devid_hash[i]);
nfs4_layout_cache = kmem_cache_create("nfs4_layout",
sizeof(struct nfs4_layout), 0, 0, NULL);
if (!nfs4_layout_cache)
return -ENOMEM;
nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
if (!nfs4_layout_stateid_cache) {
kmem_cache_destroy(nfs4_layout_cache);
return -ENOMEM;
}
return 0;
}
void
nfsd4_exit_pnfs(void)
{
int i;
kmem_cache_destroy(nfs4_layout_cache);
kmem_cache_destroy(nfs4_layout_stateid_cache);
for (i = 0; i < DEVID_HASH_SIZE; i++) {
struct nfsd4_deviceid_map *map, *n;
list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
kfree(map);
}
}
| linux-master | fs/nfsd/nfs4layouts.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains all the stubs needed when communicating with lockd.
* This level of indirection is necessary so we can run nfsd+lockd without
* requiring the nfs client to be compiled in/loaded, and vice versa.
*
* Copyright (C) 1996, Olaf Kirch <[email protected]>
*/
#include <linux/file.h>
#include <linux/lockd/bind.h>
#include "nfsd.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_LOCKD
#ifdef CONFIG_LOCKD_V4
#define nlm_stale_fh nlm4_stale_fh
#define nlm_failed nlm4_failed
#else
#define nlm_stale_fh nlm_lck_denied_nolocks
#define nlm_failed nlm_lck_denied_nolocks
#endif
/*
* Note: we hold the dentry use count while the file is open.
*/
static __be32
nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
int mode)
{
__be32 nfserr;
int access;
struct svc_fh fh;
	/* Must initialize before use; the maxsize argument doesn't matter here. */
	fh_init(&fh, 0);
fh.fh_handle.fh_size = f->size;
memcpy(&fh.fh_handle.fh_raw, f->data, f->size);
fh.fh_export = NULL;
access = (mode == O_WRONLY) ? NFSD_MAY_WRITE : NFSD_MAY_READ;
access |= NFSD_MAY_LOCK;
nfserr = nfsd_open(rqstp, &fh, S_IFREG, access, filp);
fh_put(&fh);
	/* We return nlm error codes as nlm doesn't know
	 * about nfsd, but nfsd does know about nlm.
	 */
switch (nfserr) {
case nfs_ok:
return 0;
case nfserr_dropit:
return nlm_drop_reply;
case nfserr_stale:
return nlm_stale_fh;
default:
return nlm_failed;
}
}
static void
nlm_fclose(struct file *filp)
{
fput(filp);
}
static const struct nlmsvc_binding nfsd_nlm_ops = {
.fopen = nlm_fopen, /* open file for locking */
.fclose = nlm_fclose, /* close file */
};
void
nfsd_lockd_init(void)
{
dprintk("nfsd: initializing lockd\n");
nlmsvc_ops = &nfsd_nlm_ops;
}
void
nfsd_lockd_shutdown(void)
{
nlmsvc_ops = NULL;
}
| linux-master | fs/nfsd/lockd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Process version 3 NFSACL requests.
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <[email protected]>
*/
#include "nfsd.h"
/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
#include <linux/gfp.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"
/*
* NULL call.
*/
static __be32
nfsd3_proc_null(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Get the Access and/or Default ACL of a file.
*/
static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
{
struct nfsd3_getaclargs *argp = rqstp->rq_argp;
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct posix_acl *acl;
struct inode *inode;
svc_fh *fh;
fh = fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
if (resp->status != nfs_ok)
goto out;
inode = d_inode(fh->fh_dentry);
if (argp->mask & ~NFS_ACL_MASK) {
resp->status = nfserr_inval;
goto out;
}
resp->mask = argp->mask;
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (acl == NULL) {
/* Solaris returns the inode's minimum ACL. */
acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
}
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
}
resp->acl_access = acl;
}
if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
/* Check how Solaris handles requests for the Default ACL
of a non-directory! */
acl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
}
resp->acl_default = acl;
}
/* resp->acl_{access,default} are released in nfs3svc_release_getacl. */
out:
return rpc_success;
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
goto out;
}
/*
* Set the Access and/or Default ACL of a file.
*/
static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
{
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
struct inode *inode;
svc_fh *fh;
int error;
fh = fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR);
if (resp->status != nfs_ok)
goto out;
inode = d_inode(fh->fh_dentry);
error = fh_want_write(fh);
if (error)
goto out_errno;
inode_lock(inode);
error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_ACCESS,
argp->acl_access);
if (error)
goto out_drop_lock;
error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_DEFAULT,
argp->acl_default);
out_drop_lock:
inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
out:
/* argp->acl_{access,default} may have been allocated in
nfs3svc_decode_setaclargs. */
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
return rpc_success;
}
/*
* XDR decode functions
*/
static bool
nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_getaclargs *args = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return false;
if (xdr_stream_decode_u32(xdr, &args->mask) < 0)
return false;
return true;
}
static bool
nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
if (!svcxdr_decode_nfs_fh3(xdr, &argp->fh))
return false;
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return false;
if (argp->mask & ~NFS_ACL_MASK)
return false;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
&argp->acl_access : NULL))
return false;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL))
return false;
return true;
}
/*
* XDR encode functions
*/
/* GETACL */
static bool
nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
struct inode *inode;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
inode = d_inode(dentry);
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
resp->mask & NFS_ACL, 0))
return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
resp->mask & NFS_DFACL,
NFS_ACL_DEFAULT))
return false;
break;
default:
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return false;
}
return true;
}
/* SETACL */
static bool
nfs3svc_encode_setaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_attrstat *resp = rqstp->rq_resp;
return svcxdr_encode_nfsstat3(xdr, resp->status) &&
svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh);
}
/*
* XDR release functions
*/
static void nfs3svc_release_getacl(struct svc_rqst *rqstp)
{
struct nfsd3_getaclres *resp = rqstp->rq_resp;
fh_put(&resp->fh);
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
}
struct nfsd3_voidargs { int dummy; };
#define ST 1		/* status */
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */
static const struct svc_procedure nfsd_acl_procedures3[3] = {
[ACLPROC3_NULL] = {
.pc_func = nfsd3_proc_null,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
.pc_name = "NULL",
},
[ACLPROC3_GETACL] = {
.pc_func = nfsd3_proc_getacl,
.pc_decode = nfs3svc_decode_getaclargs,
.pc_encode = nfs3svc_encode_getaclres,
.pc_release = nfs3svc_release_getacl,
.pc_argsize = sizeof(struct nfsd3_getaclargs),
.pc_argzero = sizeof(struct nfsd3_getaclargs),
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
.pc_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.pc_func = nfsd3_proc_setacl,
.pc_decode = nfs3svc_decode_setaclargs,
.pc_encode = nfs3svc_encode_setaclres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_setaclargs),
.pc_argzero = sizeof(struct nfsd3_setaclargs),
.pc_ressize = sizeof(struct nfsd3_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT,
.pc_name = "SETACL",
},
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)]);
const struct svc_version nfsd_acl_version3 = {
.vs_vers = 3,
.vs_nproc = ARRAY_SIZE(nfsd_acl_procedures3),
.vs_proc = nfsd_acl_procedures3,
.vs_count = nfsd_acl_count3,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
};
| linux-master | fs/nfsd/nfs3acl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Tom Haynes <[email protected]>
*/
#include <linux/sunrpc/svc.h>
#include <linux/nfs4.h>
#include "nfsd.h"
#include "flexfilelayoutxdr.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
struct ff_idmap {
char buf[11];
int len;
};
__be32
nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
struct nfsd4_layoutget *lgp)
{
struct pnfs_ff_layout *fl = lgp->lg_content;
int len, mirror_len, ds_len, fh_len;
__be32 *p;
/*
* Unlike nfsd4_encode_user, we know these will
* always be stringified.
*/
struct ff_idmap uid;
struct ff_idmap gid;
fh_len = 4 + fl->fh.size;
uid.len = sprintf(uid.buf, "%u", from_kuid(&init_user_ns, fl->uid));
gid.len = sprintf(gid.buf, "%u", from_kgid(&init_user_ns, fl->gid));
/* 8 + len for recording the length, name, and padding */
ds_len = 20 + sizeof(stateid_opaque_t) + 4 + fh_len +
8 + uid.len + 8 + gid.len;
mirror_len = 4 + ds_len;
/* The layout segment */
len = 20 + mirror_len;
p = xdr_reserve_space(xdr, sizeof(__be32) + len);
if (!p)
return nfserr_toosmall;
*p++ = cpu_to_be32(len);
p = xdr_encode_hyper(p, 0); /* stripe unit of 1 */
*p++ = cpu_to_be32(1); /* single mirror */
*p++ = cpu_to_be32(1); /* single data server */
p = xdr_encode_opaque_fixed(p, &fl->deviceid,
sizeof(struct nfsd4_deviceid));
*p++ = cpu_to_be32(1); /* efficiency */
*p++ = cpu_to_be32(fl->stateid.si_generation);
p = xdr_encode_opaque_fixed(p, &fl->stateid.si_opaque,
sizeof(stateid_opaque_t));
*p++ = cpu_to_be32(1); /* single file handle */
p = xdr_encode_opaque(p, fl->fh.data, fl->fh.size);
p = xdr_encode_opaque(p, uid.buf, uid.len);
p = xdr_encode_opaque(p, gid.buf, gid.len);
*p++ = cpu_to_be32(fl->flags);
*p++ = cpu_to_be32(0); /* No stats collect hint */
return 0;
}
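/*
 * Editor's illustrative sketch (not part of the original source): the size
 * math above budgets "8 + len" per variable-length field because an XDR
 * opaque is a 4-byte length word followed by the data rounded up to a
 * 4-byte boundary, i.e. 4 + round_up(len, 4) bytes; 8 + len is therefore a
 * safe worst-case over-estimate (length word plus up to 3 pad bytes).
 */
#if 0
static unsigned int xdr_opaque_size(unsigned int len)
{
	return 4 + round_up(len, 4);	/* length word + padded payload */
}
#endif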
__be32
nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_ff_device_addr *da = gdp->gd_device;
int len;
int ver_len;
int addr_len;
__be32 *p;
/*
* See paragraph 5 of RFC 8881 S18.40.3.
*/
if (!gdp->gd_maxcount) {
if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
return nfs_ok;
}
/* len + padding for two strings */
addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
ver_len = 20;
len = 4 + ver_len + 4 + addr_len;
p = xdr_reserve_space(xdr, len + sizeof(__be32));
if (!p)
return nfserr_resource;
/*
	 * Fill in the overall length and the netaddr count at the
	 * beginning of the device address.
*/
*p++ = cpu_to_be32(len);
*p++ = cpu_to_be32(1); /* 1 netaddr */
p = xdr_encode_opaque(p, da->netaddr.netid, da->netaddr.netid_len);
p = xdr_encode_opaque(p, da->netaddr.addr, da->netaddr.addr_len);
	*p++ = cpu_to_be32(1);			/* 1 version */
*p++ = cpu_to_be32(da->version);
*p++ = cpu_to_be32(da->minor_version);
*p++ = cpu_to_be32(da->rsize);
*p++ = cpu_to_be32(da->wsize);
*p++ = cpu_to_be32(da->tightly_coupled);
return 0;
}
| linux-master | fs/nfsd/flexfilelayoutxdr.c |
/*
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <[email protected]>
* Andy Adamson <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
/* all fields zero */
};
static const stateid_t currentstateid = {
.si_generation = 1,
};
static const stateid_t close_stateid = {
.si_generation = 0xffffffffU,
};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
/* Locking: */
/*
* Currently used for the del_recall_lru and file hash table. In an
* effort to decrease the scope of the client_mutex, this spinlock may
* eventually cover more:
*/
static DEFINE_SPINLOCK(state_lock);
enum nfsd4_st_mutex_lock_subclass {
OPEN_STATEID_MUTEX = 0,
LOCK_STATEID_MUTEX = 1,
};
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
*/
static DECLARE_WAIT_QUEUE_HEAD(close_wq);
/*
* A waitqueue where a writer to clients/#/ctl destroying a client can
* wait for cl_rpc_users to drop to 0 and then for the client to be
* unhashed.
*/
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;
static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static struct workqueue_struct *laundry_wq;
int nfsd4_create_laundry_wq(void)
{
int rc = 0;
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL)
rc = -ENOMEM;
return rc;
}
void nfsd4_destroy_laundry_wq(void)
{
destroy_workqueue(laundry_wq);
}
static bool is_session_dead(struct nfsd4_session *ses)
{
return ses->se_flags & NFS4_SESSION_DEAD;
}
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
if (atomic_read(&ses->se_ref) > ref_held_by_me)
return nfserr_jukebox;
ses->se_flags |= NFS4_SESSION_DEAD;
return nfs_ok;
}
static bool is_client_expired(struct nfs4_client *clp)
{
return clp->cl_time == 0;
}
static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
struct nfs4_client *clp)
{
if (clp->cl_state != NFSD4_ACTIVE)
atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}
static __be32 get_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (is_client_expired(clp))
return nfserr_expired;
atomic_inc(&clp->cl_rpc_users);
nfsd4_dec_courtesy_client_count(nn, clp);
clp->cl_state = NFSD4_ACTIVE;
return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (is_client_expired(clp)) {
WARN_ON(1);
printk("%s: client (clientid %08x/%08x) already expired\n",
__func__,
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
return;
}
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = ktime_get_boottime_seconds();
nfsd4_dec_courtesy_client_count(nn, clp);
clp->cl_state = NFSD4_ACTIVE;
}
static void put_client_renew_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (!atomic_dec_and_test(&clp->cl_rpc_users))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
else
wake_up_all(&expiry_wq);
}
static void put_client_renew(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
else
wake_up_all(&expiry_wq);
spin_unlock(&nn->client_lock);
}
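/*
 * Editor's illustrative sketch (not part of the original source): the
 * atomic_dec_and_lock() idiom used above. The common-case put drops the
 * count without touching the lock; only the thread that performs the final
 * 1 -> 0 transition acquires the spinlock, atomically with that
 * transition, so teardown runs exactly once and cannot race with a
 * concurrent getter. 'struct my_thing' and its fields are hypothetical.
 */
#if 0
static void put_thing(struct my_thing *t, spinlock_t *lock)
{
	if (!atomic_dec_and_lock(&t->refcount, lock))
		return;			/* not the last reference */
	/* Last reference: 'lock' is held here; unhash, then free. */
	spin_unlock(lock);
	kfree(t);
}
#endif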
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
__be32 status;
if (is_session_dead(ses))
return nfserr_badsession;
status = get_client_locked(ses->se_client);
if (status)
return status;
atomic_inc(&ses->se_ref);
return nfs_ok;
}
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
free_session(ses);
put_client_renew_locked(clp);
}
static void nfsd4_put_session(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
nfsd4_put_session_locked(ses);
spin_unlock(&nn->client_lock);
}
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *cur, *found = NULL;
spin_lock(&nn->blocked_locks_lock);
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
WARN_ON(list_empty(&cur->nbl_lru));
list_del_init(&cur->nbl_lru);
found = cur;
break;
}
}
spin_unlock(&nn->blocked_locks_lock);
if (found)
locks_delete_block(&found->nbl_lock);
return found;
}
static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *nbl;
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
INIT_LIST_HEAD(&nbl->nbl_list);
INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
kref_init(&nbl->nbl_kref);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK);
}
}
return nbl;
}
static void
free_nbl(struct kref *kref)
{
struct nfsd4_blocked_lock *nbl;
nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
kfree(nbl);
}
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
locks_delete_block(&nbl->nbl_lock);
locks_release_private(&nbl->nbl_lock);
kref_put(&nbl->nbl_kref, free_nbl);
}
static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct nfsd4_blocked_lock *nbl;
LIST_HEAD(reaplist);
/* Dequeue all blocked locks */
spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&lo->lo_blocked)) {
nbl = list_first_entry(&lo->lo_blocked,
struct nfsd4_blocked_lock,
nbl_list);
list_del_init(&nbl->nbl_list);
WARN_ON(list_empty(&nbl->nbl_lru));
list_move(&nbl->nbl_lru, &reaplist);
}
spin_unlock(&nn->blocked_locks_lock);
/* Now free them */
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
nbl_lru);
list_del_init(&nbl->nbl_lru);
free_blocked_lock(nbl);
}
}
static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
struct nfsd4_blocked_lock *nbl = container_of(cb,
struct nfsd4_blocked_lock, nbl_cb);
locks_delete_block(&nbl->nbl_lock);
}
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
/*
* Since this is just an optimization, we don't try very hard if it
* turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
* just quit trying on anything else.
*/
switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, 1 * HZ);
return 0;
default:
return 1;
}
}
static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
struct nfsd4_blocked_lock *nbl = container_of(cb,
struct nfsd4_blocked_lock, nbl_cb);
free_blocked_lock(nbl);
}
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.prepare = nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
};
/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
* only what share bits are currently in force, but also what
* combinations of share bits previous opens have used. This allows us
* to enforce the recommendation in
* https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
* the server return an error if the client attempt to downgrade to a
* combination of share bits not explicable by closing some of its
* previous opens.
*
* This enforcement is arguably incomplete, since we don't keep
* track of access/deny bit combinations; so, e.g., we allow:
*
* OPEN allow read, deny write
* OPEN allow both, deny none
* DOWNGRADE allow read, deny none
*
* which we should reject.
*
* But you could also argue that our current code is already overkill,
* since it only exists to return NFS4ERR_INVAL on incorrect client
* behavior.
*/
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
int i;
unsigned int access = 0;
for (i = 1; i < 4; i++) {
if (test_bit(i, &bmap))
access |= i;
}
return access;
}
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
stp->st_access_bmap |= mask;
}
/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
stp->st_access_bmap &= ~mask;
}
/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
return (bool)(stp->st_access_bmap & mask);
}
/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
stp->st_deny_bmap |= mask;
}
/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
stp->st_deny_bmap &= ~mask;
}
/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
return (bool)(stp->st_deny_bmap & mask);
}
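/*
 * Editor's illustrative sketch (not part of the original source): how the
 * access bitmap described above records open history, assuming a freshly
 * zeroed stateid. The share-access values READ, WRITE and BOTH are 1, 2
 * and 3, and each sets its own bit, so "opened READ then WRITE" remains
 * distinguishable from "opened BOTH".
 */
#if 0
static void share_bmap_example(struct nfs4_ol_stateid *stp)
{
	set_access(NFS4_SHARE_ACCESS_READ, stp);	/* bmap |= 1 << 1 */
	set_access(NFS4_SHARE_ACCESS_WRITE, stp);	/* bmap |= 1 << 2 */

	/* The union of the recorded modes is BOTH (1 | 2 == 3)... */
	WARN_ON(bmap_to_share_mode(stp->st_access_bmap) !=
		NFS4_SHARE_ACCESS_BOTH);
	/* ...yet bit 3 is clear: no single open asked for BOTH. */
	WARN_ON(test_access(NFS4_SHARE_ACCESS_BOTH, stp));
}
#endif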
static int nfs4_access_to_omode(u32 access)
{
switch (access & NFS4_SHARE_ACCESS_BOTH) {
case NFS4_SHARE_ACCESS_READ:
return O_RDONLY;
case NFS4_SHARE_ACCESS_WRITE:
return O_WRONLY;
case NFS4_SHARE_ACCESS_BOTH:
return O_RDWR;
}
WARN_ON_ONCE(1);
return O_RDONLY;
}
static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}
static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
atomic_inc(&sop->so_count);
return sop;
}
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
return (sop->so_owner.len == owner->len) &&
0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
so_strhash) {
if (!so->so_is_open_owner)
continue;
if (same_owner_str(so, &open->op_owner))
return openowner(nfs4_get_stateowner(so));
}
return NULL;
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
spin_lock(&clp->cl_lock);
oo = find_openstateowner_str_locked(hashval, open, clp);
spin_unlock(&clp->cl_lock);
return oo;
}
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	const unsigned char *cptr = ptr;
u32 x = 0;
while (nbytes--) {
x *= 37;
x += *cptr++;
}
return x;
}
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
kmem_cache_free(file_slab, fp);
}
void
put_nfs4_file(struct nfs4_file *fi)
{
if (refcount_dec_and_test(&fi->fi_ref)) {
nfsd4_file_hash_remove(fi);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}
static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
struct nfsd_file *ret;
lockdep_assert_held(&f->fi_lock);
ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = find_writeable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
struct nfsd_file *ret;
lockdep_assert_held(&f->fi_lock);
ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
if (!ret)
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = find_readable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
static struct nfsd_file *
find_rw_file(struct nfs4_file *f)
{
struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
spin_unlock(&f->fi_lock);
return ret;
}
struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
struct nfsd_file *ret;
if (!f)
return NULL;
spin_lock(&f->fi_lock);
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
if (!ret) {
ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
}
spin_unlock(&f->fi_lock);
return ret;
}
static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
lockdep_assert_held(&f->fi_lock);
if (f->fi_fds[O_RDWR])
return f->fi_fds[O_RDWR];
if (f->fi_fds[O_WRONLY])
return f->fi_fds[O_WRONLY];
if (f->fi_fds[O_RDONLY])
return f->fi_fds[O_RDONLY];
return NULL;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
* Open owner state (share locks)
*/
/* hash tables for lock and open owners */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
unsigned int ret;
ret = opaque_hashval(ownername->data, ownername->len);
return ret & OWNER_HASH_MASK;
}
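/*
 * Editor's illustrative sketch (not part of the original source):
 * opaque_hashval() is the classic multiplicative string hash (x = x*37 + c
 * per byte), which ownerstr_hashval() masks down to one of the 256 owner
 * hash buckets. A hypothetical standalone equivalent:
 */
#if 0
static unsigned int owner_bucket(const unsigned char *data, int len)
{
	u32 x = 0;

	while (len--)
		x = x * 37 + *data++;
	return x & OWNER_HASH_MASK;	/* bucket index 0..255 */
}
#endif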
static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
static const struct rhashtable_params nfs4_file_rhash_params = {
.key_len = sizeof_field(struct nfs4_file, fi_inode),
.key_offset = offsetof(struct nfs4_file, fi_inode),
.head_offset = offsetof(struct nfs4_file, fi_rlist),
/*
* Start with a single page hash table to reduce resizing churn
* on light workloads.
*/
.min_size = 256,
.automatic_shrinking = true,
};
/*
* Check if courtesy clients have conflicting access and resolve it if possible
*
* access: is op_share_access if share_access is true.
* Check if access mode, op_share_access, would conflict with
* the current deny mode of the file 'fp'.
* access: is op_share_deny if share_access is false.
* Check if the deny mode, op_share_deny, would conflict with
* current access of the file 'fp'.
* stp: skip checking this entry.
* new_stp: normal open, not open upgrade.
*
* Function returns:
* false - access/deny mode conflict with normal client.
* true - no conflict or conflict with courtesy client(s) is resolved.
*/
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
struct nfs4_ol_stateid *st;
bool resolvable = true;
unsigned char bmap;
struct nfsd_net *nn;
struct nfs4_client *clp;
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
/* ignore lock stateid */
if (st->st_openstp)
continue;
if (st == stp && new_stp)
continue;
/* check file access against deny mode or vice versa */
bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
if (!(access & bmap_to_share_mode(bmap)))
continue;
clp = st->st_stid.sc_client;
if (try_to_expire_client(clp))
continue;
resolvable = false;
break;
}
if (resolvable) {
clp = stp->st_stid.sc_client;
nn = net_generic(clp->net, nfsd_net_id);
mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
}
return resolvable;
}
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
if (access & NFS4_SHARE_ACCESS_WRITE)
atomic_inc(&fp->fi_access[O_WRONLY]);
if (access & NFS4_SHARE_ACCESS_READ)
atomic_inc(&fp->fi_access[O_RDONLY]);
}
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
/* Does this access mode make sense? */
if (access & ~NFS4_SHARE_ACCESS_BOTH)
return nfserr_inval;
/* Does it conflict with a deny mode already set? */
if ((access & fp->fi_share_deny) != 0)
return nfserr_share_denied;
__nfs4_file_get_access(fp, access);
return nfs_ok;
}
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
/* Common case is that there is no deny mode. */
if (deny) {
/* Does this deny mode make sense? */
if (deny & ~NFS4_SHARE_DENY_BOTH)
return nfserr_inval;
if ((deny & NFS4_SHARE_DENY_READ) &&
atomic_read(&fp->fi_access[O_RDONLY]))
return nfserr_share_denied;
if ((deny & NFS4_SHARE_DENY_WRITE) &&
atomic_read(&fp->fi_access[O_WRONLY]))
return nfserr_share_denied;
}
return nfs_ok;
}
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
might_lock(&fp->fi_lock);
if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
struct nfsd_file *f1 = NULL;
struct nfsd_file *f2 = NULL;
swap(f1, fp->fi_fds[oflag]);
if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
swap(f2, fp->fi_fds[O_RDWR]);
spin_unlock(&fp->fi_lock);
if (f1)
nfsd_file_put(f1);
if (f2)
nfsd_file_put(f2);
}
}
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
if (access & NFS4_SHARE_ACCESS_WRITE)
__nfs4_file_put_access(fp, O_WRONLY);
if (access & NFS4_SHARE_ACCESS_READ)
__nfs4_file_put_access(fp, O_RDONLY);
}
/*
* Allocate a new open/delegation state counter. This is needed for
* pNFS for proper return on close semantics.
*
* Note that we only allocate it for pNFS-enabled exports, otherwise
* all pointers to struct nfs4_clnt_odstate are always NULL.
*/
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
struct nfs4_clnt_odstate *co;
co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
if (co) {
co->co_client = clp;
refcount_set(&co->co_odcount, 1);
}
return co;
}
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp = co->co_file;
lockdep_assert_held(&fp->fi_lock);
list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
if (co)
refcount_inc(&co->co_odcount);
}
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp;
if (!co)
return;
fp = co->co_file;
if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
list_del(&co->co_perfile);
spin_unlock(&fp->fi_lock);
nfsd4_return_all_file_layouts(co->co_client, fp);
kmem_cache_free(odstate_slab, co);
}
}
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
struct nfs4_clnt_odstate *co;
struct nfs4_client *cl;
if (!new)
return NULL;
cl = new->co_client;
spin_lock(&fp->fi_lock);
list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
if (co->co_client == cl) {
get_clnt_odstate(co);
goto out;
}
}
co = new;
co->co_file = fp;
hash_clnt_odstate_locked(new);
out:
spin_unlock(&fp->fi_lock);
return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *))
{
struct nfs4_stid *stid;
int new_id;
stid = kmem_cache_zalloc(slab, GFP_KERNEL);
if (!stid)
return NULL;
idr_preload(GFP_KERNEL);
spin_lock(&cl->cl_lock);
/* Reserving 0 for start of file in nfsdfs "states" file: */
new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
spin_unlock(&cl->cl_lock);
idr_preload_end();
if (new_id < 0)
goto out_free;
stid->sc_free = sc_free;
stid->sc_client = cl;
stid->sc_stateid.si_opaque.so_id = new_id;
stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
refcount_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
INIT_LIST_HEAD(&stid->sc_cp_list);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
* I don't think it is for 4.1. But with 4.0 I worry that, for
* example, a stray write retransmission could be accepted by
* the server when it should have been rejected. Therefore,
* adopt a trick from the sctp code to attempt to maximize the
* amount of time until an id is reused, by ensuring they always
* "increase" (mod INT_MAX):
*/
return stid;
out_free:
kmem_cache_free(slab, stid);
return NULL;
}
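/*
 * Editor's illustrative sketch (not part of the original source): the
 * preload + cyclic-allocation pattern used above. idr_preload() stashes
 * preallocated nodes so the allocation itself can run under a spinlock
 * with GFP_NOWAIT, and idr_alloc_cyclic() keeps handing out increasing
 * ids (mod INT_MAX) instead of immediately reusing freed ones.
 */
#if 0
static int alloc_monotonic_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; reserves memory */
	spin_lock(lock);
	/* start at 1; end of 0 means "no limit below INT_MAX" */
	id = idr_alloc_cyclic(idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;			/* negative errno on failure */
}
#endif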
/*
* Create a unique stateid_t to represent each COPY.
*/
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
unsigned char cs_type)
{
int new_id;
stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
idr_preload(GFP_KERNEL);
spin_lock(&nn->s2s_cp_lock);
new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
stid->cs_stid.si_opaque.so_id = new_id;
stid->cs_stid.si_generation = 1;
spin_unlock(&nn->s2s_cp_lock);
idr_preload_end();
if (new_id < 0)
return 0;
stid->cs_type = cs_type;
return 1;
}
int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}
struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
struct nfs4_stid *p_stid)
{
struct nfs4_cpntf_state *cps;
cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
if (!cps)
return NULL;
cps->cpntf_time = ktime_get_boottime_seconds();
refcount_set(&cps->cp_stateid.cs_count, 1);
if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
goto out_free;
spin_lock(&nn->s2s_cp_lock);
list_add(&cps->cp_list, &p_stid->sc_cp_list);
spin_unlock(&nn->s2s_cp_lock);
return cps;
out_free:
kfree(cps);
return NULL;
}
void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
struct nfsd_net *nn;
if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
return;
nn = net_generic(copy->cp_clp->net, nfsd_net_id);
spin_lock(&nn->s2s_cp_lock);
idr_remove(&nn->s2s_cp_stateids,
copy->cp_stateid.cs_stid.si_opaque.so_id);
spin_unlock(&nn->s2s_cp_lock);
}
static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
struct nfs4_cpntf_state *cps;
struct nfsd_net *nn;
nn = net_generic(net, nfsd_net_id);
spin_lock(&nn->s2s_cp_lock);
while (!list_empty(&stid->sc_cp_list)) {
cps = list_first_entry(&stid->sc_cp_list,
struct nfs4_cpntf_state, cp_list);
_free_cpntf_state_locked(nn, cps);
}
spin_unlock(&nn->s2s_cp_lock);
}
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
struct nfs4_stid *stid;
stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
if (!stid)
return NULL;
return openlockstateid(stid);
}
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
struct nfs4_delegation *dp = delegstateid(stid);
WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
kmem_cache_free(deleg_slab, stid);
atomic_long_dec(&num_delegations);
}
/*
* When we recall a delegation, we should be careful not to hand it
* out again straight away.
* To ensure this we keep a pair of bloom filters ('new' and 'old')
* in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
* When a delegation is recalled, the filehandle is stored in the "new"
* filter.
* Every 30 seconds we swap the filters and clear the "new" one,
* unless both are empty of course.
*
 * Each filter is 256 bits. We hash the filehandle to 32 bits and use the
* low 3 bytes as hash-table indices.
*
* 'blocked_delegations_lock', which is always taken in block_delegations(),
* is used to manage concurrent access. Testing does not need the lock
* except when swapping the two filters.
*/
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
int entries, old_entries;
time64_t swap_time;
int new; /* index into 'set' */
DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
static int delegation_blocked(struct knfsd_fh *fh)
{
u32 hash;
struct bloom_pair *bd = &blocked_delegations;
if (bd->entries == 0)
return 0;
if (ktime_get_seconds() - bd->swap_time > 30) {
spin_lock(&blocked_delegations_lock);
if (ktime_get_seconds() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
bd->swap_time = ktime_get_seconds();
}
spin_unlock(&blocked_delegations_lock);
}
hash = jhash(&fh->fh_raw, fh->fh_size, 0);
if (test_bit(hash&255, bd->set[0]) &&
test_bit((hash>>8)&255, bd->set[0]) &&
test_bit((hash>>16)&255, bd->set[0]))
return 1;
if (test_bit(hash&255, bd->set[1]) &&
test_bit((hash>>8)&255, bd->set[1]) &&
test_bit((hash>>16)&255, bd->set[1]))
return 1;
return 0;
}
static void block_delegations(struct knfsd_fh *fh)
{
u32 hash;
struct bloom_pair *bd = &blocked_delegations;
hash = jhash(&fh->fh_raw, fh->fh_size, 0);
spin_lock(&blocked_delegations_lock);
__set_bit(hash&255, bd->set[bd->new]);
__set_bit((hash>>8)&255, bd->set[bd->new]);
__set_bit((hash>>16)&255, bd->set[bd->new]);
if (bd->entries == 0)
bd->swap_time = ktime_get_seconds();
bd->entries += 1;
spin_unlock(&blocked_delegations_lock);
}
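/*
 * Editor's illustrative sketch (not part of the original source): the
 * two-generation aging scheme above in miniature. Inserts always go into
 * the "new" half; lookups probe both halves; every 30 seconds the halves
 * swap roles and the stale one is wiped. An entry therefore stays visible
 * for 30-60 seconds without any per-entry timestamps, and a bloom false
 * positive merely delays a delegation grant; it never corrupts state.
 */
#if 0
/* Probe one 256-bit filter with three bytes of a 32-bit hash. */
static bool bloom_probe(const unsigned long *set, u32 hash)
{
	return test_bit(hash & 255, set) &&
	       test_bit((hash >> 8) & 255, set) &&
	       test_bit((hash >> 16) & 255, set);
}
#endif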
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
struct nfs4_delegation *dp;
long n;
dprintk("NFSD alloc_init_deleg\n");
n = atomic_long_inc_return(&num_delegations);
if (n < 0 || n > max_delegations)
goto out_dec;
if (delegation_blocked(&fp->fi_fhandle))
goto out_dec;
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
if (dp == NULL)
goto out_dec;
/*
* delegation seqid's are never incremented. The 4.1 special
* meaning of seqid 0 isn't meaningful, really, but let's avoid
* 0 anyway just for consistency and use 1:
*/
dp->dl_stid.sc_stateid.si_generation = 1;
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_clnt_odstate = odstate;
get_clnt_odstate(odstate);
dp->dl_type = dl_type;
dp->dl_retries = 1;
dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
get_nfs4_file(fp);
dp->dl_stid.sc_file = fp;
return dp;
out_dec:
atomic_long_dec(&num_delegations);
return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
struct nfs4_file *fp = s->sc_file;
struct nfs4_client *clp = s->sc_client;
might_lock(&clp->cl_lock);
if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
wake_up_all(&close_wq);
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
nfs4_free_cpntf_statelist(clp->net, s);
spin_unlock(&clp->cl_lock);
s->sc_free(s);
if (fp)
put_nfs4_file(fp);
}
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
stateid_t *src = &stid->sc_stateid;
spin_lock(&stid->sc_lock);
if (unlikely(++src->si_generation == 0))
src->si_generation = 1;
memcpy(dst, src, sizeof(*dst));
spin_unlock(&stid->sc_lock);
}
static void put_deleg_file(struct nfs4_file *fp)
{
struct nfsd_file *nf = NULL;
spin_lock(&fp->fi_lock);
if (--fp->fi_delegees == 0)
swap(nf, fp->fi_deleg_file);
spin_unlock(&fp->fi_lock);
if (nf)
nfsd_file_put(nf);
}
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
struct nfsd_file *nf = fp->fi_deleg_file;
WARN_ON_ONCE(!fp->fi_delegees);
vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
put_deleg_file(fp);
}
static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_unlock_deleg_lease(dp);
nfs4_put_stid(&dp->dl_stid);
}
void nfs4_unhash_stid(struct nfs4_stid *s)
{
s->sc_type = 0;
}
/**
* nfs4_delegation_exists - Discover if this delegation already exists
* @clp: a pointer to the nfs4_client we're granting a delegation to
* @fp: a pointer to the nfs4_file we're granting a delegation on
*
* Return:
* On success: true iff an existing delegation is found
*/
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
struct nfs4_delegation *searchdp = NULL;
struct nfs4_client *searchclp = NULL;
lockdep_assert_held(&state_lock);
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
searchclp = searchdp->dl_stid.sc_client;
if (clp == searchclp) {
return true;
}
}
return false;
}
/**
* hash_delegation_locked - Add a delegation to the appropriate lists
* @dp: a pointer to the nfs4_delegation we are adding.
* @fp: a pointer to the nfs4_file we're granting a delegation on
*
* Return:
* On success: NULL if the delegation was successfully hashed.
*
* On error: -EAGAIN if one was previously granted to this
* nfs4_client for this nfs4_file. Delegation is not hashed.
*
*/
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
struct nfs4_client *clp = dp->dl_stid.sc_client;
lockdep_assert_held(&state_lock);
lockdep_assert_held(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
return -EAGAIN;
refcount_inc(&dp->dl_stid.sc_count);
dp->dl_stid.sc_type = NFS4_DELEG_STID;
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
return 0;
}
static bool delegation_hashed(struct nfs4_delegation *dp)
{
return !(list_empty(&dp->dl_perfile));
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
lockdep_assert_held(&state_lock);
if (!delegation_hashed(dp))
return false;
dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
/* Ensure that deleg break won't try to requeue it */
++dp->dl_time;
spin_lock(&fp->fi_lock);
list_del_init(&dp->dl_perclnt);
list_del_init(&dp->dl_recall_lru);
list_del_init(&dp->dl_perfile);
spin_unlock(&fp->fi_lock);
return true;
}
static void destroy_delegation(struct nfs4_delegation *dp)
{
bool unhashed;
spin_lock(&state_lock);
unhashed = unhash_delegation_locked(dp);
spin_unlock(&state_lock);
if (unhashed)
destroy_unhashed_deleg(dp);
}
static void revoke_delegation(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_stid.sc_client;
WARN_ON(!list_empty(&dp->dl_recall_lru));
trace_nfsd_stid_revoke(&dp->dl_stid);
if (clp->cl_minorversion) {
spin_lock(&clp->cl_lock);
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
refcount_inc(&dp->dl_stid.sc_count);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}
destroy_unhashed_deleg(dp);
}
/*
* SETCLIENTID state
*/
static unsigned int clientid_hashval(u32 id)
{
return id & CLIENT_HASH_MASK;
}
static unsigned int clientstr_hashval(struct xdr_netobj name)
{
return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}
/*
* A stateid that had a deny mode associated with it is being released
* or downgraded. Recalculate the deny mode on the file.
*/
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
struct nfs4_ol_stateid *stp;
spin_lock(&fp->fi_lock);
fp->fi_share_deny = 0;
list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
spin_unlock(&fp->fi_lock);
}
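/*
 * Example (hypothetical downgrade): if a stateid currently denies
 * NFS4_SHARE_DENY_BOTH (3) and the new deny mode is
 * NFS4_SHARE_DENY_READ (1), the loop below clears modes 2 and 3
 * (2 & 1 != 2 and 3 & 1 != 3), keeps mode 1, and then recalculates
 * the per-file deny union.
 */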
static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
int i;
bool change = false;
for (i = 1; i < 4; i++) {
if ((i & deny) != i) {
change = true;
clear_deny(i, stp);
}
}
/* Recalculate per-file deny mode if there was a change */
if (change)
recalculate_deny_mode(stp->st_stid.sc_file);
}
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
int i;
struct nfs4_file *fp = stp->st_stid.sc_file;
if (fp && stp->st_deny_bmap != 0)
recalculate_deny_mode(fp);
for (i = 1; i < 4; i++) {
if (test_access(i, stp))
nfs4_file_put_access(stp->st_stid.sc_file, i);
clear_access(i, stp);
}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
kfree(sop->so_owner.data);
sop->so_ops->so_free(sop);
}
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
struct nfs4_client *clp = sop->so_client;
might_lock(&clp->cl_lock);
if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
return;
sop->so_ops->so_unhash(sop);
spin_unlock(&clp->cl_lock);
nfs4_free_stateowner(sop);
}
static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
return list_empty(&stp->st_perfile);
}
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_file *fp = stp->st_stid.sc_file;
lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
	if (nfs4_ol_stateid_unhashed(stp))
return false;
spin_lock(&fp->fi_lock);
list_del_init(&stp->st_perfile);
spin_unlock(&fp->fi_lock);
list_del(&stp->st_perstateowner);
return true;
}
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
put_clnt_odstate(stp->st_clnt_odstate);
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
WARN_ON(!list_empty(&stid->sc_cp_list));
kmem_cache_free(stateid_slab, stid);
}
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
struct nfsd_file *nf;
nf = find_any_file(stp->st_stid.sc_file);
if (nf) {
get_file(nf->nf_file);
filp_close(nf->nf_file, (fl_owner_t)lo);
nfsd_file_put(nf);
}
nfs4_free_ol_stateid(stid);
}
/*
* Put the persistent reference to an already unhashed generic stateid, while
* holding the cl_lock. If it's the last reference, then put it onto the
* reaplist for later destruction.
*/
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
struct nfs4_stid *s = &stp->st_stid;
struct nfs4_client *clp = s->sc_client;
lockdep_assert_held(&clp->cl_lock);
WARN_ON_ONCE(!list_empty(&stp->st_locks));
if (!refcount_dec_and_test(&s->sc_count)) {
wake_up_all(&close_wq);
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
list_add(&stp->st_locks, reaplist);
}
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
if (!unhash_ol_stateid(stp))
return false;
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
return true;
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_del_init(&lo->lo_owner.so_strhash);
}
/*
* Free a list of generic stateids that were collected earlier after being
* fully unhashed.
*/
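/*
 * Note that ->sc_free() may sleep -- nfs4_free_lock_stateid(), for
 * one, ends up in filp_close() -- which is why callers collect
 * stateids on a reaplist under cl_lock and call this only after
 * dropping the lock; hence the might_sleep() below.
 */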
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
struct nfs4_ol_stateid *stp;
struct nfs4_file *fp;
might_sleep();
while (!list_empty(reaplist)) {
stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
st_locks);
list_del(&stp->st_locks);
fp = stp->st_stid.sc_file;
stp->st_stid.sc_free(&stp->st_stid);
if (fp)
put_nfs4_file(fp);
}
}
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
struct list_head *reaplist)
{
struct nfs4_ol_stateid *stp;
lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
while (!list_empty(&open_stp->st_locks)) {
stp = list_entry(open_stp->st_locks.next,
struct nfs4_ol_stateid, st_locks);
WARN_ON(!unhash_lock_stateid(stp));
put_ol_stateid_locked(stp, reaplist);
}
}
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
if (!unhash_ol_stateid(stp))
return false;
release_open_stateid_locks(stp, reaplist);
return true;
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
LIST_HEAD(reaplist);
spin_lock(&stp->st_stid.sc_client->cl_lock);
if (unhash_open_stateid(stp, &reaplist))
put_ol_stateid_locked(stp, &reaplist);
spin_unlock(&stp->st_stid.sc_client->cl_lock);
free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
struct nfs4_client *clp = oo->oo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_del_init(&oo->oo_owner.so_strhash);
list_del_init(&oo->oo_perclient);
}
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
nfsd_net_id);
struct nfs4_ol_stateid *s;
spin_lock(&nn->client_lock);
s = oo->oo_last_closed_stid;
if (s) {
list_del_init(&oo->oo_close_lru);
oo->oo_last_closed_stid = NULL;
}
spin_unlock(&nn->client_lock);
if (s)
nfs4_put_stid(&s->st_stid);
}
static void release_openowner(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = oo->oo_owner.so_client;
struct list_head reaplist;
INIT_LIST_HEAD(&reaplist);
spin_lock(&clp->cl_lock);
unhash_openowner_locked(oo);
while (!list_empty(&oo->oo_owner.so_stateids)) {
stp = list_first_entry(&oo->oo_owner.so_stateids,
struct nfs4_ol_stateid, st_perstateowner);
if (unhash_open_stateid(stp, &reaplist))
put_ol_stateid_locked(stp, &reaplist);
}
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
release_last_closed_stateid(oo);
nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
return sid->sequence % SESSION_HASH_SIZE;
}
#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
u32 *ptr = (u32 *)(&sessionid->data[0]);
dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
* Bump the seqid on cstate->replay_owner, and clear replay_owner if it
* won't be used for replay.
*/
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
struct nfs4_stateowner *so = cstate->replay_owner;
if (nfserr == nfserr_replay_me)
return;
if (!seqid_mutating_err(ntohl(nfserr))) {
nfsd4_cstate_clear_replay(cstate);
return;
}
if (!so)
return;
if (so->so_is_open_owner)
release_last_closed_stateid(openowner(so));
so->so_seqid++;
}
static void
gen_sessionid(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd4_sessionid *sid;
sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
sid->clientid = clp->cl_clientid;
sid->sequence = current_sessionid++;
sid->reserved = 0;
}
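/*
 * Resulting 16-byte sessionid, viewed as four 32-bit words
 * (illustrative):
 *
 *   word 0: clientid.cl_boot  (server boot time)
 *   word 1: clientid.cl_id
 *   word 2: sequence          (global current_sessionid counter)
 *   word 3: reserved, always 0
 *
 * This is also the order in which dump_sessionid() prints the words.
 */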
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time. Therefore we can advertise a ca_maxresponsesize_cached
* value that is the number of bytes in our cache plus a few additional
* bytes. In order to stay on the safe side, and not promise more than
* we can cache, those additional bytes must be the minimum possible: 24
* bytes of rpc header (xid through accept state, with AUTH_NULL
* verifier), 12 for the compound header (with zero-length tag), and 44
* for the SEQUENCE op response:
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
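/*
 * Sanity check on the arithmetic: 24 + 12 + 44 = 80 bytes.
 * check_forechannel_attrs() below clamps the client's maxresp_cached
 * to NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ, which is exactly the
 * "bytes in our cache plus a few additional bytes" promised above.
 */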
static void
free_session_slots(struct nfsd4_session *ses)
{
int i;
for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
free_svc_cred(&ses->se_slots[i]->sl_cred);
kfree(ses->se_slots[i]);
}
}
/*
* We don't actually need to cache the rpc and session headers, so we
* can allocate a little less for each slot:
*/
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
u32 size;
if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
size = 0;
else
size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
return size + sizeof(struct nfsd4_slot);
}
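/*
 * Example (illustrative numbers): a client that negotiated
 * maxresp_cached = NFSD_MIN_HDR_SEQ_SZ + 1024 needs 1024 bytes of
 * reply cache per slot, so slot_bytes() returns 1024 +
 * sizeof(struct nfsd4_slot), the extra being the slot bookkeeping
 * that precedes the cached data in each allocation.
 */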
/*
* XXX: If we run out of reserved DRC memory we could (up to a point)
* re-negotiate active sessions and reduce their slot usage to make
* room for new connections. For now we just fail the create session.
*/
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
u32 slotsize = slot_bytes(ca);
u32 num = ca->maxreqs;
unsigned long avail, total_avail;
unsigned int scale_factor;
spin_lock(&nfsd_drc_lock);
if (nfsd_drc_max_mem > nfsd_drc_mem_used)
total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
else
/* We have handed out more space than we chose in
* set_max_drc() to allow. That isn't really a
* problem as long as that doesn't make us think we
* have lots more due to integer overflow.
*/
total_avail = 0;
avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
/*
* Never use more than a fraction of the remaining memory,
* unless it's the only way to give this client a slot.
* The chosen fraction is either 1/8 or 1/number of threads,
* whichever is smaller. This ensures there are adequate
* slots to support multiple clients per thread.
* Give the client one slot even if that would require
* over-allocation--it is better than failure.
*/
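	/*
	 * Worked example (hypothetical numbers): with 32 nfsd threads,
	 * total_avail = 64MB and slotsize = 2KB:
	 *
	 *   scale_factor = max(8, 32) = 32
	 *   avail is first capped at NFSD_MAX_MEM_PER_SESSION, then
	 *   clamped to [2KB, 64MB / 32 = 2MB]
	 *   num = avail / 2KB slots, at most ca->maxreqs, at least 1
	 */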
scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
avail = clamp_t(unsigned long, avail, slotsize,
total_avail/scale_factor);
num = min_t(int, num, avail / slotsize);
num = max_t(int, num, 1);
nfsd_drc_mem_used += num * slotsize;
spin_unlock(&nfsd_drc_lock);
return num;
}
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
int slotsize = slot_bytes(ca);
spin_lock(&nfsd_drc_lock);
nfsd_drc_mem_used -= slotsize * ca->maxreqs;
spin_unlock(&nfsd_drc_lock);
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
struct nfsd4_channel_attrs *battrs)
{
int numslots = fattrs->maxreqs;
int slotsize = slot_bytes(fattrs);
struct nfsd4_session *new;
int i;
BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
> PAGE_SIZE);
new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
if (!new)
return NULL;
/* allocate each struct nfsd4_slot and data cache in one piece */
for (i = 0; i < numslots; i++) {
new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
if (!new->se_slots[i])
goto out_free;
}
memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
return new;
out_free:
while (i--)
kfree(new->se_slots[i]);
kfree(new);
return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
svc_xprt_put(c->cn_xprt);
kfree(c);
}
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
struct nfs4_client *clp = c->cn_session->se_client;
trace_nfsd_cb_lost(clp);
spin_lock(&clp->cl_lock);
if (!list_empty(&c->cn_persession)) {
list_del(&c->cn_persession);
free_conn(c);
}
nfsd4_probe_callback(clp);
spin_unlock(&clp->cl_lock);
}
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
struct nfsd4_conn *conn;
conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
if (!conn)
return NULL;
svc_xprt_get(rqstp->rq_xprt);
conn->cn_xprt = rqstp->rq_xprt;
conn->cn_flags = flags;
INIT_LIST_HEAD(&conn->cn_xpt_user.list);
return conn;
}
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
conn->cn_session = ses;
list_add(&conn->cn_persession, &ses->se_conns);
}
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
spin_lock(&clp->cl_lock);
__nfsd4_hash_conn(conn, ses);
spin_unlock(&clp->cl_lock);
}
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
conn->cn_xpt_user.callback = nfsd4_conn_lost;
return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
int ret;
nfsd4_hash_conn(conn, ses);
ret = nfsd4_register_conn(conn);
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&conn->cn_xpt_user);
/* We may have gained or lost a callback channel: */
nfsd4_probe_callback_sync(ses->se_client);
}
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
u32 dir = NFS4_CDFC4_FORE;
if (cses->flags & SESSION4_BACK_CHAN)
dir |= NFS4_CDFC4_BACK;
return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
struct nfs4_client *clp = s->se_client;
struct nfsd4_conn *c;
spin_lock(&clp->cl_lock);
while (!list_empty(&s->se_conns)) {
c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
list_del_init(&c->cn_persession);
spin_unlock(&clp->cl_lock);
unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
free_conn(c);
spin_lock(&clp->cl_lock);
}
spin_unlock(&clp->cl_lock);
}
static void __free_session(struct nfsd4_session *ses)
{
free_session_slots(ses);
kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
nfsd4_del_conns(ses);
nfsd4_put_drc_mem(&ses->se_fchannel);
__free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
new->se_client = clp;
gen_sessionid(new);
INIT_LIST_HEAD(&new->se_conns);
new->se_cb_seq_nr = 1;
new->se_flags = cses->flags;
new->se_cb_prog = cses->callback_prog;
new->se_cb_sec = cses->cb_sec;
atomic_set(&new->se_ref, 0);
idx = hash_sessionid(&new->se_sessionid);
list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
spin_lock(&clp->cl_lock);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
{
struct sockaddr *sa = svc_addr(rqstp);
/*
* This is a little silly; with sessions there's no real
* use for the callback address. Use the peer address
* as a reasonable default for now, but consider fixing
* the rpc client not to require an address in the
* future:
*/
rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
struct nfsd4_session *elem;
int idx;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
dump_sessionid(__func__, sessionid);
idx = hash_sessionid(sessionid);
/* Search in the appropriate list */
list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
if (!memcmp(elem->se_sessionid.data, sessionid->data,
NFS4_MAX_SESSIONID_LEN)) {
return elem;
}
}
dprintk("%s: session not found\n", __func__);
return NULL;
}
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
__be32 *ret)
{
struct nfsd4_session *session;
__be32 status = nfserr_badsession;
session = __find_in_sessionid_hashtbl(sessionid, net);
if (!session)
goto out;
status = nfsd4_get_session_locked(session);
if (status)
session = NULL;
out:
*ret = status;
return session;
}
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
list_del(&ses->se_hash);
spin_lock(&ses->se_client->cl_lock);
list_del(&ses->se_perclnt);
spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
/*
* We're assuming the clid was not given out from a boot
* precisely 2^32 (about 136 years) before this one. That seems
* a safe assumption:
*/
if (clid->cl_boot == (u32)nn->boot_time)
return 0;
trace_nfsd_clid_stale(clid);
return 1;
}
/*
 * XXX Should we use a slab cache?
* This type of memory management is somewhat inefficient, but we use it
* anyway since SETCLIENTID is not a common operation.
*/
static struct nfs4_client *alloc_client(struct xdr_netobj name,
struct nfsd_net *nn)
{
struct nfs4_client *clp;
int i;
if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
return NULL;
}
clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
if (clp == NULL)
return NULL;
xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
if (clp->cl_name.data == NULL)
goto err_no_name;
clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&clp->cl_sessions);
idr_init(&clp->cl_stateids);
atomic_set(&clp->cl_rpc_users, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
clp->cl_state = NFSD4_ACTIVE;
atomic_inc(&nn->nfs4_client_count);
atomic_set(&clp->cl_delegs_in_recall, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
INIT_LIST_HEAD(&clp->async_copies);
spin_lock_init(&clp->async_lock);
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
err_no_hashtbl:
kfree(clp->cl_name.data);
err_no_name:
kmem_cache_free(client_slab, clp);
return NULL;
}
static void __free_client(struct kref *k)
{
struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
free_svc_cred(&clp->cl_cred);
kfree(clp->cl_ownerstr_hashtbl);
kfree(clp->cl_name.data);
kfree(clp->cl_nii_domain.data);
kfree(clp->cl_nii_name.data);
idr_destroy(&clp->cl_stateids);
kfree(clp->cl_ra);
kmem_cache_free(client_slab, clp);
}
static void drop_client(struct nfs4_client *clp)
{
kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}
static void
free_client(struct nfs4_client *clp)
{
while (!list_empty(&clp->cl_sessions)) {
struct nfsd4_session *ses;
ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
se_perclnt);
list_del(&ses->se_perclnt);
WARN_ON_ONCE(atomic_read(&ses->se_ref));
free_session(ses);
}
rpc_destroy_wait_queue(&clp->cl_cb_waitq);
if (clp->cl_nfsd_dentry) {
nfsd_client_rmdir(clp->cl_nfsd_dentry);
clp->cl_nfsd_dentry = NULL;
wake_up_all(&expiry_wq);
}
drop_client(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct nfsd4_session *ses;
lockdep_assert_held(&nn->client_lock);
/* Mark the client as expired! */
clp->cl_time = 0;
/* Make it invisible */
if (!list_empty(&clp->cl_idhash)) {
list_del_init(&clp->cl_idhash);
if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
else
rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
}
list_del_init(&clp->cl_lru);
spin_lock(&clp->cl_lock);
list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
list_del_init(&ses->se_hash);
spin_unlock(&clp->cl_lock);
}
static void
unhash_client(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
unhash_client_locked(clp);
spin_unlock(&nn->client_lock);
}
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
if (atomic_read(&clp->cl_rpc_users))
return nfserr_jukebox;
unhash_client_locked(clp);
return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
while (!list_empty(&reaplist)) {
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
destroy_unhashed_deleg(dp);
}
while (!list_empty(&clp->cl_revoked)) {
dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
nfs4_put_stid(&dp->dl_stid);
}
while (!list_empty(&clp->cl_openowners)) {
oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
for (i = 0; i < OWNER_HASH_SIZE; i++) {
struct nfs4_stateowner *so, *tmp;
list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
so_strhash) {
/* Should be no openowners at this point */
WARN_ON_ONCE(so->so_is_open_owner);
remove_blocked_locks(lockowner(so));
}
}
nfsd4_return_all_client_layouts(clp);
nfsd4_shutdown_copy(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
atomic_add_unless(&nn->nfs4_client_count, -1, 0);
nfsd4_dec_courtesy_client_count(nn, clp);
free_client(clp);
wake_up_all(&expiry_wq);
}
static void
destroy_client(struct nfs4_client *clp)
{
unhash_client(clp);
__destroy_client(clp);
}
static void inc_reclaim_complete(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (!nn->track_reclaim_completes)
return;
if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
return;
if (atomic_inc_return(&nn->nr_reclaim_complete) ==
nn->reclaim_str_hashtbl_size) {
printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
clp->net->ns.inum);
nfsd4_end_grace(nn);
}
}
static void expire_client(struct nfs4_client *clp)
{
unhash_client(clp);
nfsd4_client_record_remove(clp);
__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
memcpy(target->cl_verifier.data, source->data,
sizeof(target->cl_verifier.data));
}
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
target->cr_raw_principal = kstrdup(source->cr_raw_principal,
GFP_KERNEL);
target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
if ((source->cr_principal && !target->cr_principal) ||
(source->cr_raw_principal && !target->cr_raw_principal) ||
(source->cr_targ_princ && !target->cr_targ_princ))
return -ENOMEM;
target->cr_flavor = source->cr_flavor;
target->cr_uid = source->cr_uid;
target->cr_gid = source->cr_gid;
target->cr_group_info = source->cr_group_info;
get_group_info(target->cr_group_info);
target->cr_gss_mech = source->cr_gss_mech;
if (source->cr_gss_mech)
gss_mech_get(source->cr_gss_mech);
return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
if (o1->len < o2->len)
return -1;
if (o1->len > o2->len)
return 1;
return memcmp(o1->data, o2->data, o1->len);
}
static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}
static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}
static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
int i;
if (g1->ngroups != g2->ngroups)
return false;
for (i=0; i<g1->ngroups; i++)
if (!gid_eq(g1->gid[i], g2->gid[i]))
return false;
return true;
}
/*
* RFC 3530 language requires clid_inuse be returned when the
* "principal" associated with a requests differs from that previously
* used. We use uid, gid's, and gss principal string as our best
* approximation. We also don't want to allow non-gss use of a client
* established using gss: in theory cr_principal should catch that
* change, but in practice cr_principal can be null even in the gss case
* since gssd doesn't always pass down a principal string.
*/
static bool is_gss_cred(struct svc_cred *cr)
{
/* Is cr_flavor one of the gss "pseudoflavors"?: */
return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
if ((is_gss_cred(cr1) != is_gss_cred(cr2))
|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
return false;
/* XXX: check that cr_targ_princ fields match ? */
if (cr1->cr_principal == cr2->cr_principal)
return true;
if (!cr1->cr_principal || !cr2->cr_principal)
return false;
return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
u32 service;
if (!cr->cr_gss_mech)
return false;
service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
return service == RPC_GSS_SVC_INTEGRITY ||
service == RPC_GSS_SVC_PRIVACY;
}
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
if (!cl->cl_mach_cred)
return true;
if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
return false;
if (!svc_rqst_integrity_protected(rqstp))
return false;
if (cl->cl_cred.cr_raw_principal)
return 0 == strcmp(cl->cl_cred.cr_raw_principal,
cr->cr_raw_principal);
if (!cr->cr_principal)
return false;
return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
__be32 verf[2];
/*
 * This is opaque to the client, so there is no need to byte-swap.
 * Use __force to keep sparse happy.
*/
verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
verf[1] = (__force __be32)nn->clverifier_counter++;
memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
clp->cl_clientid.cl_boot = (u32)nn->boot_time;
clp->cl_clientid.cl_id = nn->clientid_counter++;
gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
struct nfs4_stid *ret;
ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
if (!ret || !ret->sc_type)
return NULL;
return ret;
}
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
struct nfs4_stid *s;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, t);
if (s != NULL) {
if (typemask & s->sc_type)
refcount_inc(&s->sc_count);
else
s = NULL;
}
spin_unlock(&cl->cl_lock);
return s;
}
static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{
struct nfsdfs_client *nc;
nc = get_nfsdfs_client(inode);
if (!nc)
return NULL;
return container_of(nc, struct nfs4_client, cl_nfsdfs);
}
static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
seq_printf(m, "\"");
seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
seq_printf(m, "\"");
}
static const char *cb_state2str(int state)
{
switch (state) {
case NFSD4_CB_UP:
return "UP";
case NFSD4_CB_UNKNOWN:
return "UNKNOWN";
case NFSD4_CB_DOWN:
return "DOWN";
case NFSD4_CB_FAULT:
return "FAULT";
}
return "UNDEFINED";
}
static int client_info_show(struct seq_file *m, void *v)
{
struct inode *inode = file_inode(m->file);
struct nfs4_client *clp;
u64 clid;
clp = get_nfsdfs_clp(inode);
if (!clp)
return -ENXIO;
memcpy(&clid, &clp->cl_clientid, sizeof(clid));
seq_printf(m, "clientid: 0x%llx\n", clid);
seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
if (clp->cl_state == NFSD4_COURTESY)
seq_puts(m, "status: courtesy\n");
else if (clp->cl_state == NFSD4_EXPIRABLE)
seq_puts(m, "status: expirable\n");
else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
seq_puts(m, "status: confirmed\n");
else
seq_puts(m, "status: unconfirmed\n");
seq_printf(m, "seconds from last renew: %lld\n",
ktime_get_boottime_seconds() - clp->cl_time);
seq_printf(m, "name: ");
seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
if (clp->cl_nii_domain.data) {
seq_printf(m, "Implementation domain: ");
seq_quote_mem(m, clp->cl_nii_domain.data,
clp->cl_nii_domain.len);
seq_printf(m, "\nImplementation name: ");
seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
drop_client(clp);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(client_info);
static void *states_start(struct seq_file *s, loff_t *pos)
__acquires(&clp->cl_lock)
{
struct nfs4_client *clp = s->private;
unsigned long id = *pos;
void *ret;
spin_lock(&clp->cl_lock);
ret = idr_get_next_ul(&clp->cl_stateids, &id);
*pos = id;
return ret;
}
static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{
struct nfs4_client *clp = s->private;
unsigned long id = *pos;
void *ret;
id++;
ret = idr_get_next_ul(&clp->cl_stateids, &id);
*pos = id;
return ret;
}
static void states_stop(struct seq_file *s, void *v)
__releases(&clp->cl_lock)
{
struct nfs4_client *clp = s->private;
spin_unlock(&clp->cl_lock);
}
static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{
seq_printf(s, "filename: \"%pD2\"", f->nf_file);
}
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
struct inode *inode = file_inode(f->nf_file);
seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev),
inode->i_ino);
}
static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
seq_printf(s, "owner: ");
seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{
seq_printf(s, "0x%.8x", stid->si_generation);
seq_printf(s, "%12phN", &stid->si_opaque);
}
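/*
 * The output is "0x", 8 hex digits of generation, then the 12 opaque
 * bytes (boot time, client id, stateid id) as 24 hex digits, e.g.
 * (hypothetical) 0x00000002aabbccdd0000000100000005.
 */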
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
struct nfs4_file *nf;
struct nfsd_file *file;
struct nfs4_stateowner *oo;
unsigned int access, deny;
if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
return 0; /* XXX: or SEQ_SKIP? */
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
spin_lock(&nf->fi_lock);
file = find_any_file_locked(nf);
if (!file)
goto out;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
seq_printf(s, ": { type: open, ");
access = bmap_to_share_mode(ols->st_access_bmap);
deny = bmap_to_share_mode(ols->st_deny_bmap);
seq_printf(s, "access: %s%s, ",
access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
seq_printf(s, "deny: %s%s, ",
deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
nfs4_show_superblock(s, file);
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
out:
spin_unlock(&nf->fi_lock);
return 0;
}
static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
struct nfs4_file *nf;
struct nfsd_file *file;
struct nfs4_stateowner *oo;
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
spin_lock(&nf->fi_lock);
file = find_any_file_locked(nf);
if (!file)
goto out;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
seq_printf(s, ": { type: lock, ");
/*
* Note: a lock stateid isn't really the same thing as a lock,
* it's the locking state held by one owner on a file, and there
* may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
*/
nfs4_show_superblock(s, file);
/* XXX: open stateid? */
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
out:
spin_unlock(&nf->fi_lock);
return 0;
}
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_delegation *ds;
struct nfs4_file *nf;
struct nfsd_file *file;
ds = delegstateid(st);
nf = st->sc_file;
spin_lock(&nf->fi_lock);
file = nf->fi_deleg_file;
if (!file)
goto out;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
seq_printf(s, ": { type: deleg, ");
/* Kinda dead code as long as we only support read delegs: */
seq_printf(s, "access: %s, ",
ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
/* XXX: lease time, whether it's being recalled. */
nfs4_show_superblock(s, file);
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, " }\n");
out:
spin_unlock(&nf->fi_lock);
return 0;
}
static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_layout_stateid *ls;
struct nfsd_file *file;
ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
file = ls->ls_file;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
seq_printf(s, ": { type: layout, ");
/* XXX: What else would be useful? */
nfs4_show_superblock(s, file);
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, " }\n");
return 0;
}
static int states_show(struct seq_file *s, void *v)
{
struct nfs4_stid *st = v;
switch (st->sc_type) {
case NFS4_OPEN_STID:
return nfs4_show_open(s, st);
case NFS4_LOCK_STID:
return nfs4_show_lock(s, st);
case NFS4_DELEG_STID:
return nfs4_show_deleg(s, st);
case NFS4_LAYOUT_STID:
return nfs4_show_layout(s, st);
default:
return 0; /* XXX: or SEQ_SKIP? */
}
/* XXX: copy stateids? */
}
static const struct seq_operations states_seq_ops = {
.start = states_start,
.next = states_next,
.stop = states_stop,
.show = states_show
};
static int client_states_open(struct inode *inode, struct file *file)
{
struct seq_file *s;
struct nfs4_client *clp;
int ret;
clp = get_nfsdfs_clp(inode);
if (!clp)
return -ENXIO;
ret = seq_open(file, &states_seq_ops);
if (ret)
return ret;
s = file->private_data;
s->private = clp;
return 0;
}
static int client_opens_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct nfs4_client *clp = m->private;
/* XXX: alternatively, we could get/drop in seq start/stop */
drop_client(clp);
return 0;
}
static const struct file_operations client_states_fops = {
.open = client_states_open,
.read = seq_read,
.llseek = seq_lseek,
.release = client_opens_release,
};
/*
* Normally we refuse to destroy clients that are in use, but here the
* administrator is telling us to just do it. We also want to wait
* so the caller has a guarantee that the client's locks are gone by
* the time the write returns:
*/
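/*
 * Usage sketch (path illustrative, assuming the usual nfsdfs mount
 * point):
 *
 *   echo expire > /proc/fs/nfsd/clients/<id>/ctl
 *
 * "expire\n" is the only command client_ctl_write() accepts.
 */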
static void force_expire_client(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
bool already_expired;
trace_nfsd_clid_admin_expired(&clp->cl_clientid);
spin_lock(&nn->client_lock);
clp->cl_time = 0;
spin_unlock(&nn->client_lock);
wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
spin_lock(&nn->client_lock);
already_expired = list_empty(&clp->cl_lru);
if (!already_expired)
unhash_client_locked(clp);
spin_unlock(&nn->client_lock);
if (!already_expired)
expire_client(clp);
else
wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}
static ssize_t client_ctl_write(struct file *file, const char __user *buf,
size_t size, loff_t *pos)
{
char *data;
struct nfs4_client *clp;
data = simple_transaction_get(file, buf, size);
if (IS_ERR(data))
return PTR_ERR(data);
if (size != 7 || 0 != memcmp(data, "expire\n", 7))
return -EINVAL;
clp = get_nfsdfs_clp(file_inode(file));
if (!clp)
return -ENXIO;
force_expire_client(clp);
drop_client(clp);
return 7;
}
static const struct file_operations client_ctl_fops = {
.write = client_ctl_write,
.release = simple_transaction_release,
};
static const struct tree_descr client_files[] = {
[0] = {"info", &client_info_fops, S_IRUSR},
[1] = {"states", &client_states_fops, S_IRUSR},
[2] = {"ctl", &client_ctl_fops, S_IWUSR},
[3] = {""},
};
static int
nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
struct rpc_task *task)
{
trace_nfsd_cb_recall_any_done(cb, task);
switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, 2 * HZ);
return 0;
default:
return 1;
}
}
static void
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
put_client_renew_locked(clp);
spin_unlock(&nn->client_lock);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
};
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
struct nfs4_client *clp;
struct sockaddr *sa = svc_addr(rqstp);
int ret;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct dentry *dentries[ARRAY_SIZE(client_files)];
clp = alloc_client(name, nn);
if (clp == NULL)
return NULL;
ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
if (ret) {
free_client(clp);
return NULL;
}
gen_clid(clp, nn);
kref_init(&clp->cl_nfsdfs.cl_ref);
nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
clp->cl_time = ktime_get_boottime_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
copy_verf(clp, verf);
memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
clp->cl_cb_session = NULL;
clp->net = net;
clp->cl_nfsd_dentry = nfsd_client_mkdir(
nn, &clp->cl_nfsdfs,
clp->cl_clientid.cl_id - nn->clientid_base,
client_files, dentries);
clp->cl_nfsd_info_dentry = dentries[0];
if (!clp->cl_nfsd_dentry) {
free_client(clp);
return NULL;
}
clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
if (!clp->cl_ra) {
free_client(clp);
return NULL;
}
clp->cl_ra_time = 0;
nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
NFSPROC4_CLNT_CB_RECALL_ANY);
return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct nfs4_client *clp;
while (*new) {
clp = rb_entry(*new, struct nfs4_client, cl_namenode);
parent = *new;
if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&new_clp->cl_namenode, parent, new);
rb_insert_color(&new_clp->cl_namenode, root);
}
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
int cmp;
struct rb_node *node = root->rb_node;
struct nfs4_client *clp;
while (node) {
clp = rb_entry(node, struct nfs4_client, cl_namenode);
cmp = compare_blob(&clp->cl_name, name);
if (cmp > 0)
node = node->rb_left;
else if (cmp < 0)
node = node->rb_right;
else
return clp;
}
return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
unsigned int idhashval;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
add_clp_to_name_tree(clp, &nn->unconf_name_tree);
idhashval = clientid_hashval(clp->cl_clientid.cl_id);
list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
renew_client_locked(clp);
}
static void
move_to_confirmed(struct nfs4_client *clp)
{
unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
add_clp_to_name_tree(clp, &nn->conf_name_tree);
set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
trace_nfsd_clid_confirmed(&clp->cl_clientid);
renew_client_locked(clp);
}
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid)) {
if ((bool)clp->cl_minorversion != sessions)
return NULL;
renew_client_locked(clp);
return clp;
}
}
return NULL;
}
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct list_head *tbl = nn->conf_id_hashtbl;
lockdep_assert_held(&nn->client_lock);
return find_client_in_id_table(tbl, clid, sessions);
}
static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct list_head *tbl = nn->unconf_id_hashtbl;
lockdep_assert_held(&nn->client_lock);
return find_client_in_id_table(tbl, clid, sessions);
}
static bool clp_used_exchangeid(struct nfs4_client *clp)
{
return clp->cl_exchange_flags != 0;
}
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
lockdep_assert_held(&nn->client_lock);
return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
lockdep_assert_held(&nn->client_lock);
return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
struct sockaddr *sa = svc_addr(rqstp);
u32 scopeid = rpc_get_scope_id(sa);
unsigned short expected_family;
/* Currently, we only support tcp and tcp6 for the callback channel */
if (se->se_callback_netid_len == 3 &&
!memcmp(se->se_callback_netid_val, "tcp", 3))
expected_family = AF_INET;
else if (se->se_callback_netid_len == 4 &&
!memcmp(se->se_callback_netid_val, "tcp6", 4))
expected_family = AF_INET6;
else
goto out_err;
conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
se->se_callback_addr_len,
(struct sockaddr *)&conn->cb_addr,
sizeof(conn->cb_addr));
if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
goto out_err;
if (conn->cb_addr.ss_family == AF_INET6)
((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
trace_nfsd_cb_args(clp, conn);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
conn->cb_addrlen = 0;
trace_nfsd_cb_nodelegs(clp);
return;
}
/*
* Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
*/
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
struct xdr_buf *buf = resp->xdr->buf;
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
dprintk("--> %s slot %p\n", __func__, slot);
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
slot->sl_opcnt = resp->opcnt;
slot->sl_status = resp->cstate.status;
free_svc_cred(&slot->sl_cred);
copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
if (!nfsd4_cache_this(resp)) {
slot->sl_flags &= ~NFSD4_SLOT_CACHED;
return;
}
slot->sl_flags |= NFSD4_SLOT_CACHED;
base = resp->cstate.data_offset;
slot->sl_datalen = buf->len - base;
if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
WARN(1, "%s: sessions DRC could not cache compound\n",
__func__);
return;
}
/*
* Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached-reply error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
*/
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
struct nfsd4_compoundres *resp)
{
struct nfsd4_op *op;
struct nfsd4_slot *slot = resp->cstate.slot;
/* Encode the replayed sequence operation */
op = &args->ops[resp->opcnt - 1];
nfsd4_encode_operation(resp, op);
if (slot->sl_flags & NFSD4_SLOT_CACHED)
return op->status;
if (args->opcnt == 1) {
/*
* The original operation wasn't a solo sequence--we
* always cache those--so this retry must not match the
* original:
*/
op->status = nfserr_seq_false_retry;
} else {
op = &args->ops[resp->opcnt++];
op->status = nfserr_retry_uncached_rep;
nfsd4_encode_operation(resp, op);
}
return op->status;
}
/*
* The sequence operation is not cached because we can use the slot and
* session values.
*/
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq)
{
struct nfsd4_slot *slot = resp->cstate.slot;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
__be32 status;
dprintk("--> %s slot %p\n", __func__, slot);
status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
if (status)
return status;
p = xdr_reserve_space(xdr, slot->sl_datalen);
if (!p) {
WARN_ON_ONCE(1);
return nfserr_serverfault;
}
xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
xdr_commit_encode(xdr);
resp->opcnt = slot->sl_opcnt;
return slot->sl_status;
}
/*
* Set the exchange_id flags returned by the server.
*/
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif
/* Referrals are supported, Migration is not. */
new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
/* set the wire flags to return to client. */
clid->flags = new->cl_exchange_flags;
}
static bool client_has_openowners(struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
if (!list_empty(&oo->oo_owner.so_stateids))
return true;
}
return false;
}
static bool client_has_state(struct nfs4_client *clp)
{
return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
|| !list_empty(&clp->cl_lo_states)
#endif
|| !list_empty(&clp->cl_delegations)
|| !list_empty(&clp->cl_sessions)
|| !list_empty(&clp->async_copies);
}
static __be32 copy_impl_id(struct nfs4_client *clp,
struct nfsd4_exchange_id *exid)
{
if (!exid->nii_domain.data)
return 0;
xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
if (!clp->cl_nii_domain.data)
return nfserr_jukebox;
xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
if (!clp->cl_nii_name.data)
return nfserr_jukebox;
clp->cl_nii_time = exid->nii_time;
return 0;
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_exchange_id *exid = &u->exchange_id;
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
char addr_str[INET6_ADDRSTRLEN];
nfs4_verifier verf = exid->verifier;
struct sockaddr *sa = svc_addr(rqstp);
bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
rpc_ntop(sa, addr_str, sizeof(addr_str));
dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
"ip_addr=%s flags %x, spa_how %u\n",
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
addr_str, exid->flags, exid->spa_how);
if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
return nfserr_inval;
new = create_client(exid->clname, rqstp, &verf);
if (new == NULL)
return nfserr_jukebox;
status = copy_impl_id(new, exid);
if (status)
goto out_nolock;
switch (exid->spa_how) {
case SP4_MACH_CRED:
exid->spo_must_enforce[0] = 0;
exid->spo_must_enforce[1] = (
1 << (OP_BIND_CONN_TO_SESSION - 32) |
1 << (OP_EXCHANGE_ID - 32) |
1 << (OP_CREATE_SESSION - 32) |
1 << (OP_DESTROY_SESSION - 32) |
1 << (OP_DESTROY_CLIENTID - 32));
exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
1 << (OP_OPEN_DOWNGRADE) |
1 << (OP_LOCKU) |
1 << (OP_DELEGRETURN));
exid->spo_must_allow[1] &= (
1 << (OP_TEST_STATEID - 32) |
1 << (OP_FREE_STATEID - 32));
if (!svc_rqst_integrity_protected(rqstp)) {
status = nfserr_inval;
goto out_nolock;
}
/*
* Sometimes userspace doesn't give us a principal.
* Which is a bug, really. Anyway, we can't enforce
* MACH_CRED in that case, better to give up now:
*/
if (!new->cl_cred.cr_principal &&
!new->cl_cred.cr_raw_principal) {
status = nfserr_serverfault;
goto out_nolock;
}
new->cl_mach_cred = true;
break;
case SP4_NONE:
break;
default: /* checked by xdr code */
WARN_ON_ONCE(1);
fallthrough;
case SP4_SSV:
status = nfserr_encr_alg_unsupp;
goto out_nolock;
}
/* Cases below refer to rfc 5661 section 18.35.4: */
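	/*
	 * Sketch of how those cases map onto the code below (paraphrase
	 * of the inline "case N" labels, not normative):
	 *
	 *   confirmed client found, update set:
	 *     creds mismatch  -> case 9, nfserr_perm
	 *     verf mismatch   -> case 8, nfserr_not_same
	 *     both match      -> case 6, return it confirmed
	 *   confirmed client found, update clear:
	 *     creds mismatch  -> case 3, new client (nfserr_clid_inuse
	 *                        if the old client still has state)
	 *     verfs match     -> case 2, reuse the confirmed client
	 *     verf mismatch   -> case 5, client rebooted, new client
	 *   no confirmed client:
	 *     update set      -> case 7, nfserr_noent
	 *     unconfirmed hit -> case 4, replace the unconfirmed client
	 *     neither         -> case 1, brand new owner ID
	 */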
spin_lock(&nn->client_lock);
conf = find_confirmed_client_by_name(&exid->clname, nn);
if (conf) {
bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
bool verfs_match = same_verf(&verf, &conf->cl_verifier);
if (update) {
if (!clp_used_exchangeid(conf)) { /* buggy client */
status = nfserr_inval;
goto out;
}
if (!nfsd4_mach_creds_match(conf, rqstp)) {
status = nfserr_wrong_cred;
goto out;
}
if (!creds_match) { /* case 9 */
status = nfserr_perm;
goto out;
}
if (!verfs_match) { /* case 8 */
status = nfserr_not_same;
goto out;
}
/* case 6 */
exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
trace_nfsd_clid_confirmed_r(conf);
goto out_copy;
}
if (!creds_match) { /* case 3 */
if (client_has_state(conf)) {
status = nfserr_clid_inuse;
trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
}
goto out_new;
}
if (verfs_match) { /* case 2 */
conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
trace_nfsd_clid_confirmed_r(conf);
goto out_copy;
}
/* case 5, client reboot */
trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
conf = NULL;
goto out_new;
}
if (update) { /* case 7 */
status = nfserr_noent;
goto out;
}
unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
if (unconf) /* case 4, possible retry or client restart */
unhash_client_locked(unconf);
/* case 1, new owner ID */
trace_nfsd_clid_fresh(new);
out_new:
if (conf) {
status = mark_client_expired_locked(conf);
if (status)
goto out;
trace_nfsd_clid_replaced(&conf->cl_clientid);
}
new->cl_minorversion = cstate->minorversion;
new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
add_to_unconfirmed(new);
swap(new, conf);
out_copy:
exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
exid->clientid.cl_id = conf->cl_clientid.cl_id;
exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
nfsd4_set_ex_flags(conf, exid);
dprintk("nfsd4_exchange_id seqid %d flags %x\n",
conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
status = nfs_ok;
out:
spin_unlock(&nn->client_lock);
out_nolock:
if (new)
expire_client(new);
if (unconf) {
trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
expire_client(unconf);
}
return status;
}
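/*
 * Illustrative outcomes of check_slot_seqid() (hypothetical
 * slot_seqid of 5):
 *
 *   seqid == 6, slot idle   -> nfs_ok (normal progression)
 *   seqid == 5, slot idle   -> nfserr_replay_cache (retransmission)
 *   seqid == 5, slot in use -> nfserr_jukebox (original still running)
 *   anything else           -> nfserr_seq_misordered
 *
 * The "slot_seqid + 1" comparison is unsigned 32-bit, so a slot_seqid
 * of 0xffffffff is correctly followed by seqid 0.
 */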
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
slot_seqid);
/* The slot is in use, and no response has been sent. */
if (slot_inuse) {
if (seqid == slot_seqid)
return nfserr_jukebox;
else
return nfserr_seq_misordered;
}
/* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
if (seqid == slot_seqid)
return nfserr_replay_cache;
return nfserr_seq_misordered;
}
/*
* Cache the create session result into the create session single DRC
 * slot cache by saving the decoded nfsd4_create_session argument
 * structure. sl_seqid has already been set.
* Do this for solo or embedded create session operations.
*/
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
struct nfsd4_clid_slot *slot, __be32 nfserr)
{
slot->sl_status = nfserr;
memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
struct nfsd4_clid_slot *slot)
{
memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
return slot->sl_status;
}
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
	1 +	/* zero-length tag: just the length word */ \
3 + /* version, opcount, opcode */ \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
	/* seqid, slotid, highest slotid, cachethis */ \
4 ) * sizeof(__be32))
#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2 + /* verifier: AUTH_NULL, length 0 */\
1 + /* status */ \
	1 +	/* zero-length tag: just the length word */ \
3 + /* opcount, opcode, opstatus*/ \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
	/* seqid, slotid, highest slotid, target highest slotid, status flags */ \
5 ) * sizeof(__be32))
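/*
 * Worked out: XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) is 4 words for the
 * 16-byte sessionid, so both minimums evaluate to 16 words, i.e. 64
 * bytes; channels advertising smaller maximums are rejected with
 * nfserr_toosmall below.
 */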
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
return nfserr_toosmall;
if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
return nfserr_toosmall;
ca->headerpadsz = 0;
ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
/*
	 * Note that decreasing the slot size below the client's request
	 * may make it difficult for the client to function correctly,
	 * whereas decreasing the number of slots will (just?) affect
	 * performance. When short on memory we therefore prefer to
	 * decrease the number of slots instead of their size. Clients
	 * that request larger slots than they need will get poor results.
* Note that we always allow at least one slot, because our
* accounting is soft and provides no guarantees either way.
*/
ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
return nfs_ok;
}
/*
* Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
* These are based on similar macros in linux/sunrpc/msg_prot.h .
*/
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
sizeof(__be32))
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
ca->headerpadsz = 0;
if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
return nfserr_toosmall;
if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
return nfserr_toosmall;
ca->maxresp_cached = 0;
if (ca->maxops < 2)
return nfserr_toosmall;
return nfs_ok;
}
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
switch (cbs->flavor) {
case RPC_AUTH_NULL:
case RPC_AUTH_UNIX:
return nfs_ok;
default:
/*
* GSS case: the spec doesn't allow us to return this
* error. But it also doesn't allow us not to support
* GSS.
* I'd rather this fail hard than return some error the
* client might think it can already handle:
*/
return nfserr_encr_alg_unsupp;
}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_create_session *cr_ses = &u->create_session;
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
struct nfsd4_session *new;
struct nfsd4_conn *conn;
struct nfsd4_clid_slot *cs_slot = NULL;
__be32 status = 0;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
return nfserr_inval;
status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
if (status)
return status;
status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
if (status)
return status;
status = check_backchannel_attrs(&cr_ses->back_channel);
if (status)
goto out_release_drc_mem;
status = nfserr_jukebox;
new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
if (!new)
goto out_release_drc_mem;
conn = alloc_conn_from_crses(rqstp, cr_ses);
if (!conn)
goto out_free_session;
spin_lock(&nn->client_lock);
unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
conf = find_confirmed_client(&cr_ses->clientid, true, nn);
WARN_ON_ONCE(conf && unconf);
if (conf) {
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(conf, rqstp))
goto out_free_conn;
cs_slot = &conf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
if (status == nfserr_replay_cache)
status = nfsd4_replay_create_session(cr_ses, cs_slot);
goto out_free_conn;
}
} else if (unconf) {
status = nfserr_clid_inuse;
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
trace_nfsd_clid_cred_mismatch(unconf, rqstp);
goto out_free_conn;
}
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(unconf, rqstp))
goto out_free_conn;
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
goto out_free_conn;
}
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
goto out_free_conn;
}
trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
conf = unconf;
} else {
status = nfserr_stale_clientid;
goto out_free_conn;
}
status = nfs_ok;
/* Persistent sessions are not supported */
cr_ses->flags &= ~SESSION4_PERSIST;
/* Upshifting from TCP to RDMA is not supported */
cr_ses->flags &= ~SESSION4_RDMA;
init_session(rqstp, new, conf, cr_ses);
nfsd4_get_session_locked(new);
memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
cs_slot->sl_seqid++;
cr_ses->seqid = cs_slot->sl_seqid;
/* cache solo and embedded create sessions under the client_lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
spin_unlock(&nn->client_lock);
if (conf == unconf)
fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
/* init connection and backchannel */
nfsd4_init_conn(rqstp, conn, new);
nfsd4_put_session(new);
if (old)
expire_client(old);
return status;
out_free_conn:
spin_unlock(&nn->client_lock);
free_conn(conn);
if (old)
expire_client(old);
out_free_session:
__free_session(new);
out_release_drc_mem:
nfsd4_put_drc_mem(&cr_ses->fore_channel);
return status;
}
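/*
* Map the connection-binding direction requested by BIND_CONN_TO_SESSION
* onto what we will actually do. Plain FORE and BACK are honored as-is;
* since we support binding in both directions, the "or both" variants are
* promoted to NFS4_CDFC4_BOTH. Anything else is invalid.
*/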
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
switch (*dir) {
case NFS4_CDFC4_FORE:
case NFS4_CDFC4_BACK:
return nfs_ok;
case NFS4_CDFC4_FORE_OR_BOTH:
case NFS4_CDFC4_BACK_OR_BOTH:
*dir = NFS4_CDFC4_BOTH;
return nfs_ok;
}
return nfserr_inval;
}
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
struct nfsd4_session *session = cstate->session;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
__be32 status;
status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
if (status)
return status;
spin_lock(&nn->client_lock);
session->se_cb_prog = bc->bc_cb_program;
session->se_cb_sec = bc->bc_cb_sec;
spin_unlock(&nn->client_lock);
nfsd4_probe_callback(session->se_client);
return nfs_ok;
}
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
struct nfsd4_conn *c;
list_for_each_entry(c, &s->se_conns, cn_persession) {
if (c->cn_xprt == xpt) {
return c;
}
}
return NULL;
}
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
struct nfs4_client *clp = session->se_client;
struct svc_xprt *xpt = rqst->rq_xprt;
struct nfsd4_conn *c;
__be32 status;
/* Following the last paragraph of RFC 5661 Section 18.34.3: */
spin_lock(&clp->cl_lock);
c = __nfsd4_find_conn(xpt, session);
if (!c)
status = nfserr_noent;
else if (req == c->cn_flags)
status = nfs_ok;
else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
c->cn_flags != NFS4_CDFC4_BACK)
status = nfs_ok;
else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
c->cn_flags != NFS4_CDFC4_FORE)
status = nfs_ok;
else
status = nfserr_inval;
spin_unlock(&clp->cl_lock);
if (status == nfs_ok && conn)
*conn = c;
return status;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
__be32 status;
struct nfsd4_conn *conn;
struct nfsd4_session *session;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (!nfsd4_last_compound_op(rqstp))
return nfserr_not_only_op;
spin_lock(&nn->client_lock);
session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
spin_unlock(&nn->client_lock);
if (!session)
goto out_no_session;
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
status = nfsd4_match_existing_connection(rqstp, session,
bcts->dir, &conn);
if (status == nfs_ok) {
if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
bcts->dir == NFS4_CDFC4_BACK)
conn->cn_flags |= NFS4_CDFC4_BACK;
nfsd4_probe_callback(session->se_client);
goto out;
}
if (status == nfserr_inval)
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
goto out;
conn = alloc_conn(rqstp, bcts->dir);
status = nfserr_jukebox;
if (!conn)
goto out;
nfsd4_init_conn(rqstp, conn, session);
status = nfs_ok;
out:
nfsd4_put_session(session);
out_no_session:
return status;
}
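/*
* Does this compound execute under the same session it names? Used by
* DESTROY_SESSION to detect a session destroying itself, which is only
* permitted as the final op of the compound.
*/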
static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
if (!cstate->session)
return false;
return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
struct nfsd4_session *ses;
__be32 status;
int ref_held_by_me = 0;
struct net *net = SVC_NET(r);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
status = nfserr_not_only_op;
if (nfsd4_compound_in_session(cstate, sessionid)) {
if (!nfsd4_last_compound_op(r))
goto out;
ref_held_by_me++;
}
dump_sessionid(__func__, sessionid);
spin_lock(&nn->client_lock);
ses = find_in_sessionid_hashtbl(sessionid, net, &status);
if (!ses)
goto out_client_lock;
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(ses->se_client, r))
goto out_put_session;
status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
if (status)
goto out_put_session;
unhash_session(ses);
spin_unlock(&nn->client_lock);
nfsd4_probe_callback_sync(ses->se_client);
spin_lock(&nn->client_lock);
status = nfs_ok;
out_put_session:
nfsd4_put_session_locked(ses);
out_client_lock:
spin_unlock(&nn->client_lock);
out:
return status;
}
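/*
* Implicitly bind the connection carrying this SEQUENCE to the session if
* it isn't bound already. Clients that negotiated machine-credential
* protection (cl_mach_cred, from SP4_MACH_CRED) must bind connections
* explicitly with BIND_CONN_TO_SESSION, so for them an unbound connection
* is an error. Either way "new" is consumed here: hashed into the
* session's connection list or freed.
*/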
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd4_conn *c;
__be32 status = nfs_ok;
int ret;
spin_lock(&clp->cl_lock);
c = __nfsd4_find_conn(new->cn_xprt, ses);
if (c)
goto out_free;
status = nfserr_conn_not_bound_to_session;
if (clp->cl_mach_cred)
goto out_free;
__nfsd4_hash_conn(new, ses);
spin_unlock(&clp->cl_lock);
ret = nfsd4_register_conn(new);
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&new->cn_xpt_user);
return nfs_ok;
out_free:
spin_unlock(&clp->cl_lock);
free_conn(new);
return status;
}
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
struct nfsd4_compoundargs *args = rqstp->rq_argp;
return args->opcnt > session->se_fchannel.maxops;
}
static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
struct nfsd4_session *session)
{
struct xdr_buf *xb = &rqstp->rq_arg;
return xb->len > session->se_fchannel.maxreq_sz;
}
static bool replay_matches_cache(struct svc_rqst *rqstp,
struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
(bool)seq->cachethis)
return false;
/*
* If there's an error then the reply can have fewer ops than
* the call.
*/
if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
return false;
/*
* But if we cached a reply with *more* ops than the call you're
* sending us now, then this new call is clearly not really a
* replay of the old one:
*/
if (slot->sl_opcnt > argp->opcnt)
return false;
/* This is the only check explicitly called for by the spec: */
if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
return false;
/*
* There may be more comparisons we could actually do, but the
* spec doesn't require us to catch every case where the calls
* don't match (that would require caching the call as well as
* the reply), so we don't bother.
*/
return true;
}
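/*
* SEQUENCE implements NFSv4.1+ exactly-once semantics. Each session slot
* carries a seqid: re-sending a slot's current seqid is a retransmission,
* answered from the slot's reply cache; seqid + 1 is a new request that
* bumps the slot seqid and marks the slot in use. The client's cachethis
* flag says whether the full reply must be cached, which bounds how large
* a reply we can promise (maxresp_cached vs. maxresp_sz).
*/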
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_sequence *seq = &u->sequence;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct xdr_stream *xdr = resp->xdr;
struct nfsd4_session *session;
struct nfs4_client *clp;
struct nfsd4_slot *slot;
struct nfsd4_conn *conn;
__be32 status;
int buflen;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (resp->opcnt != 1)
return nfserr_sequence_pos;
/*
* Will be either used or freed by nfsd4_sequence_check_conn
* below.
*/
conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
if (!conn)
return nfserr_jukebox;
spin_lock(&nn->client_lock);
session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
if (!session)
goto out_no_session;
clp = session->se_client;
status = nfserr_too_many_ops;
if (nfsd4_session_too_many_ops(rqstp, session))
goto out_put_session;
status = nfserr_req_too_big;
if (nfsd4_request_too_big(rqstp, session))
goto out_put_session;
status = nfserr_badslot;
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out_put_session;
slot = session->se_slots[seq->slotid];
dprintk("%s: slotid %d\n", __func__, seq->slotid);
/* We do not negotiate the number of slots yet, so set seq->maxslots
* to the session maxreqs; maxslots is then used to encode both
* sr_highest_slotid and sr_target_slotid in the reply. */
seq->maxslots = session->se_fchannel.maxreqs;
status = check_slot_seqid(seq->seqid, slot->sl_seqid,
slot->sl_flags & NFSD4_SLOT_INUSE);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
goto out_put_session;
status = nfserr_seq_false_retry;
if (!replay_matches_cache(rqstp, seq, slot))
goto out_put_session;
cstate->slot = slot;
cstate->session = session;
cstate->clp = clp;
/* Return the cached reply status and set cstate->status
* for nfsd4_proc_compound processing */
status = nfsd4_replay_cache_entry(resp, seq);
cstate->status = nfserr_replay_cache;
goto out;
}
if (status)
goto out_put_session;
status = nfsd4_sequence_check_conn(conn, session);
conn = NULL;
if (status)
goto out_put_session;
buflen = (seq->cachethis) ?
session->se_fchannel.maxresp_cached :
session->se_fchannel.maxresp_sz;
status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
nfserr_rep_too_big;
if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
goto out_put_session;
svc_reserve(rqstp, buflen);
status = nfs_ok;
/* Success! bump slot seqid */
slot->sl_seqid = seq->seqid;
slot->sl_flags |= NFSD4_SLOT_INUSE;
if (seq->cachethis)
slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
else
slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
cstate->slot = slot;
cstate->session = session;
cstate->clp = clp;
out:
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
break;
case NFSD4_CB_FAULT:
seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
break;
default:
seq->status_flags = 0;
}
if (!list_empty(&clp->cl_revoked))
seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
if (conn)
free_conn(conn);
spin_unlock(&nn->client_lock);
return status;
out_put_session:
nfsd4_put_session_locked(session);
goto out_no_session;
}
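/*
* Called once compound processing completes: unless this was a replay,
* cache the reply in the slot and release the slot, then drop the session
* reference taken in nfsd4_sequence(). In the sessionless (v4.0) case,
* drop the client renewal reference instead.
*/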
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
struct nfsd4_compound_state *cs = &resp->cstate;
if (nfsd4_has_session(cs)) {
if (cs->status != nfserr_replay_cache) {
nfsd4_store_cache_entry(resp);
cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
}
/* Drop session reference that was taken in nfsd4_sequence() */
nfsd4_put_session(cs->session);
} else if (cs->clp)
put_client_renew(cs->clp);
}
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
struct nfs4_client *conf, *unconf;
struct nfs4_client *clp = NULL;
__be32 status = 0;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
spin_lock(&nn->client_lock);
unconf = find_unconfirmed_client(&dc->clientid, true, nn);
conf = find_confirmed_client(&dc->clientid, true, nn);
WARN_ON_ONCE(conf && unconf);
if (conf) {
if (client_has_state(conf)) {
status = nfserr_clientid_busy;
goto out;
}
status = mark_client_expired_locked(conf);
if (status)
goto out;
clp = conf;
} else if (unconf)
clp = unconf;
else {
status = nfserr_stale_clientid;
goto out;
}
if (!nfsd4_mach_creds_match(clp, rqstp)) {
clp = NULL;
status = nfserr_wrong_cred;
goto out;
}
trace_nfsd_clid_destroyed(&clp->cl_clientid);
unhash_client_locked(clp);
out:
spin_unlock(&nn->client_lock);
if (clp)
expire_client(clp);
return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
struct nfs4_client *clp = cstate->clp;
__be32 status = 0;
if (rc->rca_one_fs) {
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
/*
* We don't take advantage of the rca_one_fs case.
* That's OK, it's optional, we can safely ignore it.
*/
return nfs_ok;
}
status = nfserr_complete_already;
if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
goto out;
status = nfserr_stale_clientid;
if (is_client_expired(clp))
/*
* The following error isn't really legal.
* But we only get here if the client has just explicitly
* destroyed itself. Surely it no longer cares what
* error it gets back on an operation for the dead
* client.
*/
goto out;
status = nfs_ok;
trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
nfsd4_client_record_create(clp);
inc_reclaim_complete(clp);
out:
return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setclientid *setclid = &u->setclientid;
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
new = create_client(clname, rqstp, &clverifier);
if (new == NULL)
return nfserr_jukebox;
spin_lock(&nn->client_lock);
conf = find_confirmed_client_by_name(&clname, nn);
if (conf && client_has_state(conf)) {
status = nfserr_clid_inuse;
if (clp_used_exchangeid(conf))
goto out;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
}
}
unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
unhash_client_locked(unconf);
if (conf) {
if (same_verf(&conf->cl_verifier, &clverifier)) {
copy_clid(new, conf);
gen_confirm(new, nn);
} else
trace_nfsd_clid_verf_mismatch(conf, rqstp,
&clverifier);
} else
trace_nfsd_clid_fresh(new);
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
new = NULL;
status = nfs_ok;
out:
spin_unlock(&nn->client_lock);
if (new)
free_client(new);
if (unconf) {
trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
expire_client(unconf);
}
return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setclientid_confirm *setclientid_confirm =
&u->setclientid_confirm;
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
clientid_t * clid = &setclientid_confirm->sc_clientid;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
spin_lock(&nn->client_lock);
conf = find_confirmed_client(clid, false, nn);
unconf = find_unconfirmed_client(clid, false, nn);
/*
* We try hard to give out unique clientids, so if we get an
* attempt to confirm the same clientid with a different cred,
* the client may be buggy; this should never happen.
*
* Nevertheless, RFC 7530 recommends INUSE for this case:
*/
status = nfserr_clid_inuse;
if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
trace_nfsd_clid_cred_mismatch(unconf, rqstp);
goto out;
}
if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
}
if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
if (conf && same_verf(&confirm, &conf->cl_confirm)) {
status = nfs_ok;
} else
status = nfserr_stale_clientid;
goto out;
}
status = nfs_ok;
if (conf) {
old = unconf;
unhash_client_locked(old);
nfsd4_change_callback(conf, &unconf->cl_cb_conn);
} else {
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = nfserr_clid_inuse;
if (client_has_state(old)
&& !same_creds(&unconf->cl_cred,
&old->cl_cred)) {
old = NULL;
goto out;
}
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
goto out;
}
trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
conf = unconf;
}
get_client_locked(conf);
spin_unlock(&nn->client_lock);
if (conf == unconf)
fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
nfsd4_probe_callback(conf);
spin_lock(&nn->client_lock);
put_client_renew_locked(conf);
out:
spin_unlock(&nn->client_lock);
if (old)
expire_client(old);
return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
/* OPEN Share state helper functions */
static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
{
refcount_set(&fp->fi_ref, 1);
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
fp->fi_deleg_file = NULL;
fp->fi_had_conflict = false;
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
fp->fi_aliased = false;
fp->fi_inode = d_inode(fh->fh_dentry);
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&fp->fi_lo_states);
atomic_set(&fp->fi_lo_recalls, 0);
#endif
}
void
nfsd4_free_slabs(void)
{
kmem_cache_destroy(client_slab);
kmem_cache_destroy(openowner_slab);
kmem_cache_destroy(lockowner_slab);
kmem_cache_destroy(file_slab);
kmem_cache_destroy(stateid_slab);
kmem_cache_destroy(deleg_slab);
kmem_cache_destroy(odstate_slab);
}
int
nfsd4_init_slabs(void)
{
client_slab = kmem_cache_create("nfsd4_clients",
sizeof(struct nfs4_client), 0, 0, NULL);
if (client_slab == NULL)
goto out;
openowner_slab = kmem_cache_create("nfsd4_openowners",
sizeof(struct nfs4_openowner), 0, 0, NULL);
if (openowner_slab == NULL)
goto out_free_client_slab;
lockowner_slab = kmem_cache_create("nfsd4_lockowners",
sizeof(struct nfs4_lockowner), 0, 0, NULL);
if (lockowner_slab == NULL)
goto out_free_openowner_slab;
file_slab = kmem_cache_create("nfsd4_files",
sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_free_lockowner_slab;
stateid_slab = kmem_cache_create("nfsd4_stateids",
sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_free_file_slab;
deleg_slab = kmem_cache_create("nfsd4_delegations",
sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_free_stateid_slab;
odstate_slab = kmem_cache_create("nfsd4_odstate",
sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
if (odstate_slab == NULL)
goto out_free_deleg_slab;
return 0;
out_free_deleg_slab:
kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
kmem_cache_destroy(stateid_slab);
out_free_file_slab:
kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
kmem_cache_destroy(openowner_slab);
out_free_client_slab:
kmem_cache_destroy(client_slab);
out:
return -ENOMEM;
}
static unsigned long
nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
int count;
struct nfsd_net *nn = container_of(shrink,
struct nfsd_net, nfsd_client_shrinker);
count = atomic_read(&nn->nfsd_courtesy_clients);
if (!count)
count = atomic_long_read(&num_delegations);
if (count)
queue_work(laundry_wq, &nn->nfsd_shrinker_work);
return (unsigned long)count;
}
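/*
* All actual reclaim of courtesy clients and delegations happens
* asynchronously in nfsd_shrinker_work, queued by the count callback
* above, so the scan callback has nothing to do synchronously.
*/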
static unsigned long
nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
return SHRINK_STOP;
}
void
nfsd4_init_leases_net(struct nfsd_net *nn)
{
struct sysinfo si;
u64 max_clients;
nn->nfsd4_lease = 90; /* default lease time */
nn->nfsd4_grace = 90;
nn->somebody_reclaimed = false;
nn->track_reclaim_completes = false;
nn->clverifier_counter = get_random_u32();
nn->clientid_base = get_random_u32();
nn->clientid_counter = nn->clientid_base + 1;
nn->s2s_cp_cl_id = nn->clientid_counter++;
atomic_set(&nn->nfs4_client_count, 0);
si_meminfo(&si);
max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
max_clients *= NFS4_CLIENTS_PER_GB;
nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
atomic_set(&nn->nfsd_courtesy_clients, 0);
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
mutex_init(&rp->rp_mutex);
}
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
struct nfs4_stateowner *so)
{
if (!nfsd4_has_session(cstate)) {
mutex_lock(&so->so_replay.rp_mutex);
cstate->replay_owner = nfs4_get_stateowner(so);
}
}
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
struct nfs4_stateowner *so = cstate->replay_owner;
if (so != NULL) {
cstate->replay_owner = NULL;
mutex_unlock(&so->so_replay.rp_mutex);
nfs4_put_stateowner(so);
}
}
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
struct nfs4_stateowner *sop;
sop = kmem_cache_alloc(slab, GFP_KERNEL);
if (!sop)
return NULL;
xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
if (!sop->so_owner.data) {
kmem_cache_free(slab, sop);
return NULL;
}
INIT_LIST_HEAD(&sop->so_stateids);
sop->so_client = clp;
init_nfs4_replay(&sop->so_replay);
atomic_set(&sop->so_count, 1);
return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
lockdep_assert_held(&clp->cl_lock);
list_add(&oo->oo_owner.so_strhash,
&clp->cl_ownerstr_hashtbl[strhashval]);
list_add(&oo->oo_perclient, &clp->cl_openowners);
}
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
unhash_openowner_locked(openowner(so));
}
static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
struct nfs4_openowner *oo = openowner(so);
kmem_cache_free(openowner_slab, oo);
}
static const struct nfs4_stateowner_operations openowner_ops = {
.so_unhash = nfs4_unhash_openowner,
.so_free = nfs4_free_openowner,
};
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_ol_stateid *local, *ret = NULL;
struct nfs4_openowner *oo = open->op_openowner;
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
if (local->st_stateowner != &oo->oo_owner)
continue;
if (local->st_stid.sc_type == NFS4_OPEN_STID) {
ret = local;
refcount_inc(&ret->st_stid.sc_count);
break;
}
}
return ret;
}
static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
__be32 ret = nfs_ok;
switch (s->sc_type) {
default:
break;
case 0:
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
ret = nfserr_bad_stateid;
break;
case NFS4_REVOKED_DELEG_STID:
ret = nfserr_deleg_revoked;
}
return ret;
}
/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
__be32 ret;
mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
ret = nfsd4_verify_open_stid(&stp->st_stid);
if (ret != nfs_ok)
mutex_unlock(&stp->st_mutex);
return ret;
}
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_ol_stateid *stp;
for (;;) {
spin_lock(&fp->fi_lock);
stp = nfsd4_find_existing_open(fp, open);
spin_unlock(&fp->fi_lock);
if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
break;
nfs4_put_stid(&stp->st_stid);
}
return stp;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
{
struct nfs4_client *clp = cstate->clp;
struct nfs4_openowner *oo, *ret;
oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
if (!oo)
return NULL;
oo->oo_owner.so_ops = &openowner_ops;
oo->oo_owner.so_is_open_owner = 1;
oo->oo_owner.so_seqid = open->op_seqid;
oo->oo_flags = 0;
if (nfsd4_has_session(cstate))
oo->oo_flags |= NFS4_OO_CONFIRMED;
oo->oo_time = 0;
oo->oo_last_closed_stid = NULL;
INIT_LIST_HEAD(&oo->oo_close_lru);
spin_lock(&clp->cl_lock);
ret = find_openstateowner_str_locked(strhashval, open, clp);
if (ret == NULL) {
hash_openowner(oo, clp, strhashval);
ret = oo;
} else
nfs4_free_stateowner(&oo->oo_owner);
spin_unlock(&clp->cl_lock);
return ret;
}
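/*
* Attach the preallocated open stateid (open->op_stp) to the file. Both
* cl_lock and fi_lock are held so that searching for an existing open by
* this owner and inserting the new stateid are atomic; if another OPEN
* raced in first, use its stateid instead, retrying if a concurrent CLOSE
* invalidates it before we can lock it.
*/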
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_ol_stateid *retstp = NULL;
struct nfs4_ol_stateid *stp;
stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
open->op_stp = NULL;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
stp->st_openstp = NULL;
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
/* Handle races with CLOSE */
if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
nfs4_put_stid(&retstp->st_stid);
goto retry;
}
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
}
return stp;
}
/*
* In the 4.0 case we need to keep the owners around a little while to
* handle CLOSE replay. We still need to release any file access held by
* them before returning, however.
*/
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
struct nfs4_ol_stateid *last;
struct nfs4_openowner *oo = openowner(s->st_stateowner);
struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
nfsd_net_id);
dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
/*
* We know that we hold one reference via nfsd4_close, and another
* "persistent" reference for the client. If the refcount is higher
* than 2, then there are still calls in progress that are using this
* stateid. We can't put the sc_file reference until they are finished.
* Wait for the refcount to drop to 2. Since it has been unhashed,
* there should be no danger of the refcount going back up again at
* this point.
*/
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
if (s->st_stid.sc_file) {
put_nfs4_file(s->st_stid.sc_file);
s->st_stid.sc_file = NULL;
}
spin_lock(&nn->client_lock);
last = oo->oo_last_closed_stid;
oo->oo_last_closed_stid = s;
list_move_tail(&oo->oo_close_lru, &nn->close_lru);
oo->oo_time = ktime_get_boottime_seconds();
spin_unlock(&nn->client_lock);
if (last)
nfs4_put_stid(&last->st_stid);
}
static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_lookup(const struct svc_fh *fhp)
{
struct inode *inode = d_inode(fhp->fh_dentry);
struct rhlist_head *tmp, *list;
struct nfs4_file *fi;
rcu_read_lock();
list = rhltable_lookup(&nfs4_file_rhltable, &inode,
nfs4_file_rhash_params);
rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
if (refcount_inc_not_zero(&fi->fi_ref)) {
rcu_read_unlock();
return fi;
}
}
}
rcu_read_unlock();
return NULL;
}
/*
* On hash insertion, identify entries with the same inode but
* distinct filehandles. They will all be on the list returned
* by rhltable_lookup().
*
* inode->i_lock prevents racing insertions from adding an entry
* for the same inode/fhp pair twice.
*/
static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
{
struct inode *inode = d_inode(fhp->fh_dentry);
struct rhlist_head *tmp, *list;
struct nfs4_file *ret = NULL;
bool alias_found = false;
struct nfs4_file *fi;
int err;
rcu_read_lock();
spin_lock(&inode->i_lock);
list = rhltable_lookup(&nfs4_file_rhltable, &inode,
nfs4_file_rhash_params);
rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
if (refcount_inc_not_zero(&fi->fi_ref))
ret = fi;
} else
fi->fi_aliased = alias_found = true;
}
if (ret)
goto out_unlock;
nfsd4_file_init(fhp, new);
err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
nfs4_file_rhash_params);
if (err)
goto out_unlock;
new->fi_aliased = alias_found;
ret = new;
out_unlock:
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ret;
}
static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
{
rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
nfs4_file_rhash_params);
}
/*
* Called to check for deny-mode conflicts when a READ arrives with an
* all-zeros stateid, or a WRITE arrives with an all-zeros or all-ones
* stateid.
*/
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
struct nfs4_file *fp;
__be32 ret = nfs_ok;
fp = nfsd4_file_hash_lookup(current_fh);
if (!fp)
return ret;
/* Check for conflicting share reservations */
spin_lock(&fp->fi_lock);
if (fp->fi_share_deny & deny_type)
ret = nfserr_locked;
spin_unlock(&fp->fi_lock);
put_nfs4_file(fp);
return ret;
}
static bool nfsd4_deleg_present(const struct inode *inode)
{
struct file_lock_context *ctx = locks_inode_context(inode);
return ctx && !list_empty_careful(&ctx->flc_lease);
}
/**
* nfsd_wait_for_delegreturn - wait for delegations to be returned
* @rqstp: the RPC transaction being executed
* @inode: in-core inode of the file being waited for
*
* The timeout prevents deadlock if all nfsd threads happen to be
* tied up waiting for returning delegations.
*
* Return values:
* %true: delegation was returned
* %false: timed out waiting for delegreturn
*/
bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
{
long __maybe_unused timeo;
timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
NFSD_DELEGRETURN_TIMEOUT);
trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
return timeo > 0;
}
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
nfsd_net_id);
block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
/*
* We can't do this in nfsd_break_deleg_cb because it is
* already holding inode->i_lock.
*
* If the dl_time != 0, then we know that it has already been
* queued for a lease break. Don't queue it again.
*/
spin_lock(&state_lock);
if (delegation_hashed(dp) && dp->dl_time == 0) {
dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
spin_unlock(&state_lock);
}
static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
struct rpc_task *task)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
return 1;
switch (task->tk_status) {
case 0:
return 1;
case -NFS4ERR_DELAY:
rpc_delay(task, 2 * HZ);
return 0;
case -EBADHANDLE:
case -NFS4ERR_BAD_STATEID:
/*
* Race: client probably got cb_recall before open reply
* granting delegation.
*/
if (dp->dl_retries--) {
rpc_delay(task, 2 * HZ);
return 0;
}
fallthrough;
default:
return 1;
}
}
static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
nfs4_put_stid(&dp->dl_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
.prepare = nfsd4_cb_recall_prepare,
.done = nfsd4_cb_recall_done,
.release = nfsd4_cb_recall_release,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
* callback (and since the lease code is serialized by the
* flc_lock) we know the server hasn't removed the lease yet, and
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
}
/* Called from break_lease() with flc_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
struct nfs4_client *clp = dp->dl_stid.sc_client;
struct nfsd_net *nn;
trace_nfsd_cb_recall(&dp->dl_stid);
dp->dl_recalled = true;
atomic_inc(&clp->cl_delegs_in_recall);
if (try_to_expire_client(clp)) {
nn = net_generic(clp->net, nfsd_net_id);
mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
}
/*
* We don't want the locks code to time out the lease for us;
* we'll remove it ourselves if a delegation isn't returned
* in time:
*/
fl->fl_break_time = 0;
spin_lock(&fp->fi_lock);
fp->fi_had_conflict = true;
nfsd_break_one_deleg(dp);
spin_unlock(&fp->fi_lock);
return false;
}
/**
* nfsd_breaker_owns_lease - Check if lease conflict was resolved
* @fl: Lock state to check
*
* Return values:
* %true: Lease conflict was resolved
* %false: Lease conflict was not resolved.
*/
static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
struct nfs4_delegation *dl = fl->fl_owner;
struct svc_rqst *rqst;
struct nfs4_client *clp;
if (!i_am_nfsd())
return false;
rqst = kthread_data(current);
/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
return false;
clp = *(rqst->rq_lease_breaker);
return dl->dl_stid.sc_client == clp;
}
static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
struct list_head *dispose)
{
struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
struct nfs4_client *clp = dp->dl_stid.sc_client;
if (arg & F_UNLCK) {
if (dp->dl_recalled)
atomic_dec(&clp->cl_delegs_in_recall);
return lease_modify(onlist, arg, dispose);
} else
return -EAGAIN;
}
static const struct lock_manager_operations nfsd_lease_mng_ops = {
.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
};
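/*
* NFSv4.0 at-most-once semantics: each stateowner's seqid must advance by
* exactly one per state-changing op. For example, with so_seqid == 4, a
* request carrying seqid 4 is new, seqid 3 is a replay to answer from the
* owner's replay cache, and anything else is BAD_SEQID. Sessions (v4.1+)
* provide this guarantee themselves, so the check is skipped.
*/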
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
if (nfsd4_has_session(cstate))
return nfs_ok;
if (seqid == so->so_seqid - 1)
return nfserr_replay_me;
if (seqid == so->so_seqid)
return nfs_ok;
return nfserr_bad_seqid;
}
static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
struct nfsd_net *nn)
{
struct nfs4_client *found;
spin_lock(&nn->client_lock);
found = find_confirmed_client(clid, sessions, nn);
if (found)
atomic_inc(&found->cl_rpc_users);
spin_unlock(&nn->client_lock);
return found;
}
static __be32 set_client(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn)
{
if (cstate->clp) {
if (!same_clid(&cstate->clp->cl_clientid, clid))
return nfserr_stale_clientid;
return nfs_ok;
}
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
/*
* We're in the 4.0 case (otherwise the SEQUENCE op would have
* set cstate->clp), so sessions = false:
*/
cstate->clp = lookup_clientid(clid, false, nn);
if (!cstate->clp)
return nfserr_expired;
return nfs_ok;
}
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open, struct nfsd_net *nn)
{
clientid_t *clientid = &open->op_clientid;
struct nfs4_client *clp = NULL;
unsigned int strhashval;
struct nfs4_openowner *oo = NULL;
__be32 status;
/*
* In case we need it later, after we've already created the
* file and don't want to risk a further failure:
*/
open->op_file = nfsd4_alloc_file();
if (open->op_file == NULL)
return nfserr_jukebox;
status = set_client(clientid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
strhashval = ownerstr_hashval(&open->op_owner);
oo = find_openstateowner_str(strhashval, open, clp);
open->op_openowner = oo;
if (!oo) {
goto new_owner;
}
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
release_openowner(oo);
open->op_openowner = NULL;
goto new_owner;
}
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
goto alloc_stateid;
new_owner:
oo = alloc_init_open_stateowner(strhashval, open, cstate);
if (oo == NULL)
return nfserr_jukebox;
open->op_openowner = oo;
alloc_stateid:
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
if (nfsd4_has_session(cstate) &&
(cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
open->op_odstate = alloc_clnt_odstate(clp);
if (!open->op_odstate)
return nfserr_jukebox;
}
return nfs_ok;
}
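/*
* A delegation stateid presented in an OPEN can only satisfy access the
* delegation actually grants: a read delegation cannot back a write open.
*/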
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
return nfserr_openmode;
else
return nfs_ok;
}
static int share_access_to_flags(u32 share_access)
{
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
struct nfs4_stid *ret;
ret = find_stateid_by_type(cl, s,
NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
if (!ret)
return NULL;
return delegstateid(ret);
}
static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
struct nfs4_delegation **dp)
{
int flags;
__be32 status = nfserr_bad_stateid;
struct nfs4_delegation *deleg;
deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (deleg == NULL)
goto out;
if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
nfs4_put_stid(&deleg->dl_stid);
if (cl->cl_minorversion)
status = nfserr_deleg_revoked;
goto out;
}
flags = share_access_to_flags(open->op_share_access);
status = nfs4_check_delegmode(deleg, flags);
if (status) {
nfs4_put_stid(&deleg->dl_stid);
goto out;
}
*dp = deleg;
out:
if (!nfsd4_is_deleg_cur(open))
return nfs_ok;
if (status)
return status;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
return nfs_ok;
}
static inline int nfs4_access_to_access(u32 nfs4_access)
{
int flags = 0;
if (nfs4_access & NFS4_SHARE_ACCESS_READ)
flags |= NFSD_MAY_READ;
if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
flags |= NFSD_MAY_WRITE;
return flags;
}
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
struct nfsd4_open *open)
{
struct iattr iattr = {
.ia_valid = ATTR_SIZE,
.ia_size = 0,
};
struct nfsd_attrs attrs = {
.na_iattr = &iattr,
};
if (!open->op_truncate)
return 0;
if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
struct nfsd4_open *open, bool new_stp)
{
struct nfsd_file *nf = NULL;
__be32 status;
int oflag = nfs4_access_to_omode(open->op_share_access);
int access = nfs4_access_to_access(open->op_share_access);
unsigned char old_access_bmap, old_deny_bmap;
spin_lock(&fp->fi_lock);
/*
* Are we trying to set a deny mode that would conflict with
* current access?
*/
status = nfs4_file_check_deny(fp, open->op_share_deny);
if (status != nfs_ok) {
if (status != nfserr_share_denied) {
spin_unlock(&fp->fi_lock);
goto out;
}
if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
stp, open->op_share_deny, false))
status = nfserr_jukebox;
spin_unlock(&fp->fi_lock);
goto out;
}
/* set access to the file */
status = nfs4_file_get_access(fp, open->op_share_access);
if (status != nfs_ok) {
if (status != nfserr_share_denied) {
spin_unlock(&fp->fi_lock);
goto out;
}
if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
stp, open->op_share_access, true))
status = nfserr_jukebox;
spin_unlock(&fp->fi_lock);
goto out;
}
/* Set access bits in stateid */
old_access_bmap = stp->st_access_bmap;
set_access(open->op_share_access, stp);
/* Set new deny mask */
old_deny_bmap = stp->st_deny_bmap;
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
if (!fp->fi_fds[oflag]) {
spin_unlock(&fp->fi_lock);
status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
open->op_filp, &nf);
if (status != nfs_ok)
goto out_put_access;
spin_lock(&fp->fi_lock);
if (!fp->fi_fds[oflag]) {
fp->fi_fds[oflag] = nf;
nf = NULL;
}
}
spin_unlock(&fp->fi_lock);
if (nf)
nfsd_file_put(nf);
status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
access));
if (status)
goto out_put_access;
status = nfsd4_truncate(rqstp, cur_fh, open);
if (status)
goto out_put_access;
out:
return status;
out_put_access:
stp->st_access_bmap = old_access_bmap;
nfs4_file_put_access(fp, open->op_share_access);
reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
goto out;
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
struct nfsd4_open *open)
{
__be32 status;
unsigned char old_deny_bmap = stp->st_deny_bmap;
if (!test_access(open->op_share_access, stp))
return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
/* test and set deny mode */
spin_lock(&fp->fi_lock);
status = nfs4_file_check_deny(fp, open->op_share_deny);
switch (status) {
case nfs_ok:
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |=
(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
break;
case nfserr_share_denied:
if (nfs4_resolve_deny_conflicts_locked(fp, false,
stp, open->op_share_deny, false))
status = nfserr_jukebox;
break;
}
spin_unlock(&fp->fi_lock);
if (status != nfs_ok)
return status;
status = nfsd4_truncate(rqstp, cur_fh, open);
if (status != nfs_ok)
reset_union_bmap_deny(old_deny_bmap, stp);
return status;
}
/* Should we give out recallable state? */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
if (clp->cl_cb_state == NFSD4_CB_UP)
return true;
/*
* In the sessions case, since we don't have to establish a
* separate connection for callbacks, we assume it's OK
* until we hear otherwise:
*/
return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
int flag)
{
struct file_lock *fl;
fl = locks_alloc_lock();
if (!fl)
return NULL;
fl->fl_lmops = &nfsd_lease_mng_ops;
fl->fl_flags = FL_DELEG;
fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
fl->fl_end = OFFSET_MAX;
fl->fl_owner = (fl_owner_t)dp;
fl->fl_pid = current->tgid;
fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
return fl;
}
static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
struct nfs4_file *fp)
{
struct nfs4_ol_stateid *st;
struct file *f = fp->fi_deleg_file->nf_file;
struct inode *ino = file_inode(f);
int writes;
writes = atomic_read(&ino->i_writecount);
if (!writes)
return 0;
/*
* There could be multiple filehandles (hence multiple
* nfs4_files) referencing this file, but that's not too
* common; let's just give up in that case rather than
* trying to go look up all the clients using that other
* nfs4_file as well:
*/
if (fp->fi_aliased)
return -EAGAIN;
/*
* If there's a close in progress, make sure that we see it
* clear any fi_fds[] entries before we see it decrement
* i_writecount:
*/
smp_mb__after_atomic();
if (fp->fi_fds[O_WRONLY])
writes--;
if (fp->fi_fds[O_RDWR])
writes--;
if (writes > 0)
return -EAGAIN; /* There may be non-NFSv4 writers */
/*
* It's possible there are non-NFSv4 write opens in progress,
* but if they haven't incremented i_writecount yet then they
* also haven't called break_lease() yet; so, they'll break this
* lease soon enough. So, all that's left to check for is NFSv4
* opens:
*/
spin_lock(&fp->fi_lock);
list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
if (st->st_openstp == NULL /* it's an open */ &&
access_permit_write(st) &&
st->st_stid.sc_client != clp) {
spin_unlock(&fp->fi_lock);
return -EAGAIN;
}
}
spin_unlock(&fp->fi_lock);
/*
* There's a small chance that we could be racing with another
* NFSv4 open. However, any open that hasn't added itself to
* the fi_stateids list also hasn't called break_lease yet; so,
* they'll break this lease soon enough.
*/
return 0;
}
/*
* It's possible that, between opening the dentry and setting the delegation,
* it has been renamed or unlinked. Redo the lookup to verify that this
* hasn't happened.
*/
static int
nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
struct svc_fh *parent)
{
struct svc_export *exp;
struct dentry *child;
__be32 err;
err = nfsd_lookup_dentry(open->op_rqstp, parent,
open->op_fname, open->op_fnamelen,
&exp, &child);
if (err)
return -EAGAIN;
exp_put(exp);
dput(child);
if (child != file_dentry(fp->fi_deleg_file->nf_file))
return -EAGAIN;
return 0;
}
/*
* We avoid breaking delegations held by a client due to its own activity, but
* clearing setuid/setgid bits on a write is an implicit activity and the client
* may not notice and continue using the old mode. Avoid giving out a delegation
* on setuid/setgid files when the client is requesting an open for write.
*/
static int
nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
{
struct inode *inode = file_inode(nf->nf_file);
if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
(inode->i_mode & (S_ISUID|S_ISGID)))
return -EAGAIN;
return 0;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent)
{
int status = 0;
struct nfs4_client *clp = stp->st_stid.sc_client;
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
struct nfsd_file *nf = NULL;
struct file_lock *fl;
u32 dl_type;
/*
* The fi_had_conflict and nfs_get_existing_delegation checks
* here are just optimizations; we'll need to recheck them at
* the end:
*/
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
/*
* Try for a write delegation first. RFC8881 section 10.4 says:
*
* "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
* on its own, all opens."
*
* Furthermore, the client can use a write delegation for most READ
* operations as well, so we require an O_RDWR file here.
*
* Offer a write delegation in the case of a BOTH open, and ensure
* we get the O_RDWR descriptor.
*/
if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
nf = find_rw_file(fp);
dl_type = NFS4_OPEN_DELEGATE_WRITE;
}
/*
* If the file is being opened O_RDONLY or we couldn't get an O_RDWR
* file for some reason, then try for a read delegation instead.
*/
if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
nf = find_readable_file(fp);
dl_type = NFS4_OPEN_DELEGATE_READ;
}
if (!nf)
return ERR_PTR(-EAGAIN);
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
status = -EAGAIN;
else if (nfsd4_verify_setuid_write(open, nf))
status = -EAGAIN;
else if (!fp->fi_deleg_file) {
fp->fi_deleg_file = nf;
/* increment early to prevent fi_deleg_file from being
* cleared */
fp->fi_delegees = 1;
nf = NULL;
} else
fp->fi_delegees++;
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
if (nf)
nfsd_file_put(nf);
if (status)
return ERR_PTR(status);
status = -ENOMEM;
dp = alloc_init_deleg(clp, fp, odstate, dl_type);
if (!dp)
goto out_delegees;
fl = nfs4_alloc_init_lease(dp, dl_type);
if (!fl)
goto out_clnt_odstate;
status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
if (fl)
locks_free_lock(fl);
if (status)
goto out_clnt_odstate;
if (parent) {
status = nfsd4_verify_deleg_dentry(open, fp, parent);
if (status)
goto out_unlock;
}
status = nfsd4_check_conflicting_opens(clp, fp);
if (status)
goto out_unlock;
/*
* Now that the deleg is set, check again to ensure that nothing
* raced in and changed the mode while we weren't looking.
*/
status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
if (status)
goto out_unlock;
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (fp->fi_had_conflict)
status = -EAGAIN;
else
status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
if (status)
goto out_unlock;
return dp;
out_unlock:
vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
out_delegees:
put_deleg_file(fp);
return ERR_PTR(status);
}
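/*
* For an NFSv4.1+ client that expressed a delegation "want", report why
* none was granted: WND4_CONTENTION for -EAGAIN (a conflict prevented the
* delegation), WND4_CANCELLED when the client asked to cancel its want,
* and WND4_RESOURCE otherwise.
*/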
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
if (status == -EAGAIN)
open->op_why_no_deleg = WND4_CONTENTION;
else {
open->op_why_no_deleg = WND4_RESOURCE;
switch (open->op_deleg_want) {
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
break;
case NFS4_SHARE_WANT_CANCEL:
open->op_why_no_deleg = WND4_CANCELLED;
break;
case NFS4_SHARE_WANT_NO_DELEG:
WARN_ON_ONCE(1);
}
}
}
/*
* The Linux NFS server does not offer write delegations to NFSv4.0
* clients in order to avoid conflicts between write delegations and
* GETATTRs requesting CHANGE or SIZE attributes.
*
* With NFSv4.1 and later minorversions, the SEQUENCE operation that
* begins each COMPOUND contains a client ID. Delegation recall can
* be avoided when the server recognizes that the client sending a
* GETATTR also holds the write delegation it conflicts with.
*
* However, the NFSv4.0 protocol does not enable a server to
* determine that a GETATTR originated from the client holding the
* conflicting delegation versus coming from some other client. Per
* RFC 7530 Section 16.7.5, the server must recall or send a
* CB_GETATTR even when the GETATTR originates from the client that
* holds the conflicting delegation.
*
* An NFSv4.0 client can trigger a pathological situation if it
* always sends a DELEGRETURN preceded by a conflicting GETATTR in
* the same COMPOUND. COMPOUND execution will always stop at the
* GETATTR and the DELEGRETURN will never get executed. The server
* eventually revokes the delegation, which can result in loss of
* open or lock state.
*/
static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *currentfh)
{
struct nfs4_delegation *dp;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct svc_fh *parent = NULL;
int cb_up;
int status = 0;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = 0;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
open->op_recall = 1;
break;
case NFS4_OPEN_CLAIM_NULL:
parent = currentfh;
fallthrough;
case NFS4_OPEN_CLAIM_FH:
/*
* Let's not give out any delegations till everyone's
* had the chance to reclaim theirs, *and* until
* NLM locks have all been reclaimed:
*/
if (locks_in_grace(clp->net))
goto out_no_deleg;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out_no_deleg;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
!clp->cl_minorversion)
goto out_no_deleg;
break;
default:
goto out_no_deleg;
}
dp = nfs4_set_delegation(open, stp, parent);
if (IS_ERR(dp))
goto out_no_deleg;
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
}
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
/*
* Check the claimed delegation type before clobbering it: for
* CLAIM_PREVIOUS, op_delegate_type still carries the delegation type
* the client is trying to reclaim, which is what makes the warning
* below reachable.
*/
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
dprintk("NFSD: WARNING: refusing delegation reclaim\n");
open->op_recall = 1;
}
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
/* 4.1 client asking for a delegation? */
if (open->op_deleg_want)
nfsd4_open_deleg_none_ext(open, status);
return;
}
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
struct nfs4_delegation *dp)
{
if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
}
/* Otherwise the client must be confused, wanting a delegation
* it already has; in that case we don't return
* NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
*/
}
/**
* nfsd4_process_open2 - finish open processing
* @rqstp: the RPC transaction being executed
* @current_fh: NFSv4 COMPOUND's current filehandle
* @open: OPEN arguments
*
* If successful, (1) truncate the file if open->op_truncate was
* set, (2) set open->op_stateid, (3) set open->op_delegation.
*
* Returns %nfs_ok on success; otherwise an nfs4stat value in
* network byte order is returned.
*/
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
bool new_stp = false;
/*
* Look up the file; if found, look up the stateid, check the open
* request, and check for delegations in the process of being recalled.
* If not found, create the nfs4_file struct.
*/
fp = nfsd4_file_hash_insert(open->op_file, current_fh);
if (unlikely(!fp))
return nfserr_jukebox;
if (fp != open->op_file) {
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
if (nfsd4_is_deleg_cur(open))
goto out;
}
if (!stp) {
stp = init_open_stateid(fp, open);
if (!open->op_stp)
new_stp = true;
}
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
*
* stp is already locked.
*/
if (!new_stp) {
/* Stateid was found, this is an OPEN upgrade */
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
if (status) {
stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
goto out;
}
stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
open->op_odstate);
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_WANTED;
goto nodeleg;
}
}
/*
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
nodeleg:
status = nfs_ok;
trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
open->op_deleg_want)
nfsd4_deleg_xgrade_none_ext(open, dp);
if (fp)
put_nfs4_file(fp);
if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
/*
* To finish the open response, we just need to set the rflags.
*/
open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
if (dp)
nfs4_put_stid(&dp->dl_stid);
if (stp)
nfs4_put_stid(&stp->st_stid);
return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
if (open->op_openowner) {
struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
nfsd4_cstate_assign_replay(cstate, so);
nfs4_put_stateowner(so);
}
if (open->op_file)
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
if (open->op_odstate)
kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
clientid_t *clid = &u->renew;
struct nfs4_client *clp;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
trace_nfsd_clid_renew(clid);
status = set_client(clid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
return nfserr_cb_path_down;
return nfs_ok;
}
void
nfsd4_end_grace(struct nfsd_net *nn)
{
/* do nothing if grace period already ended */
if (nn->grace_ended)
return;
trace_nfsd_grace_complete(nn);
nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
nfsd4_record_grace_done(nn);
/*
* At this point, NFSv4 clients can still reclaim. But if the
* server crashes, any that have not yet reclaimed will be out
* of luck on the next boot.
*
* (NFSv4.1+ clients are considered to have reclaimed once they
* call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
* have reclaimed after their first OPEN.)
*/
locks_end_grace(&nn->nfsd4_manager);
/*
* At this point, and once lockd and/or any other containers
* exit their grace period, further reclaims will fail and
* regular locking can resume.
*/
}
/*
* If we've waited a lease period but there are still clients trying to
* reclaim, wait a little longer to give them a chance to finish.
*/
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
time64_t double_grace_period_end = nn->boot_time +
2 * nn->nfsd4_lease;
if (nn->track_reclaim_completes &&
atomic_read(&nn->nr_reclaim_complete) ==
nn->reclaim_str_hashtbl_size)
return false;
if (!nn->somebody_reclaimed)
return false;
nn->somebody_reclaimed = false;
/*
* If we've given them *two* lease times to reclaim, and they're
* still not done, give up:
*/
if (ktime_get_boottime_seconds() > double_grace_period_end)
return false;
return true;
}
struct laundry_time {
time64_t cutoff;
time64_t new_timeo;
};
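/*
 * Returns true if @last_refresh is older than lt->cutoff, i.e. the
 * state has outlived its lease.  Otherwise, remembers the shortest
 * remaining lifetime seen so far in lt->new_timeo so the laundromat
 * knows when it next needs to run.
 */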
static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
{
time64_t time_remaining;
if (last_refresh < lt->cutoff)
return true;
time_remaining = last_refresh - lt->cutoff;
lt->new_timeo = min(lt->new_timeo, time_remaining);
return false;
}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
{
spin_lock_init(&nn->nfsd_ssc_lock);
INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
/*
 * This is called when nfsd is being shut down, after all inter_ssc
 * cleanup has been done, to destroy the ssc delayed unmount list.
 */
static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *tmp;
spin_lock(&nn->nfsd_ssc_lock);
list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
list_del(&ni->nsui_list);
spin_unlock(&nn->nfsd_ssc_lock);
mntput(ni->nsui_vfsmount);
kfree(ni);
spin_lock(&nn->nfsd_ssc_lock);
}
spin_unlock(&nn->nfsd_ssc_lock);
}
static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
{
bool do_wakeup = false;
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *tmp;
spin_lock(&nn->nfsd_ssc_lock);
list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
if (time_after(jiffies, ni->nsui_expire)) {
if (refcount_read(&ni->nsui_refcnt) > 1)
continue;
			/* mark as being unmounted */
ni->nsui_busy = true;
spin_unlock(&nn->nfsd_ssc_lock);
mntput(ni->nsui_vfsmount);
spin_lock(&nn->nfsd_ssc_lock);
			/* waiters need to start from the beginning of the list */
list_del(&ni->nsui_list);
kfree(ni);
			/* wake up ssc_connect waiters */
do_wakeup = true;
continue;
}
break;
}
if (do_wakeup)
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
}
#endif
/* Check if any lock belonging to this lockowner has any blockers */
static bool
nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
{
struct file_lock_context *ctx;
struct nfs4_ol_stateid *stp;
struct nfs4_file *nf;
list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
nf = stp->st_stid.sc_file;
ctx = locks_inode_context(nf->fi_inode);
if (!ctx)
continue;
if (locks_owner_has_blockers(ctx, lo))
return true;
}
return false;
}
static bool
nfs4_anylock_blockers(struct nfs4_client *clp)
{
int i;
struct nfs4_stateowner *so;
struct nfs4_lockowner *lo;
if (atomic_read(&clp->cl_delegs_in_recall))
return true;
spin_lock(&clp->cl_lock);
for (i = 0; i < OWNER_HASH_SIZE; i++) {
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
so_strhash) {
if (so->so_is_open_owner)
continue;
lo = lockowner(so);
if (nfs4_lockowner_has_blockers(lo)) {
spin_unlock(&clp->cl_lock);
return true;
}
}
}
spin_unlock(&clp->cl_lock);
return false;
}
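/*
 * Scan the client LRU for expired leases.  Idle clients are demoted to
 * courtesy state rather than expired immediately; a courtesy client is
 * placed on @reaplist if it no longer holds state, if one of its locks
 * is blocking another client, or to trim the client count back under
 * nn->nfs4_max_clients (at most NFSD_CLIENT_MAX_TRIM_PER_RUN per run).
 */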
static void
nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
struct laundry_time *lt)
{
unsigned int maxreap, reapcnt = 0;
struct list_head *pos, *next;
struct nfs4_client *clp;
maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
INIT_LIST_HEAD(reaplist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (clp->cl_state == NFSD4_EXPIRABLE)
goto exp_client;
if (!state_expired(lt, clp->cl_time))
break;
if (!atomic_read(&clp->cl_rpc_users)) {
if (clp->cl_state == NFSD4_ACTIVE)
atomic_inc(&nn->nfsd_courtesy_clients);
clp->cl_state = NFSD4_COURTESY;
}
if (!client_has_state(clp))
goto exp_client;
if (!nfs4_anylock_blockers(clp))
if (reapcnt >= maxreap)
continue;
exp_client:
if (!mark_client_expired_locked(clp)) {
list_add(&clp->cl_lru, reaplist);
reapcnt++;
}
}
spin_unlock(&nn->client_lock);
}
static void
nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
struct list_head *reaplist)
{
unsigned int maxreap = 0, reapcnt = 0;
struct list_head *pos, *next;
struct nfs4_client *clp;
maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
INIT_LIST_HEAD(reaplist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (clp->cl_state == NFSD4_ACTIVE)
break;
if (reapcnt >= maxreap)
break;
if (!mark_client_expired_locked(clp)) {
list_add(&clp->cl_lru, reaplist);
reapcnt++;
}
}
spin_unlock(&nn->client_lock);
}
static void
nfs4_process_client_reaplist(struct list_head *reaplist)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
list_for_each_safe(pos, next, reaplist) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
trace_nfsd_clid_purged(&clp->cl_clientid);
list_del_init(&clp->cl_lru);
expire_client(clp);
}
}
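/*
 * The main laundromat pass: end the grace period once reclaims have
 * finished, then expire whatever has outlived its lease -- copy-notify
 * stateids, client records, delegations on the recall list, openowners
 * on the close LRU, and timed-out blocked-lock notifications.  Returns
 * the number of seconds until the laundromat should run again.
 */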
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct nfs4_ol_stateid *stp;
struct nfsd4_blocked_lock *nbl;
struct list_head *pos, *next, reaplist;
struct laundry_time lt = {
.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
.new_timeo = nn->nfsd4_lease
};
struct nfs4_cpntf_state *cps;
copy_stateid_t *cps_t;
int i;
if (clients_still_reclaiming(nn)) {
lt.new_timeo = 0;
goto out;
}
nfsd4_end_grace(nn);
spin_lock(&nn->s2s_cp_lock);
idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
state_expired(<, cps->cpntf_time))
_free_cpntf_state_locked(nn, cps);
}
spin_unlock(&nn->s2s_cp_lock);
nfs4_get_client_reaplist(nn, &reaplist, <);
nfs4_process_client_reaplist(&reaplist);
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
if (!state_expired(<, dp->dl_time))
break;
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
while (!list_empty(&reaplist)) {
dp = list_first_entry(&reaplist, struct nfs4_delegation,
dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
revoke_delegation(dp);
}
spin_lock(&nn->client_lock);
while (!list_empty(&nn->close_lru)) {
oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
oo_close_lru);
if (!state_expired(<, oo->oo_time))
break;
list_del_init(&oo->oo_close_lru);
stp = oo->oo_last_closed_stid;
oo->oo_last_closed_stid = NULL;
spin_unlock(&nn->client_lock);
nfs4_put_stid(&stp->st_stid);
spin_lock(&nn->client_lock);
}
spin_unlock(&nn->client_lock);
	/*
	 * It's possible for a client to try to acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC 5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
BUG_ON(!list_empty(&reaplist));
spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
if (!state_expired(<, nbl->nbl_time))
break;
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
free_blocked_lock(nbl);
}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
/* service the server-to-server copy delayed unmount list */
nfsd4_ssc_expire_umount(nn);
#endif
out:
return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
}
static void
laundromat_main(struct work_struct *laundry)
{
time64_t t;
struct delayed_work *dwork = to_delayed_work(laundry);
struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
laundromat_work);
t = nfs4_laundromat(nn);
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
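/* Reap courtesy clients on behalf of the state shrinker. */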
static void
courtesy_client_reaper(struct nfsd_net *nn)
{
struct list_head reaplist;
nfs4_get_courtesy_client_reaplist(nn, &reaplist);
nfs4_process_client_reaplist(&reaplist);
}
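/*
 * Ask otherwise-idle clients to voluntarily return their read
 * delegations via a CB_RECALL_ANY callback.  A client is skipped if a
 * recall is already in flight or if it was asked less than five
 * seconds ago.
 */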
static void
deleg_reaper(struct nfsd_net *nn)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
struct list_head cblist;
INIT_LIST_HEAD(&cblist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (clp->cl_state != NFSD4_ACTIVE ||
list_empty(&clp->cl_delegations) ||
atomic_read(&clp->cl_delegs_in_recall) ||
test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
(ktime_get_boottime_seconds() -
clp->cl_ra_time < 5)) {
continue;
}
list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */
atomic_inc(&clp->cl_rpc_users);
set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
clp->cl_ra_time = ktime_get_boottime_seconds();
}
spin_unlock(&nn->client_lock);
while (!list_empty(&cblist)) {
clp = list_first_entry(&cblist, struct nfs4_client,
cl_ra_cblist);
list_del_init(&clp->cl_ra_cblist);
clp->cl_ra->ra_keep = 0;
clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
trace_nfsd_cb_recall_any(clp->cl_ra);
nfsd4_run_cb(&clp->cl_ra->ra_cb);
}
}
static void
nfsd4_state_shrinker_worker(struct work_struct *work)
{
struct nfsd_net *nn = container_of(work, struct nfsd_net,
nfsd_shrinker_work);
courtesy_client_reaper(nn);
deleg_reaper(nn);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
static __be32
nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
__be32 status = nfserr_openmode;
/* For lock stateid's, we test the parent open, not the lock: */
if (stp->st_openstp)
stp = stp->st_openstp;
if ((flags & WR_STATE) && !access_permit_write(stp))
goto out;
if ((flags & RD_STATE) && !access_permit_read(stp))
goto out;
status = nfs_ok;
out:
return status;
}
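/*
 * Handle the all-zeros and all-ones special stateids.  A read with the
 * all-ones stateid always succeeds; during grace we can't yet know
 * about conflicting state, so fail with nfserr_grace; otherwise allow
 * the access unless it conflicts with an existing share deny.
 */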
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
if (ONE_STATEID(stateid) && (flags & RD_STATE))
return nfs_ok;
else if (opens_in_grace(net)) {
/* Answer in remaining cases depends on existence of
* conflicting state; so we must wait out the grace period. */
return nfserr_grace;
} else if (flags & WR_STATE)
return nfs4_share_conflict(current_fh,
NFS4_SHARE_DENY_WRITE);
else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
return nfs4_share_conflict(current_fh,
NFS4_SHARE_DENY_READ);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
/*
* When sessions are used the stateid generation number is ignored
* when it is zero.
*/
if (has_session && in->si_generation == 0)
return nfs_ok;
if (in->si_generation == ref->si_generation)
return nfs_ok;
/* If the client sends us a stateid from the future, it's buggy: */
if (nfsd4_stateid_generation_after(in, ref))
return nfserr_bad_stateid;
/*
* However, we could see a stateid from the past, even from a
* non-buggy client. For example, if the client sends a lock
* while some IO is outstanding, the lock may bump si_generation
* while the IO is still in flight. The client could avoid that
* situation by waiting for responses on all the IO requests,
* but better performance may result in retrying IO that
* receives an old_stateid error if requests are rarely
* reordered in flight:
*/
return nfserr_old_stateid;
}
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
__be32 ret;
spin_lock(&s->sc_lock);
ret = nfsd4_verify_open_stid(s);
if (ret == nfs_ok)
ret = check_stateid_generation(in, &s->sc_stateid, has_session);
spin_unlock(&s->sc_lock);
return ret;
}
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
!(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
return nfserr_bad_stateid;
return nfs_ok;
}
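/*
 * Classify a stateid on behalf of TEST_STATEID: report whether it is
 * still good, revoked, or bad, without handing out a reference.
 */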
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
if (status)
goto out_unlock;
switch (s->sc_type) {
case NFS4_DELEG_STID:
status = nfs_ok;
break;
case NFS4_REVOKED_DELEG_STID:
status = nfserr_deleg_revoked;
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
status = nfsd4_check_openowner_confirmed(openlockstateid(s));
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
fallthrough;
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
status = nfserr_bad_stateid;
}
out_unlock:
spin_unlock(&cl->cl_lock);
return status;
}
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
struct nfs4_stid *stid;
bool return_revoked = false;
	/*
	 * Only return revoked delegations if explicitly asked;
	 * otherwise we report revoked or bad_stateid status.
	 */
if (typemask & NFS4_REVOKED_DELEG_STID)
return_revoked = true;
else if (typemask & NFS4_DELEG_STID)
typemask |= NFS4_REVOKED_DELEG_STID;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
return nfserr_stale_stateid;
}
if (status)
return status;
stid = find_stateid_by_type(cstate->clp, stateid, typemask);
if (!stid)
return nfserr_bad_stateid;
if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
nfs4_put_stid(stid);
if (cstate->minorversion)
return nfserr_deleg_revoked;
return nfserr_bad_stateid;
}
*s = stid;
return nfs_ok;
}
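/*
 * Return a referenced nfsd_file backing this stateid: the delegation's
 * file for a delegation stateid, otherwise a readable or writeable
 * file according to @flags.  Returns NULL if none is available.
 */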
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
struct nfsd_file *ret = NULL;
if (!s)
return NULL;
switch (s->sc_type) {
case NFS4_DELEG_STID:
spin_lock(&s->sc_file->fi_lock);
ret = nfsd_file_get(s->sc_file->fi_deleg_file);
spin_unlock(&s->sc_file->fi_lock);
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
if (flags & RD_STATE)
ret = find_readable_file(s->sc_file);
else
ret = find_writeable_file(s->sc_file);
}
return ret;
}
static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{
__be32 status;
status = nfsd4_check_openowner_confirmed(ols);
if (status)
return status;
return nfs4_check_openmode(ols, flags);
}
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
struct nfsd_file **nfp, int flags)
{
int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
struct nfsd_file *nf;
__be32 status;
nf = nfs4_find_file(s, flags);
if (nf) {
status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
nfsd_file_put(nf);
goto out;
}
} else {
status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
if (status)
return status;
}
*nfp = nf;
out:
return status;
}
static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
return;
list_del(&cps->cp_list);
idr_remove(&nn->s2s_cp_stateids,
cps->cp_stateid.cs_stid.si_opaque.so_id);
kfree(cps);
}
/*
 * A READ from an inter-server COPY will have a copy stateid.  Look up
 * the copy notify stateid from the idr structure and take a reference
 * on it.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
struct nfs4_client *clp,
struct nfs4_cpntf_state **cps)
{
copy_stateid_t *cps_t;
struct nfs4_cpntf_state *state = NULL;
if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
return nfserr_bad_stateid;
spin_lock(&nn->s2s_cp_lock);
cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
if (cps_t) {
state = container_of(cps_t, struct nfs4_cpntf_state,
cp_stateid);
if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
state = NULL;
goto unlock;
}
if (!clp)
refcount_inc(&state->cp_stateid.cs_count);
else
_free_cpntf_state_locked(nn, state);
}
unlock:
spin_unlock(&nn->s2s_cp_lock);
if (!state)
return nfserr_bad_stateid;
if (!clp && state)
*cps = state;
return 0;
}
static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
struct nfs4_stid **stid)
{
__be32 status;
struct nfs4_cpntf_state *cps = NULL;
struct nfs4_client *found;
status = manage_cpntf_state(nn, st, NULL, &cps);
if (status)
return status;
cps->cpntf_time = ktime_get_boottime_seconds();
status = nfserr_expired;
found = lookup_clientid(&cps->cp_p_clid, true, nn);
if (!found)
goto out;
*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
if (*stid)
status = nfs_ok;
else
status = nfserr_bad_stateid;
put_client_renew(found);
out:
nfs4_put_cpntf_state(nn, cps);
return status;
}
void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
spin_lock(&nn->s2s_cp_lock);
_free_cpntf_state_locked(nn, cps);
spin_unlock(&nn->s2s_cp_lock);
}
/**
* nfs4_preprocess_stateid_op - find and prep stateid for an operation
* @rqstp: incoming request from client
* @cstate: current compound state
* @fhp: filehandle associated with requested stateid
* @stateid: stateid (provided by client)
* @flags: flags describing type of operation to be done
* @nfp: optional nfsd_file return pointer (may be NULL)
* @cstid: optional returned nfs4_stid pointer (may be NULL)
*
 * Given info from the client, look up an nfs4_stid for the operation. On
* success, it returns a reference to the nfs4_stid and/or the nfsd_file
* associated with it.
*/
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
stateid_t *stateid, int flags, struct nfsd_file **nfp,
struct nfs4_stid **cstid)
{
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfs4_stid *s = NULL;
__be32 status;
if (nfp)
*nfp = NULL;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
if (cstid)
status = nfserr_bad_stateid;
else
status = check_special_stateids(net, fhp, stateid,
flags);
goto done;
}
status = nfsd4_lookup_stateid(cstate, stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
&s, nn);
if (status == nfserr_bad_stateid)
status = find_cpntf_state(nn, stateid, &s);
if (status)
return status;
status = nfsd4_stid_check_stateid_generation(stateid, s,
nfsd4_has_session(cstate));
if (status)
goto out;
switch (s->sc_type) {
case NFS4_DELEG_STID:
status = nfs4_check_delegmode(delegstateid(s), flags);
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
status = nfs4_check_olstateid(openlockstateid(s), flags);
break;
default:
status = nfserr_bad_stateid;
break;
}
if (status)
goto out;
status = nfs4_check_fh(fhp, s);
done:
if (status == nfs_ok && nfp)
status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
if (s) {
if (!status && cstid)
*cstid = s;
else
nfs4_put_stid(s);
}
return status;
}
/*
* Test if the stateid is valid
*/
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
struct nfs4_client *cl = cstate->clp;
list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
stateid->ts_id_status =
nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
return nfs_ok;
}
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
struct nfs4_ol_stateid *stp = openlockstateid(s);
__be32 ret;
ret = nfsd4_lock_ol_stateid(stp);
if (ret)
goto out_put_stid;
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
goto out;
ret = nfserr_locks_held;
if (check_for_locks(stp->st_stid.sc_file,
lockowner(stp->st_stateowner)))
goto out;
release_lock_stateid(stp);
ret = nfs_ok;
out:
mutex_unlock(&stp->st_mutex);
out_put_stid:
nfs4_put_stid(s);
return ret;
}
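/*
 * FREE_STATEID (NFSv4.1+): a client may free only lock stateids that no
 * longer have locks outstanding and delegation stateids that have
 * already been revoked; anything still in active use gets
 * nfserr_locks_held.
 */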
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
struct nfs4_client *cl = cstate->clp;
__be32 ret = nfserr_bad_stateid;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
spin_lock(&s->sc_lock);
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
break;
case NFS4_OPEN_STID:
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
break;
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
spin_unlock(&s->sc_lock);
refcount_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
spin_unlock(&s->sc_lock);
dp = delegstateid(s);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&cl->cl_lock);
nfs4_put_stid(s);
ret = nfs_ok;
goto out;
/* Default falls through and returns nfserr_bad_stateid */
}
spin_unlock(&s->sc_lock);
out_unlock:
spin_unlock(&cl->cl_lock);
out:
return ret;
}
static inline int
setlkflg(int type)
{
return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
RD_STATE : WR_STATE;
}
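/*
 * The checks common to all seqid-morphing operations: validate the
 * owner's seqid, lock the stateid's st_mutex, and verify the stateid
 * generation and filehandle.  On success the stateid is left with
 * st_mutex held.
 */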
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
struct svc_fh *current_fh = &cstate->current_fh;
struct nfs4_stateowner *sop = stp->st_stateowner;
__be32 status;
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
status = nfsd4_lock_ol_stateid(stp);
if (status != nfs_ok)
return status;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
if (status != nfs_ok)
mutex_unlock(&stp->st_mutex);
return status;
}
/**
* nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
 * @cstate: compound state
* @seqid: seqid (provided by client)
* @stateid: stateid (provided by client)
* @typemask: mask of allowable types for this operation
* @stpp: return pointer for the stateid found
* @nn: net namespace for request
*
* Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
 * return it in @stpp. On an nfs_ok return, the returned stateid will
* have its st_mutex locked.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, char typemask,
struct nfs4_ol_stateid **stpp,
struct nfsd_net *nn)
{
__be32 status;
struct nfs4_stid *s;
struct nfs4_ol_stateid *stp = NULL;
trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
if (status)
return status;
stp = openlockstateid(s);
nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
if (!status)
*stpp = stp;
else
nfs4_put_stid(&stp->st_stid);
return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
NFS4_OPEN_STID, &stp, nn);
if (status)
return status;
oo = openowner(stp->st_stateowner);
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
return nfserr_bad_stateid;
}
*stpp = stp;
return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_open_confirm *oc = &u->open_confirm;
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
cstate->current_fh.fh_dentry);
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status)
return status;
status = nfs4_preprocess_seqid_op(cstate,
oc->oc_seqid, &oc->oc_req_stateid,
NFS4_OPEN_STID, &stp, nn);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
status = nfserr_bad_stateid;
if (oo->oo_flags & NFS4_OO_CONFIRMED) {
mutex_unlock(&stp->st_mutex);
goto put_stateid;
}
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
nfsd4_client_record_create(oo->oo_owner.so_client);
status = nfs_ok;
put_stateid:
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
if (!test_access(access, stp))
return;
nfs4_file_put_access(stp->st_stid.sc_file, access);
clear_access(access, stp);
}
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
switch (to_access) {
case NFS4_SHARE_ACCESS_READ:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_WRITE:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
WARN_ON_ONCE(1);
}
}
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_open_downgrade *od = &u->open_downgrade;
__be32 status;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
cstate->current_fh.fh_dentry);
/* We don't yet support WANT bits: */
if (od->od_deleg_want)
dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
od->od_deleg_want);
status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
&od->od_stateid, &stp, nn);
if (status)
goto out;
status = nfserr_inval;
if (!test_access(od->od_share_access, stp)) {
dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
stp->st_access_bmap, od->od_share_access);
goto put_stateid;
}
if (!test_deny(od->od_share_deny, stp)) {
dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
stp->st_deny_bmap, od->od_share_deny);
goto put_stateid;
}
nfs4_stateid_downgrade(stp, od->od_share_access);
reset_union_bmap_deny(od->od_share_deny, stp);
nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
status = nfs_ok;
put_stateid:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
struct nfs4_client *clp = s->st_stid.sc_client;
bool unhashed;
LIST_HEAD(reaplist);
struct nfs4_ol_stateid *stp;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
if (clp->cl_minorversion) {
if (unhashed)
put_ol_stateid_locked(s, &reaplist);
spin_unlock(&clp->cl_lock);
list_for_each_entry(stp, &reaplist, st_locks)
nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
free_ol_stateid_reaplist(&reaplist);
} else {
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
if (unhashed)
move_to_close_lru(s, clp->net);
}
}
/*
* nfs4_unlock_state() called after encode
*/
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_close *close = &u->close;
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
NFS4_OPEN_STID|NFS4_CLOSED_STID,
&stp, nn);
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
stp->st_stid.sc_type = NFS4_CLOSED_STID;
/*
* Technically we don't _really_ have to increment or copy it, since
* it should just be gone after this operation and we clobber the
* copied value below, but we continue to do so here just to ensure
* that racing ops see that there was a state change.
*/
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
/* v4.1+ suggests that we send a special stateid in here, since the
* clients should just ignore this anyway. Since this is not useful
* for v4.0 clients either, we set it to the special close_stateid
* universally.
*
* See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
*/
memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
out:
return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_delegreturn *dr = &u->delegreturn;
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
if (status)
goto out;
dp = delegstateid(s);
status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
if (status)
goto put_stateid;
trace_nfsd_deleg_return(stateid);
wake_up_var(d_inode(cstate->current_fh.fh_dentry));
destroy_delegation(dp);
put_stateid:
nfs4_put_stid(&dp->dl_stid);
out:
return status;
}
/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
u64 end;
WARN_ON_ONCE(!len);
end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
/*
* TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
* we can't properly handle lock requests that go beyond the (2^63 - 1)-th
* byte, because of sign extension problems. Since NFSv4 calls for 64-bit
* locking, this prevents us from being completely protocol-compliant. The
* real solution to this problem is to start using unsigned file offsets in
* the VFS, but this is a very deep change!
*/
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
if (lock->fl_start < 0)
lock->fl_start = OFFSET_MAX;
if (lock->fl_end < 0)
lock->fl_end = OFFSET_MAX;
}
static fl_owner_t
nfsd4_lm_get_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
nfs4_get_stateowner(&lo->lo_owner);
return owner;
}
static void
nfsd4_lm_put_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
if (lo)
nfs4_put_stateowner(&lo->lo_owner);
}
/* return true if the lock's client is expirable; if so, kick the laundromat */
static bool
nfsd4_lm_lock_expirable(struct file_lock *cfl)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
struct nfs4_client *clp = lo->lo_owner.so_client;
struct nfsd_net *nn;
if (try_to_expire_client(clp)) {
nn = net_generic(clp->net, nfsd_net_id);
mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
return true;
}
return false;
}
/* schedule laundromat to run immediately and wait for it to complete */
static void
nfsd4_lm_expire_lock(void)
{
flush_workqueue(laundry_wq);
}
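/*
 * Called by the locks code when a blocked lock becomes free.  If the
 * blocked lock is still queued, dequeue it and notify the client with
 * a CB_NOTIFY_LOCK callback.
 */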
static void
nfsd4_lm_notify(struct file_lock *fl)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
struct net *net = lo->lo_owner.so_client->net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd4_blocked_lock *nbl = container_of(fl,
struct nfsd4_blocked_lock, nbl_lock);
bool queue = false;
/* An empty list means that something else is going to be using it */
spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list)) {
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
queue = true;
}
spin_unlock(&nn->blocked_locks_lock);
if (queue) {
trace_nfsd_cb_notify_lock(lo, nbl);
nfsd4_run_cb(&nbl->nbl_cb);
}
}
static const struct lock_manager_operations nfsd_posix_mng_ops = {
.lm_mod_owner = THIS_MODULE,
.lm_notify = nfsd4_lm_notify,
.lm_get_owner = nfsd4_lm_get_owner,
.lm_put_owner = nfsd4_lm_put_owner,
.lm_lock_expirable = nfsd4_lm_lock_expirable,
.lm_expire_lock = nfsd4_lm_expire_lock,
};
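/*
 * Fill in the LOCK4denied result from the conflicting lock.  If the
 * conflicting lock is held by one of our own lockowners we can report
 * the owner and clientid; otherwise (or on allocation failure) those
 * fields are left empty.
 */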
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
struct nfs4_lockowner *lo;
if (fl->fl_lmops == &nfsd_posix_mng_ops) {
lo = (struct nfs4_lockowner *) fl->fl_owner;
xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
GFP_KERNEL);
if (!deny->ld_owner.data)
/* We just don't care that much */
goto nevermind;
deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
} else {
nevermind:
deny->ld_owner.len = 0;
deny->ld_owner.data = NULL;
deny->ld_clientid.cl_boot = 0;
deny->ld_clientid.cl_id = 0;
}
deny->ld_start = fl->fl_start;
deny->ld_length = NFS4_MAX_UINT64;
if (fl->fl_end != NFS4_MAX_UINT64)
deny->ld_length = fl->fl_end - fl->fl_start + 1;
deny->ld_type = NFS4_READ_LT;
if (fl->fl_type != F_RDLCK)
deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
unsigned int strhashval = ownerstr_hashval(owner);
struct nfs4_stateowner *so;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
so_strhash) {
if (so->so_is_open_owner)
continue;
if (same_owner_str(so, owner))
return lockowner(nfs4_get_stateowner(so));
}
return NULL;
}
static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
struct nfs4_lockowner *lo;
spin_lock(&clp->cl_lock);
lo = find_lockowner_str_locked(clp, owner);
spin_unlock(&clp->cl_lock);
return lo;
}
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
unhash_lockowner_locked(lockowner(sop));
}
static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
struct nfs4_lockowner *lo = lockowner(sop);
kmem_cache_free(lockowner_slab, lo);
}
static const struct nfs4_stateowner_operations lockowner_ops = {
.so_unhash = nfs4_unhash_lockowner,
.so_free = nfs4_free_lockowner,
};
/*
* Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
 * occurred.
*
* strhashval = ownerstr_hashval
*/
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
struct nfs4_ol_stateid *open_stp,
struct nfsd4_lock *lock)
{
struct nfs4_lockowner *lo, *ret;
lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
if (!lo)
return NULL;
INIT_LIST_HEAD(&lo->lo_blocked);
INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
lo->lo_owner.so_is_open_owner = 0;
lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
lo->lo_owner.so_ops = &lockowner_ops;
spin_lock(&clp->cl_lock);
ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
if (ret == NULL) {
list_add(&lo->lo_owner.so_strhash,
&clp->cl_ownerstr_hashtbl[strhashval]);
ret = lo;
} else
nfs4_free_stateowner(&lo->lo_owner);
spin_unlock(&clp->cl_lock);
return ret;
}
static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
const struct nfs4_ol_stateid *ost)
{
struct nfs4_ol_stateid *lst;
lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
/* If ost is not hashed, ost->st_locks will not be valid */
if (!nfs4_ol_stateid_unhashed(ost))
list_for_each_entry(lst, &ost->st_locks, st_locks) {
if (lst->st_stateowner == &lo->lo_owner) {
refcount_inc(&lst->st_stid.sc_count);
return lst;
}
}
return NULL;
}
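/*
 * Initialize @stp as a lock stateid under @open_stp and hash it.  If a
 * parallel thread hashed a lock stateid for the same lockowner first,
 * return that one instead; returns NULL if the open stateid was
 * unhashed in the meantime.
 */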
static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
struct nfs4_file *fp, struct inode *inode,
struct nfs4_ol_stateid *open_stp)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
struct nfs4_ol_stateid *retstp;
mutex_init(&stp->st_mutex);
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&clp->cl_lock);
if (nfs4_ol_stateid_unhashed(open_stp))
goto out_close;
retstp = find_lock_stateid(lo, open_stp);
if (retstp)
goto out_found;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
spin_lock(&fp->fi_lock);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
spin_unlock(&fp->fi_lock);
spin_unlock(&clp->cl_lock);
return stp;
out_found:
spin_unlock(&clp->cl_lock);
if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
nfs4_put_stid(&retstp->st_stid);
goto retry;
}
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
return retstp;
out_close:
spin_unlock(&clp->cl_lock);
mutex_unlock(&stp->st_mutex);
return NULL;
}
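/*
 * Find the lock stateid for this lockowner under @ost, or allocate and
 * initialize a new one.  *@new is set if the freshly allocated stateid
 * was the one hashed.
 */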
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
struct inode *inode, struct nfs4_ol_stateid *ost,
bool *new)
{
struct nfs4_stid *ns = NULL;
struct nfs4_ol_stateid *lst;
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *clp = oo->oo_owner.so_client;
*new = false;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, ost);
spin_unlock(&clp->cl_lock);
if (lst != NULL) {
if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
goto out;
nfs4_put_stid(&lst->st_stid);
}
ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
if (ns == NULL)
return NULL;
lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
if (lst == openlockstateid(ns))
*new = true;
else
nfs4_put_stid(ns);
out:
return lst;
}
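/*
 * Returns nonzero if the requested lock range is invalid: a zero
 * length, or a length other than the "to EOF" value NFS4_MAX_UINT64
 * that would wrap past the end of the 64-bit offset space
 * (length > ~offset).
 */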
static int
check_lock_length(u64 offset, u64 length)
{
return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
(length > ~offset)));
}
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
struct nfs4_file *fp = lock_stp->st_stid.sc_file;
lockdep_assert_held(&fp->fi_lock);
if (test_access(access, lock_stp))
return;
__nfs4_file_get_access(fp, access);
set_access(access, lock_stp);
}
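/*
 * Find or create the lockowner and lock stateid for a LOCK request
 * that names a new lockowner.  For NFSv4.0, an existing lockowner must
 * present the matching lock seqid.
 */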
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *ost,
struct nfsd4_lock *lock,
struct nfs4_ol_stateid **plst, bool *new)
{
__be32 status;
struct nfs4_file *fi = ost->st_stid.sc_file;
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *cl = oo->oo_owner.so_client;
struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *lst;
unsigned int strhashval;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
strhashval = ownerstr_hashval(&lock->lk_new_owner);
lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
if (lo == NULL)
return nfserr_jukebox;
} else {
/* with an existing lockowner, seqids must be the same */
status = nfserr_bad_seqid;
if (!cstate->minorversion &&
lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
goto out;
}
lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
status = nfs_ok;
*plst = lst;
out:
nfs4_put_stateowner(&lo->lo_owner);
return status;
}
/*
* LOCK operation
*/
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_lock *lock = &u->lock;
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp = NULL;
struct nfs4_ol_stateid *open_stp = NULL;
struct nfs4_file *fp;
struct nfsd_file *nf = NULL;
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
__be32 status = 0;
int lkflg;
int err;
bool new = false;
unsigned char fl_type;
unsigned int fl_flags = FL_POSIX;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
(long long) lock->lk_offset,
(long long) lock->lk_length);
if (check_lock_length(lock->lk_offset, lock->lk_length))
return nfserr_inval;
if ((status = fh_verify(rqstp, &cstate->current_fh,
S_IFREG, NFSD_MAY_LOCK))) {
dprintk("NFSD: nfsd4_lock: permission denied!\n");
return status;
}
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
			/* See RFC 5661, sec. 18.10.3: given clientid is ignored: */
memcpy(&lock->lk_new_clientid,
&cstate->clp->cl_clientid,
sizeof(clientid_t));
/* validate and update open stateid and open seqid */
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
&open_stp, nn);
if (status)
goto out;
mutex_unlock(&open_stp->st_mutex);
open_sop = openowner(open_stp->st_stateowner);
status = nfserr_bad_stateid;
if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
&lock->lk_new_clientid))
goto out;
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
NFS4_LOCK_STID, &lock_stp, nn);
}
if (status)
goto out;
lock_sop = lockowner(lock_stp->st_stateowner);
lkflg = setlkflg(lock->lk_type);
status = nfs4_check_openmode(lock_stp, lkflg);
if (status)
goto out;
status = nfserr_grace;
if (locks_in_grace(net) && !lock->lk_reclaim)
goto out;
status = nfserr_no_grace;
if (!locks_in_grace(net) && lock->lk_reclaim)
goto out;
if (lock->lk_reclaim)
fl_flags |= FL_RECLAIM;
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_READ_LT:
spin_lock(&fp->fi_lock);
nf = find_readable_file_locked(fp);
if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
spin_unlock(&fp->fi_lock);
fl_type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_WRITE_LT:
spin_lock(&fp->fi_lock);
nf = find_writeable_file_locked(fp);
if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
spin_unlock(&fp->fi_lock);
fl_type = F_WRLCK;
break;
default:
status = nfserr_inval;
goto out;
}
if (!nf) {
status = nfserr_openmode;
goto out;
}
/*
* Most filesystems with their own ->lock operations will block
* the nfsd thread waiting to acquire the lock. That leads to
* deadlocks (we don't want every nfsd thread tied up waiting
* for file locks), so don't attempt blocking lock notifications
* on those filesystems:
*/
if (nf->nf_file->f_op->lock)
fl_flags &= ~FL_SLEEP;
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
if (!nbl) {
dprintk("NFSD: %s: unable to allocate block!\n", __func__);
status = nfserr_jukebox;
goto out;
}
file_lock = &nbl->nbl_lock;
file_lock->fl_type = fl_type;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = nf->nf_file;
file_lock->fl_flags = fl_flags;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = lock->lk_offset;
file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
nfs4_transform_lock_offset(file_lock);
conflock = locks_alloc_lock();
if (!conflock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto out;
}
if (fl_flags & FL_SLEEP) {
nbl->nbl_time = ktime_get_boottime_seconds();
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
kref_get(&nbl->nbl_kref);
spin_unlock(&nn->blocked_locks_lock);
}
err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
switch (err) {
case 0: /* success! */
nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
status = 0;
if (lock->lk_reclaim)
nn->somebody_reclaimed = true;
break;
case FILE_LOCK_DEFERRED:
kref_put(&nbl->nbl_kref, free_nbl);
nbl = NULL;
fallthrough;
case -EAGAIN: /* conflock holds conflicting lock */
status = nfserr_denied;
dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
nfs4_set_lock_denied(conflock, &lock->lk_denied);
break;
case -EDEADLK:
status = nfserr_deadlock;
break;
default:
dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
status = nfserrno(err);
break;
}
out:
if (nbl) {
/* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) {
spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list) &&
!list_empty(&nbl->nbl_lru)) {
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
kref_put(&nbl->nbl_kref, free_nbl);
}
			/* nbl can use one of its lists to be linked to a reaplist */
spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
}
if (nf)
nfsd_file_put(nf);
if (lock_stp) {
/* Bump seqid manually if the 4.0 replay owner is openowner */
if (cstate->replay_owner &&
cstate->replay_owner != &lock_sop->lo_owner &&
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
/*
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
if (status && new)
release_lock_stateid(lock_stp);
mutex_unlock(&lock_stp->st_mutex);
nfs4_put_stid(&lock_stp->st_stid);
}
if (open_stp)
nfs4_put_stid(&open_stp->st_stid);
nfsd4_bump_seqid(cstate, status);
if (conflock)
locks_free_lock(conflock);
return status;
}
/*
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
* so we do a temporary open here just to get an open file to pass to
* vfs_test_lock.
*/
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
struct nfsd_file *nf;
struct inode *inode;
__be32 err;
err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
if (err)
return err;
inode = fhp->fh_dentry->d_inode;
inode_lock(inode); /* to block new leases till after test_lock: */
err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
if (err)
goto out;
lock->fl_file = nf->nf_file;
err = nfserrno(vfs_test_lock(nf->nf_file, lock));
lock->fl_file = NULL;
out:
inode_unlock(inode);
nfsd_file_put(nf);
return err;
}
/*
* LOCKT operation
*/
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_lockt *lockt = &u->lockt;
struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo = NULL;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (locks_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
if (check_lock_length(lockt->lt_offset, lockt->lt_length))
return nfserr_inval;
if (!nfsd4_has_session(cstate)) {
status = set_client(&lockt->lt_clientid, cstate, nn);
if (status)
goto out;
}
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
goto out;
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto out;
}
switch (lockt->lt_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
file_lock->fl_type = F_RDLCK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
file_lock->fl_type = F_WRLCK;
break;
default:
dprintk("NFSD: nfs4_lockt: bad lock type!\n");
status = nfserr_inval;
goto out;
}
lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
if (lo)
file_lock->fl_owner = (fl_owner_t)lo;
file_lock->fl_pid = current->tgid;
file_lock->fl_flags = FL_POSIX;
file_lock->fl_start = lockt->lt_offset;
file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
nfs4_transform_lock_offset(file_lock);
status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
if (status)
goto out;
if (file_lock->fl_type != F_UNLCK) {
status = nfserr_denied;
nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
}
out:
if (lo)
nfs4_put_stateowner(&lo->lo_owner);
if (file_lock)
locks_free_lock(file_lock);
return status;
}
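/*
 * LOCKU operation
 */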
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_locku *locku = &u->locku;
struct nfs4_ol_stateid *stp;
struct nfsd_file *nf = NULL;
struct file_lock *file_lock = NULL;
__be32 status;
int err;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
(long long) locku->lu_offset,
(long long) locku->lu_length);
if (check_lock_length(locku->lu_offset, locku->lu_length))
return nfserr_inval;
status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
&locku->lu_stateid, NFS4_LOCK_STID,
&stp, nn);
if (status)
goto out;
nf = find_any_file(stp->st_stid.sc_file);
if (!nf) {
status = nfserr_lock_range;
goto put_stateid;
}
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto put_file;
}
file_lock->fl_type = F_UNLCK;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = nf->nf_file;
file_lock->fl_flags = FL_POSIX;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = locku->lu_offset;
file_lock->fl_end = last_byte_offset(locku->lu_offset,
locku->lu_length);
nfs4_transform_lock_offset(file_lock);
err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
if (err) {
dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
goto out_nfserr;
}
nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
nfsd_file_put(nf);
put_stateid:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
if (file_lock)
locks_free_lock(file_lock);
return status;
out_nfserr:
status = nfserrno(err);
goto put_file;
}
/*
* returns
* true: locks held by lockowner
* false: no locks held by lockowner
*/
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
struct file_lock *fl;
int status = false;
struct nfsd_file *nf = find_any_file(fp);
struct inode *inode;
struct file_lock_context *flctx;
if (!nf) {
/* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1);
return status;
}
inode = file_inode(nf->nf_file);
flctx = locks_inode_context(inode);
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
spin_lock(&flctx->flc_lock);
list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
if (fl->fl_owner == (fl_owner_t)lowner) {
status = true;
break;
}
}
spin_unlock(&flctx->flc_lock);
}
nfsd_file_put(nf);
return status;
}
/**
* nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
* @rqstp: RPC transaction
* @cstate: NFSv4 COMPOUND state
* @u: RELEASE_LOCKOWNER arguments
*
* The lockowner's so_count is bumped when a lock record is added
* or when copying a conflicting lock. The latter case is brief,
* but can lead to fleeting false positives when looking for
* locks-in-use.
*
* Return values:
* %nfs_ok: lockowner released or not found
* %nfserr_locks_held: lockowner still in use
* %nfserr_stale_clientid: clientid no longer active
* %nfserr_expired: clientid not recognized
*/
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_ol_stateid *stp;
struct nfs4_lockowner *lo;
struct nfs4_client *clp;
LIST_HEAD(reaplist);
__be32 status;
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
status = set_client(clid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
spin_lock(&clp->cl_lock);
lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
if (!lo) {
spin_unlock(&clp->cl_lock);
return nfs_ok;
}
if (atomic_read(&lo->lo_owner.so_count) != 2) {
spin_unlock(&clp->cl_lock);
nfs4_put_stateowner(&lo->lo_owner);
return nfserr_locks_held;
}
unhash_lockowner_locked(lo);
while (!list_empty(&lo->lo_owner.so_stateids)) {
stp = list_first_entry(&lo->lo_owner.so_stateids,
struct nfs4_ol_stateid,
st_perstateowner);
WARN_ON(!unhash_lock_stateid(stp));
put_ol_stateid_locked(stp, &reaplist);
}
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
remove_blocked_locks(lo);
nfs4_put_stateowner(&lo->lo_owner);
return nfs_ok;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}
bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
struct nfs4_client_reclaim *crp;
crp = nfsd4_find_reclaim_client(name, nn);
return (crp && crp->cr_clp);
}
/*
 * On failure, all bets for reclaim after a server reset are off; the
 * client will get nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
INIT_LIST_HEAD(&crp->cr_strhash);
list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
crp->cr_name.data = name.data;
crp->cr_name.len = name.len;
crp->cr_princhash.data = princhash.data;
crp->cr_princhash.len = princhash.len;
crp->cr_clp = NULL;
nn->reclaim_str_hashtbl_size++;
}
return crp;
}
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
list_del(&crp->cr_strhash);
kfree(crp->cr_name.data);
kfree(crp->cr_princhash.data);
kfree(crp);
nn->reclaim_str_hashtbl_size--;
}
void
nfs4_release_reclaim(struct nfsd_net *nn)
{
struct nfs4_client_reclaim *crp = NULL;
int i;
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
crp = list_entry(nn->reclaim_str_hashtbl[i].next,
struct nfs4_client_reclaim, cr_strhash);
nfs4_remove_reclaim_record(crp, nn);
}
}
WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
strhashval = clientstr_hashval(name);
list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
if (compare_blob(&crp->cr_name, &name) == 0) {
return crp;
}
}
return NULL;
}
__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
return nfserr_no_grace;
if (nfsd4_client_record_check(clp))
return nfserr_reclaim_bad;
return nfs_ok;
}
/*
* Since the lifetime of a delegation isn't limited to that of an open, a
* client may quite reasonably hang on to a delegation as long as it has
* the inode cached. This becomes an obvious problem the first time a
* client's inode cache approaches the size of the server's total memory.
*
* For now we avoid this problem by imposing a hard limit on the number
* of delegations, which varies according to the server's memory size.
*/
static void
set_max_delegations(void)
{
/*
* Allow at most 4 delegations per megabyte of RAM. Quick
* estimates suggest that in the worst case (where every delegation
* is for a different inode), a delegation could take about 1.5K,
* giving a worst case usage of about 6% of memory.
*/
max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
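/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): the shift
 * above is 20 - 2 - 12 = 6, so one delegation is allowed per 2^6 = 64 free
 * pages, i.e. per 256 KiB, which matches "4 delegations per megabyte".
 */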
static int nfs4_state_create_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!nn->conf_id_hashtbl)
goto err;
nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!nn->unconf_id_hashtbl)
goto err_unconf_id;
nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!nn->sessionid_hashtbl)
goto err_sessionid;
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
}
for (i = 0; i < SESSION_HASH_SIZE; i++)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
nn->boot_time = ktime_get_real_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->s2s_cp_lock);
idr_init(&nn->s2s_cp_stateids);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
get_net(net);
nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
goto err_shrinker;
return 0;
err_shrinker:
put_net(net);
kfree(nn->sessionid_hashtbl);
err_sessionid:
kfree(nn->unconf_id_hashtbl);
err_unconf_id:
kfree(nn->conf_id_hashtbl);
err:
return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
int i;
struct nfs4_client *clp = NULL;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->conf_id_hashtbl[i])) {
clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
destroy_client(clp);
}
}
WARN_ON(!list_empty(&nn->blocked_locks_lru));
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->unconf_id_hashtbl[i])) {
clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
destroy_client(clp);
}
}
kfree(nn->sessionid_hashtbl);
kfree(nn->unconf_id_hashtbl);
kfree(nn->conf_id_hashtbl);
put_net(net);
}
int
nfs4_state_start_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
ret = nfs4_state_create_net(net);
if (ret)
return ret;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
goto skip_grace;
printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
nn->nfsd4_grace, net->ns.inum);
trace_nfsd_grace_start(nn);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
return 0;
skip_grace:
printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
net->ns.inum);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
nfsd4_end_grace(nn);
return 0;
}
/* initialization to perform when the nfsd service is started: */
int
nfs4_state_start(void)
{
int ret;
ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
if (ret)
return ret;
ret = nfsd4_create_callback_queue();
if (ret) {
rhltable_destroy(&nfs4_file_rhltable);
return ret;
}
set_max_delegations();
return 0;
}
void
nfs4_state_shutdown_net(struct net *net)
{
struct nfs4_delegation *dp = NULL;
struct list_head *pos, *next, reaplist;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
unregister_shrinker(&nn->nfsd_client_shrinker);
cancel_work(&nn->nfsd_shrinker_work);
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
destroy_unhashed_deleg(dp);
}
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
nfsd4_ssc_shutdown_umount(nn);
#endif
}
void
nfs4_state_shutdown(void)
{
nfsd4_destroy_callback_queue();
rhltable_destroy(&nfs4_file_rhltable);
}
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
CURRENT_STATEID(stateid))
memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
if (cstate->minorversion) {
memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
}
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
/*
* functions to set current state id
*/
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
put_stateid(cstate, &u->open_downgrade.od_stateid);
}
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
put_stateid(cstate, &u->open.op_stateid);
}
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
put_stateid(cstate, &u->close.cl_stateid);
}
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
put_stateid(cstate, &u->lock.lk_resp_stateid);
}
/*
* functions to consume current state id
*/
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->open_downgrade.od_stateid);
}
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->delegreturn.dr_stateid);
}
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->free_stateid.fr_stateid);
}
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->setattr.sa_stateid);
}
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->close.cl_stateid);
}
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->locku.lu_stateid);
}
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->read.rd_stateid);
}
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
get_stateid(cstate, &u->write.wr_stateid);
}
/**
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @inode: file to be checked for a conflict
*
* This function is called when there is a conflict between a write
* delegation and a change/size GETATTR from another client. The server
* must either use the CB_GETATTR to get the current values of the
* attributes from the client that holds the delegation or recall the
* delegation before replying to the GETATTR. See RFC 8881 section
* 18.7.4.
*
 * The current implementation does not support CB_GETATTR yet. However,
 * CB_GETATTR support, which would avoid recalling the delegation, could
 * be added in follow-up work.
*
* Returns 0 if there is no conflict; otherwise an nfs_stat
* code is returned.
*/
__be32
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
{
__be32 status;
struct file_lock_context *ctx;
struct file_lock *fl;
struct nfs4_delegation *dp;
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_flags == FL_LAYOUT)
continue;
if (fl->fl_lmops != &nfsd_lease_mng_ops) {
/*
* non-nfs lease, if it's a lease with F_RDLCK then
* we are done; there isn't any write delegation
* on this inode
*/
if (fl->fl_type == F_RDLCK)
break;
goto break_lease;
}
if (fl->fl_type == F_WRLCK) {
dp = fl->fl_owner;
if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
spin_unlock(&ctx->flc_lock);
return 0;
}
break_lease:
spin_unlock(&ctx->flc_lock);
nfsd_stats_wdeleg_getattr_inc();
status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
if (status != nfserr_jukebox ||
!nfsd_wait_for_delegreturn(rqstp, inode))
return status;
return 0;
}
break;
}
spin_unlock(&ctx->flc_lock);
return 0;
}
| linux-master | fs/nfsd/nfs4state.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Process version 2 NFSACL requests.
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <[email protected]>
*/
#include "nfsd.h"
/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
#include <linux/gfp.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
/*
* NULL call.
*/
static __be32
nfsacld_proc_null(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Get the Access and/or Default ACL of a file.
*/
static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
{
struct nfsd3_getaclargs *argp = rqstp->rq_argp;
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct posix_acl *acl;
struct inode *inode;
svc_fh *fh;
dprintk("nfsd: GETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));
fh = fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
if (resp->status != nfs_ok)
goto out;
inode = d_inode(fh->fh_dentry);
if (argp->mask & ~NFS_ACL_MASK) {
resp->status = nfserr_inval;
goto out;
}
resp->mask = argp->mask;
resp->status = fh_getattr(fh, &resp->stat);
if (resp->status != nfs_ok)
goto out;
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (acl == NULL) {
/* Solaris returns the inode's minimum ACL. */
acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
}
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
}
resp->acl_access = acl;
}
if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
/* Check how Solaris handles requests for the Default ACL
of a non-directory! */
acl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
}
resp->acl_default = acl;
}
	/* resp->acl_{access,default} are released in nfsaclsvc_release_getacl. */
out:
return rpc_success;
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
goto out;
}
/*
* Set the Access and/or Default ACL of a file.
*/
static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
{
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
struct inode *inode;
svc_fh *fh;
int error;
dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));
fh = fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR);
if (resp->status != nfs_ok)
goto out;
inode = d_inode(fh->fh_dentry);
error = fh_want_write(fh);
if (error)
goto out_errno;
inode_lock(inode);
error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_ACCESS,
argp->acl_access);
if (error)
goto out_drop_lock;
error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_DEFAULT,
argp->acl_default);
if (error)
goto out_drop_lock;
inode_unlock(inode);
fh_drop_write(fh);
resp->status = fh_getattr(fh, &resp->stat);
out:
	/* argp->acl_{access,default} may have been allocated in
	   nfsaclsvc_decode_setaclargs. */
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
return rpc_success;
out_drop_lock:
inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
goto out;
}
/*
* Check file attributes
*/
static __be32 nfsacld_proc_getattr(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* Check file access
*/
static __be32 nfsacld_proc_access(struct svc_rqst *rqstp)
{
struct nfsd3_accessargs *argp = rqstp->rq_argp;
struct nfsd3_accessres *resp = rqstp->rq_resp;
dprintk("nfsd: ACCESS(2acl) %s 0x%x\n",
SVCFH_fmt(&argp->fh),
argp->access);
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
resp->status = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* XDR decode functions
*/
static bool
nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_getaclargs *argp = rqstp->rq_argp;
if (!svcxdr_decode_fhandle(xdr, &argp->fh))
return false;
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return false;
return true;
}
static bool
nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
if (!svcxdr_decode_fhandle(xdr, &argp->fh))
return false;
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return false;
if (argp->mask & ~NFS_ACL_MASK)
return false;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
&argp->acl_access : NULL))
return false;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL))
return false;
return true;
}
static bool
nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_accessargs *args = rqstp->rq_argp;
if (!svcxdr_decode_fhandle(xdr, &args->fh))
return false;
if (xdr_stream_decode_u32(xdr, &args->access) < 0)
return false;
return true;
}
/*
* XDR encode functions
*/
/* GETACL */
static bool
nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
struct inode *inode;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
if (dentry == NULL || d_really_is_negative(dentry))
return true;
inode = d_inode(dentry);
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
return false;
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
resp->mask & NFS_ACL, 0))
return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
return false;
return true;
}
/* ACCESS */
static bool
nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_accessres *resp = rqstp->rq_resp;
if (!svcxdr_encode_stat(xdr, resp->status))
return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
return false;
if (xdr_stream_encode_u32(xdr, resp->access) < 0)
return false;
break;
}
return true;
}
/*
* XDR release functions
*/
static void nfsaclsvc_release_getacl(struct svc_rqst *rqstp)
{
struct nfsd3_getaclres *resp = rqstp->rq_resp;
fh_put(&resp->fh);
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
}
static void nfsaclsvc_release_access(struct svc_rqst *rqstp)
{
struct nfsd3_accessres *resp = rqstp->rq_resp;
fh_put(&resp->fh);
}
struct nfsd3_voidargs { int dummy; };
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */
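/*
 * These are reply-size estimates in 32-bit XDR words (the RPC layer
 * reserves roughly pc_xdrressize << 2 bytes of reply space). E.g. the
 * GETACL entry below budgets ST+1+2*(1+ACL) words as a rough worst case
 * for a reply carrying both an access and a default ACL.
 */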
static const struct svc_procedure nfsd_acl_procedures2[5] = {
[ACLPROC2_NULL] = {
.pc_func = nfsacld_proc_null,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
.pc_name = "NULL",
},
[ACLPROC2_GETACL] = {
.pc_func = nfsacld_proc_getacl,
.pc_decode = nfsaclsvc_decode_getaclargs,
.pc_encode = nfsaclsvc_encode_getaclres,
.pc_release = nfsaclsvc_release_getacl,
.pc_argsize = sizeof(struct nfsd3_getaclargs),
.pc_argzero = sizeof(struct nfsd3_getaclargs),
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
.pc_name = "GETACL",
},
[ACLPROC2_SETACL] = {
.pc_func = nfsacld_proc_setacl,
.pc_decode = nfsaclsvc_decode_setaclargs,
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd3_setaclargs),
.pc_argzero = sizeof(struct nfsd3_setaclargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "SETACL",
},
[ACLPROC2_GETATTR] = {
.pc_func = nfsacld_proc_getattr,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "GETATTR",
},
[ACLPROC2_ACCESS] = {
.pc_func = nfsacld_proc_access,
.pc_decode = nfsaclsvc_decode_accessargs,
.pc_encode = nfsaclsvc_encode_accessres,
.pc_release = nfsaclsvc_release_access,
.pc_argsize = sizeof(struct nfsd3_accessargs),
.pc_argzero = sizeof(struct nfsd3_accessargs),
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1,
.pc_name = "SETATTR",
},
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)]);
const struct svc_version nfsd_acl_version2 = {
.vs_vers = 2,
.vs_nproc = ARRAY_SIZE(nfsd_acl_procedures2),
.vs_proc = nfsd_acl_procedures2,
.vs_count = nfsd_acl_count2,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
};
| linux-master | fs/nfsd/nfs2acl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NFS exporting and validation.
*
* We maintain a list of clients, each of which has a list of
* exports. To export an fs to a given client, you first have
* to create the client entry with NFSCTL_ADDCLIENT, which
* creates a client control block and adds it to the hash
* table. Then, you call NFSCTL_EXPORT for each fs.
*
*
* Copyright (C) 1995, 1996 Olaf Kirch, <[email protected]>
*/
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/sunrpc/svc_xprt.h>
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
/*
* We have two caches.
* One maps client+vfsmnt+dentry to export options - the export map
* The other maps client+filehandle-fragment to export options. - the expkey map
*
* The export options are actually stored in the first map, and the
* second map contains a reference to the entry in the first map.
*/
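/*
 * A filehandle lookup is therefore two-stage, roughly (a sketch; see
 * exp_find_key() and exp_get_by_name() below for the real thing):
 *
 *	ek  = exp_find_key(cd, client, fsid_type, fsidv, ...);	(second map)
 *	exp = exp_get_by_name(cd, client, &ek->ek_path, ...);	(first map)
 *
 * i.e. the filehandle's fsid fragment resolves to a path, and the path
 * plus client resolves to the export options.
 */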
#define EXPKEY_HASHBITS 8
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX - 1)
static void expkey_put(struct kref *ref)
{
struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
kfree_rcu(key, ek_rcu);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
{
return sunrpc_cache_pipe_upcall(cd, h);
}
static void expkey_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
{
/* client fsidtype \xfsid */
struct svc_expkey *ek = container_of(h, struct svc_expkey, h);
char type[5];
qword_add(bpp, blen, ek->ek_client->name);
snprintf(type, 5, "%d", ek->ek_fsidtype);
qword_add(bpp, blen, type);
qword_addhex(bpp, blen, (char*)ek->ek_fsid, key_len(ek->ek_fsidtype));
(*bpp)[-1] = '\n';
}
static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
struct svc_expkey *old);
static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
{
/* client fsidtype fsid expiry [path] */
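	/*
	 * Hypothetical example of such a message (illustrative only):
	 *
	 *	"*.example.com 1 \x00000001 2147483647 /export\n"
	 *
	 * i.e. auth domain, fsidtype, a hex-encoded fsid of key_len()
	 * bytes, an expiry time, and the exported path. An empty path
	 * makes the entry negative.
	 */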
char *buf;
int len;
struct auth_domain *dom = NULL;
int err;
int fsidtype;
char *ep;
struct svc_expkey key;
struct svc_expkey *ek = NULL;
if (mesg[mlen - 1] != '\n')
return -EINVAL;
mesg[mlen-1] = 0;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
err = -ENOMEM;
if (!buf)
goto out;
err = -EINVAL;
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
err = -ENOENT;
dom = auth_domain_find(buf);
if (!dom)
goto out;
dprintk("found domain %s\n", buf);
err = -EINVAL;
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
fsidtype = simple_strtoul(buf, &ep, 10);
if (*ep)
goto out;
dprintk("found fsidtype %d\n", fsidtype);
if (key_len(fsidtype)==0) /* invalid type */
goto out;
if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
goto out;
dprintk("found fsid length %d\n", len);
if (len != key_len(fsidtype))
goto out;
/* OK, we seem to have a valid key */
key.h.flags = 0;
err = get_expiry(&mesg, &key.h.expiry_time);
if (err)
goto out;
key.ek_client = dom;
key.ek_fsidtype = fsidtype;
memcpy(key.ek_fsid, buf, len);
ek = svc_expkey_lookup(cd, &key);
err = -ENOMEM;
if (!ek)
goto out;
/* now we want a pathname, or empty meaning NEGATIVE */
err = -EINVAL;
len = qword_get(&mesg, buf, PAGE_SIZE);
if (len < 0)
goto out;
dprintk("Path seems to be <%s>\n", buf);
err = 0;
if (len == 0) {
set_bit(CACHE_NEGATIVE, &key.h.flags);
ek = svc_expkey_update(cd, &key, ek);
if (ek)
trace_nfsd_expkey_update(ek, NULL);
else
err = -ENOMEM;
} else {
err = kern_path(buf, 0, &key.ek_path);
if (err)
goto out;
dprintk("Found the path %s\n", buf);
ek = svc_expkey_update(cd, &key, ek);
if (ek)
trace_nfsd_expkey_update(ek, buf);
else
err = -ENOMEM;
path_put(&key.ek_path);
}
cache_flush();
out:
if (ek)
cache_put(&ek->h, cd);
if (dom)
auth_domain_put(dom);
kfree(buf);
return err;
}
static int expkey_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
	struct svc_expkey *ek;
	int i;
	if (h == NULL) {
seq_puts(m, "#domain fsidtype fsid [path]\n");
return 0;
}
ek = container_of(h, struct svc_expkey, h);
seq_printf(m, "%s %d 0x", ek->ek_client->name,
ek->ek_fsidtype);
for (i=0; i < key_len(ek->ek_fsidtype)/4; i++)
seq_printf(m, "%08x", ek->ek_fsid[i]);
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags)) {
seq_printf(m, " ");
seq_path(m, &ek->ek_path, "\\ \t\n");
}
seq_printf(m, "\n");
return 0;
}
static inline int expkey_match (struct cache_head *a, struct cache_head *b)
{
struct svc_expkey *orig = container_of(a, struct svc_expkey, h);
struct svc_expkey *new = container_of(b, struct svc_expkey, h);
if (orig->ek_fsidtype != new->ek_fsidtype ||
orig->ek_client != new->ek_client ||
memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0)
return 0;
return 1;
}
static inline void expkey_init(struct cache_head *cnew,
struct cache_head *citem)
{
struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
kref_get(&item->ek_client->ref);
new->ek_client = item->ek_client;
new->ek_fsidtype = item->ek_fsidtype;
memcpy(new->ek_fsid, item->ek_fsid, sizeof(new->ek_fsid));
}
static inline void expkey_update(struct cache_head *cnew,
struct cache_head *citem)
{
struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
new->ek_path = item->ek_path;
path_get(&item->ek_path);
}
static struct cache_head *expkey_alloc(void)
{
struct svc_expkey *i = kmalloc(sizeof(*i), GFP_KERNEL);
if (i)
return &i->h;
else
return NULL;
}
static void expkey_flush(void)
{
/*
* Take the nfsd_mutex here to ensure that the file cache is not
* destroyed while we're in the middle of flushing.
*/
mutex_lock(&nfsd_mutex);
nfsd_file_cache_purge(current->nsproxy->net_ns);
mutex_unlock(&nfsd_mutex);
}
static const struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
.cache_put = expkey_put,
.cache_upcall = expkey_upcall,
.cache_request = expkey_request,
.cache_parse = expkey_parse,
.cache_show = expkey_show,
.match = expkey_match,
.init = expkey_init,
.update = expkey_update,
.alloc = expkey_alloc,
.flush = expkey_flush,
};
static int
svc_expkey_hash(struct svc_expkey *item)
{
int hash = item->ek_fsidtype;
char * cp = (char*)item->ek_fsid;
int len = key_len(item->ek_fsidtype);
hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
hash &= EXPKEY_HASHMASK;
return hash;
}
static struct svc_expkey *
svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
{
struct cache_head *ch;
int hash = svc_expkey_hash(item);
ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
return NULL;
}
static struct svc_expkey *
svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
struct svc_expkey *old)
{
struct cache_head *ch;
int hash = svc_expkey_hash(new);
ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
return NULL;
}
#define EXPORT_HASHBITS 8
#define EXPORT_HASHMAX (1 << EXPORT_HASHBITS)
static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
{
struct nfsd4_fs_location *locations = fsloc->locations;
int i;
if (!locations)
return;
for (i = 0; i < fsloc->locations_count; i++) {
kfree(locations[i].path);
kfree(locations[i].hosts);
}
kfree(locations);
fsloc->locations = NULL;
}
static int export_stats_init(struct export_stats *stats)
{
stats->start_time = ktime_get_seconds();
return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void export_stats_reset(struct export_stats *stats)
{
nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void export_stats_destroy(struct export_stats *stats)
{
nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(&exp->ex_stats);
kfree(exp->ex_uuid);
kfree_rcu(exp, ex_rcu);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
{
return sunrpc_cache_pipe_upcall(cd, h);
}
static void svc_export_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
{
/* client path */
struct svc_export *exp = container_of(h, struct svc_export, h);
char *pth;
qword_add(bpp, blen, exp->ex_client->name);
pth = d_path(&exp->ex_path, *bpp, *blen);
if (IS_ERR(pth)) {
/* is this correct? */
(*bpp)[0] = '\n';
return;
}
qword_add(bpp, blen, pth);
(*bpp)[-1] = '\n';
}
static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
static int check_export(struct path *path, int *flags, unsigned char *uuid)
{
struct inode *inode = d_inode(path->dentry);
/*
* We currently export only dirs, regular files, and (for v4
* pseudoroot) symlinks.
*/
if (!S_ISDIR(inode->i_mode) &&
!S_ISLNK(inode->i_mode) &&
!S_ISREG(inode->i_mode))
return -ENOTDIR;
/*
* Mountd should never pass down a writeable V4ROOT export, but,
* just to make sure:
*/
if (*flags & NFSEXP_V4ROOT)
*flags |= NFSEXP_READONLY;
	/* There are three requirements on a filesystem to be exportable.
* 1: We must be able to identify the filesystem from a number.
* either a device number (so FS_REQUIRES_DEV needed)
* or an FSID number (so NFSEXP_FSID or ->uuid is needed).
* 2: We must be able to find an inode from a filehandle.
* This means that s_export_op must be set.
* 3: We must not currently be on an idmapped mount.
*/
if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) &&
!(*flags & NFSEXP_FSID) &&
uuid == NULL) {
dprintk("exp_export: export of non-dev fs without fsid\n");
return -EINVAL;
}
if (!inode->i_sb->s_export_op ||
!inode->i_sb->s_export_op->fh_to_dentry) {
dprintk("exp_export: export of invalid fs type.\n");
return -EINVAL;
}
if (is_idmapped_mnt(path->mnt)) {
dprintk("exp_export: export of idmapped mounts not yet supported.\n");
return -EINVAL;
}
if (inode->i_sb->s_export_op->flags & EXPORT_OP_NOSUBTREECHK &&
!(*flags & NFSEXP_NOSUBTREECHECK)) {
dprintk("%s: %s does not support subtree checking!\n",
__func__, inode->i_sb->s_type->name);
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_NFSD_V4
static int
fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc)
{
int len;
int migrated, i, err;
/* more than one fsloc */
if (fsloc->locations)
return -EINVAL;
/* listsize */
err = get_uint(mesg, &fsloc->locations_count);
if (err)
return err;
if (fsloc->locations_count > MAX_FS_LOCATIONS)
return -EINVAL;
if (fsloc->locations_count == 0)
return 0;
fsloc->locations = kcalloc(fsloc->locations_count,
sizeof(struct nfsd4_fs_location),
GFP_KERNEL);
if (!fsloc->locations)
return -ENOMEM;
for (i=0; i < fsloc->locations_count; i++) {
/* colon separated host list */
err = -EINVAL;
len = qword_get(mesg, buf, PAGE_SIZE);
if (len <= 0)
goto out_free_all;
err = -ENOMEM;
fsloc->locations[i].hosts = kstrdup(buf, GFP_KERNEL);
if (!fsloc->locations[i].hosts)
goto out_free_all;
err = -EINVAL;
/* slash separated path component list */
len = qword_get(mesg, buf, PAGE_SIZE);
if (len <= 0)
goto out_free_all;
err = -ENOMEM;
fsloc->locations[i].path = kstrdup(buf, GFP_KERNEL);
if (!fsloc->locations[i].path)
goto out_free_all;
}
/* migrated */
err = get_int(mesg, &migrated);
if (err)
goto out_free_all;
err = -EINVAL;
if (migrated < 0 || migrated > 1)
goto out_free_all;
fsloc->migrated = migrated;
return 0;
out_free_all:
nfsd4_fslocs_free(fsloc);
return err;
}
static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
{
struct exp_flavor_info *f;
u32 listsize;
int err;
/* more than one secinfo */
if (exp->ex_nflavors)
return -EINVAL;
err = get_uint(mesg, &listsize);
if (err)
return err;
if (listsize > MAX_SECINFO_LIST)
return -EINVAL;
for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
err = get_uint(mesg, &f->pseudoflavor);
if (err)
return err;
/*
* XXX: It would be nice to also check whether this
* pseudoflavor is supported, so we can discover the
* problem at export time instead of when a client fails
* to authenticate.
*/
err = get_uint(mesg, &f->flags);
if (err)
return err;
/* Only some flags are allowed to differ between flavors: */
if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags))
return -EINVAL;
}
exp->ex_nflavors = listsize;
return 0;
}
#else /* CONFIG_NFSD_V4 */
static inline int
fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;}
static inline int
secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
#endif
static int xprtsec_parse(char **mesg, char *buf, struct svc_export *exp)
{
unsigned int i, mode, listsize;
int err;
err = get_uint(mesg, &listsize);
if (err)
return err;
if (listsize > NFSEXP_XPRTSEC_NUM)
return -EINVAL;
exp->ex_xprtsec_modes = 0;
for (i = 0; i < listsize; i++) {
err = get_uint(mesg, &mode);
if (err)
return err;
if (mode > NFSEXP_XPRTSEC_MTLS)
return -EINVAL;
exp->ex_xprtsec_modes |= mode;
}
return 0;
}
static inline int
nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
{
int len;
/* more than one uuid */
if (*puuid)
return -EINVAL;
/* expect a 16 byte uuid encoded as \xXXXX... */
len = qword_get(mesg, buf, PAGE_SIZE);
if (len != EX_UUID_LEN)
return -EINVAL;
*puuid = kmemdup(buf, EX_UUID_LEN, GFP_KERNEL);
if (*puuid == NULL)
return -ENOMEM;
return 0;
}
static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
{
/* client path expiry [flags anonuid anongid fsid] */
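	/*
	 * Hypothetical example of such a message (illustrative only):
	 *
	 *	"*.example.com /export 2147483647 16385 65534 65534 0\n"
	 *
	 * i.e. client, path, expiry, flag bits, anonuid, anongid, fsid,
	 * optionally followed by "fsloc", "uuid", "secinfo" or "xprtsec"
	 * sections. An entry with only client/path/expiry is negative.
	 */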
char *buf;
int err;
struct auth_domain *dom = NULL;
struct svc_export exp = {}, *expp;
int an_int;
if (mesg[mlen-1] != '\n')
return -EINVAL;
mesg[mlen-1] = 0;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* client */
err = -EINVAL;
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
err = -ENOENT;
dom = auth_domain_find(buf);
if (!dom)
goto out;
/* path */
err = -EINVAL;
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out1;
err = kern_path(buf, 0, &exp.ex_path);
if (err)
goto out1;
exp.ex_client = dom;
exp.cd = cd;
exp.ex_devid_map = NULL;
exp.ex_xprtsec_modes = NFSEXP_XPRTSEC_ALL;
/* expiry */
err = get_expiry(&mesg, &exp.h.expiry_time);
if (err)
goto out3;
/* flags */
err = get_int(&mesg, &an_int);
if (err == -ENOENT) {
err = 0;
set_bit(CACHE_NEGATIVE, &exp.h.flags);
} else {
if (err || an_int < 0)
goto out3;
		exp.ex_flags = an_int;
/* anon uid */
err = get_int(&mesg, &an_int);
if (err)
goto out3;
		exp.ex_anon_uid = make_kuid(current_user_ns(), an_int);
/* anon gid */
err = get_int(&mesg, &an_int);
if (err)
goto out3;
		exp.ex_anon_gid = make_kgid(current_user_ns(), an_int);
/* fsid */
err = get_int(&mesg, &an_int);
if (err)
goto out3;
exp.ex_fsid = an_int;
while (qword_get(&mesg, buf, PAGE_SIZE) > 0) {
if (strcmp(buf, "fsloc") == 0)
err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
else if (strcmp(buf, "uuid") == 0)
err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
else if (strcmp(buf, "secinfo") == 0)
err = secinfo_parse(&mesg, buf, &exp);
else if (strcmp(buf, "xprtsec") == 0)
err = xprtsec_parse(&mesg, buf, &exp);
else
/* quietly ignore unknown words and anything
* following. Newer user-space can try to set
* new values, then see what the result was.
*/
break;
if (err)
goto out4;
}
err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid);
if (err)
goto out4;
/*
* No point caching this if it would immediately expire.
* Also, this protects exportfs's dummy export from the
* anon_uid/anon_gid checks:
*/
if (exp.h.expiry_time < seconds_since_boot())
goto out4;
/*
* For some reason exportfs has been passing down an
* invalid (-1) uid & gid on the "dummy" export which it
* uses to test export support. To make sure exportfs
* sees errors from check_export we therefore need to
* delay these checks till after check_export:
*/
err = -EINVAL;
if (!uid_valid(exp.ex_anon_uid))
goto out4;
if (!gid_valid(exp.ex_anon_gid))
goto out4;
err = 0;
nfsd4_setup_layout_type(&exp);
}
expp = svc_export_lookup(&exp);
if (!expp) {
err = -ENOMEM;
goto out4;
}
expp = svc_export_update(&exp, expp);
if (expp) {
trace_nfsd_export_update(expp);
cache_flush();
exp_put(expp);
} else
err = -ENOMEM;
out4:
nfsd4_fslocs_free(&exp.ex_fslocs);
kfree(exp.ex_uuid);
out3:
path_put(&exp.ex_path);
out1:
auth_domain_put(dom);
out:
kfree(buf);
return err;
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs);
static void show_secinfo(struct seq_file *m, struct svc_export *exp);
static int is_export_stats_file(struct seq_file *m)
{
/*
* The export_stats file uses the same ops as the exports file.
* We use the file's name to determine the reported info per export.
	 * There is no rename in nfsdfs, so d_name.name is stable.
*/
return !strcmp(m->file->f_path.dentry->d_name.name, "export_stats");
}
static int svc_export_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
struct svc_export *exp;
bool export_stats = is_export_stats_file(m);
if (h == NULL) {
if (export_stats)
seq_puts(m, "#path domain start-time\n#\tstats\n");
else
seq_puts(m, "#path domain(flags)\n");
return 0;
}
exp = container_of(h, struct svc_export, h);
seq_path(m, &exp->ex_path, " \t\n\\");
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
if (export_stats) {
seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
seq_printf(m, "\tfh_stale: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
seq_printf(m, "\tio_read: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
seq_printf(m, "\tio_write: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
seq_putc(m, '\n');
return 0;
}
seq_putc(m, '(');
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags)) {
exp_flags(m, exp->ex_flags, exp->ex_fsid,
exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs);
if (exp->ex_uuid) {
int i;
seq_puts(m, ",uuid=");
for (i = 0; i < EX_UUID_LEN; i++) {
if ((i&3) == 0 && i)
seq_putc(m, ':');
seq_printf(m, "%02x", exp->ex_uuid[i]);
}
}
show_secinfo(m, exp);
}
seq_puts(m, ")\n");
return 0;
}
static int svc_export_match(struct cache_head *a, struct cache_head *b)
{
struct svc_export *orig = container_of(a, struct svc_export, h);
struct svc_export *new = container_of(b, struct svc_export, h);
return orig->ex_client == new->ex_client &&
path_equal(&orig->ex_path, &new->ex_path);
}
static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
{
struct svc_export *new = container_of(cnew, struct svc_export, h);
struct svc_export *item = container_of(citem, struct svc_export, h);
kref_get(&item->ex_client->ref);
new->ex_client = item->ex_client;
new->ex_path = item->ex_path;
path_get(&item->ex_path);
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
export_stats_reset(&new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
{
struct svc_export *new = container_of(cnew, struct svc_export, h);
struct svc_export *item = container_of(citem, struct svc_export, h);
int i;
new->ex_flags = item->ex_flags;
new->ex_anon_uid = item->ex_anon_uid;
new->ex_anon_gid = item->ex_anon_gid;
new->ex_fsid = item->ex_fsid;
new->ex_devid_map = item->ex_devid_map;
item->ex_devid_map = NULL;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
item->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
new->ex_layout_types = item->ex_layout_types;
new->ex_nflavors = item->ex_nflavors;
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
}
new->ex_xprtsec_modes = item->ex_xprtsec_modes;
}
static struct cache_head *svc_export_alloc(void)
{
struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL);
if (!i)
return NULL;
if (export_stats_init(&i->ex_stats)) {
kfree(i);
return NULL;
}
return &i->h;
}
static const struct cache_detail svc_export_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
.cache_put = svc_export_put,
.cache_upcall = svc_export_upcall,
.cache_request = svc_export_request,
.cache_parse = svc_export_parse,
.cache_show = svc_export_show,
.match = svc_export_match,
.init = svc_export_init,
.update = export_update,
.alloc = svc_export_alloc,
};
static int
svc_export_hash(struct svc_export *exp)
{
int hash;
hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
return hash;
}
static struct svc_export *
svc_export_lookup(struct svc_export *exp)
{
struct cache_head *ch;
int hash = svc_export_hash(exp);
ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
return NULL;
}
static struct svc_export *
svc_export_update(struct svc_export *new, struct svc_export *old)
{
struct cache_head *ch;
int hash = svc_export_hash(old);
ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
return NULL;
}
static struct svc_expkey *
exp_find_key(struct cache_detail *cd, struct auth_domain *clp, int fsid_type,
u32 *fsidv, struct cache_req *reqp)
{
struct svc_expkey key, *ek;
int err;
if (!clp)
return ERR_PTR(-ENOENT);
key.ek_client = clp;
key.ek_fsidtype = fsid_type;
memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
ek = svc_expkey_lookup(cd, &key);
if (ek == NULL)
return ERR_PTR(-ENOMEM);
err = cache_check(cd, &ek->h, reqp);
if (err) {
trace_nfsd_exp_find_key(&key, err);
return ERR_PTR(err);
}
return ek;
}
static struct svc_export *
exp_get_by_name(struct cache_detail *cd, struct auth_domain *clp,
const struct path *path, struct cache_req *reqp)
{
struct svc_export *exp, key;
int err;
if (!clp)
return ERR_PTR(-ENOENT);
key.ex_client = clp;
key.ex_path = *path;
key.cd = cd;
exp = svc_export_lookup(&key);
if (exp == NULL)
return ERR_PTR(-ENOMEM);
err = cache_check(cd, &exp->h, reqp);
if (err) {
trace_nfsd_exp_get_by_name(&key, err);
return ERR_PTR(err);
}
return exp;
}
/*
* Find the export entry for a given dentry.
*/
static struct svc_export *
exp_parent(struct cache_detail *cd, struct auth_domain *clp, struct path *path)
{
struct dentry *saved = dget(path->dentry);
struct svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
struct dentry *parent = dget_parent(path->dentry);
dput(path->dentry);
path->dentry = parent;
exp = exp_get_by_name(cd, clp, path, NULL);
}
dput(path->dentry);
path->dentry = saved;
return exp;
}
/*
* Obtain the root fh on behalf of a client.
* This could be done in user space, but I feel that it adds some safety
 * since it's harder to fool a kernel module than a user space program.
*/
int
exp_rootfh(struct net *net, struct auth_domain *clp, char *name,
struct knfsd_fh *f, int maxsize)
{
struct svc_export *exp;
struct path path;
struct inode *inode;
struct svc_fh fh;
int err;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
err = -EPERM;
/* NB: we probably ought to check that it's NUL-terminated */
if (kern_path(name, 0, &path)) {
printk("nfsd: exp_rootfh path not found %s", name);
return err;
}
inode = d_inode(path.dentry);
dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
name, path.dentry, clp->name,
inode->i_sb->s_id, inode->i_ino);
exp = exp_parent(cd, clp, &path);
if (IS_ERR(exp)) {
err = PTR_ERR(exp);
goto out;
}
/*
* fh must be initialized before calling fh_compose
*/
fh_init(&fh, maxsize);
if (fh_compose(&fh, exp, path.dentry, NULL))
err = -EINVAL;
else
err = 0;
memcpy(f, &fh.fh_handle, sizeof(struct knfsd_fh));
fh_put(&fh);
exp_put(exp);
out:
path_put(&path);
return err;
}
static struct svc_export *exp_find(struct cache_detail *cd,
struct auth_domain *clp, int fsid_type,
u32 *fsidv, struct cache_req *reqp)
{
struct svc_export *exp;
struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
if (IS_ERR(ek))
return ERR_CAST(ek);
exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
cache_put(&ek->h, nn->svc_expkey_cache);
if (IS_ERR(exp))
return ERR_CAST(exp);
return exp;
}
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
struct svc_xprt *xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
goto ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
!test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
}
goto denied;
ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
return 0;
/* ip-address based client; check sec= export option: */
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
return 0;
}
/* defaults in absence of sec= options: */
if (exp->ex_nflavors == 0) {
if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
return 0;
}
/* If the compound op contains a spo_must_allowed op,
* it will be sent with integrity/protection which
* will have to be expressly allowed on mounts that
* don't support it
*/
if (nfsd4_spo_must_allow(rqstp))
return 0;
denied:
return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
}
/*
* Uses rq_client and rq_gssclient to find an export; uses rq_client (an
* auth_unix client) if it's available and has secinfo information;
* otherwise, will try to use rq_gssclient.
*
* Called from functions that handle requests; functions that do work on
* behalf of mountd are passed a single client name to use, and should
* use exp_get_by_name() or exp_find().
*/
struct svc_export *
rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
goto gss;
/* First try the auth_unix client: */
exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
return exp;
/* If it has secinfo, assume there are no gss/... clients */
if (exp->ex_nflavors > 0)
return exp;
gss:
/* Otherwise, try falling back on gss client */
if (rqstp->rq_gssclient == NULL)
return exp;
gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
exp_put(exp);
return gssexp;
}
struct svc_export *
rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
goto gss;
/* First try the auth_unix client: */
exp = exp_find(cd, rqstp->rq_client, fsid_type,
fsidv, &rqstp->rq_chandle);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
return exp;
/* If it has secinfo, assume there are no gss/... clients */
if (exp->ex_nflavors > 0)
return exp;
gss:
/* Otherwise, try falling back on gss client */
if (rqstp->rq_gssclient == NULL)
return exp;
gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
&rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
exp_put(exp);
return gssexp;
}
struct svc_export *
rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
{
struct dentry *saved = dget(path->dentry);
struct svc_export *exp = rqst_exp_get_by_name(rqstp, path);
while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
struct dentry *parent = dget_parent(path->dentry);
dput(path->dentry);
path->dentry = parent;
exp = rqst_exp_get_by_name(rqstp, path);
}
dput(path->dentry);
path->dentry = saved;
return exp;
}
struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
{
u32 fsidv[2];
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
return rqst_exp_find(rqstp, FSID_NUM, fsidv);
}
/*
* Called when we need the filehandle for the root of the pseudofs,
* for a given NFSv4 client. The root is defined to be the
* export point with fsid==0
*/
__be32
exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
struct svc_export *exp;
__be32 rv;
exp = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
exp_put(exp);
return rv;
}
static struct flags {
int flag;
char *name[2];
} expflags[] = {
{ NFSEXP_READONLY, {"ro", "rw"}},
{ NFSEXP_INSECURE_PORT, {"insecure", ""}},
{ NFSEXP_ROOTSQUASH, {"root_squash", "no_root_squash"}},
{ NFSEXP_ALLSQUASH, {"all_squash", ""}},
{ NFSEXP_ASYNC, {"async", "sync"}},
{ NFSEXP_GATHERED_WRITES, {"wdelay", "no_wdelay"}},
{ NFSEXP_NOREADDIRPLUS, {"nordirplus", ""}},
{ NFSEXP_NOHIDE, {"nohide", ""}},
{ NFSEXP_CROSSMOUNT, {"crossmnt", ""}},
{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
{ NFSEXP_V4ROOT, {"v4root", ""}},
{ NFSEXP_PNFS, {"pnfs", ""}},
{ NFSEXP_SECURITY_LABEL, {"security_label", ""}},
{ 0, {"", ""}}
};
static void show_expflags(struct seq_file *m, int flags, int mask)
{
struct flags *flg;
int state, first = 0;
for (flg = expflags; flg->flag; flg++) {
if (flg->flag & ~mask)
continue;
state = (flg->flag & flags) ? 0 : 1;
if (*flg->name[state])
seq_printf(m, "%s%s", first++?",":"", flg->name[state]);
}
}
static void show_secinfo_flags(struct seq_file *m, int flags)
{
seq_printf(m, ",");
show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
}
static bool secinfo_flags_equal(int f, int g)
{
f &= NFSEXP_SECINFO_FLAGS;
g &= NFSEXP_SECINFO_FLAGS;
return f == g;
}
static int show_secinfo_run(struct seq_file *m, struct exp_flavor_info **fp, struct exp_flavor_info *end)
{
int flags;
flags = (*fp)->flags;
seq_printf(m, ",sec=%d", (*fp)->pseudoflavor);
(*fp)++;
while (*fp != end && secinfo_flags_equal(flags, (*fp)->flags)) {
seq_printf(m, ":%d", (*fp)->pseudoflavor);
(*fp)++;
}
return flags;
}
static void show_secinfo(struct seq_file *m, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
int flags;
if (exp->ex_nflavors == 0)
return;
f = exp->ex_flavors;
flags = show_secinfo_run(m, &f, end);
if (!secinfo_flags_equal(flags, exp->ex_flags))
show_secinfo_flags(m, flags);
while (f != end) {
flags = show_secinfo_run(m, &f, end);
show_secinfo_flags(m, flags);
}
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fsloc)
{
struct user_namespace *userns = m->file->f_cred->user_ns;
show_expflags(m, flag, NFSEXP_ALLFLAGS);
if (flag & NFSEXP_FSID)
seq_printf(m, ",fsid=%d", fsid);
if (!uid_eq(anonu, make_kuid(userns, (uid_t)-2)) &&
!uid_eq(anonu, make_kuid(userns, 0x10000-2)))
seq_printf(m, ",anonuid=%u", from_kuid_munged(userns, anonu));
if (!gid_eq(anong, make_kgid(userns, (gid_t)-2)) &&
!gid_eq(anong, make_kgid(userns, 0x10000-2)))
seq_printf(m, ",anongid=%u", from_kgid_munged(userns, anong));
if (fsloc && fsloc->locations_count > 0) {
char *loctype = (fsloc->migrated) ? "refer" : "replicas";
int i;
seq_printf(m, ",%s=", loctype);
seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\");
seq_putc(m, '@');
seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\");
for (i = 1; i < fsloc->locations_count; i++) {
seq_putc(m, ';');
seq_escape(m, fsloc->locations[i].path, ",;@ \t\n\\");
seq_putc(m, '@');
seq_escape(m, fsloc->locations[i].hosts, ",;@ \t\n\\");
}
}
}
static int e_show(struct seq_file *m, void *p)
{
struct cache_head *cp = p;
struct svc_export *exp = container_of(cp, struct svc_export, h);
struct cache_detail *cd = m->private;
bool export_stats = is_export_stats_file(m);
if (p == SEQ_START_TOKEN) {
seq_puts(m, "# Version 1.1\n");
if (export_stats)
seq_puts(m, "# Path Client Start-time\n#\tStats\n");
else
seq_puts(m, "# Path Client(Flags) # IPs\n");
return 0;
}
exp_get(exp);
if (cache_check(cd, &exp->h, NULL))
return 0;
exp_put(exp);
return svc_export_show(m, cd, cp);
}
const struct seq_operations nfs_exports_op = {
.start = cache_seq_start_rcu,
.next = cache_seq_next_rcu,
.stop = cache_seq_stop_rcu,
.show = e_show,
};
/*
* Initialize the exports module.
*/
int
nfsd_export_init(struct net *net)
{
int rv;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
if (IS_ERR(nn->svc_export_cache))
return PTR_ERR(nn->svc_export_cache);
rv = cache_register_net(nn->svc_export_cache, net);
if (rv)
goto destroy_export_cache;
nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
if (IS_ERR(nn->svc_expkey_cache)) {
rv = PTR_ERR(nn->svc_expkey_cache);
goto unregister_export_cache;
}
rv = cache_register_net(nn->svc_expkey_cache, net);
if (rv)
goto destroy_expkey_cache;
return 0;
destroy_expkey_cache:
cache_destroy_net(nn->svc_expkey_cache, net);
unregister_export_cache:
cache_unregister_net(nn->svc_export_cache, net);
destroy_export_cache:
cache_destroy_net(nn->svc_export_cache, net);
return rv;
}
/*
* Flush exports table - called when last nfsd thread is killed
*/
void
nfsd_export_flush(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
cache_purge(nn->svc_expkey_cache);
cache_purge(nn->svc_export_cache);
}
/*
* Shutdown the exports module.
*/
void
nfsd_export_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
cache_destroy_net(nn->svc_expkey_cache, net);
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
}
| linux-master | fs/nfsd/export.c |
/*
* Copyright (c) 2004 The Regents of the University of Michigan.
* Copyright (c) 2012 Jeff Layton <[email protected]>
* All rights reserved.
*
* Andy Adamson <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <crypto/hash.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/cld.h>
#include "nfsd.h"
#include "state.h"
#include "vfs.h"
#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
/* Declarations */
struct nfsd4_client_tracking_ops {
int (*init)(struct net *);
void (*exit)(struct net *);
void (*create)(struct nfs4_client *);
void (*remove)(struct nfs4_client *);
int (*check)(struct nfs4_client *);
void (*grace_done)(struct nfsd_net *);
uint8_t version;
size_t msglen;
};
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops;
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2;
/* Globals */
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static int
nfs4_save_creds(const struct cred **original_creds)
{
struct cred *new;
new = prepare_creds();
if (!new)
return -ENOMEM;
new->fsuid = GLOBAL_ROOT_UID;
new->fsgid = GLOBAL_ROOT_GID;
*original_creds = override_creds(new);
put_cred(new);
return 0;
}
static void
nfs4_reset_creds(const struct cred *original)
{
revert_creds(original);
}
static void
md5_to_hex(char *out, char *md5)
{
int i;
for (i=0; i<16; i++) {
unsigned char c = md5[i];
*out++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
*out++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
}
*out = '\0';
}
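/*
 * Example: for the byte 0xa5, the high nibble is 10 and 0xa5 >= 0xa0, so
 * the first output is '0' + 10 + ('a' - '9' - 1) = 'a'; the low nibble is
 * 5 (< 0xa), giving '0' + 5 = '5'. The byte thus prints as "a5".
 */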
static int
nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
{
struct xdr_netobj cksum;
struct crypto_shash *tfm;
int status;
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
status = PTR_ERR(tfm);
goto out_no_tfm;
}
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL) {
status = -ENOMEM;
goto out;
}
status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
cksum.data);
if (status)
goto out;
md5_to_hex(dname, cksum.data);
status = 0;
out:
kfree(cksum.data);
crypto_free_shash(tfm);
out_no_tfm:
return status;
}
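/*
 * The recovery directory name is thus the 32-character lowercase hex
 * encoding of the MD5 digest of the client identifier; HEXDIR_LEN (33)
 * leaves room for the trailing NUL.
 */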
/*
* If we had an error generating the recdir name for the legacy tracker
* then warn the admin. If the error doesn't appear to be transient,
* then disable recovery tracking.
*/
static void
legacy_recdir_name_error(struct nfs4_client *clp, int error)
{
printk(KERN_ERR "NFSD: unable to generate recoverydir "
"name (%d).\n", error);
/*
* If the algorithm just doesn't exist, then disable the recovery
* tracker altogether. The crypto libs will generally return this if
* FIPS is enabled as well.
*/
if (error == -ENOENT) {
printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
"Reboot recovery will not function correctly!\n");
nfsd4_client_tracking_exit(clp->net);
}
}
static void
__nfsd4_create_reclaim_record_grace(struct nfs4_client *clp,
const char *dname, int len, struct nfsd_net *nn)
{
struct xdr_netobj name;
struct xdr_netobj princhash = { .len = 0, .data = NULL };
struct nfs4_client_reclaim *crp;
name.data = kmemdup(dname, len, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
return;
}
name.len = len;
crp = nfs4_client_to_reclaim(name, princhash, nn);
if (!crp) {
kfree(name.data);
return;
}
crp->cr_clp = clp;
}
static void
nfsd4_create_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
char dname[HEXDIR_LEN];
struct dentry *dir, *dentry;
int status;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
if (!nn->rec_file)
return;
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
return legacy_recdir_name_error(clp, status);
status = nfs4_save_creds(&original_cred);
if (status < 0)
return;
status = mnt_want_write_file(nn->rec_file);
if (status)
goto out_creds;
dir = nn->rec_file->f_path.dentry;
/* lock the parent */
inode_lock(d_inode(dir));
dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
goto out_unlock;
}
if (d_really_is_positive(dentry))
/*
* In the 4.1 case, where we're called from
* reclaim_complete(), records from the previous reboot
* may still be left, so this is OK.
*
* In the 4.0 case, we should never get here; but we may
* as well be forgiving and just succeed silently.
*/
goto out_put;
status = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, S_IRWXU);
out_put:
dput(dentry);
out_unlock:
inode_unlock(d_inode(dir));
if (status == 0) {
if (nn->in_grace)
__nfsd4_create_reclaim_record_grace(clp, dname,
HEXDIR_LEN, nn);
vfs_fsync(nn->rec_file, 0);
} else {
printk(KERN_ERR "NFSD: failed to write recovery record"
" (err %d); please check that %s exists"
" and is writeable", status,
user_recovery_dirname);
}
mnt_drop_write_file(nn->rec_file);
out_creds:
nfs4_reset_creds(original_cred);
}
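/*
 * Callback type invoked by nfsd4_list_rec_dir() for each entry found in
 * the recovery directory; called with the directory inode locked.
 */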
typedef int (recdir_func)(struct dentry *, struct dentry *, struct nfsd_net *);
struct name_list {
char name[HEXDIR_LEN];
struct list_head list;
};
struct nfs4_dir_ctx {
struct dir_context ctx;
struct list_head names;
};
static bool
nfsd4_build_namelist(struct dir_context *__ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct nfs4_dir_ctx *ctx =
container_of(__ctx, struct nfs4_dir_ctx, ctx);
struct name_list *entry;
if (namlen != HEXDIR_LEN - 1)
return true;
entry = kmalloc(sizeof(struct name_list), GFP_KERNEL);
if (entry == NULL)
return false;
memcpy(entry->name, name, HEXDIR_LEN - 1);
entry->name[HEXDIR_LEN - 1] = '\0';
list_add(&entry->list, &ctx->names);
return true;
}
static int
nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
{
const struct cred *original_cred;
struct dentry *dir = nn->rec_file->f_path.dentry;
struct nfs4_dir_ctx ctx = {
.ctx.actor = nfsd4_build_namelist,
.names = LIST_HEAD_INIT(ctx.names)
};
struct name_list *entry, *tmp;
int status;
status = nfs4_save_creds(&original_cred);
if (status < 0)
return status;
status = vfs_llseek(nn->rec_file, 0, SEEK_SET);
if (status < 0) {
nfs4_reset_creds(original_cred);
return status;
}
status = iterate_dir(nn->rec_file, &ctx.ctx);
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
if (!status) {
struct dentry *dentry;
dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
break;
}
status = f(dir, dentry, nn);
dput(dentry);
}
list_del(&entry->list);
kfree(entry);
}
inode_unlock(d_inode(dir));
nfs4_reset_creds(original_cred);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
dprintk("NFSD: %s. Left entry %s\n", __func__, entry->name);
list_del(&entry->list);
kfree(entry);
}
return status;
}
static int
nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
{
struct dentry *dir, *dentry;
int status;
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
dir = nn->rec_file->f_path.dentry;
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
dentry = lookup_one_len(name, dir, namlen);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
goto out_unlock;
}
status = -ENOENT;
if (d_really_is_negative(dentry))
goto out;
status = vfs_rmdir(&nop_mnt_idmap, d_inode(dir), dentry);
out:
dput(dentry);
out_unlock:
inode_unlock(d_inode(dir));
return status;
}
static void
__nfsd4_remove_reclaim_record_grace(const char *dname, int len,
struct nfsd_net *nn)
{
struct xdr_netobj name;
struct nfs4_client_reclaim *crp;
name.data = kmemdup(dname, len, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
return;
}
name.len = len;
crp = nfsd4_find_reclaim_client(name, nn);
kfree(name.data);
if (crp)
nfs4_remove_reclaim_record(crp, nn);
}
static void
nfsd4_remove_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
char dname[HEXDIR_LEN];
int status;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (!nn->rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
return legacy_recdir_name_error(clp, status);
status = mnt_want_write_file(nn->rec_file);
if (status)
goto out;
clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
status = nfs4_save_creds(&original_cred);
if (status < 0)
goto out_drop_write;
status = nfsd4_unlink_clid_dir(dname, HEXDIR_LEN-1, nn);
nfs4_reset_creds(original_cred);
if (status == 0) {
vfs_fsync(nn->rec_file, 0);
if (nn->in_grace)
__nfsd4_remove_reclaim_record_grace(dname,
HEXDIR_LEN, nn);
}
out_drop_write:
mnt_drop_write_file(nn->rec_file);
out:
if (status)
printk("NFSD: Failed to remove expired client state directory"
" %.*s\n", HEXDIR_LEN, dname);
}
static int
purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
int status;
struct xdr_netobj name;
if (child->d_name.len != HEXDIR_LEN - 1) {
printk("%s: illegal name %pd in recovery directory\n",
__func__, child);
/* Keep trying; maybe the others are OK: */
return 0;
}
name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
goto out;
}
name.len = HEXDIR_LEN;
if (nfs4_has_reclaimed_state(name, nn))
goto out_free;
status = vfs_rmdir(&nop_mnt_idmap, d_inode(parent), child);
if (status)
printk("failed to remove client recovery directory %pd\n",
child);
out_free:
kfree(name.data);
out:
/* Keep trying, success or failure: */
return 0;
}
static void
nfsd4_recdir_purge_old(struct nfsd_net *nn)
{
int status;
nn->in_grace = false;
if (!nn->rec_file)
return;
status = mnt_want_write_file(nn->rec_file);
if (status)
goto out;
status = nfsd4_list_rec_dir(purge_old, nn);
if (status == 0)
vfs_fsync(nn->rec_file, 0);
mnt_drop_write_file(nn->rec_file);
out:
nfs4_release_reclaim(nn);
if (status)
printk("nfsd4: failed to purge old clients from recovery"
" directory %pD\n", nn->rec_file);
}
static int
load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
struct xdr_netobj name;
struct xdr_netobj princhash = { .len = 0, .data = NULL };
if (child->d_name.len != HEXDIR_LEN - 1) {
printk("%s: illegal name %pd in recovery directory\n",
__func__, child);
/* Keep trying; maybe the others are OK: */
return 0;
}
name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
goto out;
}
name.len = HEXDIR_LEN;
if (!nfs4_client_to_reclaim(name, princhash, nn))
kfree(name.data);
out:
return 0;
}
static int
nfsd4_recdir_load(struct net *net)
{
int status;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (!nn->rec_file)
return 0;
status = nfsd4_list_rec_dir(load_recdir, nn);
if (status)
printk("nfsd4: failed loading clients from recovery"
" directory %pD\n", nn->rec_file);
return status;
}
/*
* Hold reference to the recovery directory.
*/
static int
nfsd4_init_recdir(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
const struct cred *original_cred;
int status;
printk("NFSD: Using %s as the NFSv4 state recovery directory\n",
user_recovery_dirname);
BUG_ON(nn->rec_file);
status = nfs4_save_creds(&original_cred);
if (status < 0) {
printk("NFSD: Unable to change credentials to find recovery"
" directory: error %d\n",
status);
return status;
}
nn->rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
if (IS_ERR(nn->rec_file)) {
printk("NFSD: unable to find recovery directory %s\n",
user_recovery_dirname);
status = PTR_ERR(nn->rec_file);
nn->rec_file = NULL;
}
nfs4_reset_creds(original_cred);
if (!status)
nn->in_grace = true;
return status;
}
static void
nfsd4_shutdown_recdir(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (!nn->rec_file)
return;
fput(nn->rec_file);
nn->rec_file = NULL;
}
static int
nfs4_legacy_state_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
nn->reclaim_str_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!nn->reclaim_str_hashtbl)
return -ENOMEM;
for (i = 0; i < CLIENT_HASH_SIZE; i++)
INIT_LIST_HEAD(&nn->reclaim_str_hashtbl[i]);
nn->reclaim_str_hashtbl_size = 0;
return 0;
}
static void
nfs4_legacy_state_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
kfree(nn->reclaim_str_hashtbl);
}
static int
nfsd4_load_reboot_recovery_data(struct net *net)
{
int status;
status = nfsd4_init_recdir(net);
if (status)
return status;
status = nfsd4_recdir_load(net);
if (status)
nfsd4_shutdown_recdir(net);
return status;
}
static int
nfsd4_legacy_tracking_init(struct net *net)
{
int status;
/* XXX: The legacy code won't work in a container */
if (net != &init_net) {
pr_warn("NFSD: attempt to initialize legacy client tracking in a container ignored.\n");
return -EINVAL;
}
status = nfs4_legacy_state_init(net);
if (status)
return status;
status = nfsd4_load_reboot_recovery_data(net);
if (status)
goto err;
pr_info("NFSD: Using legacy client tracking operations.\n");
return 0;
err:
nfs4_legacy_state_shutdown(net);
return status;
}
static void
nfsd4_legacy_tracking_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs4_release_reclaim(nn);
nfsd4_shutdown_recdir(net);
nfs4_legacy_state_shutdown(net);
}
/*
* Change the NFSv4 recovery directory to recdir.
*/
int
nfs4_reset_recoverydir(char *recdir)
{
int status;
struct path path;
status = kern_path(recdir, LOOKUP_FOLLOW, &path);
if (status)
return status;
status = -ENOTDIR;
if (d_is_dir(path.dentry)) {
strcpy(user_recovery_dirname, recdir);
status = 0;
}
path_put(&path);
return status;
}
char *
nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
static int
nfsd4_check_legacy_client(struct nfs4_client *clp)
{
int status;
char dname[HEXDIR_LEN];
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct xdr_netobj name;
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status) {
legacy_recdir_name_error(clp, status);
return status;
}
/* look for it in the reclaim hashtable otherwise */
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
goto out_enoent;
}
name.len = HEXDIR_LEN;
crp = nfsd4_find_reclaim_client(name, nn);
kfree(name.data);
if (crp) {
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
crp->cr_clp = clp;
return 0;
}
out_enoent:
return -ENOENT;
}
static const struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
.init = nfsd4_legacy_tracking_init,
.exit = nfsd4_legacy_tracking_exit,
.create = nfsd4_create_clid_dir,
.remove = nfsd4_remove_clid_dir,
.check = nfsd4_check_legacy_client,
.grace_done = nfsd4_recdir_purge_old,
.version = 1,
.msglen = 0,
};
/* Globals */
#define NFSD_PIPE_DIR "nfsd"
#define NFSD_CLD_PIPE "cld"
/* per-net-ns structure for holding cld upcall info */
struct cld_net {
struct rpc_pipe *cn_pipe;
spinlock_t cn_lock;
struct list_head cn_list;
unsigned int cn_xid;
bool cn_has_legacy;
struct crypto_shash *cn_tfm;
};
struct cld_upcall {
struct list_head cu_list;
struct cld_net *cu_net;
struct completion cu_done;
union {
struct cld_msg_hdr cu_hdr;
struct cld_msg cu_msg;
struct cld_msg_v2 cu_msg_v2;
} cu_u;
};
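/*
 * Queue an upcall message on the rpc_pipefs pipe and wait for the matching
 * downcall (or for destroy_msg to complete us if the pipe is torn down).
 */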
static int
__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn)
{
int ret;
struct rpc_pipe_msg msg;
struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_u);
memset(&msg, 0, sizeof(msg));
msg.data = cmsg;
msg.len = nn->client_tracking_ops->msglen;
ret = rpc_queue_upcall(pipe, &msg);
if (ret < 0) {
goto out;
}
wait_for_completion(&cup->cu_done);
if (msg.errno < 0)
ret = msg.errno;
out:
return ret;
}
static int
cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn)
{
int ret;
/*
* -EAGAIN occurs when pipe is closed and reopened while there are
* upcalls queued.
*/
do {
ret = __cld_pipe_upcall(pipe, cmsg, nn);
} while (ret == -EAGAIN);
return ret;
}
static ssize_t
__cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
struct nfsd_net *nn)
{
uint8_t cmd, princhashlen;
struct xdr_netobj name, princhash = { .len = 0, .data = NULL };
uint16_t namelen;
struct cld_net *cn = nn->cld_net;
if (get_user(cmd, &cmsg->cm_cmd)) {
dprintk("%s: error when copying cmd from userspace", __func__);
return -EFAULT;
}
if (cmd == Cld_GraceStart) {
if (nn->client_tracking_ops->version >= 2) {
const struct cld_clntinfo __user *ci;
ci = &cmsg->cm_u.cm_clntinfo;
if (get_user(namelen, &ci->cc_name.cn_len))
return -EFAULT;
name.data = memdup_user(&ci->cc_name.cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
name.len = namelen;
if (get_user(princhashlen, &ci->cc_princhash.cp_len)) {
kfree(name.data);
return -EFAULT;
}
if (princhashlen > 0) {
princhash.data = memdup_user(
&ci->cc_princhash.cp_data,
princhashlen);
if (IS_ERR(princhash.data)) {
kfree(name.data);
return PTR_ERR(princhash.data);
}
princhash.len = princhashlen;
} else
princhash.len = 0;
} else {
const struct cld_name __user *cnm;
cnm = &cmsg->cm_u.cm_name;
if (get_user(namelen, &cnm->cn_len))
return -EFAULT;
name.data = memdup_user(&cnm->cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
name.len = namelen;
}
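/*
 * Records converted from the legacy recoverydir are passed up with a
 * "hash:" prefix followed by the hex md5 dirname; strip the prefix
 * and remember that legacy records exist.
 */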
if (name.len > 5 && memcmp(name.data, "hash:", 5) == 0) {
name.len = name.len - 5;
memmove(name.data, name.data + 5, name.len);
cn->cn_has_legacy = true;
}
if (!nfs4_client_to_reclaim(name, princhash, nn)) {
kfree(name.data);
kfree(princhash.data);
return -EFAULT;
}
return nn->client_tracking_ops->msglen;
}
return -EFAULT;
}
static ssize_t
cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct cld_upcall *tmp, *cup;
struct cld_msg_hdr __user *hdr = (struct cld_msg_hdr __user *)src;
struct cld_msg_v2 __user *cmsg = (struct cld_msg_v2 __user *)src;
uint32_t xid;
struct nfsd_net *nn = net_generic(file_inode(filp)->i_sb->s_fs_info,
nfsd_net_id);
struct cld_net *cn = nn->cld_net;
int16_t status;
if (mlen != nn->client_tracking_ops->msglen) {
dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen,
nn->client_tracking_ops->msglen);
return -EINVAL;
}
/* copy just the xid so we can try to find that */
if (copy_from_user(&xid, &hdr->cm_xid, sizeof(xid)) != 0) {
dprintk("%s: error when copying xid from userspace", __func__);
return -EFAULT;
}
/*
* copy the status so we know whether to remove the upcall from the
* list (for -EINPROGRESS, we just want to make sure the xid is
* valid, not remove the upcall from the list)
*/
if (get_user(status, &hdr->cm_status)) {
dprintk("%s: error when copying status from userspace", __func__);
return -EFAULT;
}
/* walk the list and find corresponding xid */
cup = NULL;
spin_lock(&cn->cn_lock);
list_for_each_entry(tmp, &cn->cn_list, cu_list) {
if (get_unaligned(&tmp->cu_u.cu_hdr.cm_xid) == xid) {
cup = tmp;
if (status != -EINPROGRESS)
list_del_init(&cup->cu_list);
break;
}
}
spin_unlock(&cn->cn_lock);
/* couldn't find upcall? */
if (!cup) {
dprintk("%s: couldn't find upcall -- xid=%u\n", __func__, xid);
return -EINVAL;
}
if (status == -EINPROGRESS)
return __cld_pipe_inprogress_downcall(cmsg, nn);
if (copy_from_user(&cup->cu_u.cu_msg_v2, src, mlen) != 0)
return -EFAULT;
complete(&cup->cu_done);
return mlen;
}
static void
cld_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
struct cld_msg *cmsg = msg->data;
struct cld_upcall *cup = container_of(cmsg, struct cld_upcall,
cu_u.cu_msg);
/* errno >= 0 means we got a downcall */
if (msg->errno >= 0)
return;
complete(&cup->cu_done);
}
static const struct rpc_pipe_ops cld_upcall_ops = {
.upcall = rpc_pipe_generic_upcall,
.downcall = cld_pipe_downcall,
.destroy_msg = cld_pipe_destroy_msg,
};
static struct dentry *
nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe)
{
struct dentry *dir, *dentry;
dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR);
if (dir == NULL)
return ERR_PTR(-ENOENT);
dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
dput(dir);
return dentry;
}
static void
nfsd4_cld_unregister_sb(struct rpc_pipe *pipe)
{
if (pipe->dentry)
rpc_unlink(pipe->dentry);
}
static struct dentry *
nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *sb;
struct dentry *dentry;
sb = rpc_get_sb_net(net);
if (!sb)
return NULL;
dentry = nfsd4_cld_register_sb(sb, pipe);
rpc_put_sb_net(net);
return dentry;
}
static void
nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *sb;
sb = rpc_get_sb_net(net);
if (sb) {
nfsd4_cld_unregister_sb(pipe);
rpc_put_sb_net(net);
}
}
/* Initialize rpc_pipefs pipe for communication with client tracking daemon */
static int
__nfsd4_init_cld_pipe(struct net *net)
{
int ret;
struct dentry *dentry;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn;
if (nn->cld_net)
return 0;
cn = kzalloc(sizeof(*cn), GFP_KERNEL);
if (!cn) {
ret = -ENOMEM;
goto err;
}
cn->cn_pipe = rpc_mkpipe_data(&cld_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
if (IS_ERR(cn->cn_pipe)) {
ret = PTR_ERR(cn->cn_pipe);
goto err;
}
spin_lock_init(&cn->cn_lock);
INIT_LIST_HEAD(&cn->cn_list);
dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
goto err_destroy_data;
}
cn->cn_pipe->dentry = dentry;
cn->cn_has_legacy = false;
nn->cld_net = cn;
return 0;
err_destroy_data:
rpc_destroy_pipe_data(cn->cn_pipe);
err:
kfree(cn);
printk(KERN_ERR "NFSD: unable to create nfsdcld upcall pipe (%d)\n",
ret);
return ret;
}
static int
nfsd4_init_cld_pipe(struct net *net)
{
int status;
status = __nfsd4_init_cld_pipe(net);
if (!status)
pr_info("NFSD: Using old nfsdcld client tracking operations.\n");
return status;
}
static void
nfsd4_remove_cld_pipe(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
nfsd4_cld_unregister_net(net, cn->cn_pipe);
rpc_destroy_pipe_data(cn->cn_pipe);
if (cn->cn_tfm)
crypto_free_shash(cn->cn_tfm);
kfree(nn->cld_net);
nn->cld_net = NULL;
}
static struct cld_upcall *
alloc_cld_upcall(struct nfsd_net *nn)
{
struct cld_upcall *new, *tmp;
struct cld_net *cn = nn->cld_net;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return new;
/* FIXME: hard cap on number in flight? */
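/*
 * Pick an xid that no in-flight upcall is using: if the current
 * cn_xid collides, bump it and rescan the list from the top.
 */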
restart_search:
spin_lock(&cn->cn_lock);
list_for_each_entry(tmp, &cn->cn_list, cu_list) {
if (tmp->cu_u.cu_msg.cm_xid == cn->cn_xid) {
cn->cn_xid++;
spin_unlock(&cn->cn_lock);
goto restart_search;
}
}
init_completion(&new->cu_done);
new->cu_u.cu_msg.cm_vers = nn->client_tracking_ops->version;
put_unaligned(cn->cn_xid++, &new->cu_u.cu_msg.cm_xid);
new->cu_net = cn;
list_add(&new->cu_list, &cn->cn_list);
spin_unlock(&cn->cn_lock);
dprintk("%s: allocated xid %u\n", __func__, new->cu_u.cu_msg.cm_xid);
return new;
}
static void
free_cld_upcall(struct cld_upcall *victim)
{
struct cld_net *cn = victim->cu_net;
spin_lock(&cn->cn_lock);
list_del(&victim->cu_list);
spin_unlock(&cn->cn_lock);
kfree(victim);
}
/* Ask daemon to create a new record */
static void
nfsd4_cld_create(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if it's already stored */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_Create;
cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret) {
ret = cup->cu_u.cu_msg.cm_status;
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
free_cld_upcall(cup);
out_err:
if (ret)
printk(KERN_ERR "NFSD: Unable to create client "
"record on stable storage: %d\n", ret);
}
/* Ask daemon to create a new record */
static void
nfsd4_cld_create_v2(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
struct cld_msg_v2 *cmsg;
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
/* Don't upcall if it's already stored */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cmsg = &cup->cu_u.cu_msg_v2;
cmsg->cm_cmd = Cld_Create;
cmsg->cm_u.cm_clntinfo.cc_name.cn_len = clp->cl_name.len;
memcpy(cmsg->cm_u.cm_clntinfo.cc_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
if (clp->cl_cred.cr_raw_principal)
principal = clp->cl_cred.cr_raw_principal;
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal) {
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL) {
ret = -ENOMEM;
goto out;
}
ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal),
cksum.data);
if (ret) {
kfree(cksum.data);
goto out;
}
cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = cksum.len;
memcpy(cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data,
cksum.data, cksum.len);
kfree(cksum.data);
} else
cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0;
ret = cld_pipe_upcall(cn->cn_pipe, cmsg, nn);
if (!ret) {
ret = cmsg->cm_status;
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
out:
free_cld_upcall(cup);
out_err:
if (ret)
pr_err("NFSD: Unable to create client record on stable storage: %d\n",
ret);
}
/* Ask daemon to remove a record */
static void
nfsd4_cld_remove(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if it's already removed */
if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_Remove;
cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret) {
ret = cup->cu_u.cu_msg.cm_status;
clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
free_cld_upcall(cup);
out_err:
if (ret)
printk(KERN_ERR "NFSD: Unable to remove client "
"record from stable storage: %d\n", ret);
}
/*
* For older nfsdcld's that do not allow us to "slurp" the clients
* from the tracking database during startup.
*
* Check for presence of a record, and update its timestamp
*/
static int
nfsd4_cld_check_v0(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if one was already stored during this grace period */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
cup = alloc_cld_upcall(nn);
if (!cup) {
printk(KERN_ERR "NFSD: Unable to check client record on "
"stable storage: %d\n", -ENOMEM);
return -ENOMEM;
}
cup->cu_u.cu_msg.cm_cmd = Cld_Check;
cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret) {
ret = cup->cu_u.cu_msg.cm_status;
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
free_cld_upcall(cup);
return ret;
}
/*
* For newer nfsdcld's that allow us to "slurp" the clients
* from the tracking database during startup.
*
* Check for presence of a record in the reclaim_str_hashtbl
*/
static int
nfsd4_cld_check(struct nfs4_client *clp)
{
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
int status;
char dname[HEXDIR_LEN];
struct xdr_netobj name;
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
/* look for it in the reclaim hashtable otherwise */
crp = nfsd4_find_reclaim_client(clp->cl_name, nn);
if (crp)
goto found;
if (cn->cn_has_legacy) {
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
return -ENOENT;
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
return -ENOENT;
}
name.len = HEXDIR_LEN;
crp = nfsd4_find_reclaim_client(name, nn);
kfree(name.data);
if (crp)
goto found;
}
return -ENOENT;
found:
crp->cr_clp = clp;
return 0;
}
static int
nfsd4_cld_check_v2(struct nfs4_client *clp)
{
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
int status;
char dname[HEXDIR_LEN];
struct xdr_netobj name;
struct crypto_shash *tfm = cn->cn_tfm;
struct xdr_netobj cksum;
char *principal = NULL;
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
/* look for it in the reclaim hashtable otherwise */
crp = nfsd4_find_reclaim_client(clp->cl_name, nn);
if (crp)
goto found;
if (cn->cn_has_legacy) {
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
return -ENOENT;
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data\n",
__func__);
return -ENOENT;
}
name.len = HEXDIR_LEN;
crp = nfsd4_find_reclaim_client(name, nn);
kfree(name.data);
if (crp)
goto found;
}
return -ENOENT;
found:
if (crp->cr_princhash.len) {
if (clp->cl_cred.cr_raw_principal)
principal = clp->cl_cred.cr_raw_principal;
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal == NULL)
return -ENOENT;
cksum.len = crypto_shash_digestsize(tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL)
return -ENOENT;
status = crypto_shash_tfm_digest(tfm, principal,
strlen(principal), cksum.data);
if (status) {
kfree(cksum.data);
return -ENOENT;
}
if (memcmp(crp->cr_princhash.data, cksum.data,
crp->cr_princhash.len)) {
kfree(cksum.data);
return -ENOENT;
}
kfree(cksum.data);
}
crp->cr_clp = clp;
return 0;
}
static int
nfsd4_cld_grace_start(struct nfsd_net *nn)
{
int ret;
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_GraceStart;
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret)
ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
if (ret)
dprintk("%s: Unable to get clients from userspace: %d\n",
__func__, ret);
return ret;
}
/* For older nfsdcld's that need cm_gracetime */
static void
nfsd4_cld_grace_done_v0(struct nfsd_net *nn)
{
int ret;
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time;
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret)
ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
if (ret)
printk(KERN_ERR "NFSD: Unable to end grace period: %d\n", ret);
}
/*
* For newer nfsdcld's that do not need cm_gracetime. We also need to call
* nfs4_release_reclaim() to clear out the reclaim_str_hashtbl.
*/
static void
nfsd4_cld_grace_done(struct nfsd_net *nn)
{
int ret;
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret)
ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
nfs4_release_reclaim(nn);
if (ret)
printk(KERN_ERR "NFSD: Unable to end grace period: %d\n", ret);
}
static int
nfs4_cld_state_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
nn->reclaim_str_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
sizeof(struct list_head),
GFP_KERNEL);
if (!nn->reclaim_str_hashtbl)
return -ENOMEM;
for (i = 0; i < CLIENT_HASH_SIZE; i++)
INIT_LIST_HEAD(&nn->reclaim_str_hashtbl[i]);
nn->reclaim_str_hashtbl_size = 0;
nn->track_reclaim_completes = true;
atomic_set(&nn->nr_reclaim_complete, 0);
return 0;
}
static void
nfs4_cld_state_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nn->track_reclaim_completes = false;
kfree(nn->reclaim_str_hashtbl);
}
static bool
cld_running(struct nfsd_net *nn)
{
struct cld_net *cn = nn->cld_net;
struct rpc_pipe *pipe = cn->cn_pipe;
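/*
 * Consider nfsdcld to be up if something has the pipe open for
 * reading or writing.
 */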
return pipe->nreaders || pipe->nwriters;
}
static int
nfsd4_cld_get_version(struct nfsd_net *nn)
{
int ret = 0;
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
uint8_t version;
cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
cup->cu_u.cu_msg.cm_cmd = Cld_GetVersion;
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
if (!ret) {
ret = cup->cu_u.cu_msg.cm_status;
if (ret)
goto out_free;
version = cup->cu_u.cu_msg.cm_u.cm_version;
dprintk("%s: userspace returned version %u\n",
__func__, version);
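/* Clamp to the range of upcall versions this kernel supports. */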
if (version < 1)
version = 1;
else if (version > CLD_UPCALL_VERSION)
version = CLD_UPCALL_VERSION;
switch (version) {
case 1:
nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
break;
case 2:
nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v2;
break;
default:
break;
}
}
out_free:
free_cld_upcall(cup);
out_err:
if (ret)
dprintk("%s: Unable to get version from userspace: %d\n",
__func__, ret);
return ret;
}
static int
nfsd4_cld_tracking_init(struct net *net)
{
int status;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
bool running;
int retries = 10;
struct crypto_shash *tfm;
status = nfs4_cld_state_init(net);
if (status)
return status;
status = __nfsd4_init_cld_pipe(net);
if (status)
goto err_shutdown;
/*
* rpc pipe upcalls take 30 seconds to time out, so we don't want to
* queue an upcall unless we know that nfsdcld is running (because we
* want this to fail fast so that nfsd4_client_tracking_init() can try
* the next client tracking method). nfsdcld should already be running
* before nfsd is started, so the wait here is for nfsdcld to open the
* pipefs file we just created.
*/
while (!(running = cld_running(nn)) && retries--)
msleep(100);
if (!running) {
status = -ETIMEDOUT;
goto err_remove;
}
tfm = crypto_alloc_shash("sha256", 0, 0);
if (IS_ERR(tfm)) {
status = PTR_ERR(tfm);
goto err_remove;
}
nn->cld_net->cn_tfm = tfm;
status = nfsd4_cld_get_version(nn);
if (status == -EOPNOTSUPP)
pr_warn("NFSD: nfsdcld GetVersion upcall failed. Please upgrade nfsdcld.\n");
status = nfsd4_cld_grace_start(nn);
if (status) {
if (status == -EOPNOTSUPP)
pr_warn("NFSD: nfsdcld GraceStart upcall failed. Please upgrade nfsdcld.\n");
nfs4_release_reclaim(nn);
goto err_remove;
} else
pr_info("NFSD: Using nfsdcld client tracking operations.\n");
return 0;
err_remove:
nfsd4_remove_cld_pipe(net);
err_shutdown:
nfs4_cld_state_shutdown(net);
return status;
}
static void
nfsd4_cld_tracking_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs4_release_reclaim(nn);
nfsd4_remove_cld_pipe(net);
nfs4_cld_state_shutdown(net);
}
/* For older nfsdcld's */
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v0 = {
.init = nfsd4_init_cld_pipe,
.exit = nfsd4_remove_cld_pipe,
.create = nfsd4_cld_create,
.remove = nfsd4_cld_remove,
.check = nfsd4_cld_check_v0,
.grace_done = nfsd4_cld_grace_done_v0,
.version = 1,
.msglen = sizeof(struct cld_msg),
};
/* For newer nfsdcld's */
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = {
.init = nfsd4_cld_tracking_init,
.exit = nfsd4_cld_tracking_exit,
.create = nfsd4_cld_create,
.remove = nfsd4_cld_remove,
.check = nfsd4_cld_check,
.grace_done = nfsd4_cld_grace_done,
.version = 1,
.msglen = sizeof(struct cld_msg),
};
/* v2 create/check ops include the principal, if available */
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2 = {
.init = nfsd4_cld_tracking_init,
.exit = nfsd4_cld_tracking_exit,
.create = nfsd4_cld_create_v2,
.remove = nfsd4_cld_remove,
.check = nfsd4_cld_check_v2,
.grace_done = nfsd4_cld_grace_done,
.version = 2,
.msglen = sizeof(struct cld_msg_v2),
};
/* upcall via usermodehelper */
static char cltrack_prog[PATH_MAX] = "/sbin/nfsdcltrack";
module_param_string(cltrack_prog, cltrack_prog, sizeof(cltrack_prog),
S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cltrack_prog, "Path to the nfsdcltrack upcall program");
static bool cltrack_legacy_disable;
module_param(cltrack_legacy_disable, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cltrack_legacy_disable,
"Disable legacy recoverydir conversion. Default: false");
#define LEGACY_TOPDIR_ENV_PREFIX "NFSDCLTRACK_LEGACY_TOPDIR="
#define LEGACY_RECDIR_ENV_PREFIX "NFSDCLTRACK_LEGACY_RECDIR="
#define HAS_SESSION_ENV_PREFIX "NFSDCLTRACK_CLIENT_HAS_SESSION="
#define GRACE_START_ENV_PREFIX "NFSDCLTRACK_GRACE_START="
static char *
nfsd4_cltrack_legacy_topdir(void)
{
int copied;
size_t len;
char *result;
if (cltrack_legacy_disable)
return NULL;
len = strlen(LEGACY_TOPDIR_ENV_PREFIX) +
strlen(nfs4_recoverydir()) + 1;
result = kmalloc(len, GFP_KERNEL);
if (!result)
return result;
copied = snprintf(result, len, LEGACY_TOPDIR_ENV_PREFIX "%s",
nfs4_recoverydir());
if (copied >= len) {
/* just return nothing if output was truncated */
kfree(result);
return NULL;
}
return result;
}
static char *
nfsd4_cltrack_legacy_recdir(const struct xdr_netobj *name)
{
int copied;
size_t len;
char *result;
if (cltrack_legacy_disable)
return NULL;
/* +1 is for '/' between "topdir" and "recdir" */
len = strlen(LEGACY_RECDIR_ENV_PREFIX) +
strlen(nfs4_recoverydir()) + 1 + HEXDIR_LEN;
result = kmalloc(len, GFP_KERNEL);
if (!result)
return result;
copied = snprintf(result, len, LEGACY_RECDIR_ENV_PREFIX "%s/",
nfs4_recoverydir());
if (copied > (len - HEXDIR_LEN)) {
/* just return nothing if output will be truncated */
kfree(result);
return NULL;
}
copied = nfs4_make_rec_clidname(result + copied, name);
if (copied) {
kfree(result);
return NULL;
}
return result;
}
static char *
nfsd4_cltrack_client_has_session(struct nfs4_client *clp)
{
int copied;
size_t len;
char *result;
/* prefix + Y/N character + terminating NULL */
len = strlen(HAS_SESSION_ENV_PREFIX) + 1 + 1;
result = kmalloc(len, GFP_KERNEL);
if (!result)
return result;
copied = snprintf(result, len, HAS_SESSION_ENV_PREFIX "%c",
clp->cl_minorversion ? 'Y' : 'N');
if (copied >= len) {
/* just return nothing if output was truncated */
kfree(result);
return NULL;
}
return result;
}
static char *
nfsd4_cltrack_grace_start(time64_t grace_start)
{
int copied;
size_t len;
char *result;
/* prefix + max width of int64_t string + terminating NULL */
len = strlen(GRACE_START_ENV_PREFIX) + 22 + 1;
result = kmalloc(len, GFP_KERNEL);
if (!result)
return result;
copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%lld",
grace_start);
if (copied >= len) {
/* just return nothing if output was truncated */
kfree(result);
return NULL;
}
return result;
}
static int
nfsd4_umh_cltrack_upcall(char *cmd, char *arg, char *env0, char *env1)
{
char *envp[3];
char *argv[4];
int ret;
if (unlikely(!cltrack_prog[0])) {
dprintk("%s: cltrack_prog is disabled\n", __func__);
return -EACCES;
}
dprintk("%s: cmd: %s\n", __func__, cmd);
dprintk("%s: arg: %s\n", __func__, arg ? arg : "(null)");
dprintk("%s: env0: %s\n", __func__, env0 ? env0 : "(null)");
dprintk("%s: env1: %s\n", __func__, env1 ? env1 : "(null)");
envp[0] = env0;
envp[1] = env1;
envp[2] = NULL;
argv[0] = (char *)cltrack_prog;
argv[1] = cmd;
argv[2] = arg;
argv[3] = NULL;
ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
/*
* Disable the upcall mechanism if we're getting an ENOENT or EACCES
* error. The admin can re-enable it on the fly by using sysfs
* once the problem has been fixed.
*/
if (ret == -ENOENT || ret == -EACCES) {
dprintk("NFSD: %s was not found or isn't executable (%d). "
"Setting cltrack_prog to blank string!",
cltrack_prog, ret);
cltrack_prog[0] = '\0';
}
dprintk("%s: %s return value: %d\n", __func__, cltrack_prog, ret);
return ret;
}
static char *
bin_to_hex_dup(const unsigned char *src, int srclen)
{
char *buf;
/* +1 for terminating NULL */
buf = kzalloc((srclen * 2) + 1, GFP_KERNEL);
if (!buf)
return buf;
bin2hex(buf, src, srclen);
return buf;
}
static int
nfsd4_umh_cltrack_init(struct net *net)
{
int ret;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
char *grace_start = nfsd4_cltrack_grace_start(nn->boot_time);
/* XXX: The usermode helper is not working in a container yet. */
if (net != &init_net) {
pr_warn("NFSD: attempt to initialize umh client tracking in a container ignored.\n");
kfree(grace_start);
return -EINVAL;
}
ret = nfsd4_umh_cltrack_upcall("init", NULL, grace_start, NULL);
kfree(grace_start);
if (!ret)
pr_info("NFSD: Using UMH upcall client tracking operations.\n");
return ret;
}
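/*
 * Serialize UMH upcalls for a single client: NFSD4_CLIENT_UPCALL_LOCK in
 * cl_flags acts as a bit lock around the upcall and the STABLE flag update.
 */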
static void
nfsd4_cltrack_upcall_lock(struct nfs4_client *clp)
{
wait_on_bit_lock(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK,
TASK_UNINTERRUPTIBLE);
}
static void
nfsd4_cltrack_upcall_unlock(struct nfs4_client *clp)
{
smp_mb__before_atomic();
clear_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
smp_mb__after_atomic();
wake_up_bit(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK);
}
static void
nfsd4_umh_cltrack_create(struct nfs4_client *clp)
{
char *hexid, *has_session, *grace_start;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
/*
* With v4.0 clients, there's little difference in outcome between a
* create and check operation, and we can end up calling into this
* function multiple times per client (once for each openowner). So,
* for v4.0 clients skip upcalling once the client has been recorded
* on stable storage.
*
* For v4.1+ clients, the outcome of the two operations is different,
* so we must ensure that we upcall for the create operation. v4.1+
* clients call this on RECLAIM_COMPLETE though, so we should only end
* up doing a single create upcall per client.
*/
if (clp->cl_minorversion == 0 &&
test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
if (!hexid) {
dprintk("%s: can't allocate memory for upcall!\n", __func__);
return;
}
has_session = nfsd4_cltrack_client_has_session(clp);
grace_start = nfsd4_cltrack_grace_start(nn->boot_time);
nfsd4_cltrack_upcall_lock(clp);
if (!nfsd4_umh_cltrack_upcall("create", hexid, has_session, grace_start))
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
nfsd4_cltrack_upcall_unlock(clp);
kfree(has_session);
kfree(grace_start);
kfree(hexid);
}
static void
nfsd4_umh_cltrack_remove(struct nfs4_client *clp)
{
char *hexid;
if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
if (!hexid) {
dprintk("%s: can't allocate memory for upcall!\n", __func__);
return;
}
nfsd4_cltrack_upcall_lock(clp);
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags) &&
nfsd4_umh_cltrack_upcall("remove", hexid, NULL, NULL) == 0)
clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
nfsd4_cltrack_upcall_unlock(clp);
kfree(hexid);
}
static int
nfsd4_umh_cltrack_check(struct nfs4_client *clp)
{
int ret;
char *hexid, *has_session, *legacy;
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
if (!hexid) {
dprintk("%s: can't allocate memory for upcall!\n", __func__);
return -ENOMEM;
}
has_session = nfsd4_cltrack_client_has_session(clp);
legacy = nfsd4_cltrack_legacy_recdir(&clp->cl_name);
nfsd4_cltrack_upcall_lock(clp);
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) {
ret = 0;
} else {
ret = nfsd4_umh_cltrack_upcall("check", hexid, has_session, legacy);
if (ret == 0)
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
nfsd4_cltrack_upcall_unlock(clp);
kfree(has_session);
kfree(legacy);
kfree(hexid);
return ret;
}
static void
nfsd4_umh_cltrack_grace_done(struct nfsd_net *nn)
{
char *legacy;
char timestr[22]; /* FIXME: better way to determine max size? */
sprintf(timestr, "%lld", nn->boot_time);
legacy = nfsd4_cltrack_legacy_topdir();
nfsd4_umh_cltrack_upcall("gracedone", timestr, legacy, NULL);
kfree(legacy);
}
static const struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = {
.init = nfsd4_umh_cltrack_init,
.exit = NULL,
.create = nfsd4_umh_cltrack_create,
.remove = nfsd4_umh_cltrack_remove,
.check = nfsd4_umh_cltrack_check,
.grace_done = nfsd4_umh_cltrack_grace_done,
.version = 1,
.msglen = 0,
};
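/*
 * Probe for a usable tracking method in order: new-style nfsdcld, old
 * nfsdcld, the nfsdcltrack usermode helper, and finally the legacy
 * recoverydir (if it exists and is a directory).
 */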
int
nfsd4_client_tracking_init(struct net *net)
{
int status;
struct path path;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
/* just run the init if the method is already decided */
if (nn->client_tracking_ops)
goto do_init;
/* First, try to use nfsdcld */
nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
status = nn->client_tracking_ops->init(net);
if (!status)
return status;
if (status != -ETIMEDOUT) {
nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v0;
status = nn->client_tracking_ops->init(net);
if (!status)
return status;
}
/*
* Next, try the UMH upcall.
*/
nn->client_tracking_ops = &nfsd4_umh_tracking_ops;
status = nn->client_tracking_ops->init(net);
if (!status)
return status;
/*
* Finally, see if the recoverydir exists and is a directory.
* If it is, then use the legacy ops.
*/
nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
if (!status) {
status = d_is_dir(path.dentry);
path_put(&path);
if (!status) {
status = -EINVAL;
goto out;
}
}
do_init:
status = nn->client_tracking_ops->init(net);
out:
if (status) {
printk(KERN_WARNING "NFSD: Unable to initialize client "
"recovery tracking! (%d)\n", status);
nn->client_tracking_ops = NULL;
}
return status;
}
void
nfsd4_client_tracking_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (nn->client_tracking_ops) {
if (nn->client_tracking_ops->exit)
nn->client_tracking_ops->exit(net);
nn->client_tracking_ops = NULL;
}
}
void
nfsd4_client_record_create(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (nn->client_tracking_ops)
nn->client_tracking_ops->create(clp);
}
void
nfsd4_client_record_remove(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (nn->client_tracking_ops)
nn->client_tracking_ops->remove(clp);
}
int
nfsd4_client_record_check(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (nn->client_tracking_ops)
return nn->client_tracking_ops->check(clp);
return -EOPNOTSUPP;
}
void
nfsd4_record_grace_done(struct nfsd_net *nn)
{
if (nn->client_tracking_ops)
nn->client_tracking_ops->grace_done(nn);
}
static int
rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct super_block *sb = ptr;
struct net *net = sb->s_fs_info;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
struct dentry *dentry;
int ret = 0;
if (!try_module_get(THIS_MODULE))
return 0;
if (!cn) {
module_put(THIS_MODULE);
return 0;
}
switch (event) {
case RPC_PIPEFS_MOUNT:
dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
break;
}
cn->cn_pipe->dentry = dentry;
break;
case RPC_PIPEFS_UMOUNT:
if (cn->cn_pipe->dentry)
nfsd4_cld_unregister_sb(cn->cn_pipe);
break;
default:
ret = -ENOTSUPP;
break;
}
module_put(THIS_MODULE);
return ret;
}
static struct notifier_block nfsd4_cld_block = {
.notifier_call = rpc_pipefs_event,
};
int
register_cld_notifier(void)
{
WARN_ON(!nfsd_net_id);
return rpc_pipefs_notifier_register(&nfsd4_cld_block);
}
void
unregister_cld_notifier(void)
{
rpc_pipefs_notifier_unregister(&nfsd4_cld_block);
}
| linux-master | fs/nfsd/nfs4recover.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Tom Haynes <[email protected]>
*
* The following implements a super-simple flex-file server
* where the NFSv4.1 mds is also the ds. And the storage is
* the same. I.e., writing to the mds via an NFSv4.1 WRITE
* goes to the same location as the NFSv3 WRITE.
*/
#include <linux/slab.h>
#include <linux/nfsd/debug.h>
#include <linux/sunrpc/addr.h>
#include "flexfilelayoutxdr.h"
#include "pnfs.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
static __be32
nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
struct nfsd4_layoutget *args)
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
u32 device_generation = 0;
int error;
uid_t u;
struct pnfs_ff_layout *fl;
/*
* The super simple flex file server has 1 mirror, 1 data server,
* and 1 file handle. So instead of 4 allocs, do 1 for now.
* Zero it out for the stateid - don't want junk in there!
*/
error = -ENOMEM;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (!fl)
goto out_error;
args->lg_content = fl;
/*
* Avoid layout commit, try to force the I/O to the DS,
* and for fun, cause all IOMODE_RW layout segments to
* effectively be WRITE only.
*/
fl->flags = FF_FLAGS_NO_LAYOUTCOMMIT | FF_FLAGS_NO_IO_THRU_MDS |
FF_FLAGS_NO_READ_IO;
/*
 * Do not allow an IOMODE_READ segment to have write permissions:
 * hand out a synthetic uid (owner + 1) so that WRITEs sent through
 * a READ layout fail the data server's permission check.
 */
if (seg->iomode == IOMODE_READ) {
u = from_kuid(&init_user_ns, inode->i_uid) + 1;
fl->uid = make_kuid(&init_user_ns, u);
} else
fl->uid = inode->i_uid;
fl->gid = inode->i_gid;
error = nfsd4_set_deviceid(&fl->deviceid, fhp, device_generation);
if (error)
goto out_error;
fl->fh.size = fhp->fh_handle.fh_size;
memcpy(fl->fh.data, &fhp->fh_handle.fh_raw, fl->fh.size);
/* Give whole file layout segments */
seg->offset = 0;
seg->length = NFS4_MAX_UINT64;
dprintk("GET: 0x%llx:0x%llx %d\n", seg->offset, seg->length,
seg->iomode);
return 0;
out_error:
seg->length = 0;
return nfserrno(error);
}
static __be32
nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
struct nfs4_client *clp, struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_ff_device_addr *da;
u16 port;
char addr[INET6_ADDRSTRLEN];
da = kzalloc(sizeof(struct pnfs_ff_device_addr), GFP_KERNEL);
if (!da)
return nfserrno(-ENOMEM);
gdp->gd_device = da;
da->version = 3;
da->minor_version = 0;
da->rsize = svc_max_payload(rqstp);
da->wsize = da->rsize;
rpc_ntop((struct sockaddr *)&rqstp->rq_daddr,
addr, INET6_ADDRSTRLEN);
if (rqstp->rq_daddr.ss_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)&rqstp->rq_daddr;
port = ntohs(sin->sin_port);
snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp");
da->netaddr.netid_len = 3;
} else {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)&rqstp->rq_daddr;
port = ntohs(sin6->sin6_port);
snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp6");
da->netaddr.netid_len = 4;
}
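/*
 * Encode the universal address: the IP address with the port appended
 * as two dot-separated decimal octets (hi.lo), per the netid/uaddr
 * conventions of RFC 5665.
 */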
da->netaddr.addr_len =
snprintf(da->netaddr.addr, FF_ADDR_LEN + 1,
"%s.%d.%d", addr, port >> 8, port & 0xff);
da->tightly_coupled = false;
return 0;
}
const struct nfsd4_layout_ops ff_layout_ops = {
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
.disable_recalls = true,
.proc_getdeviceinfo = nfsd4_ff_proc_getdeviceinfo,
.encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
.proc_layoutget = nfsd4_ff_proc_layoutget,
.encode_layoutget = nfsd4_ff_encode_layoutget,
};
| linux-master | fs/nfsd/flexfilelayout.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Process version 2 NFS requests.
*
* Copyright (C) 1995-1997 Olaf Kirch <[email protected]>
*/
#include <linux/namei.h>
#include "cache.h"
#include "xdr.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
static __be32
nfsd_proc_null(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Get a file's attributes
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_getattr(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* Set a file's attributes
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_setattr(struct svc_rqst *rqstp)
{
struct nfsd_sattrargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
struct iattr *iap = &argp->attrs;
struct nfsd_attrs attrs = {
.na_iattr = iap,
};
struct svc_fh *fhp;
dprintk("nfsd: SETATTR %s, valid=%x, size=%ld\n",
SVCFH_fmt(&argp->fh),
argp->attrs.ia_valid, (long) argp->attrs.ia_size);
fhp = fh_copy(&resp->fh, &argp->fh);
/*
* NFSv2 does not differentiate between "set-[ac]time-to-now"
* which only requires access, and "set-[ac]time-to-X" which
* requires ownership.
* So if it looks like it might be "set both to the same time which
* is close to now", and if setattr_prepare fails, then we
* convert to "set to now" instead of "set to explicit time"
*
* We only call setattr_prepare as the last test as technically
* it is not an interface that we should be using.
*/
#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
#define MAX_TOUCH_TIME_ERROR (30*60)
if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
/*
* Looks probable.
*
* Now just make sure time is in the right ballpark.
* Solaris, at least, doesn't seem to care what the time
* request is. We require it be within 30 minutes of now.
*/
time64_t delta = iap->ia_atime.tv_sec - ktime_get_real_seconds();
resp->status = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
if (resp->status != nfs_ok)
goto out;
if (delta < 0)
delta = -delta;
if (delta < MAX_TOUCH_TIME_ERROR &&
setattr_prepare(&nop_mnt_idmap, fhp->fh_dentry, iap) != 0) {
/*
* Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
* This will cause notify_change to set these times
* to "now"
*/
iap->ia_valid &= ~BOTH_TIME_SET;
}
}
resp->status = nfsd_setattr(rqstp, fhp, &attrs, 0, (time64_t)0);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/* Obsolete, replaced by MNTPROC_MNT. */
static __be32
nfsd_proc_root(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Look up a path name component
* Note: the dentry in the resp->fh may be negative if the file
* doesn't exist yet.
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_lookup(struct svc_rqst *rqstp)
{
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_diropres *resp = rqstp->rq_resp;
dprintk("nfsd: LOOKUP %s %.*s\n",
SVCFH_fmt(&argp->fh), argp->len, argp->name);
fh_init(&resp->fh, NFS_FHSIZE);
resp->status = nfsd_lookup(rqstp, &argp->fh, argp->name, argp->len,
&resp->fh);
fh_put(&argp->fh);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* Read a symlink.
*/
static __be32
nfsd_proc_readlink(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_readlinkres *resp = rqstp->rq_resp;
dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
/* Read the symlink. */
resp->len = NFS_MAXPATHLEN;
resp->page = *(rqstp->rq_next_page++);
resp->status = nfsd_readlink(rqstp, &argp->fh,
page_address(resp->page), &resp->len);
fh_put(&argp->fh);
return rpc_success;
}
/*
* Read a portion of a file.
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_read(struct svc_rqst *rqstp)
{
struct nfsd_readargs *argp = rqstp->rq_argp;
struct nfsd_readres *resp = rqstp->rq_resp;
u32 eof;
dprintk("nfsd: READ %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->offset);
argp->count = min_t(u32, argp->count, NFSSVC_MAXBLKSIZE_V2);
argp->count = min_t(u32, argp->count, rqstp->rq_res.buflen);
resp->pages = rqstp->rq_next_page;
/* Obtain buffer pointer for payload. 19 is 1 word for
* status, 17 words for fattr, and 1 word for the byte count.
*/
svc_reserve_auth(rqstp, (19<<2) + argp->count + 4);
resp->count = argp->count;
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
&resp->count, &eof);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
return rpc_success;
}
/* Reserved */
static __be32
nfsd_proc_writecache(struct svc_rqst *rqstp)
{
return rpc_success;
}
/*
* Write data to a file
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_write(struct svc_rqst *rqstp)
{
struct nfsd_writeargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
unsigned int nvecs;
dprintk("nfsd: WRITE %s %u bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->len, argp->offset);
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
argp->offset, rqstp->rq_vec, nvecs,
&cnt, NFS_DATA_SYNC, NULL);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
return rpc_success;
}
/*
* CREATE processing is complicated. The keyword here is `overloaded.'
* The parent directory is kept locked between the check for existence
* and the actual create() call in compliance with VFS protocols.
* N.B. After this call _both_ argp->fh and resp->fh need an fh_put
*/
static __be32
nfsd_proc_create(struct svc_rqst *rqstp)
{
struct nfsd_createargs *argp = rqstp->rq_argp;
struct nfsd_diropres *resp = rqstp->rq_resp;
svc_fh *dirfhp = &argp->fh;
svc_fh *newfhp = &resp->fh;
struct iattr *attr = &argp->attrs;
struct nfsd_attrs attrs = {
.na_iattr = attr,
};
struct inode *inode;
struct dentry *dchild;
int type, mode;
int hosterr;
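/* NFSv2 overloads the size attribute to carry the device number on CREATE. */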
dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size);
dprintk("nfsd: CREATE %s %.*s\n",
SVCFH_fmt(dirfhp), argp->len, argp->name);
/* First verify the parent file handle */
resp->status = fh_verify(rqstp, dirfhp, S_IFDIR, NFSD_MAY_EXEC);
if (resp->status != nfs_ok)
goto done; /* must fh_put dirfhp even on error */
/* Check for NFSD_MAY_WRITE in nfsd_create if necessary */
resp->status = nfserr_exist;
if (isdotent(argp->name, argp->len))
goto done;
hosterr = fh_want_write(dirfhp);
if (hosterr) {
resp->status = nfserrno(hosterr);
goto done;
}
inode_lock_nested(dirfhp->fh_dentry->d_inode, I_MUTEX_PARENT);
dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
if (IS_ERR(dchild)) {
resp->status = nfserrno(PTR_ERR(dchild));
goto out_unlock;
}
fh_init(newfhp, NFS_FHSIZE);
resp->status = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp);
if (!resp->status && d_really_is_negative(dchild))
resp->status = nfserr_noent;
dput(dchild);
if (resp->status) {
if (resp->status != nfserr_noent)
goto out_unlock;
/*
* If the new file handle wasn't verified, we can't tell
* whether the file exists or not. Time to bail ...
*/
resp->status = nfserr_acces;
if (!newfhp->fh_dentry) {
printk(KERN_WARNING
"nfsd_proc_create: file handle not verified\n");
goto out_unlock;
}
}
inode = d_inode(newfhp->fh_dentry);
/* Unfudge the mode bits */
if (attr->ia_valid & ATTR_MODE) {
type = attr->ia_mode & S_IFMT;
mode = attr->ia_mode & ~S_IFMT;
if (!type) {
/* no type, so if target exists, assume same as that,
* else assume a file */
if (inode) {
type = inode->i_mode & S_IFMT;
switch(type) {
case S_IFCHR:
case S_IFBLK:
/* reserve rdev for later checking */
rdev = inode->i_rdev;
attr->ia_valid |= ATTR_SIZE;
fallthrough;
case S_IFIFO:
/* this is probably a permission check..
* at least IRIX implements perm checking on
* echo thing > device-special-file-or-pipe
* by doing a CREATE with type==0
*/
resp->status = nfsd_permission(rqstp,
newfhp->fh_export,
newfhp->fh_dentry,
NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
if (resp->status && resp->status != nfserr_rofs)
goto out_unlock;
}
} else
type = S_IFREG;
}
} else if (inode) {
type = inode->i_mode & S_IFMT;
mode = inode->i_mode & ~S_IFMT;
} else {
type = S_IFREG;
mode = 0; /* ??? */
}
attr->ia_valid |= ATTR_MODE;
attr->ia_mode = mode;
/* Special treatment for non-regular files according to the
* gospel of sun micro
*/
if (type != S_IFREG) {
if (type != S_IFBLK && type != S_IFCHR) {
rdev = 0;
} else if (type == S_IFCHR && !(attr->ia_valid & ATTR_SIZE)) {
/* If you think you've seen the worst, grok this. */
type = S_IFIFO;
} else {
/* Okay, char or block special */
if (!rdev)
rdev = wanted;
}
/* we've used the SIZE information, so discard it */
attr->ia_valid &= ~ATTR_SIZE;
/* Make sure the type and device matches */
resp->status = nfserr_exist;
if (inode && inode_wrong_type(inode, type))
goto out_unlock;
}
resp->status = nfs_ok;
if (!inode) {
/* File doesn't exist. Create it and set attrs */
resp->status = nfsd_create_locked(rqstp, dirfhp, &attrs, type,
rdev, newfhp);
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
argp->name, attr->ia_valid, (long) attr->ia_size);
/* File already exists. We ignore all attributes except
* size, so that creat() behaves exactly like
* open(..., O_CREAT|O_TRUNC|O_WRONLY).
*/
attr->ia_valid &= ATTR_SIZE;
if (attr->ia_valid)
resp->status = nfsd_setattr(rqstp, newfhp, &attrs, 0,
(time64_t)0);
}
out_unlock:
inode_unlock(dirfhp->fh_dentry->d_inode);
fh_drop_write(dirfhp);
done:
fh_put(dirfhp);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
static __be32
nfsd_proc_remove(struct svc_rqst *rqstp)
{
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
dprintk("nfsd: REMOVE %s %.*s\n", SVCFH_fmt(&argp->fh),
argp->len, argp->name);
	/* Unlink. -S_IFDIR means file must not be a directory */
resp->status = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
return rpc_success;
}
static __be32
nfsd_proc_rename(struct svc_rqst *rqstp)
{
struct nfsd_renameargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
dprintk("nfsd: RENAME %s %.*s -> \n",
SVCFH_fmt(&argp->ffh), argp->flen, argp->fname);
dprintk("nfsd: -> %s %.*s\n",
SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname);
resp->status = nfsd_rename(rqstp, &argp->ffh, argp->fname, argp->flen,
&argp->tfh, argp->tname, argp->tlen);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
return rpc_success;
}
static __be32
nfsd_proc_link(struct svc_rqst *rqstp)
{
struct nfsd_linkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
dprintk("nfsd: LINK %s ->\n",
SVCFH_fmt(&argp->ffh));
dprintk("nfsd: %s %.*s\n",
SVCFH_fmt(&argp->tfh),
argp->tlen,
argp->tname);
resp->status = nfsd_link(rqstp, &argp->tfh, argp->tname, argp->tlen,
&argp->ffh);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
return rpc_success;
}
static __be32
nfsd_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd_symlinkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
struct svc_fh newfh;
if (argp->tlen > NFS_MAXPATHLEN) {
resp->status = nfserr_nametoolong;
goto out;
}
argp->tname = svc_fill_symlink_pathname(rqstp, &argp->first,
page_address(rqstp->rq_arg.pages[0]),
argp->tlen);
if (IS_ERR(argp->tname)) {
resp->status = nfserrno(PTR_ERR(argp->tname));
goto out;
}
dprintk("nfsd: SYMLINK %s %.*s -> %.*s\n",
SVCFH_fmt(&argp->ffh), argp->flen, argp->fname,
argp->tlen, argp->tname);
fh_init(&newfh, NFS_FHSIZE);
resp->status = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
argp->tname, &attrs, &newfh);
kfree(argp->tname);
fh_put(&argp->ffh);
fh_put(&newfh);
out:
return rpc_success;
}
/*
* Make directory. This operation is not idempotent.
* N.B. After this call resp->fh needs an fh_put
*/
static __be32
nfsd_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd_createargs *argp = rqstp->rq_argp;
struct nfsd_diropres *resp = rqstp->rq_resp;
struct nfsd_attrs attrs = {
.na_iattr = &argp->attrs,
};
dprintk("nfsd: MKDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
if (resp->fh.fh_dentry) {
printk(KERN_WARNING
"nfsd_proc_mkdir: response already verified??\n");
}
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_init(&resp->fh, NFS_FHSIZE);
resp->status = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
&attrs, S_IFDIR, 0, &resp->fh);
fh_put(&argp->fh);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
return rpc_success;
}
/*
* Remove a directory
*/
static __be32
nfsd_proc_rmdir(struct svc_rqst *rqstp)
{
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
dprintk("nfsd: RMDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
resp->status = nfsd_unlink(rqstp, &argp->fh, S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
return rpc_success;
}
static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd_readdirres *resp,
u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
buf->buflen = clamp(count, (u32)(XDR_UNIT * 2), (u32)PAGE_SIZE);
buf->buflen -= XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page++;
xdr_init_encode_pages(xdr, buf, buf->pages, NULL);
}
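/*
 * A worked example of the sizing above (assuming XDR_UNIT == 4 and 4KB
 * pages): a client READDIR count of 300 yields buflen = 300 - 8 = 292
 * bytes of entry space, while an oversized count is first clamped to
 * PAGE_SIZE, leaving 4096 - 8 = 4088 bytes for encoded entries.
 */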
/*
* Read a portion of a directory.
*/
static __be32
nfsd_proc_readdir(struct svc_rqst *rqstp)
{
struct nfsd_readdirargs *argp = rqstp->rq_argp;
struct nfsd_readdirres *resp = rqstp->rq_resp;
loff_t offset;
dprintk("nfsd: READDIR %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->cookie);
nfsd_init_dirlist_pages(rqstp, resp, argp->count);
resp->common.err = nfs_ok;
resp->cookie_offset = 0;
offset = argp->cookie;
resp->status = nfsd_readdir(rqstp, &argp->fh, &offset,
&resp->common, nfssvc_encode_entry);
nfssvc_encode_nfscookie(resp, offset);
fh_put(&argp->fh);
return rpc_success;
}
/*
* Get file system info
*/
static __be32
nfsd_proc_statfs(struct svc_rqst *rqstp)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_statfsres *resp = rqstp->rq_resp;
dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh));
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats,
NFSD_MAY_BYPASS_GSS_ON_ROOT);
fh_put(&argp->fh);
return rpc_success;
}
/*
* NFSv2 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
#define ST 1 /* status */
#define FH 8 /* filehandle */
#define AT 18 /* attributes */
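/*
 * A worked example of these reply-size estimates (XDR words, 4 bytes
 * each): GETATTR below uses .pc_xdrressize = ST+AT = 1+18 = 19 words
 * (76 bytes), while LOOKUP uses ST+FH+AT = 1+8+18 = 27 words. These
 * give the RPC layer an upper bound for reserving reply buffer space.
 */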
static const struct svc_procedure nfsd_procedures2[18] = {
[NFSPROC_NULL] = {
.pc_func = nfsd_proc_null,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "NULL",
},
[NFSPROC_GETATTR] = {
.pc_func = nfsd_proc_getattr,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "GETATTR",
},
[NFSPROC_SETATTR] = {
.pc_func = nfsd_proc_setattr,
.pc_decode = nfssvc_decode_sattrargs,
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_sattrargs),
.pc_argzero = sizeof(struct nfsd_sattrargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
.pc_name = "SETATTR",
},
[NFSPROC_ROOT] = {
.pc_func = nfsd_proc_root,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "ROOT",
},
[NFSPROC_LOOKUP] = {
.pc_func = nfsd_proc_lookup,
.pc_decode = nfssvc_decode_diropargs,
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+AT,
.pc_name = "LOOKUP",
},
[NFSPROC_READLINK] = {
.pc_func = nfsd_proc_readlink,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_readlinkres,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
.pc_name = "READLINK",
},
[NFSPROC_READ] = {
.pc_func = nfsd_proc_read,
.pc_decode = nfssvc_decode_readargs,
.pc_encode = nfssvc_encode_readres,
.pc_release = nfssvc_release_readres,
.pc_argsize = sizeof(struct nfsd_readargs),
.pc_argzero = sizeof(struct nfsd_readargs),
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
.pc_name = "READ",
},
[NFSPROC_WRITECACHE] = {
.pc_func = nfsd_proc_writecache,
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
.pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "WRITECACHE",
},
[NFSPROC_WRITE] = {
.pc_func = nfsd_proc_write,
.pc_decode = nfssvc_decode_writeargs,
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_writeargs),
.pc_argzero = sizeof(struct nfsd_writeargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
.pc_name = "WRITE",
},
[NFSPROC_CREATE] = {
.pc_func = nfsd_proc_create,
.pc_decode = nfssvc_decode_createargs,
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_createargs),
.pc_argzero = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
.pc_name = "CREATE",
},
[NFSPROC_REMOVE] = {
.pc_func = nfsd_proc_remove,
.pc_decode = nfssvc_decode_diropargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "REMOVE",
},
[NFSPROC_RENAME] = {
.pc_func = nfsd_proc_rename,
.pc_decode = nfssvc_decode_renameargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_renameargs),
.pc_argzero = sizeof(struct nfsd_renameargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "RENAME",
},
[NFSPROC_LINK] = {
.pc_func = nfsd_proc_link,
.pc_decode = nfssvc_decode_linkargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_linkargs),
.pc_argzero = sizeof(struct nfsd_linkargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "LINK",
},
[NFSPROC_SYMLINK] = {
.pc_func = nfsd_proc_symlink,
.pc_decode = nfssvc_decode_symlinkargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_symlinkargs),
.pc_argzero = sizeof(struct nfsd_symlinkargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "SYMLINK",
},
[NFSPROC_MKDIR] = {
.pc_func = nfsd_proc_mkdir,
.pc_decode = nfssvc_decode_createargs,
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_createargs),
.pc_argzero = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
.pc_name = "MKDIR",
},
[NFSPROC_RMDIR] = {
.pc_func = nfsd_proc_rmdir,
.pc_decode = nfssvc_decode_diropargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_diropargs),
.pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "RMDIR",
},
[NFSPROC_READDIR] = {
.pc_func = nfsd_proc_readdir,
.pc_decode = nfssvc_decode_readdirargs,
.pc_encode = nfssvc_encode_readdirres,
.pc_argsize = sizeof(struct nfsd_readdirargs),
.pc_argzero = sizeof(struct nfsd_readdirargs),
.pc_ressize = sizeof(struct nfsd_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIR",
},
[NFSPROC_STATFS] = {
.pc_func = nfsd_proc_statfs,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_statfsres,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_statfsres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+5,
.pc_name = "STATFS",
},
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count2[ARRAY_SIZE(nfsd_procedures2)]);
const struct svc_version nfsd_version2 = {
.vs_vers = 2,
.vs_nproc = ARRAY_SIZE(nfsd_procedures2),
.vs_proc = nfsd_procedures2,
.vs_count = nfsd_count2,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS2_SVC_XDRSIZE,
};
| linux-master | fs/nfsd/nfsproc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Central processing for nfsd.
*
* Authors: Olaf Kirch ([email protected])
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <[email protected]>
*/
#include <linux/sched/signal.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_SVC
extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int nfsd_acl_rpcbind_set(struct net *,
const struct svc_program *,
u32, int,
unsigned short,
unsigned short);
static __be32 nfsd_acl_init_request(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
#endif
static int nfsd_rpcbind_set(struct net *,
const struct svc_program *,
u32, int,
unsigned short,
unsigned short);
static __be32 nfsd_init_request(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
/*
* nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
* of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
*
 * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
* properly initialised 'struct svc_serv' with ->sv_nrthreads > 0 (unless
* nn->keep_active is set). That number of nfsd threads must
* exist and each must be listed in ->sp_all_threads in some entry of
* ->sv_pools[].
*
* Each active thread holds a counted reference on nn->nfsd_serv, as does
* the nn->keep_active flag and various transient calls to svc_get().
*
* Finally, the nfsd_mutex also protects some of the global variables that are
* accessed when nfsd starts and that are settable via the write_* routines in
* nfsctl.c. In particular:
*
* user_recovery_dirname
* user_lease_time
* nfsd_versions
*/
DEFINE_MUTEX(nfsd_mutex);
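/*
 * A minimal sketch of the locking rule above (nfsd_nrthreads() and
 * nfsd_pool_stats_open() later in this file follow this pattern): take
 * nfsd_mutex before dereferencing nn->nfsd_serv, and take a counted
 * reference if the pointer must outlive the lock:
 *
 *	mutex_lock(&nfsd_mutex);
 *	if (nn->nfsd_serv)
 *		svc_get(nn->nfsd_serv);
 *	mutex_unlock(&nfsd_mutex);
 */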
/*
 * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
 * nfsd_drc_max_mem limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
*/
DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long nfsd_drc_max_mem;
unsigned long nfsd_drc_mem_used;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat nfsd_acl_svcstats;
static const struct svc_version *nfsd_acl_version[] = {
# if defined(CONFIG_NFSD_V2_ACL)
[2] = &nfsd_acl_version2,
# endif
# if defined(CONFIG_NFSD_V3_ACL)
[3] = &nfsd_acl_version3,
# endif
};
#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
static struct svc_program nfsd_acl_program = {
.pg_prog = NFS_ACL_PROGRAM,
.pg_nvers = NFSD_ACL_NRVERS,
.pg_vers = nfsd_acl_version,
.pg_name = "nfsacl",
.pg_class = "nfsd",
.pg_stats = &nfsd_acl_svcstats,
.pg_authenticate = &svc_set_client,
.pg_init_request = nfsd_acl_init_request,
.pg_rpcbind_set = nfsd_acl_rpcbind_set,
};
static struct svc_stat nfsd_acl_svcstats = {
.program = &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
static const struct svc_version *nfsd_version[] = {
#if defined(CONFIG_NFSD_V2)
[2] = &nfsd_version2,
#endif
[3] = &nfsd_version3,
#if defined(CONFIG_NFSD_V4)
[4] = &nfsd_version4,
#endif
};
#define NFSD_MINVERS 2
#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
struct svc_program nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
.pg_next = &nfsd_acl_program,
#endif
.pg_prog = NFS_PROGRAM, /* program number */
.pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
.pg_vers = nfsd_version, /* version table */
.pg_name = "nfsd", /* program name */
.pg_class = "nfsd", /* authentication class */
	.pg_stats = &nfsd_svcstats, /* rpc statistics */
.pg_authenticate = &svc_set_client, /* export authentication */
.pg_init_request = nfsd_init_request,
.pg_rpcbind_set = nfsd_rpcbind_set,
};
static bool
nfsd_support_version(int vers)
{
if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
return nfsd_version[vers] != NULL;
return false;
}
static bool *
nfsd_alloc_versions(void)
{
bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
unsigned i;
if (vers) {
/* All compiled versions are enabled by default */
for (i = 0; i < NFSD_NRVERS; i++)
vers[i] = nfsd_support_version(i);
}
return vers;
}
static bool *
nfsd_alloc_minorversions(void)
{
bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
sizeof(bool), GFP_KERNEL);
unsigned i;
if (vers) {
/* All minor versions are enabled by default */
for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
vers[i] = nfsd_support_version(4);
}
return vers;
}
void
nfsd_netns_free_versions(struct nfsd_net *nn)
{
kfree(nn->nfsd_versions);
kfree(nn->nfsd4_minorversions);
nn->nfsd_versions = NULL;
nn->nfsd4_minorversions = NULL;
}
static void
nfsd_netns_init_versions(struct nfsd_net *nn)
{
if (!nn->nfsd_versions) {
nn->nfsd_versions = nfsd_alloc_versions();
nn->nfsd4_minorversions = nfsd_alloc_minorversions();
if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
nfsd_netns_free_versions(nn);
}
}
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
return 0;
switch(change) {
case NFSD_SET:
if (nn->nfsd_versions)
nn->nfsd_versions[vers] = nfsd_support_version(vers);
break;
case NFSD_CLEAR:
nfsd_netns_init_versions(nn);
if (nn->nfsd_versions)
nn->nfsd_versions[vers] = false;
break;
case NFSD_TEST:
if (nn->nfsd_versions)
return nn->nfsd_versions[vers];
fallthrough;
case NFSD_AVAIL:
return nfsd_support_version(vers);
}
return 0;
}
static void
nfsd_adjust_nfsd_versions4(struct nfsd_net *nn)
{
unsigned i;
for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
if (nn->nfsd4_minorversions[i])
return;
}
nfsd_vers(nn, 4, NFSD_CLEAR);
}
int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change)
{
if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
change != NFSD_AVAIL)
return -1;
switch(change) {
case NFSD_SET:
if (nn->nfsd4_minorversions) {
nfsd_vers(nn, 4, NFSD_SET);
nn->nfsd4_minorversions[minorversion] =
nfsd_vers(nn, 4, NFSD_TEST);
}
break;
case NFSD_CLEAR:
nfsd_netns_init_versions(nn);
if (nn->nfsd4_minorversions) {
nn->nfsd4_minorversions[minorversion] = false;
nfsd_adjust_nfsd_versions4(nn);
}
break;
case NFSD_TEST:
if (nn->nfsd4_minorversions)
return nn->nfsd4_minorversions[minorversion];
return nfsd_vers(nn, 4, NFSD_TEST);
case NFSD_AVAIL:
return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
nfsd_vers(nn, 4, NFSD_AVAIL);
}
return 0;
}
/*
* Maximum number of nfsd processes
*/
#define NFSD_MAXSERVS 8192
int nfsd_nrthreads(struct net *net)
{
int rv = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
mutex_lock(&nfsd_mutex);
if (nn->nfsd_serv)
rv = nn->nfsd_serv->sv_nrthreads;
mutex_unlock(&nfsd_mutex);
return rv;
}
static int nfsd_init_socks(struct net *net, const struct cred *cred)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (!list_empty(&nn->nfsd_serv->sv_permsocks))
return 0;
error = svc_xprt_create(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS, cred);
if (error < 0)
return error;
error = svc_xprt_create(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS, cred);
if (error < 0)
return error;
return 0;
}
static int nfsd_users = 0;
static int nfsd_startup_generic(void)
{
int ret;
if (nfsd_users++)
return 0;
ret = nfsd_file_cache_init();
if (ret)
goto dec_users;
ret = nfs4_state_start();
if (ret)
goto out_file_cache;
return 0;
out_file_cache:
nfsd_file_cache_shutdown();
dec_users:
nfsd_users--;
return ret;
}
static void nfsd_shutdown_generic(void)
{
if (--nfsd_users)
return;
nfs4_state_shutdown();
nfsd_file_cache_shutdown();
}
static bool nfsd_needs_lockd(struct nfsd_net *nn)
{
return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}
/**
* nfsd_copy_write_verifier - Atomically copy a write verifier
* @verf: buffer in which to receive the verifier cookie
* @nn: NFS net namespace
*
* This function provides a wait-free mechanism for copying the
* namespace's write verifier without tearing it.
*/
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
int seq = 0;
do {
read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
} while (need_seqretry(&nn->writeverf_lock, seq));
done_seqretry(&nn->writeverf_lock, seq);
}
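/*
 * A minimal caller sketch (hypothetical; the real callers live in the
 * reply encoders): snapshot the verifier without blocking concurrent
 * resets:
 *
 *	__be32 verf[2];
 *
 *	nfsd_copy_write_verifier(verf, nn);
 *	(verf[] now holds an untorn copy, safe to encode into a WRITE
 *	or COMMIT reply)
 */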
static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
{
struct timespec64 now;
u64 verf;
/*
* Because the time value is hashed, y2038 time_t overflow
* is irrelevant in this usage.
*/
ktime_get_raw_ts64(&now);
verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}
/**
* nfsd_reset_write_verifier - Generate a new write verifier
* @nn: NFS net namespace
*
* This function updates the ->writeverf field of @nn. This field
* contains an opaque cookie that, according to Section 18.32.3 of
* RFC 8881, "the client can use to determine whether a server has
* changed instance state (e.g., server restart) between a call to
* WRITE and a subsequent call to either WRITE or COMMIT. This
* cookie MUST be unchanged during a single instance of the NFSv4.1
* server and MUST be unique between instances of the NFSv4.1
* server."
*/
void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
write_seqlock(&nn->writeverf_lock);
nfsd_reset_write_verifier_locked(nn);
write_sequnlock(&nn->writeverf_lock);
}
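/*
 * A hypothetical trigger sketch: error paths that lose track of
 * unstable writes (e.g. a failed flush while handling COMMIT) reset
 * the verifier so clients notice the change and resend their
 * uncommitted data:
 *
 *	if (commit_failed)
 *		nfsd_reset_write_verifier(nn);
 */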
/*
* Crank up a set of per-namespace resources for a new NFSD instance,
* including lockd, a duplicate reply cache, an open file cache
* instance, and a cache of NFSv4 state objects.
*/
static int nfsd_startup_net(struct net *net, const struct cred *cred)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
if (nn->nfsd_net_up)
return 0;
ret = nfsd_startup_generic();
if (ret)
return ret;
ret = nfsd_init_socks(net, cred);
if (ret)
goto out_socks;
if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
ret = lockd_up(net, cred);
if (ret)
goto out_socks;
nn->lockd_up = true;
}
ret = nfsd_file_cache_start_net(net);
if (ret)
goto out_lockd;
ret = nfsd_reply_cache_init(nn);
if (ret)
goto out_filecache;
ret = nfs4_state_start_net(net);
if (ret)
goto out_reply_cache;
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
nfsd4_ssc_init_umount_work(nn);
#endif
nn->nfsd_net_up = true;
return 0;
out_reply_cache:
nfsd_reply_cache_shutdown(nn);
out_filecache:
nfsd_file_cache_shutdown_net(net);
out_lockd:
if (nn->lockd_up) {
lockd_down(net);
nn->lockd_up = false;
}
out_socks:
nfsd_shutdown_generic();
return ret;
}
static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs4_state_shutdown_net(net);
nfsd_reply_cache_shutdown(nn);
nfsd_file_cache_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
nn->lockd_up = false;
}
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct net *net = dev_net(dev);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
spin_unlock(&nfsd_notifier_lock);
out:
return NOTIFY_DONE;
}
static struct notifier_block nfsd_inetaddr_notifier = {
.notifier_call = nfsd_inetaddr_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static int nfsd_inet6addr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct net_device *dev = ifa->idev->dev;
struct net *net = dev_net(dev);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ifa->addr;
if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
spin_unlock(&nfsd_notifier_lock);
out:
return NOTIFY_DONE;
}
static struct notifier_block nfsd_inet6addr_notifier = {
.notifier_call = nfsd_inet6addr_event,
};
#endif
/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
static void nfsd_last_thread(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv = nn->nfsd_serv;
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
svc_xprt_destroy_all(serv, net);
/*
* write_ports can create the server without actually starting
* any threads--if we get shut down before any threads are
* started, then nfsd_last_thread will be run before any of this
* other initialization has been done except the rpcb information.
*/
svc_rpcb_cleanup(serv, net);
if (!nn->nfsd_net_up)
return;
nfsd_shutdown_net(net);
pr_info("nfsd: last server has exited, flushing export cache\n");
nfsd_export_flush(net);
}
void nfsd_reset_versions(struct nfsd_net *nn)
{
int i;
for (i = 0; i < NFSD_NRVERS; i++)
if (nfsd_vers(nn, i, NFSD_TEST))
return;
for (i = 0; i < NFSD_NRVERS; i++)
if (i != 4)
nfsd_vers(nn, i, NFSD_SET);
else {
int minor = 0;
while (nfsd_minorversion(nn, minor, NFSD_SET) >= 0)
minor++;
}
}
/*
 * Each session guarantees a negotiated per-slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
*
* For now this is a #defined shift which could be under admin control
* in the future.
*/
static void set_max_drc(void)
{
#define NFSD_DRC_SIZE_SHIFT 7
nfsd_drc_max_mem = (nr_free_buffer_pages()
>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
nfsd_drc_mem_used = 0;
dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
}
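/*
 * Worked example of the shift above (assuming 4KB pages): with
 * 1,048,576 free buffer pages (4GB), NFSD_DRC_SIZE_SHIFT of 7 caps the
 * DRC at 1/128 of that, i.e. 8192 pages * 4096 = 32MB of reply-cache
 * memory.
 */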
static int nfsd_get_default_max_blksize(void)
{
struct sysinfo i;
unsigned long long target;
unsigned long ret;
si_meminfo(&i);
target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
/*
	 * Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
* machines, but only uses 32K on 128M machines. Bottom out at
* 8K on 32M and smaller. Of course, this is only a default.
*/
target >>= 12;
ret = NFSSVC_MAXBLKSIZE;
while (ret > target && ret >= 8*1024*2)
ret /= 2;
return ret;
}
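/*
 * Worked example (assuming NFSSVC_MAXBLKSIZE is 1MB): on a 128MB
 * machine, target = 128MB >> 12 = 32KB, so the loop halves 1MB down to
 * 32KB; on a 32MB machine, target = 8KB and the "ret >= 8*1024*2"
 * guard lets one final halving take ret to 8KB, matching the comment
 * above.
 */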
void nfsd_shutdown_threads(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
mutex_lock(&nfsd_mutex);
serv = nn->nfsd_serv;
if (serv == NULL) {
mutex_unlock(&nfsd_mutex);
return;
}
svc_get(serv);
/* Kill outstanding nfsd threads */
svc_set_num_threads(serv, NULL, 0);
nfsd_last_thread(net);
svc_put(serv);
mutex_unlock(&nfsd_mutex);
}
bool i_am_nfsd(void)
{
return kthread_func(current) == nfsd;
}
int nfsd_create_serv(struct net *net)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nn->nfsd_serv) {
svc_get(nn->nfsd_serv);
return 0;
}
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd);
if (serv == NULL)
return -ENOMEM;
serv->sv_maxconn = nn->max_connections;
error = svc_bind(serv, net);
if (error < 0) {
svc_put(serv);
return error;
}
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = serv;
spin_unlock(&nfsd_notifier_lock);
set_max_drc();
/* check if the notifier is already set */
if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
nfsd_reset_write_verifier(nn);
return 0;
}
int nfsd_nrpools(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (nn->nfsd_serv == NULL)
return 0;
else
return nn->nfsd_serv->sv_nrpools;
}
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (nn->nfsd_serv != NULL) {
for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
}
return 0;
}
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
int tot = 0;
int err = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nn->nfsd_serv == NULL || n <= 0)
return 0;
if (n > nn->nfsd_serv->sv_nrpools)
n = nn->nfsd_serv->sv_nrpools;
/* enforce a global maximum number of threads */
tot = 0;
for (i = 0; i < n; i++) {
nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
tot += nthreads[i];
}
if (tot > NFSD_MAXSERVS) {
/* total too large: scale down requested numbers */
for (i = 0; i < n && tot > 0; i++) {
int new = nthreads[i] * NFSD_MAXSERVS / tot;
tot -= (nthreads[i] - new);
nthreads[i] = new;
}
for (i = 0; i < n && tot > 0; i++) {
nthreads[i]--;
tot--;
}
}
/*
* There must always be a thread in pool 0; the admin
* can't shut down NFS completely using pool_threads.
*/
if (nthreads[0] == 0)
nthreads[0] = 1;
/* apply the new numbers */
svc_get(nn->nfsd_serv);
for (i = 0; i < n; i++) {
err = svc_set_num_threads(nn->nfsd_serv,
&nn->nfsd_serv->sv_pools[i],
nthreads[i]);
if (err)
break;
}
svc_put(nn->nfsd_serv);
return err;
}
/*
* Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server if necessary, i.e.,
 * the first time nrservs is nonzero.
*/
int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
int error;
bool nfsd_up_before;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
nrservs = max(nrservs, 0);
nrservs = min(nrservs, NFSD_MAXSERVS);
error = 0;
if (nrservs == 0 && nn->nfsd_serv == NULL)
goto out;
strscpy(nn->nfsd_name, utsname()->nodename,
sizeof(nn->nfsd_name));
error = nfsd_create_serv(net);
if (error)
goto out;
nfsd_up_before = nn->nfsd_net_up;
serv = nn->nfsd_serv;
error = nfsd_startup_net(net, cred);
if (error)
goto out_put;
error = svc_set_num_threads(serv, NULL, nrservs);
if (error)
goto out_shutdown;
error = serv->sv_nrthreads;
if (error == 0)
nfsd_last_thread(net);
out_shutdown:
if (error < 0 && !nfsd_up_before)
nfsd_shutdown_net(net);
out_put:
/* Threads now hold service active */
if (xchg(&nn->keep_active, 0))
svc_put(serv);
svc_put(serv);
out:
mutex_unlock(&nfsd_mutex);
return error;
}
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static bool
nfsd_support_acl_version(int vers)
{
if (vers >= NFSD_ACL_MINVERS && vers < NFSD_ACL_NRVERS)
return nfsd_acl_version[vers] != NULL;
return false;
}
static int
nfsd_acl_rpcbind_set(struct net *net, const struct svc_program *progp,
u32 version, int family, unsigned short proto,
unsigned short port)
{
if (!nfsd_support_acl_version(version) ||
!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
return 0;
return svc_generic_rpcbind_set(net, progp, version, family,
proto, port);
}
static __be32
nfsd_acl_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
struct svc_process_info *ret)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
int i;
if (likely(nfsd_support_acl_version(rqstp->rq_vers) &&
nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
return svc_generic_init_request(rqstp, progp, ret);
ret->mismatch.lovers = NFSD_ACL_NRVERS;
for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) {
if (nfsd_support_acl_version(rqstp->rq_vers) &&
nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i;
break;
}
}
if (ret->mismatch.lovers == NFSD_ACL_NRVERS)
return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_ACL_MINVERS;
for (i = NFSD_ACL_NRVERS - 1; i >= NFSD_ACL_MINVERS; i--) {
if (nfsd_support_acl_version(rqstp->rq_vers) &&
nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i;
break;
}
}
return rpc_prog_mismatch;
}
#endif
static int
nfsd_rpcbind_set(struct net *net, const struct svc_program *progp,
u32 version, int family, unsigned short proto,
unsigned short port)
{
if (!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
return 0;
return svc_generic_rpcbind_set(net, progp, version, family,
proto, port);
}
static __be32
nfsd_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
struct svc_process_info *ret)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
int i;
if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
return svc_generic_init_request(rqstp, progp, ret);
ret->mismatch.lovers = NFSD_NRVERS;
for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i;
break;
}
}
if (ret->mismatch.lovers == NFSD_NRVERS)
return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_MINVERS;
for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i;
break;
}
}
return rpc_prog_mismatch;
}
/*
* This is the NFS server kernel thread
*/
static int
nfsd(void *vrqstp)
{
struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
struct net *net = perm_sock->xpt_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
* umask as defined by the client instead of init's umask. */
if (unshare_fs_struct() < 0) {
printk("Unable to start nfsd thread: out of memory\n");
goto out;
}
current->fs->umask = 0;
atomic_inc(&nfsdstats.th_cnt);
set_freezable();
/*
* The main request loop
*/
while (!kthread_should_stop()) {
/* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections;
svc_recv(rqstp);
validate_process_creds();
}
atomic_dec(&nfsdstats.th_cnt);
out:
/* Release the thread */
svc_exit_thread(rqstp);
return 0;
}
/**
* nfsd_dispatch - Process an NFS or NFSACL Request
* @rqstp: incoming request
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
*
* Return values:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
int nfsd_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *proc = rqstp->rq_procinfo;
__be32 *statp = rqstp->rq_accept_statp;
struct nfsd_cacherep *rp;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
rp = NULL;
switch (nfsd_cache_lookup(rqstp, &rp)) {
case RC_DOIT:
break;
case RC_REPLY:
goto out_cached_reply;
case RC_DROPIT:
goto out_dropit;
}
*statp = proc->pc_func(rqstp);
if (test_bit(RQ_DROPME, &rqstp->rq_flags))
goto out_update_drop;
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
return 1;
out_decode_err:
trace_nfsd_garbage_args_err(rqstp);
*statp = rpc_garbage_args;
return 1;
out_update_drop:
nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
out_dropit:
return 0;
out_encode_err:
trace_nfsd_cant_encode_err(rqstp);
nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
*statp = rpc_system_err;
return 1;
}
/**
* nfssvc_decode_voidarg - Decode void arguments
* @rqstp: Server RPC transaction context
* @xdr: XDR stream positioned at arguments to decode
*
* Return values:
* %false: Arguments were not valid
* %true: Decoding was successful
*/
bool nfssvc_decode_voidarg(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
return true;
}
/**
* nfssvc_encode_voidres - Encode void results
* @rqstp: Server RPC transaction context
* @xdr: XDR stream into which to encode results
*
* Return values:
* %false: Local error while encoding
* %true: Encoding was successful
*/
bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
return true;
}
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
int ret;
struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
mutex_lock(&nfsd_mutex);
if (nn->nfsd_serv == NULL) {
mutex_unlock(&nfsd_mutex);
return -ENODEV;
}
svc_get(nn->nfsd_serv);
ret = svc_pool_stats_open(nn->nfsd_serv, file);
mutex_unlock(&nfsd_mutex);
return ret;
}
int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct svc_serv *serv = seq->private;
int ret = seq_release(inode, file);
mutex_lock(&nfsd_mutex);
svc_put(serv);
mutex_unlock(&nfsd_mutex);
return ret;
}
| linux-master | fs/nfsd/nfssvc.c |
/*
* Common NFSv4 ACL handling code.
*
* Copyright (c) 2002, 2003 The Regents of the University of Michigan.
* All rights reserved.
*
* Marius Aamodt Eriksen <[email protected]>
* Jeff Sedlak <[email protected]>
* J. Bruce Fields <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include "nfsfh.h"
#include "nfsd.h"
#include "acl.h"
#include "vfs.h"
#define NFS4_ACL_TYPE_DEFAULT 0x01
#define NFS4_ACL_DIR 0x02
#define NFS4_ACL_OWNER 0x04
/* mode bit translations: */
#define NFS4_READ_MODE (NFS4_ACE_READ_DATA)
#define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA)
#define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE
#define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE)
#define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)
/* flags used to simulate posix default ACLs */
#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
| NFS4_ACE_DIRECTORY_INHERIT_ACE)
#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \
| NFS4_ACE_INHERIT_ONLY_ACE \
| NFS4_ACE_IDENTIFIER_GROUP)
static u32
mask_from_posix(unsigned short perm, unsigned int flags)
{
int mask = NFS4_ANYONE_MODE;
if (flags & NFS4_ACL_OWNER)
mask |= NFS4_OWNER_MODE;
if (perm & ACL_READ)
mask |= NFS4_READ_MODE;
if (perm & ACL_WRITE)
mask |= NFS4_WRITE_MODE;
if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
mask |= NFS4_ACE_DELETE_CHILD;
if (perm & ACL_EXECUTE)
mask |= NFS4_EXECUTE_MODE;
return mask;
}
static u32
deny_mask_from_posix(unsigned short perm, u32 flags)
{
u32 mask = 0;
if (perm & ACL_READ)
mask |= NFS4_READ_MODE;
if (perm & ACL_WRITE)
mask |= NFS4_WRITE_MODE;
if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
mask |= NFS4_ACE_DELETE_CHILD;
if (perm & ACL_EXECUTE)
mask |= NFS4_EXECUTE_MODE;
return mask;
}
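/*
 * A worked example of the translation above: mask_from_posix(rw-,
 * NFS4_ACL_DIR) yields NFS4_ANYONE_MODE | NFS4_READ_MODE |
 * NFS4_WRITE_MODE | NFS4_ACE_DELETE_CHILD -- POSIX "w" on a directory
 * implies the right to delete children, which has no separate POSIX
 * bit.
 */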
/* XXX: modify functions to return NFS errors; they're only ever
* used by nfs code, after all.... */
/* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
* side of being more restrictive, so the mode bit mapping below is
* pessimistic. An optimistic version would be needed to handle DENY's,
* but we expect to coalesce all ALLOWs and DENYs before mapping to mode
* bits. */
static void
low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
{
u32 write_mode = NFS4_WRITE_MODE;
if (flags & NFS4_ACL_DIR)
write_mode |= NFS4_ACE_DELETE_CHILD;
*mode = 0;
if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
*mode |= ACL_READ;
if ((perm & write_mode) == write_mode)
*mode |= ACL_WRITE;
if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
*mode |= ACL_EXECUTE;
}
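/*
 * A worked example of the pessimistic mapping: on a directory, an ACE
 * granting NFS4_ACE_WRITE_DATA and NFS4_ACE_APPEND_DATA but not
 * NFS4_ACE_DELETE_CHILD does not earn ACL_WRITE, because every bit in
 * write_mode must be present.
 */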
static short ace2type(struct nfs4_ace *);
static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *,
unsigned int);
int
nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
struct nfs4_acl **acl)
{
struct inode *inode = d_inode(dentry);
int error = 0;
struct posix_acl *pacl = NULL, *dpacl = NULL;
unsigned int flags = 0;
int size = 0;
pacl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (!pacl)
pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(pacl))
return PTR_ERR(pacl);
/* allocate for worst case: one (deny, allow) pair each: */
size += 2 * pacl->a_count;
if (S_ISDIR(inode->i_mode)) {
flags = NFS4_ACL_DIR;
dpacl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(dpacl)) {
error = PTR_ERR(dpacl);
goto rel_pacl;
}
if (dpacl)
size += 2 * dpacl->a_count;
}
*acl = kmalloc(nfs4_acl_bytes(size), GFP_KERNEL);
if (*acl == NULL) {
error = -ENOMEM;
goto out;
}
(*acl)->naces = 0;
_posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
if (dpacl)
_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
out:
posix_acl_release(dpacl);
rel_pacl:
posix_acl_release(pacl);
return error;
}
struct posix_acl_summary {
unsigned short owner;
unsigned short users;
unsigned short group;
unsigned short groups;
unsigned short other;
unsigned short mask;
};
static void
summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
{
struct posix_acl_entry *pa, *pe;
/*
* Only pas.users and pas.groups need initialization; previous
* posix_acl_valid() calls ensure that the other fields will be
* initialized in the following loop. But, just to placate gcc:
*/
memset(pas, 0, sizeof(*pas));
pas->mask = 07;
pe = acl->a_entries + acl->a_count;
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch (pa->e_tag) {
case ACL_USER_OBJ:
pas->owner = pa->e_perm;
break;
case ACL_GROUP_OBJ:
pas->group = pa->e_perm;
break;
case ACL_USER:
pas->users |= pa->e_perm;
break;
case ACL_GROUP:
pas->groups |= pa->e_perm;
break;
case ACL_OTHER:
pas->other = pa->e_perm;
break;
case ACL_MASK:
pas->mask = pa->e_perm;
break;
}
}
/* We'll only care about effective permissions: */
pas->users &= pas->mask;
pas->group &= pas->mask;
pas->groups &= pas->mask;
}
/* We assume the acl has been verified with posix_acl_valid. */
static void
_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
unsigned int flags)
{
struct posix_acl_entry *pa, *group_owner_entry;
struct nfs4_ace *ace;
struct posix_acl_summary pas;
unsigned short deny;
int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ?
NFS4_INHERITANCE_FLAGS | NFS4_ACE_INHERIT_ONLY_ACE : 0);
BUG_ON(pacl->a_count < 3);
summarize_posix_acl(pacl, &pas);
pa = pacl->a_entries;
ace = acl->aces + acl->naces;
/* We could deny everything not granted by the owner: */
deny = ~pas.owner;
/*
* but it is equivalent (and simpler) to deny only what is not
* granted by later entries:
*/
deny &= pas.users | pas.group | pas.groups | pas.other;
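	/*
	 * Worked example: if the owner has rw- and the union of the later
	 * entries is r-x, then deny = ~rw- & r-x = --x, so a single DENY
	 * of execute is emitted for OWNER@ rather than denying everything
	 * the owner lacks.
	 */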
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_OWNER;
ace++;
acl->naces++;
}
ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
ace->whotype = NFS4_ACL_WHO_OWNER;
ace++;
acl->naces++;
pa++;
while (pa->e_tag == ACL_USER) {
deny = ~(pa->e_perm & pas.mask);
deny &= pas.groups | pas.group | pas.other;
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
ace->who_uid = pa->e_uid;
ace++;
acl->naces++;
}
ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
ace->who_uid = pa->e_uid;
ace++;
acl->naces++;
pa++;
}
/* In the case of groups, we apply allow ACEs first, then deny ACEs,
* since a user can be in more than one group. */
/* allow ACEs */
group_owner_entry = pa;
ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = mask_from_posix(pas.group, flags);
ace->whotype = NFS4_ACL_WHO_GROUP;
ace++;
acl->naces++;
pa++;
while (pa->e_tag == ACL_GROUP) {
ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
ace->who_gid = pa->e_gid;
ace++;
acl->naces++;
pa++;
}
/* deny ACEs */
pa = group_owner_entry;
deny = ~pas.group & pas.other;
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_GROUP;
ace++;
acl->naces++;
}
pa++;
while (pa->e_tag == ACL_GROUP) {
deny = ~(pa->e_perm & pas.mask);
deny &= pas.other;
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
ace->who_gid = pa->e_gid;
ace++;
acl->naces++;
}
pa++;
}
if (pa->e_tag == ACL_MASK)
pa++;
ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
ace->flag = eflag;
ace->access_mask = mask_from_posix(pa->e_perm, flags);
ace->whotype = NFS4_ACL_WHO_EVERYONE;
acl->naces++;
}
static bool
pace_gt(struct posix_acl_entry *pace1, struct posix_acl_entry *pace2)
{
if (pace1->e_tag != pace2->e_tag)
return pace1->e_tag > pace2->e_tag;
if (pace1->e_tag == ACL_USER)
return uid_gt(pace1->e_uid, pace2->e_uid);
if (pace1->e_tag == ACL_GROUP)
return gid_gt(pace1->e_gid, pace2->e_gid);
return false;
}
static void
sort_pacl_range(struct posix_acl *pacl, int start, int end) {
int sorted = 0, i;
/* We just do a bubble sort; easy to do in place, and we're not
	 * expecting ACLs to be long enough to justify anything more. */
while (!sorted) {
sorted = 1;
for (i = start; i < end; i++) {
if (pace_gt(&pacl->a_entries[i],
&pacl->a_entries[i+1])) {
sorted = 0;
swap(pacl->a_entries[i],
pacl->a_entries[i + 1]);
}
}
}
}
static void
sort_pacl(struct posix_acl *pacl)
{
/* posix_acl_valid requires that users and groups be in order
* by uid/gid. */
int i, j;
/* no users or groups */
if (!pacl || pacl->a_count <= 4)
return;
i = 1;
while (pacl->a_entries[i].e_tag == ACL_USER)
i++;
sort_pacl_range(pacl, 1, i-1);
BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
j = ++i;
while (pacl->a_entries[j].e_tag == ACL_GROUP)
j++;
sort_pacl_range(pacl, i, j-1);
return;
}
/*
* While processing the NFSv4 ACE, this maintains bitmasks representing
* which permission bits have been allowed and which denied to a given
* entity: */
struct posix_ace_state {
u32 allow;
u32 deny;
};
struct posix_user_ace_state {
union {
kuid_t uid;
kgid_t gid;
};
struct posix_ace_state perms;
};
struct posix_ace_state_array {
int n;
struct posix_user_ace_state aces[];
};
/*
* While processing the NFSv4 ACE, this maintains the partial permissions
* calculated so far: */
struct posix_acl_state {
unsigned char valid;
struct posix_ace_state owner;
struct posix_ace_state group;
struct posix_ace_state other;
struct posix_ace_state everyone;
struct posix_ace_state mask; /* Deny unused in this case */
struct posix_ace_state_array *users;
struct posix_ace_state_array *groups;
};
static int
init_state(struct posix_acl_state *state, int cnt)
{
int alloc;
memset(state, 0, sizeof(struct posix_acl_state));
/*
* In the worst case, each individual acl could be for a distinct
* named user or group, but we don't know which, so we allocate
* enough space for either:
*/
alloc = sizeof(struct posix_ace_state_array)
+ cnt*sizeof(struct posix_user_ace_state);
state->users = kzalloc(alloc, GFP_KERNEL);
if (!state->users)
return -ENOMEM;
state->groups = kzalloc(alloc, GFP_KERNEL);
if (!state->groups) {
kfree(state->users);
return -ENOMEM;
}
return 0;
}
static void
free_state(struct posix_acl_state *state) {
kfree(state->users);
kfree(state->groups);
}
static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_state *astate)
{
state->mask.allow |= astate->allow;
}
static struct posix_acl *
posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
{
struct posix_acl_entry *pace;
struct posix_acl *pacl;
int nace;
int i;
/*
* ACLs with no ACEs are treated differently in the inheritable
 * and effective cases: when there are no inheritable ACEs, we
 * call ->set_acl with a NULL ACL structure.
*/
if (!state->valid && (flags & NFS4_ACL_TYPE_DEFAULT))
return NULL;
/*
* When there are no effective ACEs, the following will end
* up setting a 3-element effective posix ACL with all
* permissions zero.
*/
if (!state->users->n && !state->groups->n)
nace = 3;
else /* Note we also include a MASK ACE in this case: */
nace = 4 + state->users->n + state->groups->n;
pacl = posix_acl_alloc(nace, GFP_KERNEL);
if (!pacl)
return ERR_PTR(-ENOMEM);
pace = pacl->a_entries;
pace->e_tag = ACL_USER_OBJ;
low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags);
for (i=0; i < state->users->n; i++) {
pace++;
pace->e_tag = ACL_USER;
low_mode_from_nfs4(state->users->aces[i].perms.allow,
&pace->e_perm, flags);
pace->e_uid = state->users->aces[i].uid;
add_to_mask(state, &state->users->aces[i].perms);
}
pace++;
pace->e_tag = ACL_GROUP_OBJ;
low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags);
add_to_mask(state, &state->group);
for (i=0; i < state->groups->n; i++) {
pace++;
pace->e_tag = ACL_GROUP;
low_mode_from_nfs4(state->groups->aces[i].perms.allow,
&pace->e_perm, flags);
pace->e_gid = state->groups->aces[i].gid;
add_to_mask(state, &state->groups->aces[i].perms);
}
if (state->users->n || state->groups->n) {
pace++;
pace->e_tag = ACL_MASK;
low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
}
pace++;
pace->e_tag = ACL_OTHER;
low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags);
return pacl;
}
static inline void allow_bits(struct posix_ace_state *astate, u32 mask)
{
/* Allow all bits in the mask not already denied: */
astate->allow |= mask & ~astate->deny;
}
static inline void deny_bits(struct posix_ace_state *astate, u32 mask)
{
/* Deny all bits in the mask not already allowed: */
astate->deny |= mask & ~astate->allow;
}
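/*
 * A worked example of the first-match-wins ordering these helpers
 * implement: processing a DENY of NFS4_ACE_READ_DATA followed by an
 * ALLOW of the same bit leaves the bit denied -- allow_bits() masks
 * the new grant with ~astate->deny, so an earlier DENY always
 * prevails over a later ALLOW.
 */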
static int find_uid(struct posix_acl_state *state, kuid_t uid)
{
struct posix_ace_state_array *a = state->users;
int i;
for (i = 0; i < a->n; i++)
if (uid_eq(a->aces[i].uid, uid))
return i;
/* Not found: */
a->n++;
a->aces[i].uid = uid;
a->aces[i].perms.allow = state->everyone.allow;
a->aces[i].perms.deny = state->everyone.deny;
return i;
}
static int find_gid(struct posix_acl_state *state, kgid_t gid)
{
struct posix_ace_state_array *a = state->groups;
int i;
for (i = 0; i < a->n; i++)
if (gid_eq(a->aces[i].gid, gid))
return i;
/* Not found: */
a->n++;
a->aces[i].gid = gid;
a->aces[i].perms.allow = state->everyone.allow;
a->aces[i].perms.deny = state->everyone.deny;
return i;
}
static void deny_bits_array(struct posix_ace_state_array *a, u32 mask)
{
int i;
for (i=0; i < a->n; i++)
deny_bits(&a->aces[i].perms, mask);
}
static void allow_bits_array(struct posix_ace_state_array *a, u32 mask)
{
int i;
for (i=0; i < a->n; i++)
allow_bits(&a->aces[i].perms, mask);
}
static void process_one_v4_ace(struct posix_acl_state *state,
struct nfs4_ace *ace)
{
u32 mask = ace->access_mask;
short type = ace2type(ace);
int i;
state->valid |= type;
switch (type) {
case ACL_USER_OBJ:
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->owner, mask);
} else {
deny_bits(&state->owner, mask);
}
break;
case ACL_USER:
i = find_uid(state, ace->who_uid);
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->users->aces[i].perms, mask);
} else {
deny_bits(&state->users->aces[i].perms, mask);
mask = state->users->aces[i].perms.deny;
deny_bits(&state->owner, mask);
}
break;
case ACL_GROUP_OBJ:
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->group, mask);
} else {
deny_bits(&state->group, mask);
mask = state->group.deny;
deny_bits(&state->owner, mask);
deny_bits(&state->everyone, mask);
deny_bits_array(state->users, mask);
deny_bits_array(state->groups, mask);
}
break;
case ACL_GROUP:
i = find_gid(state, ace->who_gid);
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->groups->aces[i].perms, mask);
} else {
deny_bits(&state->groups->aces[i].perms, mask);
mask = state->groups->aces[i].perms.deny;
deny_bits(&state->owner, mask);
deny_bits(&state->group, mask);
deny_bits(&state->everyone, mask);
deny_bits_array(state->users, mask);
deny_bits_array(state->groups, mask);
}
break;
case ACL_OTHER:
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->owner, mask);
allow_bits(&state->group, mask);
allow_bits(&state->other, mask);
allow_bits(&state->everyone, mask);
allow_bits_array(state->users, mask);
allow_bits_array(state->groups, mask);
} else {
deny_bits(&state->owner, mask);
deny_bits(&state->group, mask);
deny_bits(&state->other, mask);
deny_bits(&state->everyone, mask);
deny_bits_array(state->users, mask);
deny_bits_array(state->groups, mask);
}
}
}
static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl,
struct posix_acl **pacl, struct posix_acl **dpacl,
unsigned int flags)
{
struct posix_acl_state effective_acl_state, default_acl_state;
struct nfs4_ace *ace;
int ret;
ret = init_state(&effective_acl_state, acl->naces);
if (ret)
return ret;
ret = init_state(&default_acl_state, acl->naces);
if (ret)
goto out_estate;
ret = -EINVAL;
for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
goto out_dstate;
if (ace->flag & ~NFS4_SUPPORTED_FLAGS)
goto out_dstate;
if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) {
process_one_v4_ace(&effective_acl_state, ace);
continue;
}
if (!(flags & NFS4_ACL_DIR))
goto out_dstate;
/*
* Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT
* is set, we're effectively turning on the other. That's OK,
* according to rfc 3530.
*/
process_one_v4_ace(&default_acl_state, ace);
if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
process_one_v4_ace(&effective_acl_state, ace);
}
/*
* At this point, the default ACL may have zeroed-out entries for owner,
 * group and other. That usually results in a nonsensical ACL that
 * denies all access except where an ACE was explicitly added.
*
* The setfacl command solves a similar problem with this logic:
*
* "If a Default ACL entry is created, and the Default ACL contains
* no owner, owning group, or others entry, a copy of the ACL
* owner, owning group, or others entry is added to the Default ACL."
*
* Copy any missing ACEs from the effective set, if any ACEs were
* explicitly set.
*/
if (default_acl_state.valid) {
if (!(default_acl_state.valid & ACL_USER_OBJ))
default_acl_state.owner = effective_acl_state.owner;
if (!(default_acl_state.valid & ACL_GROUP_OBJ))
default_acl_state.group = effective_acl_state.group;
if (!(default_acl_state.valid & ACL_OTHER))
default_acl_state.other = effective_acl_state.other;
}
*pacl = posix_state_to_acl(&effective_acl_state, flags);
if (IS_ERR(*pacl)) {
ret = PTR_ERR(*pacl);
*pacl = NULL;
goto out_dstate;
}
*dpacl = posix_state_to_acl(&default_acl_state,
flags | NFS4_ACL_TYPE_DEFAULT);
if (IS_ERR(*dpacl)) {
ret = PTR_ERR(*dpacl);
*dpacl = NULL;
posix_acl_release(*pacl);
*pacl = NULL;
goto out_dstate;
}
sort_pacl(*pacl);
sort_pacl(*dpacl);
ret = 0;
out_dstate:
free_state(&default_acl_state);
out_estate:
free_state(&effective_acl_state);
return ret;
}
__be32 nfsd4_acl_to_attr(enum nfs_ftype4 type, struct nfs4_acl *acl,
struct nfsd_attrs *attr)
{
int host_error;
unsigned int flags = 0;
if (!acl)
return nfs_ok;
if (type == NF4DIR)
flags = NFS4_ACL_DIR;
host_error = nfs4_acl_nfsv4_to_posix(acl, &attr->na_pacl,
&attr->na_dpacl, flags);
if (host_error == -EINVAL)
return nfserr_attrnotsupp;
else
return nfserrno(host_error);
}
static short
ace2type(struct nfs4_ace *ace)
{
switch (ace->whotype) {
case NFS4_ACL_WHO_NAMED:
return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ?
ACL_GROUP : ACL_USER);
case NFS4_ACL_WHO_OWNER:
return ACL_USER_OBJ;
case NFS4_ACL_WHO_GROUP:
return ACL_GROUP_OBJ;
case NFS4_ACL_WHO_EVERYONE:
return ACL_OTHER;
}
BUG();
return -1;
}
/*
* return the size of the struct nfs4_acl required to represent an acl
* with @entries entries.
*/
int nfs4_acl_bytes(int entries)
{
return sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace);
}
static struct {
char *string;
int stringlen;
int type;
} s2t_map[] = {
{
.string = "OWNER@",
.stringlen = sizeof("OWNER@") - 1,
.type = NFS4_ACL_WHO_OWNER,
},
{
.string = "GROUP@",
.stringlen = sizeof("GROUP@") - 1,
.type = NFS4_ACL_WHO_GROUP,
},
{
.string = "EVERYONE@",
.stringlen = sizeof("EVERYONE@") - 1,
.type = NFS4_ACL_WHO_EVERYONE,
},
};
int
nfs4_acl_get_whotype(char *p, u32 len)
{
int i;
for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
if (s2t_map[i].stringlen == len &&
		    memcmp(s2t_map[i].string, p, len) == 0)
return s2t_map[i].type;
}
return NFS4_ACL_WHO_NAMED;
}
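/*
 * Illustrative examples (not in the original source) of the mapping
 * implemented above via s2t_map:
 *
 *	nfs4_acl_get_whotype("OWNER@", 6)	  -> NFS4_ACL_WHO_OWNER
 *	nfs4_acl_get_whotype("EVERYONE@", 9)	  -> NFS4_ACL_WHO_EVERYONE
 *	nfs4_acl_get_whotype("alice@example", 13) -> NFS4_ACL_WHO_NAMED
 */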
__be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who)
{
__be32 *p;
int i;
for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
if (s2t_map[i].type != who)
continue;
p = xdr_reserve_space(xdr, s2t_map[i].stringlen + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, s2t_map[i].string,
s2t_map[i].stringlen);
return 0;
}
WARN_ON_ONCE(1);
return nfserr_serverfault;
}
| linux-master | fs/nfsd/nfs4acl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The NFSD open file cache.
*
* (c) 2015 - Jeff Layton <[email protected]>
*
* An nfsd_file object is a per-file collection of open state that binds
* together:
* - a struct file *
* - a user credential
* - a network namespace
* - a read-ahead context
* - monitoring for writeback errors
*
* nfsd_file objects are reference-counted. Consumers acquire a new
* object via the nfsd_file_acquire API. They manage their interest in
* the acquired object, and hence the object's reference count, via
* nfsd_file_get and nfsd_file_put. There are two varieties of nfsd_file
* object:
*
* * non-garbage-collected: When a consumer wants to precisely control
* the lifetime of a file's open state, it acquires a non-garbage-
* collected nfsd_file. The final nfsd_file_put releases the open
* state immediately.
*
* * garbage-collected: When a consumer does not control the lifetime
* of open state, it acquires a garbage-collected nfsd_file. The
* final nfsd_file_put allows the open state to linger for a period
* during which it may be re-used.
*/
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
#include <linux/rhashtable.h>
#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
struct nfsd_fcache_disposal {
struct work_struct work;
spinlock_t lock;
struct list_head freeme;
};
static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
static struct list_lru nfsd_file_lru;
static unsigned long nfsd_file_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static struct delayed_work nfsd_filecache_laundrette;
static struct rhltable nfsd_file_rhltable
____cacheline_aligned_in_smp;
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
int i;
if (!uid_eq(c1->fsuid, c2->fsuid))
return false;
if (!gid_eq(c1->fsgid, c2->fsgid))
return false;
if (c1->group_info == NULL || c2->group_info == NULL)
return c1->group_info == c2->group_info;
if (c1->group_info->ngroups != c2->group_info->ngroups)
return false;
for (i = 0; i < c1->group_info->ngroups; i++) {
if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
return false;
}
return true;
}
static const struct rhashtable_params nfsd_file_rhash_params = {
.key_len = sizeof_field(struct nfsd_file, nf_inode),
.key_offset = offsetof(struct nfsd_file, nf_inode),
.head_offset = offsetof(struct nfsd_file, nf_rlist),
/*
* Start with a single page hash table to reduce resizing churn
* on light workloads.
*/
.min_size = 256,
.automatic_shrinking = true,
};
static void
nfsd_file_schedule_laundrette(void)
{
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
NFSD_LAUNDRETTE_DELAY);
}
static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);
put_cred(nf->nf_cred);
kmem_cache_free(nfsd_file_slab, nf);
}
static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
nfm_mark);
kmem_cache_free(nfsd_file_mark_slab, nfm);
}
static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
if (!refcount_inc_not_zero(&nfm->nfm_ref))
return NULL;
return nfm;
}
static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
if (refcount_dec_and_test(&nfm->nfm_ref)) {
fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
fsnotify_put_mark(&nfm->nfm_mark);
}
}
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
{
int err;
struct fsnotify_mark *mark;
struct nfsd_file_mark *nfm = NULL, *new;
do {
fsnotify_group_lock(nfsd_file_fsnotify_group);
mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
nfsd_file_fsnotify_group);
if (mark) {
nfm = nfsd_file_mark_get(container_of(mark,
struct nfsd_file_mark,
nfm_mark));
fsnotify_group_unlock(nfsd_file_fsnotify_group);
if (nfm) {
fsnotify_put_mark(mark);
break;
}
/* Avoid soft lockup race with nfsd_file_mark_put() */
fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
fsnotify_put_mark(mark);
} else {
fsnotify_group_unlock(nfsd_file_fsnotify_group);
}
/* allocate a new nfm */
new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
if (!new)
return NULL;
fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
refcount_set(&new->nfm_ref, 1);
err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
/*
* If the add was successful, then return the object.
* Otherwise, we need to put the reference we hold on the
* nfm_mark. The fsnotify code will take a reference and put
* it on failure, so we can't just free it directly. It's also
* not safe to call fsnotify_destroy_mark on it as the
* mark->group will be NULL. Thus, we can't let the nfm_ref
* counter drive the destruction at this point.
*/
if (likely(!err))
nfm = new;
else
fsnotify_put_mark(&new->nfm_mark);
} while (unlikely(err == -EEXIST));
return nfm;
}
static struct nfsd_file *
nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
bool want_gc)
{
struct nfsd_file *nf;
nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
if (unlikely(!nf))
return NULL;
INIT_LIST_HEAD(&nf->nf_lru);
nf->nf_birthtime = ktime_get();
nf->nf_file = NULL;
nf->nf_cred = get_current_cred();
nf->nf_net = net;
nf->nf_flags = want_gc ?
BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING) | BIT(NFSD_FILE_GC) :
BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING);
nf->nf_inode = inode;
refcount_set(&nf->nf_ref, 1);
nf->nf_may = need;
nf->nf_mark = NULL;
return nf;
}
/**
* nfsd_file_check_write_error - check for writeback errors on a file
* @nf: nfsd_file to check for writeback errors
*
* Check whether a nfsd_file has an unseen error. Reset the write
* verifier if so.
*/
static void
nfsd_file_check_write_error(struct nfsd_file *nf)
{
struct file *file = nf->nf_file;
if ((file->f_mode & FMODE_WRITE) &&
filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}
static void
nfsd_file_hash_remove(struct nfsd_file *nf)
{
trace_nfsd_file_unhash(nf);
rhltable_remove(&nfsd_file_rhltable, &nf->nf_rlist,
nfsd_file_rhash_params);
}
static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
nfsd_file_hash_remove(nf);
return true;
}
return false;
}
static void
nfsd_file_free(struct nfsd_file *nf)
{
s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));
trace_nfsd_file_free(nf);
this_cpu_inc(nfsd_file_releases);
this_cpu_add(nfsd_file_total_age, age);
nfsd_file_unhash(nf);
if (nf->nf_mark)
nfsd_file_mark_put(nf->nf_mark);
if (nf->nf_file) {
nfsd_file_check_write_error(nf);
filp_close(nf->nf_file, NULL);
}
/*
* If this item is still linked via nf_lru, that's a bug.
* WARN and leak it to preserve system stability.
*/
if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
return;
call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
}
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
struct file *file = nf->nf_file;
struct address_space *mapping;
/* File not open for write? */
if (!(file->f_mode & FMODE_WRITE))
return false;
/*
* Some filesystems (e.g. NFS) flush all dirty data on close.
* On others, there is no need to wait for writeback.
*/
if (!(file_inode(file)->i_sb->s_export_op->flags & EXPORT_OP_FLUSH_ON_CLOSE))
return false;
mapping = file->f_mapping;
return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
static bool nfsd_file_lru_add(struct nfsd_file *nf)
{
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
if (list_lru_add(&nfsd_file_lru, &nf->nf_lru)) {
trace_nfsd_file_lru_add(nf);
return true;
}
return false;
}
static bool nfsd_file_lru_remove(struct nfsd_file *nf)
{
if (list_lru_del(&nfsd_file_lru, &nf->nf_lru)) {
trace_nfsd_file_lru_del(nf);
return true;
}
return false;
}
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
if (nf && refcount_inc_not_zero(&nf->nf_ref))
return nf;
return NULL;
}
/**
* nfsd_file_put - put the reference to a nfsd_file
* @nf: nfsd_file of which to put the reference
*
* Put a reference to a nfsd_file. In the non-GC case, we just put the
* reference immediately. In the GC case, if the reference would be
 * the last one, then put it on the LRU instead to be cleaned up later.
*/
void
nfsd_file_put(struct nfsd_file *nf)
{
might_sleep();
trace_nfsd_file_put(nf);
if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
/*
* If this is the last reference (nf_ref == 1), then try to
* transfer it to the LRU.
*/
if (refcount_dec_not_one(&nf->nf_ref))
return;
/* Try to add it to the LRU. If that fails, decrement. */
if (nfsd_file_lru_add(nf)) {
/* If it's still hashed, we're done */
if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
nfsd_file_schedule_laundrette();
return;
}
/*
* We're racing with unhashing, so try to remove it from
* the LRU. If removal fails, then someone else already
* has our reference.
*/
if (!nfsd_file_lru_remove(nf))
return;
}
}
if (refcount_dec_and_test(&nf->nf_ref))
nfsd_file_free(nf);
}
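/*
 * Illustrative sketch (not part of the original source): the usual
 * pairing of nfsd_file_get() and nfsd_file_put(). The helper name
 * example_share_nf() is an assumption for the demo.
 */
#if 0
static void example_share_nf(struct nfsd_file *nf)
{
	struct nfsd_file *extra = nfsd_file_get(nf);

	if (!extra)
		return;		/* refcount had already dropped to zero */
	/* ... hand @extra to another context ... */
	nfsd_file_put(extra);	/* a GC'd file may move to the LRU here */
}
#endif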
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
struct nfsd_file *nf;
while (!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del_init(&nf->nf_lru);
nfsd_file_free(nf);
}
}
/**
* nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
* @dispose: list of nfsd_files to be disposed
*
* Transfers each file to the "freeme" list for its nfsd_net, to eventually
* be disposed of by the per-net garbage collector.
*/
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
	while (!list_empty(dispose)) {
struct nfsd_file *nf = list_first_entry(dispose,
struct nfsd_file, nf_lru);
struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
spin_lock(&l->lock);
list_move_tail(&nf->nf_lru, &l->freeme);
spin_unlock(&l->lock);
queue_work(nfsd_filecache_wq, &l->work);
}
}
/**
* nfsd_file_lru_cb - Examine an entry on the LRU list
* @item: LRU entry to examine
* @lru: controlling LRU
* @lock: LRU list lock (unused)
* @arg: dispose list
*
* Return values:
* %LRU_REMOVED: @item was removed from the LRU
* %LRU_ROTATE: @item is to be moved to the LRU tail
* %LRU_SKIP: @item cannot be evicted
*/
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
spinlock_t *lock, void *arg)
__releases(lock)
__acquires(lock)
{
struct list_head *head = arg;
struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
/* We should only be dealing with GC entries here */
WARN_ON_ONCE(!test_bit(NFSD_FILE_GC, &nf->nf_flags));
/*
* Don't throw out files that are still undergoing I/O or
* that have uncleared errors pending.
*/
if (nfsd_file_check_writeback(nf)) {
trace_nfsd_file_gc_writeback(nf);
return LRU_SKIP;
}
/* If it was recently added to the list, skip it */
if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) {
trace_nfsd_file_gc_referenced(nf);
return LRU_ROTATE;
}
/*
* Put the reference held on behalf of the LRU. If it wasn't the last
* one, then just remove it from the LRU and ignore it.
*/
if (!refcount_dec_and_test(&nf->nf_ref)) {
trace_nfsd_file_gc_in_use(nf);
list_lru_isolate(lru, &nf->nf_lru);
return LRU_REMOVED;
}
/* Refcount went to zero. Unhash it and queue it to the dispose list */
nfsd_file_unhash(nf);
list_lru_isolate_move(lru, &nf->nf_lru, head);
this_cpu_inc(nfsd_file_evictions);
trace_nfsd_file_gc_disposed(nf);
return LRU_REMOVED;
}
static void
nfsd_file_gc(void)
{
LIST_HEAD(dispose);
unsigned long ret;
ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
&dispose, list_lru_count(&nfsd_file_lru));
trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
}
static void
nfsd_file_gc_worker(struct work_struct *work)
{
nfsd_file_gc();
if (list_lru_count(&nfsd_file_lru))
nfsd_file_schedule_laundrette();
}
static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
return list_lru_count(&nfsd_file_lru);
}
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
LIST_HEAD(dispose);
unsigned long ret;
ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
nfsd_file_lru_cb, &dispose);
trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
return ret;
}
static struct shrinker nfsd_file_shrinker = {
.scan_objects = nfsd_file_lru_scan,
.count_objects = nfsd_file_lru_count,
.seeks = 1,
};
/**
* nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
* @nf: nfsd_file to attempt to queue
* @dispose: private list to queue successfully-put objects
*
* Unhash an nfsd_file, try to get a reference to it, and then put that
* reference. If it's the last reference, queue it to the dispose list.
*/
static void
nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
__must_hold(RCU)
{
int decrement = 1;
/* If we raced with someone else unhashing, ignore it */
if (!nfsd_file_unhash(nf))
return;
/* If we can't get a reference, ignore it */
if (!nfsd_file_get(nf))
return;
/* Extra decrement if we remove from the LRU */
if (nfsd_file_lru_remove(nf))
++decrement;
/* If refcount goes to 0, then put on the dispose list */
if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
list_add(&nf->nf_lru, dispose);
trace_nfsd_file_closing(nf);
}
}
/**
 * nfsd_file_queue_for_close - try to close out any open nfsd_files for an inode
* @inode: inode on which to close out nfsd_files
* @dispose: list on which to gather nfsd_files to close out
*
* An nfsd_file represents a struct file being held open on behalf of nfsd.
 * An open file, however, can block other activity (such as leases) or cause
* undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
*
* This function is intended to find open nfsd_files when this sort of
* conflicting access occurs and then attempt to close those files out.
*
* Populates the dispose list with entries that have already had their
* refcounts go to zero. The actual free of an nfsd_file can be expensive,
* so we leave it up to the caller whether it wants to wait or not.
*/
static void
nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
{
struct rhlist_head *tmp, *list;
struct nfsd_file *nf;
rcu_read_lock();
list = rhltable_lookup(&nfsd_file_rhltable, &inode,
nfsd_file_rhash_params);
rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
if (!test_bit(NFSD_FILE_GC, &nf->nf_flags))
continue;
nfsd_file_cond_queue(nf, dispose);
}
rcu_read_unlock();
}
/**
* nfsd_file_close_inode - attempt a delayed close of a nfsd_file
* @inode: inode of the file to attempt to remove
*
* Close out any open nfsd_files that can be reaped for @inode. The
* actual freeing is deferred to the dispose_list_delayed infrastructure.
*
* This is used by the fsnotify callbacks and setlease notifier.
*/
static void
nfsd_file_close_inode(struct inode *inode)
{
LIST_HEAD(dispose);
nfsd_file_queue_for_close(inode, &dispose);
nfsd_file_dispose_list_delayed(&dispose);
}
/**
* nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
* @inode: inode of the file to attempt to remove
*
* Close out any open nfsd_files that can be reaped for @inode. The
* nfsd_files are closed out synchronously.
*
* This is called from nfsd_rename and nfsd_unlink to avoid silly-renames
* when reexporting NFS.
*/
void
nfsd_file_close_inode_sync(struct inode *inode)
{
struct nfsd_file *nf;
LIST_HEAD(dispose);
trace_nfsd_file_close(inode);
nfsd_file_queue_for_close(inode, &dispose);
while (!list_empty(&dispose)) {
nf = list_first_entry(&dispose, struct nfsd_file, nf_lru);
list_del_init(&nf->nf_lru);
nfsd_file_free(nf);
}
flush_delayed_fput();
}
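/*
 * Illustrative sketch (not part of the original source): per the comment
 * above, a rename/unlink path closes cached opens first so a re-exported
 * NFS mount does not silly-rename. example_pre_unlink() is an assumed
 * name; the real callers are nfsd_rename() and nfsd_unlink().
 */
#if 0
static void example_pre_unlink(struct dentry *dentry)
{
	nfsd_file_close_inode_sync(d_inode(dentry));
	/* ... then carry out the actual VFS unlink ... */
}
#endif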
/**
* nfsd_file_delayed_close - close unused nfsd_files
* @work: dummy
*
* Scrape the freeme list for this nfsd_net, and then dispose of them
* all.
*/
static void
nfsd_file_delayed_close(struct work_struct *work)
{
LIST_HEAD(head);
struct nfsd_fcache_disposal *l = container_of(work,
struct nfsd_fcache_disposal, work);
spin_lock(&l->lock);
list_splice_init(&l->freeme, &head);
spin_unlock(&l->lock);
nfsd_file_dispose_list(&head);
}
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
void *data)
{
struct file_lock *fl = data;
/* Only close files for F_SETLEASE leases */
if (fl->fl_flags & FL_LEASE)
nfsd_file_close_inode(file_inode(fl->fl_file));
return 0;
}
static struct notifier_block nfsd_file_lease_notifier = {
.notifier_call = nfsd_file_lease_notifier_call,
};
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
struct inode *inode, struct inode *dir,
const struct qstr *name, u32 cookie)
{
if (WARN_ON_ONCE(!inode))
return 0;
trace_nfsd_file_fsnotify_handle_event(inode, mask);
/* Should be no marks on non-regular files */
if (!S_ISREG(inode->i_mode)) {
WARN_ON_ONCE(1);
return 0;
}
/* don't close files if this was not the last link */
if (mask & FS_ATTRIB) {
if (inode->i_nlink)
return 0;
}
nfsd_file_close_inode(inode);
return 0;
}
static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
.handle_inode_event = nfsd_file_fsnotify_handle_event,
.free_mark = nfsd_file_mark_free,
};
int
nfsd_file_cache_init(void)
{
int ret;
lockdep_assert_held(&nfsd_mutex);
if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
return 0;
ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
if (ret)
return ret;
ret = -ENOMEM;
nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
if (!nfsd_filecache_wq)
goto out;
nfsd_file_slab = kmem_cache_create("nfsd_file",
sizeof(struct nfsd_file), 0, 0, NULL);
if (!nfsd_file_slab) {
pr_err("nfsd: unable to create nfsd_file_slab\n");
goto out_err;
}
nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
sizeof(struct nfsd_file_mark), 0, 0, NULL);
if (!nfsd_file_mark_slab) {
pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
goto out_err;
}
ret = list_lru_init(&nfsd_file_lru);
if (ret) {
pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
goto out_err;
}
ret = register_shrinker(&nfsd_file_shrinker, "nfsd-filecache");
if (ret) {
pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
goto out_lru;
}
ret = lease_register_notifier(&nfsd_file_lease_notifier);
if (ret) {
pr_err("nfsd: unable to register lease notifier: %d\n", ret);
goto out_shrinker;
}
nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops,
FSNOTIFY_GROUP_NOFS);
if (IS_ERR(nfsd_file_fsnotify_group)) {
pr_err("nfsd: unable to create fsnotify group: %ld\n",
PTR_ERR(nfsd_file_fsnotify_group));
ret = PTR_ERR(nfsd_file_fsnotify_group);
nfsd_file_fsnotify_group = NULL;
goto out_notifier;
}
INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
return ret;
out_notifier:
lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
unregister_shrinker(&nfsd_file_shrinker);
out_lru:
list_lru_destroy(&nfsd_file_lru);
out_err:
kmem_cache_destroy(nfsd_file_slab);
nfsd_file_slab = NULL;
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
destroy_workqueue(nfsd_filecache_wq);
nfsd_filecache_wq = NULL;
rhltable_destroy(&nfsd_file_rhltable);
goto out;
}
/**
 * __nfsd_file_cache_purge - clean out the cache for shutdown
* @net: net-namespace to shut down the cache (may be NULL)
*
* Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
* then close out everything. Called when an nfsd instance is being shut down,
* and when the exports table is flushed.
*/
static void
__nfsd_file_cache_purge(struct net *net)
{
struct rhashtable_iter iter;
struct nfsd_file *nf;
LIST_HEAD(dispose);
rhltable_walk_enter(&nfsd_file_rhltable, &iter);
do {
rhashtable_walk_start(&iter);
nf = rhashtable_walk_next(&iter);
while (!IS_ERR_OR_NULL(nf)) {
if (!net || nf->nf_net == net)
nfsd_file_cond_queue(nf, &dispose);
nf = rhashtable_walk_next(&iter);
}
rhashtable_walk_stop(&iter);
} while (nf == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
nfsd_file_dispose_list(&dispose);
}
static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(void)
{
struct nfsd_fcache_disposal *l;
l = kmalloc(sizeof(*l), GFP_KERNEL);
if (!l)
return NULL;
INIT_WORK(&l->work, nfsd_file_delayed_close);
spin_lock_init(&l->lock);
INIT_LIST_HEAD(&l->freeme);
return l;
}
static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
cancel_work_sync(&l->work);
nfsd_file_dispose_list(&l->freeme);
kfree(l);
}
static void
nfsd_free_fcache_disposal_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
nfsd_free_fcache_disposal(l);
}
int
nfsd_file_cache_start_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nn->fcache_disposal = nfsd_alloc_fcache_disposal();
return nn->fcache_disposal ? 0 : -ENOMEM;
}
/**
* nfsd_file_cache_purge - Remove all cache items associated with @net
* @net: target net namespace
*
*/
void
nfsd_file_cache_purge(struct net *net)
{
lockdep_assert_held(&nfsd_mutex);
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
__nfsd_file_cache_purge(net);
}
void
nfsd_file_cache_shutdown_net(struct net *net)
{
nfsd_file_cache_purge(net);
nfsd_free_fcache_disposal_net(net);
}
void
nfsd_file_cache_shutdown(void)
{
int i;
lockdep_assert_held(&nfsd_mutex);
if (test_and_clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
return;
lease_unregister_notifier(&nfsd_file_lease_notifier);
unregister_shrinker(&nfsd_file_shrinker);
/*
* make sure all callers of nfsd_file_lru_cb are done before
* calling nfsd_file_cache_purge
*/
cancel_delayed_work_sync(&nfsd_filecache_laundrette);
__nfsd_file_cache_purge(NULL);
list_lru_destroy(&nfsd_file_lru);
rcu_barrier();
fsnotify_put_group(nfsd_file_fsnotify_group);
nfsd_file_fsnotify_group = NULL;
kmem_cache_destroy(nfsd_file_slab);
nfsd_file_slab = NULL;
fsnotify_wait_marks_destroyed();
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
destroy_workqueue(nfsd_filecache_wq);
nfsd_filecache_wq = NULL;
rhltable_destroy(&nfsd_file_rhltable);
for_each_possible_cpu(i) {
per_cpu(nfsd_file_cache_hits, i) = 0;
per_cpu(nfsd_file_acquisitions, i) = 0;
per_cpu(nfsd_file_releases, i) = 0;
per_cpu(nfsd_file_total_age, i) = 0;
per_cpu(nfsd_file_evictions, i) = 0;
}
}
static struct nfsd_file *
nfsd_file_lookup_locked(const struct net *net, const struct cred *cred,
struct inode *inode, unsigned char need,
bool want_gc)
{
struct rhlist_head *tmp, *list;
struct nfsd_file *nf;
list = rhltable_lookup(&nfsd_file_rhltable, &inode,
nfsd_file_rhash_params);
rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
if (nf->nf_may != need)
continue;
if (nf->nf_net != net)
continue;
if (!nfsd_match_cred(nf->nf_cred, cred))
continue;
if (test_bit(NFSD_FILE_GC, &nf->nf_flags) != want_gc)
continue;
if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
continue;
if (!nfsd_file_get(nf))
continue;
return nf;
}
return NULL;
}
/**
* nfsd_file_is_cached - are there any cached open files for this inode?
* @inode: inode to check
*
* The lookup matches inodes in all net namespaces and is atomic wrt
* nfsd_file_acquire().
*
* Return values:
* %true: filecache contains at least one file matching this inode
* %false: filecache contains no files matching this inode
*/
bool
nfsd_file_is_cached(struct inode *inode)
{
struct rhlist_head *tmp, *list;
struct nfsd_file *nf;
bool ret = false;
rcu_read_lock();
list = rhltable_lookup(&nfsd_file_rhltable, &inode,
nfsd_file_rhash_params);
rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
if (test_bit(NFSD_FILE_GC, &nf->nf_flags)) {
ret = true;
break;
}
rcu_read_unlock();
trace_nfsd_file_is_cached(inode, (int)ret);
return ret;
}
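/*
 * Illustrative sketch (not part of the original source): the kind of
 * conflict check a caller might build on nfsd_file_is_cached(). The
 * helper name and the -EAGAIN policy are assumptions for the demo.
 */
#if 0
static int example_check_open_conflict(struct inode *inode)
{
	if (nfsd_file_is_cached(inode))
		return -EAGAIN;	/* some nfsd still holds a GC'd open */
	return 0;
}
#endif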
static __be32
nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf, bool want_gc)
{
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
const struct cred *cred;
bool open_retry = true;
struct inode *inode;
__be32 status;
int ret;
status = fh_verify(rqstp, fhp, S_IFREG,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
cred = get_current_cred();
retry:
rcu_read_lock();
nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
rcu_read_unlock();
if (nf) {
/*
* If the nf is on the LRU then it holds an extra reference
* that must be put if it's removed. It had better not be
* the last one however, since we should hold another.
*/
if (nfsd_file_lru_remove(nf))
WARN_ON_ONCE(refcount_dec_and_test(&nf->nf_ref));
goto wait_for_construction;
}
new = nfsd_file_alloc(net, inode, need, want_gc);
if (!new) {
status = nfserr_jukebox;
goto out;
}
rcu_read_lock();
spin_lock(&inode->i_lock);
nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
if (unlikely(nf)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
nfsd_file_slab_free(&new->nf_rcu);
goto wait_for_construction;
}
nf = new;
ret = rhltable_insert(&nfsd_file_rhltable, &nf->nf_rlist,
nfsd_file_rhash_params);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
if (likely(ret == 0))
goto open_file;
if (ret == -EEXIST)
goto retry;
trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
status = nfserr_jukebox;
goto construction_err;
wait_for_construction:
wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
/* Did construction of this file fail? */
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
trace_nfsd_file_cons_err(rqstp, inode, may_flags, nf);
if (!open_retry) {
status = nfserr_jukebox;
goto construction_err;
}
open_retry = false;
goto retry;
}
this_cpu_inc(nfsd_file_cache_hits);
status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
if (status != nfs_ok) {
nfsd_file_put(nf);
nf = NULL;
}
out:
if (status == nfs_ok) {
this_cpu_inc(nfsd_file_acquisitions);
nfsd_file_check_write_error(nf);
*pnf = nf;
}
put_cred(cred);
trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
return status;
open_file:
trace_nfsd_file_alloc(nf);
nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
if (nf->nf_mark) {
if (file) {
get_file(file);
nf->nf_file = file;
status = nfs_ok;
trace_nfsd_file_opened(nf, status);
} else {
status = nfsd_open_verified(rqstp, fhp, may_flags,
&nf->nf_file);
trace_nfsd_file_open(nf, status);
}
} else
status = nfserr_jukebox;
/*
 * If construction failed, or we raced with a call to unlink(),
 * then unhash.
*/
if (status != nfs_ok || inode->i_nlink == 0)
nfsd_file_unhash(nf);
clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
if (status == nfs_ok)
goto out;
construction_err:
if (refcount_dec_and_test(&nf->nf_ref))
nfsd_file_free(nf);
nf = NULL;
goto out;
}
/**
* nfsd_file_acquire_gc - Get a struct nfsd_file with an open file
* @rqstp: the RPC transaction being executed
* @fhp: the NFS filehandle of the file to be opened
* @may_flags: NFSD_MAY_ settings for the file
* @pnf: OUT: new or found "struct nfsd_file" object
*
* The nfsd_file object returned by this API is reference-counted
* and garbage-collected. The object is retained for a few
* seconds after the final nfsd_file_put() in case the caller
* wants to re-use it.
*
* Return values:
* %nfs_ok - @pnf points to an nfsd_file with its reference
* count boosted.
*
* On error, an nfsstat value in network byte order is returned.
*/
__be32
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
}
/**
* nfsd_file_acquire - Get a struct nfsd_file with an open file
* @rqstp: the RPC transaction being executed
* @fhp: the NFS filehandle of the file to be opened
* @may_flags: NFSD_MAY_ settings for the file
* @pnf: OUT: new or found "struct nfsd_file" object
*
 * The nfsd_file object returned by this API is reference-counted
* but not garbage-collected. The object is unhashed after the
* final nfsd_file_put().
*
* Return values:
* %nfs_ok - @pnf points to an nfsd_file with its reference
* count boosted.
*
* On error, an nfsstat value in network byte order is returned.
*/
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
}
/**
* nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
* @rqstp: the RPC transaction being executed
* @fhp: the NFS filehandle of the file just created
* @may_flags: NFSD_MAY_ settings for the file
* @file: cached, already-open file (may be NULL)
* @pnf: OUT: new or found "struct nfsd_file" object
*
* Acquire a nfsd_file object that is not GC'ed. If one doesn't already exist,
* and @file is non-NULL, use it to instantiate a new nfsd_file instead of
* opening a new one.
*
* Return values:
* %nfs_ok - @pnf points to an nfsd_file with its reference
* count boosted.
*
* On error, an nfsstat value in network byte order is returned.
*/
__be32
nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
}
/*
* Note that fields may be added, removed or reordered in the future. Programs
* scraping this file for info should test the labels to ensure they're
* getting the correct field.
*/
int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
unsigned long releases = 0, evictions = 0;
unsigned long hits = 0, acquisitions = 0;
unsigned int i, count = 0, buckets = 0;
unsigned long lru = 0, total_age = 0;
/* Serialize with server shutdown */
mutex_lock(&nfsd_mutex);
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1) {
struct bucket_table *tbl;
struct rhashtable *ht;
lru = list_lru_count(&nfsd_file_lru);
rcu_read_lock();
ht = &nfsd_file_rhltable.ht;
count = atomic_read(&ht->nelems);
tbl = rht_dereference_rcu(ht->tbl, ht);
buckets = tbl->size;
rcu_read_unlock();
}
mutex_unlock(&nfsd_mutex);
for_each_possible_cpu(i) {
hits += per_cpu(nfsd_file_cache_hits, i);
acquisitions += per_cpu(nfsd_file_acquisitions, i);
releases += per_cpu(nfsd_file_releases, i);
total_age += per_cpu(nfsd_file_total_age, i);
evictions += per_cpu(nfsd_file_evictions, i);
}
seq_printf(m, "total inodes: %u\n", count);
seq_printf(m, "hash buckets: %u\n", buckets);
seq_printf(m, "lru entries: %lu\n", lru);
seq_printf(m, "cache hits: %lu\n", hits);
seq_printf(m, "acquisitions: %lu\n", acquisitions);
seq_printf(m, "releases: %lu\n", releases);
seq_printf(m, "evictions: %lu\n", evictions);
if (releases)
seq_printf(m, "mean age (ms): %ld\n", total_age / releases);
else
seq_printf(m, "mean age (ms): -\n");
return 0;
}
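/*
 * Illustrative sample of the output produced above (the labels come from
 * the seq_printf() calls; the values are made-up placeholders):
 *
 *	total inodes: 42
 *	hash buckets: 256
 *	lru entries: 3
 *	cache hits: 1024
 *	acquisitions: 2048
 *	releases: 2040
 *	evictions: 12
 *	mean age (ms): 1500
 */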
| linux-master | fs/nfsd/filecache.c |