// SPDX-License-Identifier: GPL-2.0
/*
* Simple file system for zoned block devices exposing zones as files.
*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*/
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/magic.h>
#include <linux/iomap.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/crc32.h>
#include <linux/task_io_accounting_ops.h>
#include "zonefs.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
/*
* Get the name of a zone group directory.
*/
static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
{
switch (ztype) {
case ZONEFS_ZTYPE_CNV:
return "cnv";
case ZONEFS_ZTYPE_SEQ:
return "seq";
default:
WARN_ON_ONCE(1);
return "???";
}
}
/*
* Manage the active zone count.
*/
static void zonefs_account_active(struct super_block *sb,
struct zonefs_zone *z)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
if (zonefs_zone_is_cnv(z))
return;
/*
* For zones that transitioned to the offline or readonly condition,
* we only need to clear the active state.
*/
if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
goto out;
/*
* If the zone is active, that is, if it is explicitly open or
* partially written, check if it was already accounted as active.
*/
if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
(z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
z->z_flags |= ZONEFS_ZONE_ACTIVE;
atomic_inc(&sbi->s_active_seq_files);
}
return;
}
out:
/* The zone is not active. If it was, update the active count */
if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
atomic_dec(&sbi->s_active_seq_files);
}
}
/*
* Manage the active zone count. Called with zi->i_truncate_mutex held.
*/
void zonefs_inode_account_active(struct inode *inode)
{
lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
}
/*
* Execute a zone management operation.
*/
static int zonefs_zone_mgmt(struct super_block *sb,
struct zonefs_zone *z, enum req_op op)
{
int ret;
/*
* With ZNS drives, closing an explicitly open zone that has not been
* written will change the zone state to "closed", that is, the zone
* will remain active. Since this can then cause failure of explicit
* open operations on other zones if the drive's active zone resources
* are exceeded, make sure that the zone does not remain active by
* resetting it.
*/
if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
op = REQ_OP_ZONE_RESET;
trace_zonefs_zone_mgmt(sb, z, op);
ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
z->z_size >> SECTOR_SHIFT, GFP_NOFS);
if (ret) {
zonefs_err(sb,
"Zone management operation %s at %llu failed %d\n",
blk_op_str(op), z->z_sector, ret);
return ret;
}
return 0;
}
int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
{
lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
}
void zonefs_i_size_write(struct inode *inode, loff_t isize)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
i_size_write(inode, isize);
/*
* A full zone is no longer open/active and does not need
* explicit closing.
*/
if (isize >= z->z_capacity) {
struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
if (z->z_flags & ZONEFS_ZONE_ACTIVE)
atomic_dec(&sbi->s_active_seq_files);
z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
}
}
void zonefs_update_stats(struct inode *inode, loff_t new_isize)
{
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
loff_t old_isize = i_size_read(inode);
loff_t nr_blocks;
if (new_isize == old_isize)
return;
spin_lock(&sbi->s_lock);
/*
* This may be called for an update after an IO error.
* So beware of the values seen.
*/
if (new_isize < old_isize) {
nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
if (sbi->s_used_blocks > nr_blocks)
sbi->s_used_blocks -= nr_blocks;
else
sbi->s_used_blocks = 0;
} else {
sbi->s_used_blocks +=
(new_isize - old_isize) >> sb->s_blocksize_bits;
if (sbi->s_used_blocks > sbi->s_blocks)
sbi->s_used_blocks = sbi->s_blocks;
}
spin_unlock(&sbi->s_lock);
}
/*
* Check a zone condition. Return the amount of written (and still readable)
* data in the zone.
*/
static loff_t zonefs_check_zone_condition(struct super_block *sb,
struct zonefs_zone *z,
struct blk_zone *zone)
{
switch (zone->cond) {
case BLK_ZONE_COND_OFFLINE:
zonefs_warn(sb, "Zone %llu: offline zone\n",
z->z_sector);
z->z_flags |= ZONEFS_ZONE_OFFLINE;
return 0;
case BLK_ZONE_COND_READONLY:
/*
* The write pointer of read-only zones is invalid, so we cannot
* determine the zone wpoffset (inode size). We thus keep the
* zone wpoffset as is, which leads to an empty file
* (wpoffset == 0) on mount. For a runtime error, this keeps
* the inode size as it was when last updated so that the user
* can recover data.
*/
zonefs_warn(sb, "Zone %llu: read-only zone\n",
z->z_sector);
z->z_flags |= ZONEFS_ZONE_READONLY;
if (zonefs_zone_is_cnv(z))
return z->z_capacity;
return z->z_wpoffset;
case BLK_ZONE_COND_FULL:
/* The write pointer of full zones is invalid. */
return z->z_capacity;
default:
if (zonefs_zone_is_cnv(z))
return z->z_capacity;
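/* The readable data ends at the zone write pointer. */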
return (zone->wp - zone->start) << SECTOR_SHIFT;
}
}
/*
* Check a zone condition and adjust its inode access permissions for
* offline and readonly zones.
*/
static void zonefs_inode_update_mode(struct inode *inode)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
/* Offline zones cannot be read nor written */
inode->i_flags |= S_IMMUTABLE;
inode->i_mode &= ~0777;
} else if (z->z_flags & ZONEFS_ZONE_READONLY) {
/* Readonly zones cannot be written */
inode->i_flags |= S_IMMUTABLE;
if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
inode->i_mode &= ~0777;
else
inode->i_mode &= ~0222;
}
z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
z->z_mode = inode->i_mode;
}
struct zonefs_ioerr_data {
struct inode *inode;
bool write;
};
static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct zonefs_ioerr_data *err = data;
struct inode *inode = err->inode;
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
loff_t isize, data_size;
/*
* Check the zone condition: if the zone is not "bad" (offline or
* read-only), read errors are simply signaled to the IO issuer as long
* as there is no inconsistency between the inode size and the amount of
* data written in the zone (data_size).
*/
data_size = zonefs_check_zone_condition(sb, z, zone);
isize = i_size_read(inode);
if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
!err->write && isize == data_size)
return 0;
/*
* At this point, we detected either a bad zone or an inconsistency
* between the inode size and the amount of data written in the zone.
* For the latter case, the cause may be a write IO error or an external
* action on the device. Two error patterns exist:
* 1) The inode size is lower than the amount of data in the zone:
* a write operation partially failed and data was written at the end
* of the file. This can happen in the case of a large direct IO
* needing several BIOs and/or write requests to be processed.
* 2) The inode size is larger than the amount of data in the zone:
* this can happen with a deferred write error with the use of the
* device side write cache after getting successful write IO
* completions. Other possibilities are (a) an external corruption,
* e.g. an application reset the zone directly, or (b) the device
* has a serious problem (e.g. firmware bug).
*
* In all cases, warn about inode size inconsistency and handle the
* IO error according to the zone condition and to the mount options.
*/
if (zonefs_zone_is_seq(z) && isize != data_size)
zonefs_warn(sb,
"inode %lu: invalid size %lld (should be %lld)\n",
inode->i_ino, isize, data_size);
/*
* First handle bad zones signaled by hardware. The mount options
* errors=zone-ro and errors=zone-offline result in changing the
* zone condition to read-only and offline respectively, as if the
* condition was signaled by the hardware.
*/
if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
zonefs_warn(sb, "inode %lu: read/write access disabled\n",
inode->i_ino);
if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
z->z_flags |= ZONEFS_ZONE_OFFLINE;
zonefs_inode_update_mode(inode);
data_size = 0;
} else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
zonefs_warn(sb, "inode %lu: write access disabled\n",
inode->i_ino);
if (!(z->z_flags & ZONEFS_ZONE_READONLY))
z->z_flags |= ZONEFS_ZONE_READONLY;
zonefs_inode_update_mode(inode);
data_size = isize;
} else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
data_size > isize) {
/* Do not expose garbage data */
data_size = isize;
}
/*
* If the filesystem is mounted with the explicit-open mount option, we
* need to clear the ZONEFS_ZONE_OPEN flag if the zone transitioned to
* the read-only or offline condition, to avoid attempting an explicit
* close of the zone when the inode file is closed.
*/
if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
z->z_flags &= ~ZONEFS_ZONE_OPEN;
/*
* If errors=remount-ro was specified, any error results in remounting
* the volume as read-only.
*/
if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
zonefs_warn(sb, "remounting filesystem read-only\n");
sb->s_flags |= SB_RDONLY;
}
/*
* Update block usage stats and the inode size to prevent access to
* invalid data.
*/
zonefs_update_stats(inode, data_size);
zonefs_i_size_write(inode, data_size);
z->z_wpoffset = data_size;
zonefs_inode_account_active(inode);
return 0;
}
/*
* When a file IO error occurs, check the file zone to see if there is a change
* in the zone condition (e.g. offline or read-only). For a failed write to a
* sequential zone, the zone write pointer position must also be checked to
* correct, if needed, the file size and zonefs inode write pointer offset
* (which can be out of sync with the drive due to partial write failures).
*/
void __zonefs_io_error(struct inode *inode, bool write)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int noio_flag;
unsigned int nr_zones = 1;
struct zonefs_ioerr_data err = {
.inode = inode,
.write = write,
};
int ret;
/*
* The only files that have more than one zone are conventional zone
* files with aggregated conventional zones, for which the inode zone
* size is always larger than the device zone size.
*/
if (z->z_size > bdev_zone_sectors(sb->s_bdev))
nr_zones = z->z_size >>
(sbi->s_zone_sectors_shift + SECTOR_SHIFT);
/*
* Memory allocations in blkdev_report_zones() can trigger a memory
* reclaim which may in turn cause a recursion into zonefs as well as
* struct request allocations for the same device. The former case may
* end up in a deadlock on the inode truncate mutex, while the latter
* may prevent IO forward progress. Executing the report zones under
* the GFP_NOIO context avoids both problems.
*/
noio_flag = memalloc_noio_save();
ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
zonefs_io_error_cb, &err);
if (ret != nr_zones)
zonefs_err(sb, "Get inode %lu zone information failed %d\n",
inode->i_ino, ret);
memalloc_noio_restore(noio_flag);
}
static struct kmem_cache *zonefs_inode_cachep;
static struct inode *zonefs_alloc_inode(struct super_block *sb)
{
struct zonefs_inode_info *zi;
zi = alloc_inode_sb(sb, zonefs_inode_cachep, GFP_KERNEL);
if (!zi)
return NULL;
inode_init_once(&zi->i_vnode);
mutex_init(&zi->i_truncate_mutex);
zi->i_wr_refcnt = 0;
return &zi->i_vnode;
}
static void zonefs_free_inode(struct inode *inode)
{
kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
}
/*
* File system stat.
*/
static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype t;
buf->f_type = ZONEFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_namelen = ZONEFS_NAME_MAX;
spin_lock(&sbi->s_lock);
buf->f_blocks = sbi->s_blocks;
if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
buf->f_bfree = 0;
else
buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
buf->f_bavail = buf->f_bfree;
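/* One file per zone, plus one inode for each zone group directory. */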
for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
if (sbi->s_zgroup[t].g_nr_zones)
buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
}
buf->f_ffree = 0;
spin_unlock(&sbi->s_lock);
buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b);
return 0;
}
enum {
Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
Opt_explicit_open, Opt_err,
};
static const match_table_t tokens = {
{ Opt_errors_ro, "errors=remount-ro"},
{ Opt_errors_zro, "errors=zone-ro"},
{ Opt_errors_zol, "errors=zone-offline"},
{ Opt_errors_repair, "errors=repair"},
{ Opt_explicit_open, "explicit-open" },
{ Opt_err, NULL}
};
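/*
 * Example usage (the device path is illustrative only):
 *   mount -t zonefs -o errors=zone-ro,explicit-open /dev/sdX /mnt
 */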
static int zonefs_parse_options(struct super_block *sb, char *options)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
substring_t args[MAX_OPT_ARGS];
char *p;
if (!options)
return 0;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_errors_ro:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
break;
case Opt_errors_zro:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
break;
case Opt_errors_zol:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
break;
case Opt_errors_repair:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
break;
case Opt_explicit_open:
sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
seq_puts(seq, ",errors=remount-ro");
if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
seq_puts(seq, ",errors=zone-ro");
if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
seq_puts(seq, ",errors=zone-offline");
if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
seq_puts(seq, ",errors=repair");
return 0;
}
static int zonefs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
return zonefs_parse_options(sb, data);
}
static int zonefs_inode_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int ret;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (ret)
return ret;
/*
* Since files and directories cannot be created or deleted, do not
* allow setting any write attributes on the sub-directories grouping
* files by zone type.
*/
if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
(iattr->ia_mode & 0222))
return -EPERM;
if (((iattr->ia_valid & ATTR_UID) &&
!uid_eq(iattr->ia_uid, inode->i_uid)) ||
((iattr->ia_valid & ATTR_GID) &&
!gid_eq(iattr->ia_gid, inode->i_gid))) {
ret = dquot_transfer(&nop_mnt_idmap, inode, iattr);
if (ret)
return ret;
}
if (iattr->ia_valid & ATTR_SIZE) {
ret = zonefs_file_truncate(inode, iattr->ia_size);
if (ret)
return ret;
}
setattr_copy(&nop_mnt_idmap, inode, iattr);
if (S_ISREG(inode->i_mode)) {
struct zonefs_zone *z = zonefs_inode_zone(inode);
z->z_mode = inode->i_mode;
z->z_uid = inode->i_uid;
z->z_gid = inode->i_gid;
}
return 0;
}
static const struct inode_operations zonefs_file_inode_operations = {
.setattr = zonefs_inode_setattr,
};
static long zonefs_fname_to_fno(const struct qstr *fname)
{
const char *name = fname->name;
unsigned int len = fname->len;
long fno = 0, shift = 1;
const char *rname;
char c = *name;
unsigned int i;
/*
* File names are always a base-10 number string without any
* leading 0s.
*/
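/* E.g. "0" maps to 0 and "128" to 128, while "012" and "12a" are rejected. */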
if (!isdigit(c))
return -ENOENT;
if (len > 1 && c == '0')
return -ENOENT;
if (len == 1)
return c - '0';
for (i = 0, rname = name + len - 1; i < len; i++, rname--) {
c = *rname;
if (!isdigit(c))
return -ENOENT;
fno += (c - '0') * shift;
shift *= 10;
}
return fno;
}
static struct inode *zonefs_get_file_inode(struct inode *dir,
struct dentry *dentry)
{
struct zonefs_zone_group *zgroup = dir->i_private;
struct super_block *sb = dir->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_zone *z;
struct inode *inode;
ino_t ino;
long fno;
/* Get the file number from the file name */
fno = zonefs_fname_to_fno(&dentry->d_name);
if (fno < 0)
return ERR_PTR(fno);
if (!zgroup->g_nr_zones || fno >= zgroup->g_nr_zones)
return ERR_PTR(-ENOENT);
z = &zgroup->g_zones[fno];
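/*
 * The inode number of a zone file is the index of the zone backing it,
 * that is, the zone start sector divided by the device zone size.
 */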
ino = z->z_sector >> sbi->s_zone_sectors_shift;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
WARN_ON_ONCE(inode->i_private != z);
return inode;
}
inode->i_ino = ino;
inode->i_mode = z->z_mode;
inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
inode_get_ctime(dir));
inode->i_uid = z->z_uid;
inode->i_gid = z->z_gid;
inode->i_size = z->z_wpoffset;
inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
inode->i_private = z;
inode->i_op = &zonefs_file_inode_operations;
inode->i_fop = &zonefs_file_operations;
inode->i_mapping->a_ops = &zonefs_file_aops;
/* Update the inode access rights depending on the zone condition */
zonefs_inode_update_mode(inode);
unlock_new_inode(inode);
return inode;
}
static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
enum zonefs_ztype ztype)
{
struct inode *root = d_inode(sb->s_root);
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct inode *inode;
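/*
 * Zone group directory inode numbers start right after the root inode
 * number (bdev_nr_zones), past all zone file inode numbers.
 */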
ino_t ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
inode->i_ino = ino;
inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
inode_get_ctime(root));
inode->i_private = &sbi->s_zgroup[ztype];
set_nlink(inode, 2);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &zonefs_dir_operations;
unlock_new_inode(inode);
return inode;
}
static struct inode *zonefs_get_dir_inode(struct inode *dir,
struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
const char *name = dentry->d_name.name;
enum zonefs_ztype ztype;
/*
* We only need to check for the "seq" directory and
* the "cnv" directory if we have conventional zones.
*/
if (dentry->d_name.len != 3)
return ERR_PTR(-ENOENT);
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (sbi->s_zgroup[ztype].g_nr_zones &&
memcmp(name, zonefs_zgroup_name(ztype), 3) == 0)
break;
}
if (ztype == ZONEFS_ZTYPE_MAX)
return ERR_PTR(-ENOENT);
return zonefs_get_zgroup_inode(sb, ztype);
}
static struct dentry *zonefs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode;
if (dentry->d_name.len > ZONEFS_NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
if (dir == d_inode(dir->i_sb->s_root))
inode = zonefs_get_dir_inode(dir, dentry);
else
inode = zonefs_get_file_inode(dir, dentry);
if (IS_ERR(inode))
return ERR_CAST(inode);
return d_splice_alias(inode, dentry);
}
static int zonefs_readdir_root(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype ztype = ZONEFS_ZTYPE_CNV;
ino_t base_ino = bdev_nr_zones(sb->s_bdev) + 1;
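/*
 * Directory offsets 0 and 1 are the "." and ".." entries. Offsets 2
 * and 3 are the zone group directories present on the device.
 */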
if (ctx->pos >= inode->i_size)
return 0;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2) {
if (!sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones)
ztype = ZONEFS_ZTYPE_SEQ;
if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
base_ino + ztype, DT_DIR))
return 0;
ctx->pos++;
}
if (ctx->pos == 3 && ztype != ZONEFS_ZTYPE_SEQ) {
ztype = ZONEFS_ZTYPE_SEQ;
if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
base_ino + ztype, DT_DIR))
return 0;
ctx->pos++;
}
return 0;
}
static int zonefs_readdir_zgroup(struct file *file,
struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct zonefs_zone_group *zgroup = inode->i_private;
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_zone *z;
int fname_len;
char *fname;
ino_t ino;
int f;
/*
* The size of zone group directories is equal to the number
* of zone files in the group and does not include the "." and
* ".." entries. Hence the "+ 2" here.
*/
if (ctx->pos >= inode->i_size + 2)
return 0;
if (!dir_emit_dots(file, ctx))
return 0;
fname = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
if (!fname)
return -ENOMEM;
for (f = ctx->pos - 2; f < zgroup->g_nr_zones; f++) {
z = &zgroup->g_zones[f];
ino = z->z_sector >> sbi->s_zone_sectors_shift;
fname_len = snprintf(fname, ZONEFS_NAME_MAX - 1, "%u", f);
if (!dir_emit(ctx, fname, fname_len, ino, DT_REG))
break;
ctx->pos++;
}
kfree(fname);
return 0;
}
static int zonefs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
if (inode == d_inode(inode->i_sb->s_root))
return zonefs_readdir_root(file, ctx);
return zonefs_readdir_zgroup(file, ctx);
}
const struct inode_operations zonefs_dir_inode_operations = {
.lookup = zonefs_lookup,
.setattr = zonefs_inode_setattr,
};
const struct file_operations zonefs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = zonefs_readdir,
};
struct zonefs_zone_data {
struct super_block *sb;
unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
sector_t cnv_zone_start;
struct blk_zone *zones;
};
static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct zonefs_zone_data *zd = data;
struct super_block *sb = zd->sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
/*
* We do not care about the first zone: it contains the super block
* and is not exposed as a file.
*/
if (!idx)
return 0;
/*
* Count the number of zones that will be exposed as files.
* For sequential zones, we always have as many files as zones.
* For conventional zones, the number of files depends on whether
* conventional zone aggregation is enabled.
*/
switch (zone->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
if (sbi->s_features & ZONEFS_F_AGGRCNV) {
/* One file per set of contiguous conventional zones */
if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
zone->start != zd->cnv_zone_start)
sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
zd->cnv_zone_start = zone->start + zone->len;
} else {
/* One file per zone */
sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
}
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
break;
default:
zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
zone->type);
return -EIO;
}
memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
return 0;
}
static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
{
struct block_device *bdev = zd->sb->s_bdev;
int ret;
zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
GFP_KERNEL);
if (!zd->zones)
return -ENOMEM;
/* Get zones information from the device */
ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
zonefs_get_zone_info_cb, zd);
if (ret < 0) {
zonefs_err(zd->sb, "Zone report failed %d\n", ret);
return ret;
}
if (ret != bdev_nr_zones(bdev)) {
zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
ret, bdev_nr_zones(bdev));
return -EIO;
}
return 0;
}
static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
{
kvfree(zd->zones);
}
/*
* Create a zone group and populate it with zone files.
*/
static int zonefs_init_zgroup(struct super_block *sb,
struct zonefs_zone_data *zd,
enum zonefs_ztype ztype)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
struct blk_zone *zone, *next, *end;
struct zonefs_zone *z;
unsigned int n = 0;
int ret;
/* Allocate the zone group. If it is empty, we have nothing to do. */
if (!zgroup->g_nr_zones)
return 0;
zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
sizeof(struct zonefs_zone), GFP_KERNEL);
if (!zgroup->g_zones)
return -ENOMEM;
/*
* Initialize the zone groups using the device zone information.
* We always skip the first zone as it contains the super block
* and is not used to back a file.
*/
end = zd->zones + bdev_nr_zones(sb->s_bdev);
for (zone = &zd->zones[1]; zone < end; zone = next) {
next = zone + 1;
if (zonefs_zone_type(zone) != ztype)
continue;
if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
return -EINVAL;
/*
* For conventional zones, contiguous zones can be aggregated
* together to form larger files. Note that this overwrites the
* length of the first zone of the set of contiguous zones
* aggregated together. If one offline or read-only zone is
* found, assume that all zones aggregated have the same
* condition.
*/
if (ztype == ZONEFS_ZTYPE_CNV &&
(sbi->s_features & ZONEFS_F_AGGRCNV)) {
for (; next < end; next++) {
if (zonefs_zone_type(next) != ztype)
break;
zone->len += next->len;
zone->capacity += next->capacity;
if (next->cond == BLK_ZONE_COND_READONLY &&
zone->cond != BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_READONLY;
else if (next->cond == BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_OFFLINE;
}
}
z = &zgroup->g_zones[n];
if (ztype == ZONEFS_ZTYPE_CNV)
z->z_flags |= ZONEFS_ZONE_CNV;
z->z_sector = zone->start;
z->z_size = zone->len << SECTOR_SHIFT;
if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
!(sbi->s_features & ZONEFS_F_AGGRCNV)) {
zonefs_err(sb,
"Invalid zone size %llu (device zone sectors %llu)\n",
z->z_size,
bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
return -EINVAL;
}
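/*
 * The usable capacity of a zone may be smaller than the zone size
 * (e.g. with NVMe ZNS devices). The file size is the zone capacity,
 * capped to the maximum LFS file size.
 */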
z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
zone->capacity << SECTOR_SHIFT);
z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
z->z_mode = S_IFREG | sbi->s_perm;
z->z_uid = sbi->s_uid;
z->z_gid = sbi->s_gid;
/*
* Let zonefs_inode_update_mode() know that we will need
* special initialization of the inode mode the first time
* it is accessed.
*/
z->z_flags |= ZONEFS_ZONE_INIT_MODE;
sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;
/*
* For sequential zones, make sure that any open zone is closed
* first to ensure that the initial number of open zones is 0,
* in sync with the open zone accounting done when the mount
* option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
*/
if (ztype == ZONEFS_ZTYPE_SEQ &&
(zone->cond == BLK_ZONE_COND_IMP_OPEN ||
zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
if (ret)
return ret;
}
zonefs_account_active(sb, z);
n++;
}
if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
return -EINVAL;
zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
zonefs_zgroup_name(ztype),
zgroup->g_nr_zones,
zgroup->g_nr_zones > 1 ? "s" : "");
return 0;
}
static void zonefs_free_zgroups(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype ztype;
if (!sbi)
return;
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
kvfree(sbi->s_zgroup[ztype].g_zones);
sbi->s_zgroup[ztype].g_zones = NULL;
}
}
/*
* Create a zone group and populate it with zone files.
*/
static int zonefs_init_zgroups(struct super_block *sb)
{
struct zonefs_zone_data zd;
enum zonefs_ztype ztype;
int ret;
/* First get the device zone information */
memset(&zd, 0, sizeof(struct zonefs_zone_data));
zd.sb = sb;
ret = zonefs_get_zone_info(&zd);
if (ret)
goto cleanup;
/* Allocate and initialize the zone groups */
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
ret = zonefs_init_zgroup(sb, &zd, ztype);
if (ret) {
zonefs_info(sb,
"Zone group \"%s\" initialization failed\n",
zonefs_zgroup_name(ztype));
break;
}
}
cleanup:
zonefs_free_zone_info(&zd);
if (ret)
zonefs_free_zgroups(sb);
return ret;
}
/*
* Read super block information from the device.
*/
static int zonefs_read_super(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_super *super;
u32 crc, stored_crc;
struct page *page;
struct bio_vec bio_vec;
struct bio bio;
int ret;
page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = 0;
__bio_add_page(&bio, page, PAGE_SIZE, 0);
ret = submit_bio_wait(&bio);
if (ret)
goto free_page;
super = page_address(page);
ret = -EINVAL;
if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
goto free_page;
stored_crc = le32_to_cpu(super->s_crc);
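/* The on-disk CRC is computed with the s_crc field set to 0. */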
super->s_crc = 0;
crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
if (crc != stored_crc) {
zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
crc, stored_crc);
goto free_page;
}
sbi->s_features = le64_to_cpu(super->s_features);
if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
zonefs_err(sb, "Unknown features set 0x%llx\n",
sbi->s_features);
goto free_page;
}
if (sbi->s_features & ZONEFS_F_UID) {
sbi->s_uid = make_kuid(current_user_ns(),
le32_to_cpu(super->s_uid));
if (!uid_valid(sbi->s_uid)) {
zonefs_err(sb, "Invalid UID feature\n");
goto free_page;
}
}
if (sbi->s_features & ZONEFS_F_GID) {
sbi->s_gid = make_kgid(current_user_ns(),
le32_to_cpu(super->s_gid));
if (!gid_valid(sbi->s_gid)) {
zonefs_err(sb, "Invalid GID feature\n");
goto free_page;
}
}
if (sbi->s_features & ZONEFS_F_PERM)
sbi->s_perm = le32_to_cpu(super->s_perm);
if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
zonefs_err(sb, "Reserved area is being used\n");
goto free_page;
}
import_uuid(&sbi->s_uuid, super->s_uuid);
ret = 0;
free_page:
__free_page(page);
return ret;
}
static const struct super_operations zonefs_sops = {
.alloc_inode = zonefs_alloc_inode,
.free_inode = zonefs_free_inode,
.statfs = zonefs_statfs,
.remount_fs = zonefs_remount,
.show_options = zonefs_show_options,
};
static int zonefs_get_zgroup_inodes(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct inode *dir_inode;
enum zonefs_ztype ztype;
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (!sbi->s_zgroup[ztype].g_nr_zones)
continue;
dir_inode = zonefs_get_zgroup_inode(sb, ztype);
if (IS_ERR(dir_inode))
return PTR_ERR(dir_inode);
sbi->s_zgroup[ztype].g_inode = dir_inode;
}
return 0;
}
static void zonefs_release_zgroup_inodes(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype ztype;
if (!sbi)
return;
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (sbi->s_zgroup[ztype].g_inode) {
iput(sbi->s_zgroup[ztype].g_inode);
sbi->s_zgroup[ztype].g_inode = NULL;
}
}
}
/*
* Check that the device is zoned. If it is, get the list of zones and create
* sub-directories and files according to the device zone configuration and
* format options.
*/
static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
{
struct zonefs_sb_info *sbi;
struct inode *inode;
enum zonefs_ztype ztype;
int ret;
if (!bdev_is_zoned(sb->s_bdev)) {
zonefs_err(sb, "Not a zoned block device\n");
return -EINVAL;
}
/*
* Initialize super block information: the maximum file size is updated
* when the zone files are created so that the format option
* ZONEFS_F_AGGRCNV which increases the maximum file size of a file
* beyond the zone size is taken into account.
*/
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
spin_lock_init(&sbi->s_lock);
sb->s_fs_info = sbi;
sb->s_magic = ZONEFS_MAGIC;
sb->s_maxbytes = 0;
sb->s_op = &zonefs_sops;
sb->s_time_gran = 1;
/*
* The block size is set to the device zone write granularity to ensure
* that write operations are always aligned according to the device
* interface constraints.
*/
sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev));
sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
sbi->s_uid = GLOBAL_ROOT_UID;
sbi->s_gid = GLOBAL_ROOT_GID;
sbi->s_perm = 0640;
sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
atomic_set(&sbi->s_wro_seq_files, 0);
sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
atomic_set(&sbi->s_active_seq_files, 0);
sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
ret = zonefs_read_super(sb);
if (ret)
return ret;
ret = zonefs_parse_options(sb, data);
if (ret)
return ret;
zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
if (!sbi->s_max_wro_seq_files &&
!sbi->s_max_active_seq_files &&
sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
zonefs_info(sb,
"No open and active zone limits. Ignoring explicit_open mount option\n");
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
}
/* Initialize the zone groups */
ret = zonefs_init_zgroups(sb);
if (ret)
goto cleanup;
/* Create the root directory inode */
ret = -ENOMEM;
inode = new_inode(sb);
if (!inode)
goto cleanup;
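/*
 * Use the first inode number past the zone file inode numbers
 * (zone indexes 0 to bdev_nr_zones - 1) for the root directory.
 */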
inode->i_ino = bdev_nr_zones(sb->s_bdev);
inode->i_mode = S_IFDIR | 0555;
inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &zonefs_dir_operations;
inode->i_size = 2;
set_nlink(inode, 2);
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (sbi->s_zgroup[ztype].g_nr_zones) {
inc_nlink(inode);
inode->i_size++;
}
}
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto cleanup;
/*
* Take a reference on the zone groups directory inodes
* to keep them in the inode cache.
*/
ret = zonefs_get_zgroup_inodes(sb);
if (ret)
goto cleanup;
ret = zonefs_sysfs_register(sb);
if (ret)
goto cleanup;
return 0;
cleanup:
zonefs_release_zgroup_inodes(sb);
zonefs_free_zgroups(sb);
return ret;
}
static struct dentry *zonefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
}
static void zonefs_kill_super(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
/* Release the reference on the zone group directory inodes */
zonefs_release_zgroup_inodes(sb);
kill_block_super(sb);
zonefs_sysfs_unregister(sb);
zonefs_free_zgroups(sb);
kfree(sbi);
}
/*
* File system definition and registration.
*/
static struct file_system_type zonefs_type = {
.owner = THIS_MODULE,
.name = "zonefs",
.mount = zonefs_mount,
.kill_sb = zonefs_kill_super,
.fs_flags = FS_REQUIRES_DEV,
};
static int __init zonefs_init_inodecache(void)
{
zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
sizeof(struct zonefs_inode_info), 0,
(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
NULL);
if (zonefs_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void zonefs_destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy the inode cache.
*/
rcu_barrier();
kmem_cache_destroy(zonefs_inode_cachep);
}
static int __init zonefs_init(void)
{
int ret;
BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
ret = zonefs_init_inodecache();
if (ret)
return ret;
ret = zonefs_sysfs_init();
if (ret)
goto destroy_inodecache;
ret = register_filesystem(&zonefs_type);
if (ret)
goto sysfs_exit;
return 0;
sysfs_exit:
zonefs_sysfs_exit();
destroy_inodecache:
zonefs_destroy_inodecache();
return ret;
}
static void __exit zonefs_exit(void)
{
unregister_filesystem(&zonefs_type);
zonefs_sysfs_exit();
zonefs_destroy_inodecache();
}
MODULE_AUTHOR("Damien Le Moal");
MODULE_DESCRIPTION("Zone file system for zoned block devices");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS("zonefs");
module_init(zonefs_init);
module_exit(zonefs_exit);
/* linux-master: fs/zonefs/super.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Simple file system for zoned block devices exposing zones as files.
*
* Copyright (C) 2022 Western Digital Corporation or its affiliates.
*/
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include "zonefs.h"
struct zonefs_sysfs_attr {
struct attribute attr;
ssize_t (*show)(struct zonefs_sb_info *sbi, char *buf);
};
#define ZONEFS_SYSFS_ATTR_RO(name) \
static struct zonefs_sysfs_attr zonefs_sysfs_attr_##name = __ATTR_RO(name)
#define ATTR_LIST(name) &zonefs_sysfs_attr_##name.attr
static ssize_t zonefs_sysfs_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct zonefs_sb_info *sbi =
container_of(kobj, struct zonefs_sb_info, s_kobj);
struct zonefs_sysfs_attr *zonefs_attr =
container_of(attr, struct zonefs_sysfs_attr, attr);
if (!zonefs_attr->show)
return 0;
return zonefs_attr->show(sbi, buf);
}
static ssize_t max_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
{
return sysfs_emit(buf, "%u\n", sbi->s_max_wro_seq_files);
}
ZONEFS_SYSFS_ATTR_RO(max_wro_seq_files);
static ssize_t nr_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
{
return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_wro_seq_files));
}
ZONEFS_SYSFS_ATTR_RO(nr_wro_seq_files);
static ssize_t max_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
{
return sysfs_emit(buf, "%u\n", sbi->s_max_active_seq_files);
}
ZONEFS_SYSFS_ATTR_RO(max_active_seq_files);
static ssize_t nr_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
{
return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_active_seq_files));
}
ZONEFS_SYSFS_ATTR_RO(nr_active_seq_files);
static struct attribute *zonefs_sysfs_attrs[] = {
ATTR_LIST(max_wro_seq_files),
ATTR_LIST(nr_wro_seq_files),
ATTR_LIST(max_active_seq_files),
ATTR_LIST(nr_active_seq_files),
NULL,
};
ATTRIBUTE_GROUPS(zonefs_sysfs);
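/* These attributes are exposed under /sys/fs/zonefs/<device name>/. */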
static void zonefs_sysfs_sb_release(struct kobject *kobj)
{
struct zonefs_sb_info *sbi =
container_of(kobj, struct zonefs_sb_info, s_kobj);
complete(&sbi->s_kobj_unregister);
}
static const struct sysfs_ops zonefs_sysfs_attr_ops = {
.show = zonefs_sysfs_attr_show,
};
static const struct kobj_type zonefs_sb_ktype = {
.default_groups = zonefs_sysfs_groups,
.sysfs_ops = &zonefs_sysfs_attr_ops,
.release = zonefs_sysfs_sb_release,
};
static struct kobject *zonefs_sysfs_root;
int zonefs_sysfs_register(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
int ret;
init_completion(&sbi->s_kobj_unregister);
ret = kobject_init_and_add(&sbi->s_kobj, &zonefs_sb_ktype,
zonefs_sysfs_root, "%s", sb->s_id);
if (ret) {
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
return ret;
}
sbi->s_sysfs_registered = true;
return 0;
}
void zonefs_sysfs_unregister(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
if (!sbi || !sbi->s_sysfs_registered)
return;
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
}
int __init zonefs_sysfs_init(void)
{
zonefs_sysfs_root = kobject_create_and_add("zonefs", fs_kobj);
if (!zonefs_sysfs_root)
return -ENOMEM;
return 0;
}
void zonefs_sysfs_exit(void)
{
kobject_put(zonefs_sysfs_root);
zonefs_sysfs_root = NULL;
}
/* linux-master: fs/zonefs/sysfs.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Simple file system for zoned block devices exposing zones as files.
*
* Copyright (C) 2022 Western Digital Corporation or its affiliates.
*/
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "zonefs.h"
#include "trace.h"
static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
/*
* All blocks are always mapped below EOF. If reading past EOF,
* act as if there is a hole up to the file maximum size.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
isize = i_size_read(inode);
if (iomap->offset >= isize) {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
iomap->length = length;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
iomap->length = isize - iomap->offset;
}
mutex_unlock(&zi->i_truncate_mutex);
trace_zonefs_iomap_begin(inode, iomap);
return 0;
}
static const struct iomap_ops zonefs_read_iomap_ops = {
.iomap_begin = zonefs_read_iomap_begin,
};
static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
/* All write I/Os should always be within the file maximum size */
if (WARN_ON_ONCE(offset + length > z->z_capacity))
return -EIO;
/*
* Sequential zones can only accept direct writes. This is already
* checked when writes are issued, so warn if we see a page writeback
* operation.
*/
if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
return -EIO;
/*
* For conventional zones, all blocks are always mapped. For sequential
* zones, all blocks are always mapped below the inode size (the zone
* write pointer) and unwritten beyond it.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
isize = i_size_read(inode);
if (iomap->offset >= isize) {
iomap->type = IOMAP_UNWRITTEN;
iomap->length = z->z_capacity - iomap->offset;
} else {
iomap->type = IOMAP_MAPPED;
iomap->length = isize - iomap->offset;
}
mutex_unlock(&zi->i_truncate_mutex);
trace_zonefs_iomap_begin(inode, iomap);
return 0;
}
static const struct iomap_ops zonefs_write_iomap_ops = {
.iomap_begin = zonefs_write_iomap_begin,
};
static int zonefs_read_folio(struct file *unused, struct folio *folio)
{
return iomap_read_folio(folio, &zonefs_read_iomap_ops);
}
static void zonefs_readahead(struct readahead_control *rac)
{
iomap_readahead(rac, &zonefs_read_iomap_ops);
}
/*
* Map blocks for page writeback. This is used only on conventional zone files,
* which implies that the page range can only be within the fixed inode size.
*/
static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
struct inode *inode, loff_t offset)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
return -EIO;
if (WARN_ON_ONCE(offset >= i_size_read(inode)))
return -EIO;
/* If the mapping is already OK, nothing needs to be done */
if (offset >= wpc->iomap.offset &&
offset < wpc->iomap.offset + wpc->iomap.length)
return 0;
return zonefs_write_iomap_begin(inode, offset,
z->z_capacity - offset,
IOMAP_WRITE, &wpc->iomap, NULL);
}
static const struct iomap_writeback_ops zonefs_writeback_ops = {
.map_blocks = zonefs_write_map_blocks,
};
static int zonefs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct iomap_writepage_ctx wpc = { };
return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
}
static int zonefs_swap_activate(struct swap_info_struct *sis,
struct file *swap_file, sector_t *span)
{
struct inode *inode = file_inode(swap_file);
if (zonefs_inode_is_seq(inode)) {
zonefs_err(inode->i_sb,
"swap file: not a conventional zone file\n");
return -EINVAL;
}
return iomap_swapfile_activate(sis, swap_file, span,
&zonefs_read_iomap_ops);
}
const struct address_space_operations zonefs_file_aops = {
.read_folio = zonefs_read_folio,
.readahead = zonefs_readahead,
.writepages = zonefs_writepages,
.dirty_folio = iomap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
.swap_activate = zonefs_swap_activate,
};
int zonefs_file_truncate(struct inode *inode, loff_t isize)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t old_isize;
enum req_op op;
int ret = 0;
/*
* Only sequential zone files can be truncated and truncation is allowed
* only down to a 0 size, which is equivalent to a zone reset, and to
* the maximum file size, which is equivalent to a zone finish.
*/
if (!zonefs_zone_is_seq(z))
return -EPERM;
if (!isize)
op = REQ_OP_ZONE_RESET;
else if (isize == z->z_capacity)
op = REQ_OP_ZONE_FINISH;
else
return -EPERM;
inode_dio_wait(inode);
/* Serialize against page faults */
filemap_invalidate_lock(inode->i_mapping);
/* Serialize against zonefs_iomap_begin() */
mutex_lock(&zi->i_truncate_mutex);
old_isize = i_size_read(inode);
if (isize == old_isize)
goto unlock;
ret = zonefs_inode_zone_mgmt(inode, op);
if (ret)
goto unlock;
/*
* If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
* take care of open zones.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN) {
/*
* Truncating a zone to EMPTY or FULL is the equivalent of
* closing the zone. For a truncation to 0, we need to
* re-open the zone to ensure new writes can be processed.
* For a truncation to the maximum file size, the zone is
* closed and writes cannot be accepted anymore, so clear
* the open flag.
*/
if (!isize)
ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
else
z->z_flags &= ~ZONEFS_ZONE_OPEN;
}
zonefs_update_stats(inode, isize);
truncate_setsize(inode, isize);
z->z_wpoffset = isize;
zonefs_inode_account_active(inode);
unlock:
mutex_unlock(&zi->i_truncate_mutex);
filemap_invalidate_unlock(inode->i_mapping);
return ret;
}
static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file_inode(file);
int ret = 0;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
* Since only direct writes are allowed in sequential files, page cache
* flush is needed only for conventional zone files.
*/
if (zonefs_inode_is_cnv(inode))
ret = file_write_and_wait_range(file, start, end);
if (!ret)
ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (ret)
zonefs_io_error(inode, true);
return ret;
}
static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
vm_fault_t ret;
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
/*
* Sanity check: only conventional zone files can have shared
* writeable mappings.
*/
if (zonefs_inode_is_seq(inode))
return VM_FAULT_NOPAGE;
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
/* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping);
ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
return ret;
}
static const struct vm_operations_struct zonefs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = zonefs_filemap_page_mkwrite,
};
static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
/*
* Conventional zones accept random writes, so their files can support
* shared writable mappings. For sequential zone files, only read
* mappings are possible since there are no guarantees for write
* ordering between msync() and page cache writeback.
*/
if (zonefs_inode_is_seq(file_inode(file)) &&
(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
file_accessed(file);
vma->vm_ops = &zonefs_file_vm_ops;
return 0;
}
static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
{
loff_t isize = i_size_read(file_inode(file));
/*
* Seeks are limited to below the zone size for conventional zones
* and below the zone write pointer for sequential zones. In both
* cases, this limit is the inode size.
*/
return generic_file_llseek_size(file, offset, whence, isize, isize);
}
static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
if (error) {
zonefs_io_error(inode, true);
return error;
}
if (size && zonefs_inode_is_seq(inode)) {
/*
* Note that we may be seeing completions out of order,
* but that is not a problem since a write completed
* successfully necessarily means that all preceding writes
* were also successful. So we can safely increase the inode
* size to the write end location.
*/
mutex_lock(&zi->i_truncate_mutex);
if (i_size_read(inode) < iocb->ki_pos + size) {
zonefs_update_stats(inode, iocb->ki_pos + size);
zonefs_i_size_write(inode, iocb->ki_pos + size);
}
mutex_unlock(&zi->i_truncate_mutex);
}
return 0;
}
static const struct iomap_dio_ops zonefs_write_dio_ops = {
.end_io = zonefs_file_write_dio_end_io,
};
/*
* Do not exceed the LFS limits nor the file zone size. If pos is under the
* limit it becomes a short access. If it exceeds the limit, return -EFBIG.
*/
static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
loff_t count)
{
struct inode *inode = file_inode(file);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t limit = rlimit(RLIMIT_FSIZE);
loff_t max_size = z->z_capacity;
if (limit != RLIM_INFINITY) {
if (pos >= limit) {
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
}
count = min(count, limit - pos);
}
if (!(file->f_flags & O_LARGEFILE))
max_size = min_t(loff_t, MAX_NON_LFS, max_size);
if (unlikely(pos >= max_size))
return -EFBIG;
return min(count, max_size - pos);
}
static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t count;
if (IS_SWAPFILE(inode))
return -ETXTBSY;
if (!iov_iter_count(from))
return 0;
if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
return -EINVAL;
if (iocb->ki_flags & IOCB_APPEND) {
if (zonefs_zone_is_cnv(z))
return -EINVAL;
mutex_lock(&zi->i_truncate_mutex);
iocb->ki_pos = z->z_wpoffset;
mutex_unlock(&zi->i_truncate_mutex);
}
count = zonefs_write_check_limits(file, iocb->ki_pos,
iov_iter_count(from));
if (count < 0)
return count;
iov_iter_truncate(from, count);
return iov_iter_count(from);
}
/*
* Handle direct writes. For sequential zone files, this is the only possible
* write path. For these files, check that the user is issuing writes
* sequentially from the end of the file. This code assumes that the block layer
* delivers write requests to the device in sequential order. This is always the
* case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
* elevator feature is being used (e.g. mq-deadline). The block layer
* automatically selects such an elevator for zoned block devices during
* device initialization.
*/
static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
ssize_t ret, count;
/*
* For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
* as this can cause write reordering (e.g. the first aio gets EAGAIN
* on the inode lock but the second goes through but is now unaligned).
*/
if (zonefs_zone_is_seq(z) && !is_sync_kiocb(iocb) &&
(iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}
count = zonefs_write_checks(iocb, from);
if (count <= 0) {
ret = count;
goto inode_unlock;
}
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
goto inode_unlock;
}
/* Enforce sequential writes (append only) in sequential zones */
if (zonefs_zone_is_seq(z)) {
mutex_lock(&zi->i_truncate_mutex);
if (iocb->ki_pos != z->z_wpoffset) {
mutex_unlock(&zi->i_truncate_mutex);
ret = -EINVAL;
goto inode_unlock;
}
mutex_unlock(&zi->i_truncate_mutex);
}
/*
* iomap_dio_rw() may return ENOTBLK if there was an issue with
* page invalidation. Overwrite that error code with EBUSY so that
* the user can make sense of the error.
*/
ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
&zonefs_write_dio_ops, 0, NULL, 0);
if (ret == -ENOTBLK)
ret = -EBUSY;
if (zonefs_zone_is_seq(z) &&
(ret > 0 || ret == -EIOCBQUEUED)) {
if (ret > 0)
count = ret;
/*
* Update the zone write pointer offset assuming the write
* operation succeeded. If it did not, the error recovery path
* will correct it. Also do active seq file accounting.
*/
mutex_lock(&zi->i_truncate_mutex);
z->z_wpoffset += count;
zonefs_inode_account_active(inode);
mutex_unlock(&zi->i_truncate_mutex);
}
inode_unlock:
inode_unlock(inode);
return ret;
}
static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
/*
* Direct IO writes are mandatory for sequential zone files so that the
* write IO issuing order is preserved.
*/
if (zonefs_inode_is_seq(inode))
return -EIO;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}
ret = zonefs_write_checks(iocb, from);
if (ret <= 0)
goto inode_unlock;
ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
if (ret == -EIO)
zonefs_io_error(inode, true);
inode_unlock:
inode_unlock(inode);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
return ret;
}
static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_zone *z = zonefs_inode_zone(inode);
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
if (sb_rdonly(inode->i_sb))
return -EROFS;
/* Write operations beyond the zone capacity are not allowed */
if (iocb->ki_pos >= z->z_capacity)
return -EFBIG;
if (iocb->ki_flags & IOCB_DIRECT) {
ssize_t ret = zonefs_file_dio_write(iocb, from);
if (ret != -ENOTBLK)
return ret;
}
return zonefs_file_buffered_write(iocb, from);
}
static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
if (error) {
zonefs_io_error(file_inode(iocb->ki_filp), false);
return error;
}
return 0;
}
static const struct iomap_dio_ops zonefs_read_dio_ops = {
.end_io = zonefs_file_read_dio_end_io,
};
static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
ssize_t ret;
/* Offline zones cannot be read */
if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
return -EPERM;
if (iocb->ki_pos >= z->z_capacity)
return 0;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock_shared(inode))
return -EAGAIN;
} else {
inode_lock_shared(inode);
}
/* Limit read operations to written data */
mutex_lock(&zi->i_truncate_mutex);
isize = i_size_read(inode);
if (iocb->ki_pos >= isize) {
mutex_unlock(&zi->i_truncate_mutex);
ret = 0;
goto inode_unlock;
}
iov_iter_truncate(to, isize - iocb->ki_pos);
mutex_unlock(&zi->i_truncate_mutex);
if (iocb->ki_flags & IOCB_DIRECT) {
size_t count = iov_iter_count(to);
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
goto inode_unlock;
}
file_accessed(iocb->ki_filp);
ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
&zonefs_read_dio_ops, 0, NULL, 0);
} else {
ret = generic_file_read_iter(iocb, to);
if (ret == -EIO)
zonefs_io_error(inode, false);
}
inode_unlock:
inode_unlock_shared(inode);
return ret;
}
static ssize_t zonefs_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
struct inode *inode = file_inode(in);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t isize;
ssize_t ret = 0;
/* Offline zones cannot be read */
if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
return -EPERM;
if (*ppos >= z->z_capacity)
return 0;
inode_lock_shared(inode);
/* Limit read operations to written data */
mutex_lock(&zi->i_truncate_mutex);
isize = i_size_read(inode);
if (*ppos >= isize)
len = 0;
else
len = min_t(loff_t, len, isize - *ppos);
mutex_unlock(&zi->i_truncate_mutex);
if (len > 0) {
ret = filemap_splice_read(in, ppos, pipe, len, flags);
if (ret == -EIO)
zonefs_io_error(inode, false);
}
inode_unlock_shared(inode);
return ret;
}
/*
* Write open accounting is done only for sequential files.
*/
static inline bool zonefs_seq_file_need_wro(struct inode *inode,
struct file *file)
{
if (zonefs_inode_is_cnv(inode))
return false;
if (!(file->f_mode & FMODE_WRITE))
return false;
return true;
}
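/*
 * Account an open for writing on a sequential zone file. When the
 * explicit open mount option is used, also explicitly open the zone on
 * the device, failing with -EBUSY if that would exceed the maximum
 * number of write-open sequential files.
 */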
static int zonefs_seq_file_write_open(struct inode *inode)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
int ret = 0;
mutex_lock(&zi->i_truncate_mutex);
if (!zi->i_wr_refcnt) {
struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
if (sbi->s_max_wro_seq_files
&& wro > sbi->s_max_wro_seq_files) {
atomic_dec(&sbi->s_wro_seq_files);
ret = -EBUSY;
goto unlock;
}
if (i_size_read(inode) < z->z_capacity) {
ret = zonefs_inode_zone_mgmt(inode,
REQ_OP_ZONE_OPEN);
if (ret) {
atomic_dec(&sbi->s_wro_seq_files);
goto unlock;
}
z->z_flags |= ZONEFS_ZONE_OPEN;
zonefs_inode_account_active(inode);
}
}
}
zi->i_wr_refcnt++;
unlock:
mutex_unlock(&zi->i_truncate_mutex);
return ret;
}
static int zonefs_file_open(struct inode *inode, struct file *file)
{
int ret;
file->f_mode |= FMODE_CAN_ODIRECT;
ret = generic_file_open(inode, file);
if (ret)
return ret;
if (zonefs_seq_file_need_wro(inode, file))
return zonefs_seq_file_write_open(inode);
return 0;
}
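/*
 * Drop a write open reference on a sequential zone file. On the last
 * release, explicitly close the zone; if closing fails, remount
 * read-only to avoid exhausting the device zone resources.
 */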
static void zonefs_seq_file_write_close(struct inode *inode)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
int ret = 0;
mutex_lock(&zi->i_truncate_mutex);
zi->i_wr_refcnt--;
if (zi->i_wr_refcnt)
goto unlock;
/*
* The file zone may not be open anymore (e.g. the file was truncated to
* its maximum size or it was fully written). For this case, we only
* need to decrement the write open count.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN) {
ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
if (ret) {
__zonefs_io_error(inode, false);
/*
* Leaving zones explicitly open may lead to a state
* where most zones cannot be written (zone resources
* exhausted). So take preventive action by remounting
* read-only.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN &&
!(sb->s_flags & SB_RDONLY)) {
zonefs_warn(sb,
"closing zone at %llu failed %d\n",
z->z_sector, ret);
zonefs_warn(sb,
"remounting filesystem read-only\n");
sb->s_flags |= SB_RDONLY;
}
goto unlock;
}
z->z_flags &= ~ZONEFS_ZONE_OPEN;
zonefs_inode_account_active(inode);
}
atomic_dec(&sbi->s_wro_seq_files);
unlock:
mutex_unlock(&zi->i_truncate_mutex);
}
static int zonefs_file_release(struct inode *inode, struct file *file)
{
/*
 * If we explicitly open a zone we must close it again as well, but the
 * zone management operation can fail (either due to an IO error or
 * because the zone has gone offline or read-only). Make sure we don't
 * fail the close(2) for user-space.
 */
if (zonefs_seq_file_need_wro(inode, file))
zonefs_seq_file_write_close(inode);
return 0;
}
const struct file_operations zonefs_file_operations = {
.open = zonefs_file_open,
.release = zonefs_file_release,
.fsync = zonefs_file_fsync,
.mmap = zonefs_file_mmap,
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
.splice_read = zonefs_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iocb_bio_iopoll,
};
| linux-master | fs/zonefs/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* super.c
*
* load/unload driver, mount/dismount volumes
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/statfs.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/parser.h>
#include <linux/crc32.h>
#include <linux/debugfs.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>
#include <linux/signal.h>
#define CREATE_TRACE_POINTS
#include "ocfs2_trace.h"
#include <cluster/masklog.h>
#include "ocfs2.h"
/* this should be the only file to include a version 1 header */
#include "ocfs1_fs_compat.h"
#include "alloc.h"
#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "export.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "namei.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "xattr.h"
#include "quota.h"
#include "refcounttree.h"
#include "suballoc.h"
#include "buffer_head_io.h"
#include "filecheck.h"
static struct kmem_cache *ocfs2_inode_cachep;
struct kmem_cache *ocfs2_dquot_cachep;
struct kmem_cache *ocfs2_qf_chunk_cachep;
static struct dentry *ocfs2_debugfs_root;
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 cluster file system");
struct mount_options
{
unsigned long commit_interval;
unsigned long mount_opt;
unsigned int atime_quantum;
unsigned short slot;
int localalloc_opt;
unsigned int resv_level;
int dir_resv_level;
char cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
};
static int ocfs2_parse_options(struct super_block *sb, char *options,
struct mount_options *mopt,
int is_remount);
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options);
static int ocfs2_show_options(struct seq_file *s, struct dentry *root);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err);
static int ocfs2_initialize_mem_caches(void);
static void ocfs2_free_mem_caches(void);
static void ocfs2_delete_osb(struct ocfs2_super *osb);
static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ocfs2_sync_fs(struct super_block *sb, int wait);
static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb);
static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb);
static void ocfs2_release_system_inodes(struct ocfs2_super *osb);
static int ocfs2_check_volume(struct ocfs2_super *osb);
static int ocfs2_verify_volume(struct ocfs2_dinode *di,
struct buffer_head *bh,
u32 sectsize,
struct ocfs2_blockcheck_stats *stats);
static int ocfs2_initialize_super(struct super_block *sb,
struct buffer_head *bh,
int sector_size,
struct ocfs2_blockcheck_stats *stats);
static int ocfs2_get_sector(struct super_block *sb,
struct buffer_head **bh,
int block,
int sect_size);
static struct inode *ocfs2_alloc_inode(struct super_block *sb);
static void ocfs2_free_inode(struct inode *inode);
static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend);
static int ocfs2_enable_quotas(struct ocfs2_super *osb);
static void ocfs2_disable_quotas(struct ocfs2_super *osb);
static struct dquot **ocfs2_get_dquots(struct inode *inode)
{
return OCFS2_I(inode)->i_dquot;
}
static const struct super_operations ocfs2_sops = {
.statfs = ocfs2_statfs,
.alloc_inode = ocfs2_alloc_inode,
.free_inode = ocfs2_free_inode,
.drop_inode = ocfs2_drop_inode,
.evict_inode = ocfs2_evict_inode,
.sync_fs = ocfs2_sync_fs,
.put_super = ocfs2_put_super,
.remount_fs = ocfs2_remount,
.show_options = ocfs2_show_options,
.quota_read = ocfs2_quota_read,
.quota_write = ocfs2_quota_write,
.get_dquots = ocfs2_get_dquots,
};
enum {
Opt_barrier,
Opt_err_panic,
Opt_err_ro,
Opt_intr,
Opt_nointr,
Opt_hb_none,
Opt_hb_local,
Opt_hb_global,
Opt_data_ordered,
Opt_data_writeback,
Opt_atime_quantum,
Opt_slot,
Opt_commit,
Opt_localalloc,
Opt_localflocks,
Opt_stack,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_inode64,
Opt_acl,
Opt_noacl,
Opt_usrquota,
Opt_grpquota,
Opt_coherency_buffered,
Opt_coherency_full,
Opt_resv_level,
Opt_dir_resv_level,
Opt_journal_async_commit,
Opt_err_cont,
Opt_err,
};
static const match_table_t tokens = {
{Opt_barrier, "barrier=%u"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_intr, "intr"},
{Opt_nointr, "nointr"},
{Opt_hb_none, OCFS2_HB_NONE},
{Opt_hb_local, OCFS2_HB_LOCAL},
{Opt_hb_global, OCFS2_HB_GLOBAL},
{Opt_data_ordered, "data=ordered"},
{Opt_data_writeback, "data=writeback"},
{Opt_atime_quantum, "atime_quantum=%u"},
{Opt_slot, "preferred_slot=%u"},
{Opt_commit, "commit=%u"},
{Opt_localalloc, "localalloc=%d"},
{Opt_localflocks, "localflocks"},
{Opt_stack, "cluster_stack=%s"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_inode64, "inode64"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_coherency_buffered, "coherency=buffered"},
{Opt_coherency_full, "coherency=full"},
{Opt_resv_level, "resv_level=%u"},
{Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_err_cont, "errors=continue"},
{Opt_err, NULL}
};
#ifdef CONFIG_DEBUG_FS
static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
{
struct ocfs2_cluster_connection *cconn = osb->cconn;
struct ocfs2_recovery_map *rm = osb->recovery_map;
struct ocfs2_orphan_scan *os = &osb->osb_orphan_scan;
int i, out = 0;
unsigned long flags;
out += scnprintf(buf + out, len - out,
"%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n",
"Device", osb->dev_str, osb->uuid_str,
osb->fs_generation, osb->vol_label);
out += scnprintf(buf + out, len - out,
"%10s => State: %d Flags: 0x%lX\n", "Volume",
atomic_read(&osb->vol_state), osb->osb_flags);
out += scnprintf(buf + out, len - out,
"%10s => Block: %lu Cluster: %d\n", "Sizes",
osb->sb->s_blocksize, osb->s_clustersize);
out += scnprintf(buf + out, len - out,
"%10s => Compat: 0x%X Incompat: 0x%X "
"ROcompat: 0x%X\n",
"Features", osb->s_feature_compat,
osb->s_feature_incompat, osb->s_feature_ro_compat);
out += scnprintf(buf + out, len - out,
"%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount",
osb->s_mount_opt, osb->s_atime_quantum);
if (cconn) {
out += scnprintf(buf + out, len - out,
"%10s => Stack: %s Name: %*s "
"Version: %d.%d\n", "Cluster",
(*osb->osb_cluster_stack == '\0' ?
"o2cb" : osb->osb_cluster_stack),
cconn->cc_namelen, cconn->cc_name,
cconn->cc_version.pv_major,
cconn->cc_version.pv_minor);
}
spin_lock_irqsave(&osb->dc_task_lock, flags);
out += scnprintf(buf + out, len - out,
"%10s => Pid: %d Count: %lu WakeSeq: %lu "
"WorkSeq: %lu\n", "DownCnvt",
(osb->dc_task ? task_pid_nr(osb->dc_task) : -1),
osb->blocked_lock_count, osb->dc_wake_sequence,
osb->dc_work_sequence);
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
spin_lock(&osb->osb_lock);
out += scnprintf(buf + out, len - out, "%10s => Pid: %d Nodes:",
"Recovery",
(osb->recovery_thread_task ?
task_pid_nr(osb->recovery_thread_task) : -1));
if (rm->rm_used == 0)
out += scnprintf(buf + out, len - out, " None\n");
else {
for (i = 0; i < rm->rm_used; i++)
out += scnprintf(buf + out, len - out, " %d",
rm->rm_entries[i]);
out += scnprintf(buf + out, len - out, "\n");
}
spin_unlock(&osb->osb_lock);
out += scnprintf(buf + out, len - out,
"%10s => Pid: %d Interval: %lu\n", "Commit",
(osb->commit_task ? task_pid_nr(osb->commit_task) : -1),
osb->osb_commit_interval);
out += scnprintf(buf + out, len - out,
"%10s => State: %d TxnId: %lu NumTxns: %d\n",
"Journal", osb->journal->j_state,
osb->journal->j_trans_id,
atomic_read(&osb->journal->j_num_trans));
out += scnprintf(buf + out, len - out,
"%10s => GlobalAllocs: %d LocalAllocs: %d "
"SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
"Stats",
atomic_read(&osb->alloc_stats.bitmap_data),
atomic_read(&osb->alloc_stats.local_data),
atomic_read(&osb->alloc_stats.bg_allocs),
atomic_read(&osb->alloc_stats.moves),
atomic_read(&osb->alloc_stats.bg_extends));
out += scnprintf(buf + out, len - out,
"%10s => State: %u Descriptor: %llu Size: %u bits "
"Default: %u bits\n",
"LocalAlloc", osb->local_alloc_state,
(unsigned long long)osb->la_last_gd,
osb->local_alloc_bits, osb->local_alloc_default_bits);
spin_lock(&osb->osb_lock);
out += scnprintf(buf + out, len - out,
"%10s => InodeSlot: %d StolenInodes: %d, "
"MetaSlot: %d StolenMeta: %d\n", "Steal",
osb->s_inode_steal_slot,
atomic_read(&osb->s_num_inodes_stolen),
osb->s_meta_steal_slot,
atomic_read(&osb->s_num_meta_stolen));
spin_unlock(&osb->osb_lock);
out += scnprintf(buf + out, len - out, "OrphanScan => ");
out += scnprintf(buf + out, len - out, "Local: %u Global: %u ",
os->os_count, os->os_seqno);
out += scnprintf(buf + out, len - out, " Last Scan: ");
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
out += scnprintf(buf + out, len - out, "Disabled\n");
else
out += scnprintf(buf + out, len - out, "%lu seconds ago\n",
(unsigned long)(ktime_get_seconds() - os->os_scantime));
out += scnprintf(buf + out, len - out, "%10s => %3s %10s\n",
"Slots", "Num", "RecoGen");
for (i = 0; i < osb->max_slots; ++i) {
out += scnprintf(buf + out, len - out,
"%10s %c %3d %10d\n",
" ",
(i == osb->slot_num ? '*' : ' '),
i, osb->slot_recovery_generations[i]);
}
return out;
}
static int ocfs2_osb_debug_open(struct inode *inode, struct file *file)
{
struct ocfs2_super *osb = inode->i_private;
char *buf = NULL;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
goto bail;
i_size_write(inode, ocfs2_osb_dump(osb, buf, PAGE_SIZE));
file->private_data = buf;
return 0;
bail:
return -ENOMEM;
}
static int ocfs2_debug_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
i_size_read(file->f_mapping->host));
}
#else
static int ocfs2_osb_debug_open(struct inode *inode, struct file *file)
{
return 0;
}
static int ocfs2_debug_release(struct inode *inode, struct file *file)
{
return 0;
}
static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return 0;
}
#endif /* CONFIG_DEBUG_FS */
static const struct file_operations ocfs2_osb_debug_fops = {
.open = ocfs2_osb_debug_open,
.release = ocfs2_debug_release,
.read = ocfs2_debug_read,
.llseek = generic_file_llseek,
};
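/*
 * Sync the filesystem: flush the truncate log (or schedule a flush when
 * not waiting) and kick a journal commit, waiting for it to complete if
 * @wait is set.
 */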
static int ocfs2_sync_fs(struct super_block *sb, int wait)
{
int status;
tid_t target;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (wait) {
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
mlog_errno(status);
} else {
ocfs2_schedule_truncate_log_flush(osb, 0);
}
if (jbd2_journal_start_commit(osb->journal->j_journal,
&target)) {
if (wait)
jbd2_log_wait_commit(osb->journal->j_journal,
target);
}
return 0;
}
static int ocfs2_need_system_inode(struct ocfs2_super *osb, int ino)
{
if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
&& (ino == USER_QUOTA_SYSTEM_INODE
|| ino == LOCAL_USER_QUOTA_SYSTEM_INODE))
return 0;
if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
&& (ino == GROUP_QUOTA_SYSTEM_INODE
|| ino == LOCAL_GROUP_QUOTA_SYSTEM_INODE))
return 0;
return 1;
}
static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
{
struct inode *new = NULL;
int status = 0;
int i;
new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0);
if (IS_ERR(new)) {
status = PTR_ERR(new);
mlog_errno(status);
goto bail;
}
osb->root_inode = new;
new = ocfs2_iget(osb, osb->system_dir_blkno, OCFS2_FI_FLAG_SYSFILE, 0);
if (IS_ERR(new)) {
status = PTR_ERR(new);
mlog_errno(status);
goto bail;
}
osb->sys_root_inode = new;
for (i = OCFS2_FIRST_ONLINE_SYSTEM_INODE;
i <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; i++) {
if (!ocfs2_need_system_inode(osb, i))
continue;
new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
if (!new) {
ocfs2_release_system_inodes(osb);
status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
mlog_errno(status);
mlog(ML_ERROR, "Unable to load system inode %d, "
"possibly corrupt fs?", i);
goto bail;
}
/* the array now has one ref, so drop this one */
iput(new);
}
bail:
if (status)
mlog_errno(status);
return status;
}
static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
{
struct inode *new = NULL;
int status = 0;
int i;
for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1;
i < NUM_SYSTEM_INODES;
i++) {
if (!ocfs2_need_system_inode(osb, i))
continue;
new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
if (!new) {
ocfs2_release_system_inodes(osb);
status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
status, i, osb->slot_num);
goto bail;
}
/* the array now has one ref, so drop this one */
iput(new);
}
bail:
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
{
int i;
struct inode *inode;
for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) {
inode = osb->global_system_inodes[i];
if (inode) {
iput(inode);
osb->global_system_inodes[i] = NULL;
}
}
inode = osb->sys_root_inode;
if (inode) {
iput(inode);
osb->sys_root_inode = NULL;
}
inode = osb->root_inode;
if (inode) {
iput(inode);
osb->root_inode = NULL;
}
if (!osb->local_system_inodes)
return;
for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) {
if (osb->local_system_inodes[i]) {
iput(osb->local_system_inodes[i]);
osb->local_system_inodes[i] = NULL;
}
}
kfree(osb->local_system_inodes);
osb->local_system_inodes = NULL;
}
/* We're allocating fs objects, use GFP_NOFS */
static struct inode *ocfs2_alloc_inode(struct super_block *sb)
{
struct ocfs2_inode_info *oi;
oi = alloc_inode_sb(sb, ocfs2_inode_cachep, GFP_NOFS);
if (!oi)
return NULL;
oi->i_sync_tid = 0;
oi->i_datasync_tid = 0;
memset(&oi->i_dquot, 0, sizeof(oi->i_dquot));
jbd2_journal_init_jbd_inode(&oi->ip_jinode, &oi->vfs_inode);
return &oi->vfs_inode;
}
static void ocfs2_free_inode(struct inode *inode)
{
kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
}
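/*
 * Compute the maximum file offset supported for a given cluster size
 * (1 << cbits). For example, with 1MB clusters (cbits = 20) on a 64-bit
 * system this returns ((1 << 20) << 32) - (1 << 20): one full cluster
 * is trimmed from the top so that extent math cannot overflow
 * i_clusters.
 */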
static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
unsigned int cbits)
{
unsigned int bytes = 1 << cbits;
unsigned int trim = bytes;
unsigned int bitshift = 32;
/*
* i_size and all block offsets in ocfs2 are always 64 bits
* wide. i_clusters is 32 bits, in cluster-sized units. So on
* 64 bit platforms, cluster size will be the limiting factor.
*/
#if BITS_PER_LONG == 32
BUILD_BUG_ON(sizeof(sector_t) != 8);
/*
* We might be limited by page cache size.
*/
if (bytes > PAGE_SIZE) {
bytes = PAGE_SIZE;
trim = 1;
/*
* Shift by 31 here so that we don't get larger than
* MAX_LFS_FILESIZE
*/
bitshift = 31;
}
#endif
/*
* Trim by a whole cluster when we can actually approach the
* on-disk limits. Otherwise we can overflow i_clusters when
* an extent start is at the max offset.
*/
return (((unsigned long long)bytes) << bitshift) - trim;
}
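/*
 * Remount the filesystem. The heartbeat mode, the data journaling mode
 * and inode64 cannot be changed on remount; quotas are suspended or
 * resumed around a read-only/read-write transition.
 */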
static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
{
int incompat_features;
int ret = 0;
struct mount_options parsed_options;
struct ocfs2_super *osb = OCFS2_SB(sb);
u32 tmp;
sync_filesystem(sb);
if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
!ocfs2_check_set_options(sb, &parsed_options)) {
ret = -EINVAL;
goto out;
}
tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE;
if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
goto out;
}
if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
(parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change data mode on remount\n");
goto out;
}
/* Probably don't want this on remount; it might
* mess with other nodes */
if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) &&
(parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot enable inode64 on remount\n");
goto out;
}
/* We're going to/from readonly mode. */
if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
/* Disable quota accounting before remounting RO */
if (*flags & SB_RDONLY) {
ret = ocfs2_susp_quotas(osb, 0);
if (ret < 0)
goto out;
}
/* Lock here so the check of HARD_RO and the potential
* setting of SOFT_RO is atomic. */
spin_lock(&osb->osb_lock);
if (osb->osb_flags & OCFS2_OSB_HARD_RO) {
mlog(ML_ERROR, "Remount on readonly device is forbidden.\n");
ret = -EROFS;
goto unlock_osb;
}
if (*flags & SB_RDONLY) {
sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
mlog(ML_ERROR, "Cannot remount RDWR "
"filesystem due to previous errors.\n");
ret = -EROFS;
goto unlock_osb;
}
incompat_features = OCFS2_HAS_RO_COMPAT_FEATURE(sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP);
if (incompat_features) {
mlog(ML_ERROR, "Cannot remount RDWR because "
"of unsupported optional features "
"(%x).\n", incompat_features);
ret = -EINVAL;
goto unlock_osb;
}
sb->s_flags &= ~SB_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
if (!ret && !(*flags & SB_RDONLY)) {
if (sb_any_quota_suspended(sb))
ret = ocfs2_susp_quotas(osb, 1);
else
ret = ocfs2_enable_quotas(osb);
if (ret < 0) {
/* Return back changes... */
spin_lock(&osb->osb_lock);
sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
spin_unlock(&osb->osb_lock);
goto out;
}
}
}
if (!ret) {
/* Only save off the new mount options in case of a successful
* remount. */
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
if (parsed_options.commit_interval)
osb->osb_commit_interval = parsed_options.commit_interval;
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
SB_POSIXACL : 0);
}
out:
return ret;
}
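/*
 * Probe for a valid superblock: reject old ocfs v1 volumes found at
 * block zero, then scan the possible block sizes, from the device
 * sector size up to OCFS2_MAX_BLOCKSIZE, for an ocfs2 superblock.
 */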
static int ocfs2_sb_probe(struct super_block *sb,
struct buffer_head **bh,
int *sector_size,
struct ocfs2_blockcheck_stats *stats)
{
int status, tmpstat;
struct ocfs1_vol_disk_hdr *hdr;
struct ocfs2_dinode *di;
int blksize;
*bh = NULL;
/* may be > 512 */
*sector_size = bdev_logical_block_size(sb->s_bdev);
if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
*sector_size, OCFS2_MAX_BLOCKSIZE);
status = -EINVAL;
goto bail;
}
/* Can this really happen? */
if (*sector_size < OCFS2_MIN_BLOCKSIZE)
*sector_size = OCFS2_MIN_BLOCKSIZE;
/* check block zero for old format */
status = ocfs2_get_sector(sb, bh, 0, *sector_size);
if (status < 0) {
mlog_errno(status);
goto bail;
}
hdr = (struct ocfs1_vol_disk_hdr *) (*bh)->b_data;
if (hdr->major_version == OCFS1_MAJOR_VERSION) {
mlog(ML_ERROR, "incompatible version: %u.%u\n",
hdr->major_version, hdr->minor_version);
status = -EINVAL;
}
if (memcmp(hdr->signature, OCFS1_VOLUME_SIGNATURE,
strlen(OCFS1_VOLUME_SIGNATURE)) == 0) {
mlog(ML_ERROR, "incompatible volume signature: %8s\n",
hdr->signature);
status = -EINVAL;
}
brelse(*bh);
*bh = NULL;
if (status < 0) {
mlog(ML_ERROR, "This is an ocfs v1 filesystem which must be "
"upgraded before mounting with ocfs v2\n");
goto bail;
}
/*
* Now check at magic offset for 512, 1024, 2048, 4096
* blocksizes. 4096 is the maximum blocksize because it is
* the minimum clustersize.
*/
status = -EINVAL;
for (blksize = *sector_size;
blksize <= OCFS2_MAX_BLOCKSIZE;
blksize <<= 1) {
tmpstat = ocfs2_get_sector(sb, bh,
OCFS2_SUPER_BLOCK_BLKNO,
blksize);
if (tmpstat < 0) {
status = tmpstat;
mlog_errno(status);
break;
}
di = (struct ocfs2_dinode *) (*bh)->b_data;
memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
spin_lock_init(&stats->b_lock);
tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
if (tmpstat < 0) {
brelse(*bh);
*bh = NULL;
}
if (tmpstat != -EAGAIN) {
status = tmpstat;
break;
}
}
bail:
return status;
}
static int ocfs2_verify_heartbeat(struct ocfs2_super *osb)
{
u32 hb_enabled = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL;
if (osb->s_mount_opt & hb_enabled) {
if (ocfs2_mount_local(osb)) {
mlog(ML_ERROR, "Cannot heartbeat on a locally "
"mounted device.\n");
return -EINVAL;
}
if (ocfs2_userspace_stack(osb)) {
mlog(ML_ERROR, "Userspace stack expected, but "
"o2cb heartbeat arguments passed to mount\n");
return -EINVAL;
}
if (((osb->s_mount_opt & OCFS2_MOUNT_HB_GLOBAL) &&
!ocfs2_cluster_o2cb_global_heartbeat(osb)) ||
((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) &&
ocfs2_cluster_o2cb_global_heartbeat(osb))) {
mlog(ML_ERROR, "Mismatching o2cb heartbeat modes\n");
return -EINVAL;
}
}
if (!(osb->s_mount_opt & hb_enabled)) {
if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb) &&
!ocfs2_userspace_stack(osb)) {
mlog(ML_ERROR, "Heartbeat has to be started to mount "
"a read-write clustered device.\n");
return -EINVAL;
}
}
return 0;
}
/*
* If we're using a userspace stack, mount should have passed
* a name that matches the disk. If not, mount should not
* have passed a stack.
*/
static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
struct mount_options *mopt)
{
if (!ocfs2_userspace_stack(osb) && mopt->cluster_stack[0]) {
mlog(ML_ERROR,
"cluster stack passed to mount, but this filesystem "
"does not support it\n");
return -EINVAL;
}
if (ocfs2_userspace_stack(osb) &&
strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
OCFS2_STACK_LABEL_LEN)) {
mlog(ML_ERROR,
"cluster stack passed to mount (\"%s\") does not "
"match the filesystem (\"%s\")\n",
mopt->cluster_stack,
osb->osb_cluster_stack);
return -EINVAL;
}
return 0;
}
static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend)
{
int type;
struct super_block *sb = osb->sb;
unsigned int feature[OCFS2_MAXQUOTAS] = {
OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
int status = 0;
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
if (unsuspend)
status = dquot_resume(sb, type);
else {
struct ocfs2_mem_dqinfo *oinfo;
/* Cancel periodic syncing before suspending */
oinfo = sb_dqinfo(sb, type)->dqi_priv;
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
status = dquot_suspend(sb, type);
}
if (status < 0)
break;
}
if (status < 0)
mlog(ML_ERROR, "Failed to suspend/unsuspend quotas on "
"remount (error = %d).\n", status);
return status;
}
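/*
 * Load and enable the local quota files for each quota type whose
 * RO-compat feature bit is set. On failure, any quotas already enabled
 * are turned off again.
 */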
static int ocfs2_enable_quotas(struct ocfs2_super *osb)
{
struct inode *inode[OCFS2_MAXQUOTAS] = { NULL, NULL };
struct super_block *sb = osb->sb;
unsigned int feature[OCFS2_MAXQUOTAS] = {
OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
unsigned int ino[OCFS2_MAXQUOTAS] = {
LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
int status;
int type;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
inode[type] = ocfs2_get_system_file_inode(osb, ino[type],
osb->slot_num);
if (!inode[type]) {
status = -ENOENT;
goto out_quota_off;
}
status = dquot_load_quota_inode(inode[type], type, QFMT_OCFS2,
DQUOT_USAGE_ENABLED);
if (status < 0)
goto out_quota_off;
}
for (type = 0; type < OCFS2_MAXQUOTAS; type++)
iput(inode[type]);
return 0;
out_quota_off:
ocfs2_disable_quotas(osb);
for (type = 0; type < OCFS2_MAXQUOTAS; type++)
iput(inode[type]);
mlog_errno(status);
return status;
}
static void ocfs2_disable_quotas(struct ocfs2_super *osb)
{
int type;
struct inode *inode;
struct super_block *sb = osb->sb;
struct ocfs2_mem_dqinfo *oinfo;
/* We mostly ignore errors in this function because there's not much
* we can do when we see them */
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!sb_has_quota_loaded(sb, type))
continue;
if (!sb_has_quota_suspended(sb, type)) {
oinfo = sb_dqinfo(sb, type)->dqi_priv;
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
}
inode = igrab(sb->s_dquot.files[type]);
/* Turn off quotas. This will remove all dquot structures from
* memory and so they will be automatically synced to global
* quota files */
dquot_disable(sb, type, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
iput(inode);
}
}
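/*
 * Fill in the superblock at mount time: parse and check the mount
 * options, probe and verify the on-disk superblock, mount the volume,
 * and finally enable quotas and start the orphan scan once the mount
 * is essentially successful.
 */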
static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
{
struct dentry *root;
int status, sector_size;
struct mount_options parsed_options;
struct inode *inode = NULL;
struct ocfs2_super *osb = NULL;
struct buffer_head *bh = NULL;
char nodestr[12];
struct ocfs2_blockcheck_stats stats;
trace_ocfs2_fill_super(sb, data, silent);
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
goto out;
}
/* probe for superblock */
status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
if (status < 0) {
mlog(ML_ERROR, "superblock probe failed!\n");
goto out;
}
status = ocfs2_initialize_super(sb, bh, sector_size, &stats);
brelse(bh);
bh = NULL;
if (status < 0)
goto out;
osb = OCFS2_SB(sb);
if (!ocfs2_check_set_options(sb, &parsed_options)) {
status = -EINVAL;
goto out_super;
}
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
osb->osb_commit_interval = parsed_options.commit_interval;
ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt);
osb->osb_resv_level = parsed_options.resv_level;
if (parsed_options.dir_resv_level == -1)
osb->osb_dir_resv_level = parsed_options.resv_level;
else
osb->osb_dir_resv_level = parsed_options.dir_resv_level;
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
goto out_super;
sb->s_magic = OCFS2_SUPER_MAGIC;
sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
/* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
* heartbeat=none */
if (bdev_read_only(sb->s_bdev)) {
if (!sb_rdonly(sb)) {
status = -EACCES;
mlog(ML_ERROR, "Readonly device detected but readonly "
"mount was not specified.\n");
goto out_super;
}
/* You should not be able to start a local heartbeat
* on a readonly device. */
if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) {
status = -EROFS;
mlog(ML_ERROR, "Local heartbeat specified on readonly "
"device.\n");
goto out_super;
}
status = ocfs2_check_journals_nolocks(osb);
if (status < 0) {
if (status == -EROFS)
mlog(ML_ERROR, "Recovery required on readonly "
"file system, but write access is "
"unavailable.\n");
goto out_super;
}
ocfs2_set_ro_flag(osb, 1);
printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
"Cluster services will not be used for this mount. "
"Recovery will be skipped.\n", osb->dev_str);
}
if (!ocfs2_is_hard_readonly(osb)) {
if (sb_rdonly(sb))
ocfs2_set_ro_flag(osb, 0);
}
status = ocfs2_verify_heartbeat(osb);
if (status < 0)
goto out_super;
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
osb, &ocfs2_osb_debug_fops);
if (ocfs2_meta_ecc(osb))
ocfs2_blockcheck_stats_debugfs_install(&osb->osb_ecc_stats,
osb->osb_debug_root);
status = ocfs2_mount_volume(sb);
if (status < 0)
goto out_debugfs;
if (osb->root_inode)
inode = igrab(osb->root_inode);
if (!inode) {
status = -EIO;
goto out_dismount;
}
osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
&ocfs2_kset->kobj);
if (!osb->osb_dev_kset) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id);
goto out_dismount;
}
/* Create filecheck sysfs related directories/files at
* /sys/fs/ocfs2/<devname>/filecheck */
if (ocfs2_filecheck_create_sysfs(osb)) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create filecheck sysfs directory at "
"/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id);
goto out_dismount;
}
root = d_make_root(inode);
if (!root) {
status = -ENOMEM;
goto out_dismount;
}
sb->s_root = root;
ocfs2_complete_mount_recovery(osb);
if (ocfs2_mount_local(osb))
snprintf(nodestr, sizeof(nodestr), "local");
else
snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num);
printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %s, slot %d) "
"with %s data mode.\n",
osb->dev_str, nodestr, osb->slot_num,
osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
"ordered");
atomic_set(&osb->vol_state, VOLUME_MOUNTED);
wake_up(&osb->osb_mount_event);
/* Now we can initialize quotas because we can afford to wait
 * for cluster lock recovery. That also means that truncation
 * log recovery can happen, but that waits for proper quota setup */
if (!sb_rdonly(sb)) {
status = ocfs2_enable_quotas(osb);
if (status < 0) {
/* We have to err-out specially here because
* s_root is already set */
mlog_errno(status);
atomic_set(&osb->vol_state, VOLUME_DISABLED);
wake_up(&osb->osb_mount_event);
return status;
}
}
ocfs2_complete_quota_recovery(osb);
/* Now we wake up again for processes waiting for quotas */
atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS);
wake_up(&osb->osb_mount_event);
/* Start this when the mount is almost sure of being successful */
ocfs2_orphan_scan_start(osb);
return status;
out_dismount:
atomic_set(&osb->vol_state, VOLUME_DISABLED);
wake_up(&osb->osb_mount_event);
ocfs2_free_replay_slots(osb);
ocfs2_dismount_volume(sb, 1);
goto out;
out_debugfs:
debugfs_remove_recursive(osb->osb_debug_root);
out_super:
ocfs2_release_system_inodes(osb);
kfree(osb->recovery_map);
ocfs2_delete_osb(osb);
kfree(osb);
out:
mlog_errno(status);
return status;
}
static struct dentry *ocfs2_mount(struct file_system_type *fs_type,
int flags,
const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
}
static struct file_system_type ocfs2_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2",
.mount = ocfs2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
.next = NULL
};
MODULE_ALIAS_FS("ocfs2");
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options)
{
if (options->mount_opt & OCFS2_MOUNT_USRQUOTA &&
!OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
mlog(ML_ERROR, "User quotas were requested, but this "
"filesystem does not have the feature enabled.\n");
return 0;
}
if (options->mount_opt & OCFS2_MOUNT_GRPQUOTA &&
!OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
mlog(ML_ERROR, "Group quotas were requested, but this "
"filesystem does not have the feature enabled.\n");
return 0;
}
if (options->mount_opt & OCFS2_MOUNT_POSIX_ACL &&
!OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) {
mlog(ML_ERROR, "ACL support requested but extended attributes "
"feature is not enabled\n");
return 0;
}
/* No ACL setting specified? Use XATTR feature... */
if (!(options->mount_opt & (OCFS2_MOUNT_POSIX_ACL |
OCFS2_MOUNT_NO_POSIX_ACL))) {
if (OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR))
options->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
else
options->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
}
return 1;
}
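/*
 * Parse the comma-separated mount option string into @mopt. Note the
 * return convention: 1 on success, 0 on failure.
 */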
static int ocfs2_parse_options(struct super_block *sb,
char *options,
struct mount_options *mopt,
int is_remount)
{
int status, user_stack = 0;
char *p;
u32 tmp;
int token, option;
substring_t args[MAX_OPT_ARGS];
trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
mopt->commit_interval = 0;
mopt->mount_opt = OCFS2_MOUNT_NOINTR;
mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
mopt->slot = OCFS2_INVALID_SLOT;
mopt->localalloc_opt = -1;
mopt->cluster_stack[0] = '\0';
mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
mopt->dir_resv_level = -1;
if (!options) {
status = 1;
goto bail;
}
while ((p = strsep(&options, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_hb_local:
mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL;
break;
case Opt_hb_none:
mopt->mount_opt |= OCFS2_MOUNT_HB_NONE;
break;
case Opt_hb_global:
mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL;
break;
case Opt_barrier:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option)
mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
else
mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
break;
case Opt_intr:
mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
break;
case Opt_nointr:
mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
break;
case Opt_err_panic:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
break;
case Opt_err_ro:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS;
break;
case Opt_err_cont:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT;
break;
case Opt_data_ordered:
mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_data_writeback:
mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_user_xattr:
mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR;
break;
case Opt_nouser_xattr:
mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR;
break;
case Opt_atime_quantum:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= 0)
mopt->atime_quantum = option;
break;
case Opt_slot:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option)
mopt->slot = (u16)option;
break;
case Opt_commit:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option < 0)
return 0;
if (option == 0)
option = JBD2_DEFAULT_MAX_COMMIT_AGE;
mopt->commit_interval = HZ * option;
break;
case Opt_localalloc:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= 0)
mopt->localalloc_opt = option;
break;
case Opt_localflocks:
/*
* Changing this during remount could race
* flock() requests, or "unbalance" existing
* ones (e.g., a lock is taken in one mode but
* dropped in the other). If users care enough
* to flip locking modes during remount, we
* could add a "local" flag to individual
* flock structures for proper tracking of
* state.
*/
if (!is_remount)
mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
break;
case Opt_stack:
/* Check both that the option we were passed
* is of the right length and that it is a proper
* string of the right length.
*/
if (((args[0].to - args[0].from) !=
OCFS2_STACK_LABEL_LEN) ||
(strnlen(args[0].from,
OCFS2_STACK_LABEL_LEN) !=
OCFS2_STACK_LABEL_LEN)) {
mlog(ML_ERROR,
"Invalid cluster_stack option\n");
status = 0;
goto bail;
}
memcpy(mopt->cluster_stack, args[0].from,
OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
/*
* Open code the memcmp here as we don't have
* an osb to pass to
* ocfs2_userspace_stack().
*/
if (memcmp(mopt->cluster_stack,
OCFS2_CLASSIC_CLUSTER_STACK,
OCFS2_STACK_LABEL_LEN))
user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
break;
case Opt_usrquota:
mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
break;
case Opt_grpquota:
mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
break;
case Opt_coherency_buffered:
mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED;
break;
case Opt_coherency_full:
mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED;
break;
case Opt_acl:
mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
break;
case Opt_noacl:
mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
break;
case Opt_resv_level:
if (is_remount)
break;
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= OCFS2_MIN_RESV_LEVEL &&
option < OCFS2_MAX_RESV_LEVEL)
mopt->resv_level = option;
break;
case Opt_dir_resv_level:
if (is_remount)
break;
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= OCFS2_MIN_RESV_LEVEL &&
option < OCFS2_MAX_RESV_LEVEL)
mopt->dir_resv_level = option;
break;
case Opt_journal_async_commit:
mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
"or missing value\n", p);
status = 0;
goto bail;
}
}
if (user_stack == 0) {
/* Ensure only one heartbeat mode */
tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE);
if (hweight32(tmp) != 1) {
mlog(ML_ERROR, "Invalid heartbeat mount options\n");
status = 0;
goto bail;
}
}
status = 1;
bail:
return status;
}
static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
{
struct ocfs2_super *osb = OCFS2_SB(root->d_sb);
unsigned long opts = osb->s_mount_opt;
unsigned int local_alloc_megs;
if (opts & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL)) {
seq_printf(s, ",_netdev");
if (opts & OCFS2_MOUNT_HB_LOCAL)
seq_printf(s, ",%s", OCFS2_HB_LOCAL);
else
seq_printf(s, ",%s", OCFS2_HB_GLOBAL);
} else
seq_printf(s, ",%s", OCFS2_HB_NONE);
if (opts & OCFS2_MOUNT_NOINTR)
seq_printf(s, ",nointr");
if (opts & OCFS2_MOUNT_DATA_WRITEBACK)
seq_printf(s, ",data=writeback");
else
seq_printf(s, ",data=ordered");
if (opts & OCFS2_MOUNT_BARRIER)
seq_printf(s, ",barrier=1");
if (opts & OCFS2_MOUNT_ERRORS_PANIC)
seq_printf(s, ",errors=panic");
else if (opts & OCFS2_MOUNT_ERRORS_CONT)
seq_printf(s, ",errors=continue");
else
seq_printf(s, ",errors=remount-ro");
if (osb->preferred_slot != OCFS2_INVALID_SLOT)
seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
if (osb->osb_commit_interval)
seq_printf(s, ",commit=%u",
(unsigned) (osb->osb_commit_interval / HZ));
local_alloc_megs = osb->local_alloc_bits >> (20 - osb->s_clustersize_bits);
if (local_alloc_megs != ocfs2_la_default_mb(osb))
seq_printf(s, ",localalloc=%d", local_alloc_megs);
if (opts & OCFS2_MOUNT_LOCALFLOCKS)
seq_printf(s, ",localflocks,");
if (osb->osb_cluster_stack[0])
seq_show_option(s, "cluster_stack", osb->osb_cluster_stack);
if (opts & OCFS2_MOUNT_USRQUOTA)
seq_printf(s, ",usrquota");
if (opts & OCFS2_MOUNT_GRPQUOTA)
seq_printf(s, ",grpquota");
if (opts & OCFS2_MOUNT_COHERENCY_BUFFERED)
seq_printf(s, ",coherency=buffered");
else
seq_printf(s, ",coherency=full");
if (opts & OCFS2_MOUNT_NOUSERXATTR)
seq_printf(s, ",nouser_xattr");
else
seq_printf(s, ",user_xattr");
if (opts & OCFS2_MOUNT_INODE64)
seq_printf(s, ",inode64");
if (opts & OCFS2_MOUNT_POSIX_ACL)
seq_printf(s, ",acl");
else
seq_printf(s, ",noacl");
if (osb->osb_resv_level != OCFS2_DEFAULT_RESV_LEVEL)
seq_printf(s, ",resv_level=%d", osb->osb_resv_level);
if (osb->osb_dir_resv_level != osb->osb_resv_level)
seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level);
if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
seq_printf(s, ",journal_async_commit");
return 0;
}
static int __init ocfs2_init(void)
{
int status;
status = init_ocfs2_uptodate_cache();
if (status < 0)
goto out1;
status = ocfs2_initialize_mem_caches();
if (status < 0)
goto out2;
ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
ocfs2_set_locking_protocol();
status = register_quota_format(&ocfs2_quota_format);
if (status < 0)
goto out3;
status = register_filesystem(&ocfs2_fs_type);
if (!status)
return 0;
unregister_quota_format(&ocfs2_quota_format);
out3:
debugfs_remove(ocfs2_debugfs_root);
ocfs2_free_mem_caches();
out2:
exit_ocfs2_uptodate_cache();
out1:
mlog_errno(status);
return status;
}
static void __exit ocfs2_exit(void)
{
unregister_quota_format(&ocfs2_quota_format);
debugfs_remove(ocfs2_debugfs_root);
ocfs2_free_mem_caches();
unregister_filesystem(&ocfs2_fs_type);
exit_ocfs2_uptodate_cache();
}
static void ocfs2_put_super(struct super_block *sb)
{
trace_ocfs2_put_super(sb);
ocfs2_sync_blockdev(sb);
ocfs2_dismount_volume(sb, 0);
}
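/*
 * Report filesystem statistics from the global bitmap inode, read under
 * a shared cluster lock. Block counts are converted from cluster counts.
 */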
static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ocfs2_super *osb;
u32 numbits, freebits;
int status;
struct ocfs2_dinode *bm_lock;
struct buffer_head *bh = NULL;
struct inode *inode = NULL;
trace_ocfs2_statfs(dentry->d_sb, buf);
osb = OCFS2_SB(dentry->d_sb);
inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!inode) {
mlog(ML_ERROR, "failed to get bitmap inode\n");
status = -EIO;
goto bail;
}
status = ocfs2_inode_lock(inode, &bh, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bm_lock = (struct ocfs2_dinode *) bh->b_data;
numbits = le32_to_cpu(bm_lock->id1.bitmap1.i_total);
freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used);
buf->f_type = OCFS2_SUPER_MAGIC;
buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_namelen = OCFS2_MAX_FILENAME_LEN;
buf->f_blocks = ((sector_t) numbits) *
(osb->s_clustersize >> osb->sb->s_blocksize_bits);
buf->f_bfree = ((sector_t) freebits) *
(osb->s_clustersize >> osb->sb->s_blocksize_bits);
buf->f_bavail = buf->f_bfree;
buf->f_files = numbits;
buf->f_ffree = freebits;
buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
& 0xFFFFFFFFUL;
buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
brelse(bh);
ocfs2_inode_unlock(inode, 0);
status = 0;
bail:
iput(inode);
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_inode_init_once(void *data)
{
struct ocfs2_inode_info *oi = data;
oi->ip_flags = 0;
oi->ip_open_count = 0;
spin_lock_init(&oi->ip_lock);
ocfs2_extent_map_init(&oi->vfs_inode);
INIT_LIST_HEAD(&oi->ip_io_markers);
INIT_LIST_HEAD(&oi->ip_unwritten_list);
oi->ip_dir_start_lookup = 0;
init_rwsem(&oi->ip_alloc_sem);
init_rwsem(&oi->ip_xattr_sem);
mutex_init(&oi->ip_io_mutex);
oi->ip_blkno = 0ULL;
oi->ip_clusters = 0;
oi->ip_next_orphan = NULL;
ocfs2_resv_init_once(&oi->ip_la_data_resv);
ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
ocfs2_lock_res_init_once(&oi->ip_open_lockres);
ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode),
&ocfs2_inode_caching_ops);
inode_init_once(&oi->vfs_inode);
}
static int ocfs2_initialize_mem_caches(void)
{
ocfs2_inode_cachep = kmem_cache_create("ocfs2_inode_cache",
sizeof(struct ocfs2_inode_info),
0,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
ocfs2_inode_init_once);
ocfs2_dquot_cachep = kmem_cache_create("ocfs2_dquot_cache",
sizeof(struct ocfs2_dquot),
0,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
NULL);
ocfs2_qf_chunk_cachep = kmem_cache_create("ocfs2_qf_chunk_cache",
sizeof(struct ocfs2_quota_chunk),
0,
(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
NULL);
if (!ocfs2_inode_cachep || !ocfs2_dquot_cachep ||
!ocfs2_qf_chunk_cachep) {
kmem_cache_destroy(ocfs2_inode_cachep);
kmem_cache_destroy(ocfs2_dquot_cachep);
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
return -ENOMEM;
}
return 0;
}
static void ocfs2_free_mem_caches(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(ocfs2_inode_cachep);
ocfs2_inode_cachep = NULL;
kmem_cache_destroy(ocfs2_dquot_cachep);
ocfs2_dquot_cachep = NULL;
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
ocfs2_qf_chunk_cachep = NULL;
}
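/*
 * Read a single block of @sect_size bytes, forcing a re-read from disk
 * by clearing the buffer uptodate flag (unless the buffer is dirty).
 */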
static int ocfs2_get_sector(struct super_block *sb,
struct buffer_head **bh,
int block,
int sect_size)
{
if (!sb_set_blocksize(sb, sect_size)) {
mlog(ML_ERROR, "unable to set blocksize\n");
return -EIO;
}
*bh = sb_getblk(sb, block);
if (!*bh) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
lock_buffer(*bh);
if (!buffer_dirty(*bh))
clear_buffer_uptodate(*bh);
unlock_buffer(*bh);
if (bh_read(*bh, 0) < 0) {
mlog_errno(-EIO);
brelse(*bh);
*bh = NULL;
return -EIO;
}
return 0;
}
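/*
 * Mount the volume proper: initialize the cluster (DLM) connection,
 * take the super lock, claim a node slot, load the node-local system
 * inodes, check the volume and initialize the truncate log.
 */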
static int ocfs2_mount_volume(struct super_block *sb)
{
int status = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_is_hard_readonly(osb))
goto out;
mutex_init(&osb->obs_trim_fs_mutex);
status = ocfs2_dlm_init(osb);
if (status < 0) {
mlog_errno(status);
if (status == -EBADR && ocfs2_userspace_stack(osb))
mlog(ML_ERROR, "couldn't mount because cluster name on"
" disk does not match the running cluster name.\n");
goto out;
}
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
goto out_dlm;
}
/* This will load up the node map and add ourselves to it. */
status = ocfs2_find_slot(osb);
if (status < 0) {
mlog_errno(status);
goto out_super_lock;
}
/* load all node-local system inodes */
status = ocfs2_init_local_system_inodes(osb);
if (status < 0) {
mlog_errno(status);
goto out_super_lock;
}
status = ocfs2_check_volume(osb);
if (status < 0) {
mlog_errno(status);
goto out_system_inodes;
}
status = ocfs2_truncate_log_init(osb);
if (status < 0) {
mlog_errno(status);
goto out_check_volume;
}
ocfs2_super_unlock(osb, 1);
return 0;
out_check_volume:
ocfs2_free_replay_slots(osb);
out_system_inodes:
if (osb->local_alloc_state == OCFS2_LA_ENABLED)
ocfs2_shutdown_local_alloc(osb);
ocfs2_release_system_inodes(osb);
/* before journal shutdown, we should release slot_info */
ocfs2_free_slot_info(osb);
ocfs2_journal_shutdown(osb);
out_super_lock:
ocfs2_super_unlock(osb, 1);
out_dlm:
ocfs2_dlm_shutdown(osb, 0);
out:
return status;
}
static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
{
int tmp, hangup_needed = 0;
struct ocfs2_super *osb = NULL;
char nodestr[12];
trace_ocfs2_dismount_volume(sb);
BUG_ON(!sb);
osb = OCFS2_SB(sb);
BUG_ON(!osb);
/* Remove file check sysfs related directories/files,
 * and wait for the pending file check operations */
ocfs2_filecheck_remove_sysfs(osb);
kset_unregister(osb->osb_dev_kset);
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
ocfs2_disable_quotas(osb);
/* All dquots should be freed by now */
WARN_ON(!llist_empty(&osb->dquot_drop_list));
/* Wait for worker to be done with the work structure in osb */
cancel_work_sync(&osb->dquot_drop_work);
ocfs2_shutdown_local_alloc(osb);
ocfs2_truncate_log_shutdown(osb);
/* This will disable recovery and flush any recovery work. */
ocfs2_recovery_exit(osb);
ocfs2_sync_blockdev(sb);
ocfs2_purge_refcount_trees(osb);
/* No cluster connection means we've failed during mount, so skip
* all the steps which depended on that to complete. */
if (osb->cconn) {
tmp = ocfs2_super_lock(osb, 1);
if (tmp < 0) {
mlog_errno(tmp);
return;
}
}
if (osb->slot_num != OCFS2_INVALID_SLOT)
ocfs2_put_slot(osb);
if (osb->cconn)
ocfs2_super_unlock(osb, 1);
ocfs2_release_system_inodes(osb);
ocfs2_journal_shutdown(osb);
/*
* If we're dismounting due to mount error, mount.ocfs2 will clean
* up heartbeat. If we're a local mount, there is no heartbeat.
* If we failed before we got a uuid_str yet, we can't stop
* heartbeat. Otherwise, do it.
*/
if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
!ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
debugfs_remove_recursive(osb->osb_debug_root);
if (hangup_needed)
ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str));
atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
if (ocfs2_mount_local(osb))
snprintf(nodestr, sizeof(nodestr), "local");
else
snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num);
printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %s)\n",
osb->dev_str, nodestr);
ocfs2_delete_osb(osb);
kfree(osb);
sb->s_dev = 0;
sb->s_fs_info = NULL;
}
static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uuid,
unsigned uuid_bytes)
{
int i, ret;
char *ptr;
BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN);
osb->uuid_str = kzalloc(OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL);
if (osb->uuid_str == NULL)
return -ENOMEM;
for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) {
/* snprintf writes two hex digits plus a trailing NUL */
ret = snprintf(ptr, 3, "%02X", uuid[i]);
if (ret != 2) /* drop super cleans up */
return -EINVAL;
/* then only advance past the last char */
ptr += 2;
}
return 0;
}
/* Make sure entire volume is addressable by our journal. Requires
osb_clusters_at_boot to be valid and for the journal to have been
initialized by ocfs2_journal_init(). */
static int ocfs2_journal_addressable(struct ocfs2_super *osb)
{
int status = 0;
u64 max_block =
ocfs2_clusters_to_blocks(osb->sb,
osb->osb_clusters_at_boot) - 1;
/* 32-bit block number is always OK. */
if (max_block <= (u32)~0ULL)
goto out;
/* Volume is "huge", so see if our journal is new enough to
support it. */
if (!(OCFS2_HAS_COMPAT_FEATURE(osb->sb,
OCFS2_FEATURE_COMPAT_JBD2_SB) &&
jbd2_journal_check_used_features(osb->journal->j_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT))) {
mlog(ML_ERROR, "The journal cannot address the entire volume. "
"Enable the 'block64' journal option with tunefs.ocfs2");
status = -EFBIG;
goto out;
}
out:
return status;
}
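/*
 * Allocate and initialize the in-memory ocfs2_super from the on-disk
 * superblock: validate features, cluster size and slot count, and set
 * up recovery state, the journal, system inodes and the ordered
 * workqueue.
 */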
static int ocfs2_initialize_super(struct super_block *sb,
struct buffer_head *bh,
int sector_size,
struct ocfs2_blockcheck_stats *stats)
{
int status;
int i, cbits, bbits;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
struct inode *inode = NULL;
struct ocfs2_super *osb;
u64 total_blocks;
osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
if (!osb) {
status = -ENOMEM;
mlog_errno(status);
goto out;
}
sb->s_fs_info = osb;
sb->s_op = &ocfs2_sops;
sb->s_d_op = &ocfs2_dentry_ops;
sb->s_export_op = &ocfs2_export_ops;
sb->s_qcop = &dquot_quotactl_sysfile_ops;
sb->dq_op = &ocfs2_quota_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb->s_xattr = ocfs2_xattr_handlers;
sb->s_time_gran = 1;
sb->s_flags |= SB_NOATIME;
/* this is needed to support O_LARGEFILE */
cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
memcpy(&sb->s_uuid, di->id2.i_super.s_uuid,
sizeof(di->id2.i_super.s_uuid));
osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
for (i = 0; i < 3; i++)
osb->osb_dx_seed[i] = le32_to_cpu(di->id2.i_super.s_dx_seed[i]);
osb->osb_dx_seed[3] = le32_to_cpu(di->id2.i_super.s_uuid_hash);
osb->sb = sb;
osb->s_sectsize_bits = blksize_bits(sector_size);
BUG_ON(!osb->s_sectsize_bits);
spin_lock_init(&osb->dc_task_lock);
init_waitqueue_head(&osb->dc_event);
osb->dc_work_sequence = 0;
osb->dc_wake_sequence = 0;
INIT_LIST_HEAD(&osb->blocked_lock_list);
osb->blocked_lock_count = 0;
spin_lock_init(&osb->osb_lock);
spin_lock_init(&osb->osb_xattr_lock);
ocfs2_init_steal_slots(osb);
mutex_init(&osb->system_file_mutex);
atomic_set(&osb->alloc_stats.moves, 0);
atomic_set(&osb->alloc_stats.local_data, 0);
atomic_set(&osb->alloc_stats.bitmap_data, 0);
atomic_set(&osb->alloc_stats.bg_allocs, 0);
atomic_set(&osb->alloc_stats.bg_extends, 0);
/* Copy the blockcheck stats from the superblock probe */
osb->osb_ecc_stats = *stats;
ocfs2_init_node_maps(osb);
snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots);
if (osb->max_slots > OCFS2_MAX_SLOTS || osb->max_slots == 0) {
mlog(ML_ERROR, "Invalid number of node slots (%u)\n",
osb->max_slots);
status = -EINVAL;
goto out;
}
ocfs2_orphan_scan_init(osb);
status = ocfs2_recovery_init(osb);
if (status) {
mlog(ML_ERROR, "Unable to initialize recovery state\n");
mlog_errno(status);
goto out;
}
init_waitqueue_head(&osb->checkpoint_event);
osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
osb->slot_num = OCFS2_INVALID_SLOT;
osb->s_xattr_inline_size = le16_to_cpu(
di->id2.i_super.s_xattr_inline_size);
osb->local_alloc_state = OCFS2_LA_UNUSED;
osb->local_alloc_bh = NULL;
INIT_DELAYED_WORK(&osb->la_enable_wq, ocfs2_la_enable_worker);
init_waitqueue_head(&osb->osb_mount_event);
ocfs2_resmap_init(osb, &osb->osb_la_resmap);
osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
if (!osb->vol_label) {
mlog(ML_ERROR, "unable to alloc vol label\n");
status = -ENOMEM;
goto out_recovery_map;
}
osb->slot_recovery_generations =
kcalloc(osb->max_slots, sizeof(*osb->slot_recovery_generations),
GFP_KERNEL);
if (!osb->slot_recovery_generations) {
status = -ENOMEM;
mlog_errno(status);
goto out_vol_label;
}
init_waitqueue_head(&osb->osb_wipe_event);
osb->osb_orphan_wipes = kcalloc(osb->max_slots,
sizeof(*osb->osb_orphan_wipes),
GFP_KERNEL);
if (!osb->osb_orphan_wipes) {
status = -ENOMEM;
mlog_errno(status);
goto out_slot_recovery_gen;
}
osb->osb_rf_lock_tree = RB_ROOT;
osb->s_feature_compat =
le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat);
osb->s_feature_ro_compat =
le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_ro_compat);
osb->s_feature_incompat =
le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_incompat);
if ((i = OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_INCOMPAT_SUPP))) {
mlog(ML_ERROR, "couldn't mount because of unsupported "
"optional features (%x).\n", i);
status = -EINVAL;
goto out_orphan_wipes;
}
if (!sb_rdonly(osb->sb) && (i = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP))) {
mlog(ML_ERROR, "couldn't mount RDWR because of "
"unsupported optional features (%x).\n", i);
status = -EINVAL;
goto out_orphan_wipes;
}
if (ocfs2_clusterinfo_valid(osb)) {
/*
* ci_stack and ci_cluster in ocfs2_cluster_info may not be null
* terminated, so make sure no overflow happens here by using
* memcpy. Destination strings will always be null terminated
* because osb is allocated using kzalloc.
*/
osb->osb_stackflags =
OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
memcpy(osb->osb_cluster_stack,
OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
OCFS2_STACK_LABEL_LEN);
if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR,
"couldn't mount because of an invalid "
"cluster stack label (%s) \n",
osb->osb_cluster_stack);
status = -EINVAL;
goto out_orphan_wipes;
}
memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
OCFS2_CLUSTER_NAME_LEN);
} else {
/* An empty string matches the behavior of classic tools
 * that don't know about s_cluster_info. */
osb->osb_cluster_stack[0] = '\0';
}
get_random_bytes(&osb->s_next_generation, sizeof(u32));
/*
* FIXME
* This should be done in ocfs2_journal_init(), but doing it there
* makes any inode writeback operation crash the filesystem.
*/
status = ocfs2_journal_alloc(osb);
if (status < 0)
goto out_orphan_wipes;
INIT_WORK(&osb->dquot_drop_work, ocfs2_drop_dquot_refs);
init_llist_head(&osb->dquot_drop_list);
/* get some pseudo constants for clustersize bits */
osb->s_clustersize_bits =
le32_to_cpu(di->id2.i_super.s_clustersize_bits);
osb->s_clustersize = 1 << osb->s_clustersize_bits;
if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE ||
osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) {
mlog(ML_ERROR, "Volume has invalid cluster size (%d)\n",
osb->s_clustersize);
status = -EINVAL;
goto out_journal;
}
total_blocks = ocfs2_clusters_to_blocks(osb->sb,
le32_to_cpu(di->i_clusters));
status = generic_check_addressable(osb->sb->s_blocksize_bits,
total_blocks);
if (status) {
mlog(ML_ERROR, "Volume too large "
"to mount safely on this system");
status = -EFBIG;
goto out_journal;
}
if (ocfs2_setup_osb_uuid(osb, di->id2.i_super.s_uuid,
sizeof(di->id2.i_super.s_uuid))) {
mlog(ML_ERROR, "Out of memory trying to setup our uuid.\n");
status = -ENOMEM;
goto out_journal;
}
strscpy(osb->vol_label, di->id2.i_super.s_label,
OCFS2_MAX_VOL_LABEL_LEN);
osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno);
osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno);
osb->first_cluster_group_blkno =
le64_to_cpu(di->id2.i_super.s_first_cluster_group);
osb->fs_generation = le32_to_cpu(di->i_fs_generation);
osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash);
trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str,
(unsigned long long)osb->root_blkno,
(unsigned long long)osb->system_dir_blkno,
osb->s_clustersize_bits);
osb->osb_dlm_debug = ocfs2_new_dlm_debug();
if (!osb->osb_dlm_debug) {
status = -ENOMEM;
mlog_errno(status);
goto out_uuid_str;
}
atomic_set(&osb->vol_state, VOLUME_INIT);
/* load root, system_dir, and all global system inodes */
status = ocfs2_init_global_system_inodes(osb);
if (status < 0) {
mlog_errno(status);
goto out_dlm_out;
}
/*
* global bitmap
*/
inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
goto out_system_inodes;
}
osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
osb->osb_clusters_at_boot = OCFS2_I(inode)->ip_clusters;
iput(inode);
osb->bitmap_cpg = ocfs2_group_bitmap_size(sb, 0,
osb->s_feature_incompat) * 8;
status = ocfs2_init_slot_info(osb);
if (status < 0) {
mlog_errno(status);
goto out_system_inodes;
}
osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
if (!osb->ocfs2_wq) {
status = -ENOMEM;
mlog_errno(status);
goto out_slot_info;
}
return status;
out_slot_info:
ocfs2_free_slot_info(osb);
out_system_inodes:
ocfs2_release_system_inodes(osb);
out_dlm_out:
ocfs2_put_dlm_debug(osb->osb_dlm_debug);
out_uuid_str:
kfree(osb->uuid_str);
out_journal:
kfree(osb->journal);
out_orphan_wipes:
kfree(osb->osb_orphan_wipes);
out_slot_recovery_gen:
kfree(osb->slot_recovery_generations);
out_vol_label:
kfree(osb->vol_label);
out_recovery_map:
kfree(osb->recovery_map);
out:
kfree(osb);
sb->s_fs_info = NULL;
return status;
}
/*
* will return: -EAGAIN if it is ok to keep searching for superblocks
* -EINVAL if there is a bad superblock
* 0 on success
*/
static int ocfs2_verify_volume(struct ocfs2_dinode *di,
struct buffer_head *bh,
u32 blksz,
struct ocfs2_blockcheck_stats *stats)
{
int status = -EAGAIN;
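/*
* -EAGAIN is the default "no superblock at this block size, keep
* probing" answer; only a block bearing the OCFS2 signature can turn
* it into success or a hard error such as -EINVAL.
*/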
if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
/* We have to do a raw check of the feature here */
if (le32_to_cpu(di->id2.i_super.s_feature_incompat) &
OCFS2_FEATURE_INCOMPAT_META_ECC) {
status = ocfs2_block_check_validate(bh->b_data,
bh->b_size,
&di->i_check,
stats);
if (status)
goto out;
}
status = -EINVAL;
if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) {
mlog(ML_ERROR, "found superblock with incorrect block "
"size: found %u, should be %u\n",
1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits),
blksz);
} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
OCFS2_MAJOR_REV_LEVEL ||
le16_to_cpu(di->id2.i_super.s_minor_rev_level) !=
OCFS2_MINOR_REV_LEVEL) {
mlog(ML_ERROR, "found superblock with bad version: "
"found %u.%u, should be %u.%u\n",
le16_to_cpu(di->id2.i_super.s_major_rev_level),
le16_to_cpu(di->id2.i_super.s_minor_rev_level),
OCFS2_MAJOR_REV_LEVEL,
OCFS2_MINOR_REV_LEVEL);
} else if (bh->b_blocknr != le64_to_cpu(di->i_blkno)) {
mlog(ML_ERROR, "bad block number on superblock: "
"found %llu, should be %llu\n",
(unsigned long long)le64_to_cpu(di->i_blkno),
(unsigned long long)bh->b_blocknr);
} else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 ||
le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) {
mlog(ML_ERROR, "bad cluster size found: %u\n",
1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits));
} else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) {
mlog(ML_ERROR, "bad root_blkno: 0\n");
} else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) {
mlog(ML_ERROR, "bad system_dir_blkno: 0\n");
} else if (le16_to_cpu(di->id2.i_super.s_max_slots) > OCFS2_MAX_SLOTS) {
mlog(ML_ERROR,
"Superblock slots found greater than file system "
"maximum: found %u, max %u\n",
le16_to_cpu(di->id2.i_super.s_max_slots),
OCFS2_MAX_SLOTS);
} else {
/* found it! */
status = 0;
}
}
out:
if (status && status != -EAGAIN)
mlog_errno(status);
return status;
}
static int ocfs2_check_volume(struct ocfs2_super *osb)
{
int status;
int dirty;
int local;
struct ocfs2_dinode *local_alloc = NULL; /* only used if we recover ourselves */
/* Init our journal object. */
status = ocfs2_journal_init(osb, &dirty);
if (status < 0) {
mlog(ML_ERROR, "Could not initialize journal!\n");
goto finally;
}
/* Now that journal has been initialized, check to make sure
entire volume is addressable. */
status = ocfs2_journal_addressable(osb);
if (status)
goto finally;
/* If the journal was unmounted cleanly then we don't want to
* recover anything. Otherwise, journal_load will do that
* dirty work for us :) */
if (!dirty) {
status = ocfs2_journal_wipe(osb->journal, 0);
if (status < 0) {
mlog_errno(status);
goto finally;
}
} else {
printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
"unmounted cleanly, recovering it.\n", osb->dev_str);
}
local = ocfs2_mount_local(osb);
/* will play back anything left in the journal. */
status = ocfs2_journal_load(osb->journal, local, dirty);
if (status < 0) {
mlog(ML_ERROR, "ocfs2 journal load failed! %d\n", status);
goto finally;
}
if (osb->s_mount_opt & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
jbd2_journal_set_features(osb->journal->j_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
else
jbd2_journal_clear_features(osb->journal->j_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
if (dirty) {
/* recover my local alloc if we didn't unmount cleanly. */
status = ocfs2_begin_local_alloc_recovery(osb,
osb->slot_num,
&local_alloc);
if (status < 0) {
mlog_errno(status);
goto finally;
}
/* we complete the recovery process after we've marked
* ourselves as mounted. */
}
status = ocfs2_load_local_alloc(osb);
if (status < 0) {
mlog_errno(status);
goto finally;
}
if (dirty) {
/* Recovery will be completed after we've mounted the
* rest of the volume. */
osb->local_alloc_copy = local_alloc;
local_alloc = NULL;
}
/* go through each journal, trylock it and if you get the
* lock, and it's marked as dirty, set the bit in the recover
* map and launch a recovery thread for it. */
status = ocfs2_mark_dead_nodes(osb);
if (status < 0) {
mlog_errno(status);
goto finally;
}
status = ocfs2_compute_replay_slots(osb);
if (status < 0)
mlog_errno(status);
finally:
kfree(local_alloc);
if (status)
mlog_errno(status);
return status;
}
/*
* This routine gets called from dismount or close whenever a dismount
* of the volume is requested and the osb open count becomes 1.
* It removes the osb from the global list and frees all of the
* initialized resources and file objects.
*/
static void ocfs2_delete_osb(struct ocfs2_super *osb)
{
/* This function assumes that the caller has the main osb resource */
/* ocfs2_initialize_super() has already created this workqueue */
if (osb->ocfs2_wq)
destroy_workqueue(osb->ocfs2_wq);
ocfs2_free_slot_info(osb);
kfree(osb->osb_orphan_wipes);
kfree(osb->slot_recovery_generations);
/* FIXME
* This belongs in journal shutdown, but because we have to
* allocate osb->journal in the middle of ocfs2_initialize_super(),
* we free it here.
*/
kfree(osb->journal);
kfree(osb->local_alloc_copy);
kfree(osb->uuid_str);
kfree(osb->vol_label);
ocfs2_put_dlm_debug(osb->osb_dlm_debug);
memset(osb, 0, sizeof(struct ocfs2_super));
}
/* Depending on the mount option passed, perform one of the following:
* Put OCFS2 into a readonly state (default)
* Return -EIO so that only the calling process errs
* Fix the error as if fsck.ocfs2 -y
* panic
*/
static int ocfs2_handle_error(struct super_block *sb)
{
struct ocfs2_super *osb = OCFS2_SB(sb);
int rv = 0;
ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS);
pr_crit("On-disk corruption discovered. "
"Please run fsck.ocfs2 once the filesystem is unmounted.\n");
if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC) {
panic("OCFS2: (device %s): panic forced after error\n",
sb->s_id);
} else if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_CONT) {
pr_crit("OCFS2: Returning error to the calling process.\n");
rv = -EIO;
} else { /* default option */
rv = -EROFS;
if (sb_rdonly(sb) && (ocfs2_is_soft_readonly(osb) || ocfs2_is_hard_readonly(osb)))
return rv;
pr_crit("OCFS2: File system is now read-only.\n");
sb->s_flags |= SB_RDONLY;
ocfs2_set_ro_flag(osb, 0);
}
return rv;
}
int __ocfs2_error(struct super_block *sb, const char *function,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
/* Not using mlog here because we want to show the actual
* function the error came from. */
printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV",
sb->s_id, function, &vaf);
va_end(args);
return ocfs2_handle_error(sb);
}
/* Handle critical errors. This is intentionally more drastic than
* ocfs2_handle_error, so we only use for things like journal errors,
* etc. */
void __ocfs2_abort(struct super_block *sb, const char *function,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV",
sb->s_id, function, &vaf);
va_end(args);
/* We don't have the cluster support yet to go straight to
* hard readonly in here. Until then, we want to keep
* ocfs2_abort() so that we can at least mark critical
* errors.
*
* TODO: This should abort the journal and alert other nodes
* that our slot needs recovery. */
/* Force a panic(). This stinks, but it's better than letting
* things continue without having a proper hard readonly
* here. */
if (!ocfs2_mount_local(OCFS2_SB(sb)))
OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
ocfs2_handle_error(sb);
}
/*
* Block all signals; the BUG_ON is safe because in-kernel
* sigprocmask() only fails when the SIG_* "how" argument is wrong.
*/
void ocfs2_block_signals(sigset_t *oldset)
{
int rc;
sigset_t blocked;
sigfillset(&blocked);
rc = sigprocmask(SIG_BLOCK, &blocked, oldset);
BUG_ON(rc);
}
void ocfs2_unblock_signals(sigset_t *oldset)
{
int rc = sigprocmask(SIG_SETMASK, oldset, NULL);
BUG_ON(rc);
}
module_init(ocfs2_init);
module_exit(ocfs2_exit);
/* end of fs/ocfs2/super.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* suballoc.c
*
* metadata alloc and free
* Inspired by ext3 block groups.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#define NOT_ALLOC_NEW_GROUP 0
#define ALLOC_NEW_GROUP 0x1
#define ALLOC_GROUPS_FROM_GLOBAL 0x2
#define OCFS2_MAX_TO_STEAL 1024
struct ocfs2_suballoc_result {
u64 sr_bg_blkno; /* The bg we allocated from. Set to 0
when a block group is contiguous. */
u64 sr_bg_stable_blkno; /* Doesn't change; always set to the
target block group descriptor block. */
u64 sr_blkno; /* The first allocated block */
unsigned int sr_bit_offset; /* The bit in the bg */
unsigned int sr_bits; /* How many bits we claimed */
};
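/*
* Work out which block group a result came from: discontiguous hits
* keep the group in sr_bg_blkno; otherwise the group descriptor block
* is the allocated block minus its bit offset within the group.
*/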
static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
{
if (res->sr_blkno == 0)
return 0;
if (res->sr_bg_blkno)
return res->sr_bg_blkno;
return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
}
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
struct inode *alloc_inode,
struct buffer_head *bh,
u64 max_block,
u64 *last_alloc_group,
int flags);
static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res);
static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res);
static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res);
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr);
static int ocfs2_relink_block_group(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *fe_bh,
struct buffer_head *bg_bh,
struct buffer_head *prev_bg_bh,
u16 chain);
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
u32 wanted);
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
u64 bg_blkno,
u16 bg_bit_off);
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
u64 data_blkno,
u64 *bg_blkno,
u16 *bg_bit_off);
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
u32 bits_wanted, u64 max_block,
int flags,
struct ocfs2_alloc_context **ac);
void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
{
struct inode *inode = ac->ac_inode;
if (inode) {
if (ac->ac_which != OCFS2_AC_USE_LOCAL)
ocfs2_inode_unlock(inode, 1);
inode_unlock(inode);
iput(inode);
ac->ac_inode = NULL;
}
brelse(ac->ac_bh);
ac->ac_bh = NULL;
ac->ac_resv = NULL;
kfree(ac->ac_find_loc_priv);
ac->ac_find_loc_priv = NULL;
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
{
ocfs2_free_ac_resource(ac);
kfree(ac);
}
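/*
* Bits in one suballocation group: clusters-per-group times
* bits-per-cluster. With a hypothetical geometry of cl_cpg = 32 and
* cl_bpc = 4, each group carries 32 * 4 = 128 bits.
*/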
static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
{
return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
}
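/*
* do_error() behaves differently per caller: on the resize path a bad
* descriptor is only logged and the checks continue, while outside of
* resize it is fatal and the macro returns ocfs2_error() directly out
* of the enclosing validator.
*/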
#define do_error(fmt, ...) \
do { \
if (resize) \
mlog(ML_ERROR, fmt, ##__VA_ARGS__); \
else \
return ocfs2_error(sb, fmt, ##__VA_ARGS__); \
} while (0)
static int ocfs2_validate_gd_self(struct super_block *sb,
struct buffer_head *bh,
int resize)
{
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
do_error("Group descriptor #%llu has bad signature %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
gd->bg_signature);
}
if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
do_error("Group descriptor #%llu has an invalid bg_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(gd->bg_blkno));
}
if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
do_error("Group descriptor #%llu has an invalid fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(gd->bg_generation));
}
if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
do_error("Group descriptor #%llu has bit count %u but claims that %u are free\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits),
le16_to_cpu(gd->bg_free_bits_count));
}
if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
do_error("Group descriptor #%llu has bit count %u but max bitmap bits of %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits),
8 * le16_to_cpu(gd->bg_size));
}
return 0;
}
static int ocfs2_validate_gd_parent(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh,
int resize)
{
unsigned int max_bits;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
if (di->i_blkno != gd->bg_parent_dinode) {
do_error("Group descriptor #%llu has bad parent pointer (%llu, expected %llu)\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
(unsigned long long)le64_to_cpu(di->i_blkno));
}
max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
if (le16_to_cpu(gd->bg_bits) > max_bits) {
do_error("Group descriptor #%llu has bit count of %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits));
}
/* In resize, we may meet the case bg_chain == cl_next_free_rec. */
if ((le16_to_cpu(gd->bg_chain) >
le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
((le16_to_cpu(gd->bg_chain) ==
le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
do_error("Group descriptor #%llu has bad chain %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_chain));
}
return 0;
}
#undef do_error
/*
* This version only prints errors. It does not fail the filesystem, and
* exists only for resize.
*/
int ocfs2_check_group_descriptor(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh)
{
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
if (rc) {
mlog(ML_ERROR,
"Checksum failed for group descriptor %llu\n",
(unsigned long long)bh->b_blocknr);
} else
rc = ocfs2_validate_gd_self(sb, bh, 1);
if (!rc)
rc = ocfs2_validate_gd_parent(sb, di, bh, 1);
return rc;
}
static int ocfs2_validate_group_descriptor(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
trace_ocfs2_validate_group_descriptor(
(unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
if (rc)
return rc;
/*
* Errors after here are fatal.
*/
return ocfs2_validate_gd_self(sb, bh, 0);
}
int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
u64 gd_blkno, struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_block(INODE_CACHE(inode), gd_blkno, &tmp,
ocfs2_validate_group_descriptor);
if (rc)
goto out;
rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0);
if (rc) {
brelse(tmp);
goto out;
}
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!*bh)
*bh = tmp;
out:
return rc;
}
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
struct ocfs2_group_desc *bg,
struct ocfs2_chain_list *cl,
u64 p_blkno, unsigned int clusters)
{
struct ocfs2_extent_list *el = &bg->bg_list;
struct ocfs2_extent_rec *rec;
BUG_ON(!ocfs2_supports_discontig_bg(osb));
if (!el->l_next_free_rec)
el->l_count = cpu_to_le16(ocfs2_extent_recs_per_gd(osb->sb));
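/*
* e_cpos is the run's start expressed in clusters already covered by
* the group (bg_bits / bpc). With hypothetical values bg_bits = 128
* and cl_bpc = 4, the new run lands at cluster offset 32.
*/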
rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec)];
rec->e_blkno = cpu_to_le64(p_blkno);
rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
le16_to_cpu(cl->cl_bpc));
rec->e_leaf_clusters = cpu_to_le16(clusters);
le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&bg->bg_free_bits_count,
clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&el->l_next_free_rec, 1);
}
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl)
{
int status = 0;
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct super_block * sb = alloc_inode->i_sb;
if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
status = ocfs2_error(alloc_inode->i_sb,
"group block (%llu) != b_blocknr (%llu)\n",
(unsigned long long)group_blkno,
(unsigned long long) bg_bh->b_blocknr);
goto bail;
}
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
bg_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(bg, 0, sb->s_blocksize);
strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(osb->fs_generation);
bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
osb->s_feature_incompat));
bg->bg_chain = cpu_to_le16(my_chain);
bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
bg->bg_blkno = cpu_to_le64(group_blkno);
if (group_clusters == le16_to_cpu(cl->cl_cpg))
bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
else
ocfs2_bg_discontig_add_extent(osb, bg, cl, group_blkno,
group_clusters);
/* set the 1st bit in the bitmap to account for the descriptor block */
ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
ocfs2_journal_dirty(handle, bg_bh);
/* There is no need to zero out or otherwise initialize the
* other blocks in a group - All valid FS metadata in a block
* group stores the superblock fs_generation value at
* allocation time. */
bail:
if (status)
mlog_errno(status);
return status;
}
static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
{
u16 curr, best;
best = curr = 0;
while (curr < le16_to_cpu(cl->cl_count)) {
if (le32_to_cpu(cl->cl_recs[best].c_total) >
le32_to_cpu(cl->cl_recs[curr].c_total))
best = curr;
curr++;
}
return best;
}
static struct buffer_head *
ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl)
{
int status;
u32 bit_off, num_bits;
u64 bg_blkno;
struct buffer_head *bg_bh;
unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
status = ocfs2_claim_clusters(handle, ac,
le16_to_cpu(cl->cl_cpg), &bit_off,
&num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_block_group_alloc_contig(
(unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
bg_blkno, num_bits, alloc_rec, cl);
if (status < 0) {
brelse(bg_bh);
mlog_errno(status);
}
bail:
return status ? ERR_PTR(status) : bg_bh;
}
static int ocfs2_block_group_claim_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
unsigned int min_bits,
u32 *bit_off, u32 *num_bits)
{
int status = 0;
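/*
* Halve the request on each -ENOSPC so a fragmented bitmap can still
* serve smaller pieces; e.g. a failed claim of 16 clusters retries
* with 8, 4, 2 and finally 1 before giving up.
*/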
while (min_bits) {
status = ocfs2_claim_clusters(handle, ac, min_bits,
bit_off, num_bits);
if (status != -ENOSPC)
break;
min_bits >>= 1;
}
return status;
}
static int ocfs2_block_group_grow_discontig(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl,
unsigned int min_bits)
{
int status;
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
struct ocfs2_group_desc *bg =
(struct ocfs2_group_desc *)bg_bh->b_data;
unsigned int needed = le16_to_cpu(cl->cl_cpg) -
le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
u32 p_cpos, clusters;
u64 p_blkno;
struct ocfs2_extent_list *el = &bg->bg_list;
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
bg_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
while ((needed > 0) && (le16_to_cpu(el->l_next_free_rec) <
le16_to_cpu(el->l_count))) {
if (min_bits > needed)
min_bits = needed;
status = ocfs2_block_group_claim_bits(osb, handle, ac,
min_bits, &p_cpos,
&clusters);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
p_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cpos);
ocfs2_bg_discontig_add_extent(osb, bg, cl, p_blkno,
clusters);
min_bits = clusters;
needed = le16_to_cpu(cl->cl_cpg) -
le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
}
if (needed > 0) {
/*
* We have used up all the extent records but still can't
* fill up the cpg. So bail out.
*/
status = -ENOSPC;
goto bail;
}
ocfs2_journal_dirty(handle, bg_bh);
bail:
return status;
}
static void ocfs2_bg_alloc_cleanup(handle_t *handle,
struct ocfs2_alloc_context *cluster_ac,
struct inode *alloc_inode,
struct buffer_head *bg_bh)
{
int i, ret;
struct ocfs2_group_desc *bg;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
if (!bg_bh)
return;
bg = (struct ocfs2_group_desc *)bg_bh->b_data;
el = &bg->bg_list;
for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
cluster_ac->ac_bh,
le64_to_cpu(rec->e_blkno),
le16_to_cpu(rec->e_leaf_clusters));
if (ret)
mlog_errno(ret);
/* Try all the clusters to free */
}
ocfs2_remove_from_cache(INODE_CACHE(alloc_inode), bg_bh);
brelse(bg_bh);
}
static struct buffer_head *
ocfs2_block_group_alloc_discontig(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl)
{
int status;
u32 bit_off, num_bits;
u64 bg_blkno;
unsigned int min_bits = le16_to_cpu(cl->cl_cpg) >> 1;
struct buffer_head *bg_bh = NULL;
unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
if (!ocfs2_supports_discontig_bg(osb)) {
status = -ENOSPC;
goto bail;
}
status = ocfs2_extend_trans(handle,
ocfs2_calc_bg_discontig_credits(osb->sb));
if (status) {
mlog_errno(status);
goto bail;
}
/*
* We're going to be grabbing from multiple cluster groups.
* We don't have enough credits to relink them all, and the
* cluster groups will be staying in cache for the duration of
* this operation.
*/
ac->ac_disable_chain_relink = 1;
/* Claim the first region */
status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
&bit_off, &num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
min_bits = num_bits;
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_block_group_alloc_discontig(
(unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
bg_blkno, num_bits, alloc_rec, cl);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_block_group_grow_discontig(handle, alloc_inode,
bg_bh, ac, cl, min_bits);
if (status)
mlog_errno(status);
bail:
if (status)
ocfs2_bg_alloc_cleanup(handle, ac, alloc_inode, bg_bh);
return status ? ERR_PTR(status) : bg_bh;
}
/*
* We expect the block group allocator to already be locked.
*/
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
struct inode *alloc_inode,
struct buffer_head *bh,
u64 max_block,
u64 *last_alloc_group,
int flags)
{
int status, credits;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
struct ocfs2_chain_list *cl;
struct ocfs2_alloc_context *ac = NULL;
handle_t *handle = NULL;
u16 alloc_rec;
struct buffer_head *bg_bh = NULL;
struct ocfs2_group_desc *bg;
BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
cl = &fe->id2.i_chain;
status = ocfs2_reserve_clusters_with_limit(osb,
le16_to_cpu(cl->cl_cpg),
max_block, flags, &ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
credits = ocfs2_calc_group_alloc_credits(osb->sb,
le16_to_cpu(cl->cl_cpg));
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
if (last_alloc_group && *last_alloc_group != 0) {
trace_ocfs2_block_group_alloc(
(unsigned long long)*last_alloc_group);
ac->ac_last_group = *last_alloc_group;
}
bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
ac, cl);
if (PTR_ERR(bg_bh) == -ENOSPC)
bg_bh = ocfs2_block_group_alloc_discontig(handle,
alloc_inode,
ac, cl);
if (IS_ERR(bg_bh)) {
status = PTR_ERR(bg_bh);
bg_bh = NULL;
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
alloc_rec = le16_to_cpu(bg->bg_chain);
le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
le16_to_cpu(bg->bg_free_bits_count));
le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
le16_to_cpu(bg->bg_bits));
cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
le16_add_cpu(&cl->cl_next_free_rec, 1);
le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
le16_to_cpu(bg->bg_free_bits_count));
le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
le32_to_cpu(fe->i_clusters)));
spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0);
status = 0;
/* save the new last alloc group so that the caller can cache it. */
if (last_alloc_group)
*last_alloc_group = ac->ac_last_group;
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
if (ac)
ocfs2_free_alloc_context(ac);
brelse(bg_bh);
if (status)
mlog_errno(status);
return status;
}
static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac,
int type,
u32 slot,
u64 *last_alloc_group,
int flags)
{
int status;
u32 bits_wanted = ac->ac_bits_wanted;
struct inode *alloc_inode;
struct buffer_head *bh = NULL;
struct ocfs2_dinode *fe;
u32 free_bits;
alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
if (!alloc_inode) {
mlog_errno(-EINVAL);
return -EINVAL;
}
inode_lock(alloc_inode);
status = ocfs2_inode_lock(alloc_inode, &bh, 1);
if (status < 0) {
inode_unlock(alloc_inode);
iput(alloc_inode);
mlog_errno(status);
return status;
}
ac->ac_inode = alloc_inode;
ac->ac_alloc_slot = slot;
fe = (struct ocfs2_dinode *) bh->b_data;
/* The bh was validated by the inode read inside
* ocfs2_inode_lock(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
status = ocfs2_error(alloc_inode->i_sb,
"Invalid chain allocator %llu\n",
(unsigned long long)le64_to_cpu(fe->i_blkno));
goto bail;
}
free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
le32_to_cpu(fe->id1.bitmap1.i_used);
if (bits_wanted > free_bits) {
/* cluster bitmap never grows */
if (ocfs2_is_cluster_bitmap(alloc_inode)) {
trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
free_bits);
status = -ENOSPC;
goto bail;
}
if (!(flags & ALLOC_NEW_GROUP)) {
trace_ocfs2_reserve_suballoc_bits_no_new_group(
slot, bits_wanted, free_bits);
status = -ENOSPC;
goto bail;
}
status = ocfs2_block_group_alloc(osb, alloc_inode, bh,
ac->ac_max_block,
last_alloc_group, flags);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
atomic_inc(&osb->alloc_stats.bg_extends);
/* You should never ask for this much metadata */
BUG_ON(bits_wanted >
(le32_to_cpu(fe->id1.bitmap1.i_total)
- le32_to_cpu(fe->id1.bitmap1.i_used)));
}
get_bh(bh);
ac->ac_bh = bh;
bail:
brelse(bh);
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
{
spin_lock(&osb->osb_lock);
osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
atomic_set(&osb->s_num_inodes_stolen, 0);
}
static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
{
spin_lock(&osb->osb_lock);
osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
atomic_set(&osb->s_num_meta_stolen, 0);
}
void ocfs2_init_steal_slots(struct ocfs2_super *osb)
{
ocfs2_init_inode_steal_slot(osb);
ocfs2_init_meta_steal_slot(osb);
}
static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
{
spin_lock(&osb->osb_lock);
if (type == INODE_ALLOC_SYSTEM_INODE)
osb->s_inode_steal_slot = (u16)slot;
else if (type == EXTENT_ALLOC_SYSTEM_INODE)
osb->s_meta_steal_slot = (u16)slot;
spin_unlock(&osb->osb_lock);
}
static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
{
int slot = OCFS2_INVALID_SLOT;
spin_lock(&osb->osb_lock);
if (type == INODE_ALLOC_SYSTEM_INODE)
slot = osb->s_inode_steal_slot;
else if (type == EXTENT_ALLOC_SYSTEM_INODE)
slot = osb->s_meta_steal_slot;
spin_unlock(&osb->osb_lock);
return slot;
}
static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
{
return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
}
static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
{
return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
}
static int ocfs2_steal_resource(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac,
int type)
{
int i, status = -ENOSPC;
int slot = __ocfs2_get_steal_slot(osb, type);
/* Start to steal resource from the first slot after ours. */
if (slot == OCFS2_INVALID_SLOT)
slot = osb->slot_num + 1;
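/*
* Round-robin sketch (hypothetical numbers): with max_slots = 4 and
* slot_num = 1, the loop probes slots 2, 3, then wraps to 0, always
* skipping our own slot.
*/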
for (i = 0; i < osb->max_slots; i++, slot++) {
if (slot == osb->max_slots)
slot = 0;
if (slot == osb->slot_num)
continue;
status = ocfs2_reserve_suballoc_bits(osb, ac,
type,
(u32)slot, NULL,
NOT_ALLOC_NEW_GROUP);
if (status >= 0) {
__ocfs2_set_steal_slot(osb, slot, type);
break;
}
ocfs2_free_ac_resource(ac);
}
return status;
}
static int ocfs2_steal_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
}
static int ocfs2_steal_meta(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
}
int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
int blocks,
struct ocfs2_alloc_context **ac)
{
int status;
int slot = ocfs2_get_meta_steal_slot(osb);
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = blocks;
(*ac)->ac_which = OCFS2_AC_USE_META;
(*ac)->ac_group_search = ocfs2_block_group_search;
if (slot != OCFS2_INVALID_SLOT &&
atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
goto extent_steal;
atomic_set(&osb->s_num_meta_stolen, 0);
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num, NULL,
ALLOC_GROUPS_FROM_GLOBAL|ALLOC_NEW_GROUP);
if (status >= 0) {
status = 0;
if (slot != OCFS2_INVALID_SLOT)
ocfs2_init_meta_steal_slot(osb);
goto bail;
} else if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
ocfs2_free_ac_resource(*ac);
extent_steal:
status = ocfs2_steal_meta(osb, *ac);
atomic_inc(&osb->s_num_meta_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
struct ocfs2_extent_list *root_el,
struct ocfs2_alloc_context **ac)
{
return ocfs2_reserve_new_metadata_blocks(osb,
ocfs2_extend_meta_needed(root_el),
ac);
}
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac)
{
int status;
int slot = ocfs2_get_inode_steal_slot(osb);
u64 alloc_group;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = 1;
(*ac)->ac_which = OCFS2_AC_USE_INODE;
(*ac)->ac_group_search = ocfs2_block_group_search;
/*
* stat(2) can't handle i_ino > 32bits, so we tell the
* lower levels not to allocate us a block group past that
* limit. The 'inode64' mount option avoids this behavior.
*/
if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64))
(*ac)->ac_max_block = (u32)~0U;
/*
* slot is set when we successfully steal inode from other nodes.
* It is reset in 3 places:
* 1. when we flush the truncate log
* 2. when we complete local alloc recovery.
* 3. when we successfully allocate from our own slot.
* After it is set, we will keep stealing inodes until the steal
* count forces us to recheck our own slot for free space.
*/
if (slot != OCFS2_INVALID_SLOT &&
atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
goto inode_steal;
atomic_set(&osb->s_num_inodes_stolen, 0);
alloc_group = osb->osb_inode_alloc_group;
status = ocfs2_reserve_suballoc_bits(osb, *ac,
INODE_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num,
&alloc_group,
ALLOC_NEW_GROUP |
ALLOC_GROUPS_FROM_GLOBAL);
if (status >= 0) {
status = 0;
spin_lock(&osb->osb_lock);
osb->osb_inode_alloc_group = alloc_group;
spin_unlock(&osb->osb_lock);
trace_ocfs2_reserve_new_inode_new_group(
(unsigned long long)alloc_group);
/*
* Some inodes must be freed by us, so try to allocate
* from our own next time.
*/
if (slot != OCFS2_INVALID_SLOT)
ocfs2_init_inode_steal_slot(osb);
goto bail;
} else if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
ocfs2_free_ac_resource(*ac);
inode_steal:
status = ocfs2_steal_inode(osb, *ac);
atomic_inc(&osb->s_num_inodes_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
/* The local alloc code has to do the same thing, so rather than
* duplicate the logic there, we share this helper. */
int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
int status;
ac->ac_which = OCFS2_AC_USE_MAIN;
ac->ac_group_search = ocfs2_cluster_group_search;
status = ocfs2_reserve_suballoc_bits(osb, ac,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT, NULL,
ALLOC_NEW_GROUP);
if (status < 0 && status != -ENOSPC)
mlog_errno(status);
return status;
}
/* Callers don't need to care which bitmap (local alloc or main) to
* use so we figure it out for them, but unfortunately this clutters
* things a bit. */
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
u32 bits_wanted, u64 max_block,
int flags,
struct ocfs2_alloc_context **ac)
{
int status, ret = 0;
int retried = 0;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = bits_wanted;
(*ac)->ac_max_block = max_block;
status = -ENOSPC;
if (!(flags & ALLOC_GROUPS_FROM_GLOBAL) &&
ocfs2_alloc_should_use_local(osb, bits_wanted)) {
status = ocfs2_reserve_local_alloc_bits(osb,
bits_wanted,
*ac);
if ((status < 0) && (status != -ENOSPC)) {
mlog_errno(status);
goto bail;
}
}
if (status == -ENOSPC) {
retry:
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
/* Retry if there is sufficient space cached in truncate log */
if (status == -ENOSPC && !retried) {
retried = 1;
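/*
* Flushing the truncate log needs the allocator locks we are
* holding, so drop them around the flush and retake them before
* retrying the reservation.
*/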
ocfs2_inode_unlock((*ac)->ac_inode, 1);
inode_unlock((*ac)->ac_inode);
ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
if (ret == 1) {
iput((*ac)->ac_inode);
(*ac)->ac_inode = NULL;
goto retry;
}
if (ret < 0)
mlog_errno(ret);
inode_lock((*ac)->ac_inode);
ret = ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
if (ret < 0) {
mlog_errno(ret);
inode_unlock((*ac)->ac_inode);
iput((*ac)->ac_inode);
(*ac)->ac_inode = NULL;
goto bail;
}
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
int ocfs2_reserve_clusters(struct ocfs2_super *osb,
u32 bits_wanted,
struct ocfs2_alloc_context **ac)
{
return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0,
ALLOC_NEW_GROUP, ac);
}
/*
* More or less lifted from ext3. I'll leave their description below:
*
* "For ext3 allocations, we must not reuse any blocks which are
* allocated in the bitmap buffer's "last committed data" copy. This
* prevents deletes from freeing up the page for reuse until we have
* committed the delete transaction.
*
* If we didn't do this, then deleting something and reallocating it as
* data would allow the old block to be overwritten before the
* transaction committed (because we force data to disk before commit).
* This would lead to corruption if we crashed between overwriting the
* data and committing the delete.
*
* @@@ We may want to make this allocation behaviour conditional on
* data-writes at some point, and disable it for metadata allocations or
* sync-data inodes."
*
* Note: OCFS2 already does this differently for metadata vs data
* allocations, as those bitmaps are separate and undo access is never
* called on a metadata group descriptor.
*/
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr)
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct journal_head *jh;
int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
return 0;
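/*
* The bit is clear in the live bitmap; it is only safe to hand out
* if it is also clear in jbd2's last-committed copy. No journal head
* means there is no committed copy to conflict with.
*/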
jh = jbd2_journal_grab_journal_head(bg_bh);
if (!jh)
return 1;
spin_lock(&jh->b_state_lock);
bg = (struct ocfs2_group_desc *) jh->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
return ret;
}
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
struct buffer_head *bg_bh,
unsigned int bits_wanted,
unsigned int total_bits,
struct ocfs2_suballoc_result *res)
{
void *bitmap;
u16 best_offset, best_size;
int offset, start, found, status = 0;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
/* Callers got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
found = start = best_offset = best_size = 0;
bitmap = bg->bg_bitmap;
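/*
* First-fit scan for the longest run of allocatable zero bits,
* stopping early once bits_wanted appear in a row. For a hypothetical
* bitmap 1,0,0,1,0,0,0 with bits_wanted = 3, the two zeros at offset
* 1 become the running best, then the three zeros at offset 4 satisfy
* the request and the scan ends.
*/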
while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
if (offset == total_bits)
break;
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
/* We found a zero, but we can't use it as it
* hasn't been put to disk yet! */
found = 0;
start = offset + 1;
} else if (offset == start) {
/* we found a zero */
found++;
/* move start to the next bit to test */
start++;
} else {
/* got a zero after some ones */
found = 1;
start = offset + 1;
}
if (found > best_size) {
best_size = found;
best_offset = start - found;
}
/* we got everything we needed */
if (found == bits_wanted) {
/* mlog(0, "Found it all!\n"); */
break;
}
}
if (best_size) {
res->sr_bit_offset = best_offset;
res->sr_bits = best_size;
} else {
status = -ENOSPC;
/* No error log here -- see the comment above
* ocfs2_test_bg_bit_allocatable */
}
return status;
}
int ocfs2_block_group_set_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits)
{
int status;
void *bitmap = bg->bg_bitmap;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
/* All callers get the descriptor via
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
trace_ocfs2_block_group_set_bits(bit_off, num_bits);
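/*
* Cluster bitmap updates take undo-capable journal access so that the
* last-committed bitmap copy consulted by
* ocfs2_test_bg_bit_allocatable() stays stable until commit.
*/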
if (ocfs2_is_cluster_bitmap(alloc_inode))
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
group_bh,
journal_type);
if (status < 0) {
mlog_errno(status);
goto bail;
}
le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
le16_to_cpu(bg->bg_free_bits_count),
num_bits);
}
while(num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
/* find the one with the most empty bits */
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
{
u16 curr, best;
BUG_ON(!cl->cl_next_free_rec);
best = curr = 0;
while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
if (le32_to_cpu(cl->cl_recs[curr].c_free) >
le32_to_cpu(cl->cl_recs[best].c_free))
best = curr;
curr++;
}
BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
return best;
}
static int ocfs2_relink_block_group(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *fe_bh,
struct buffer_head *bg_bh,
struct buffer_head *prev_bg_bh,
u16 chain)
{
int status;
/* there is a really tiny chance the journal calls could fail,
* but we wouldn't want inconsistent blocks in *any* case. */
u64 bg_ptr, prev_bg_ptr;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
/* The caller got these descriptors from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
trace_ocfs2_relink_block_group(
(unsigned long long)le64_to_cpu(fe->i_blkno), chain,
(unsigned long long)le64_to_cpu(bg->bg_blkno),
(unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
bg_ptr = le64_to_cpu(bg->bg_next_group);
prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
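/*
* Snapshot both next pointers so a failed journal access below can
* restore them. The splice being journaled is:
* prev_bg->bg_next_group = bg->bg_next_group;
* bg->bg_next_group = old chain head;
* chain head (cl_recs[chain].c_blkno) = bg->bg_blkno;
*/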
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
prev_bg_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out;
prev_bg->bg_next_group = bg->bg_next_group;
ocfs2_journal_dirty(handle, prev_bg_bh);
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out_rollback_prev_bg;
bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
ocfs2_journal_dirty(handle, bg_bh);
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out_rollback_bg;
fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
ocfs2_journal_dirty(handle, fe_bh);
out:
if (status < 0)
mlog_errno(status);
return status;
out_rollback_bg:
bg->bg_next_group = cpu_to_le64(bg_ptr);
out_rollback_prev_bg:
prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
goto out;
}
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
u32 wanted)
{
return le16_to_cpu(bg->bg_free_bits_count) > wanted;
}
/* return 0 on success, -ENOSPC to keep searching and any other < 0
* value on error. */
static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res)
{
int search = -ENOSPC;
int ret;
u64 blkoff;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int max_bits, gd_cluster_off;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
if (gd->bg_free_bits_count) {
max_bits = le16_to_cpu(gd->bg_bits);
/* Tail groups in cluster bitmaps which aren't cpg
* aligned are prone to partial extension by a failed
* fs resize. If the file system resize never got to
* update the dinode cluster count, then we don't want
* to trust any clusters past it, regardless of what
* the group descriptor says. */
gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
le64_to_cpu(gd->bg_blkno));
if ((gd_cluster_off + max_bits) >
OCFS2_I(inode)->ip_clusters) {
max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
trace_ocfs2_cluster_group_search_wrong_max_bits(
(unsigned long long)le64_to_cpu(gd->bg_blkno),
le16_to_cpu(gd->bg_bits),
OCFS2_I(inode)->ip_clusters, max_bits);
}
ret = ocfs2_block_group_find_clear_bits(osb,
group_bh, bits_wanted,
max_bits, res);
if (ret)
return ret;
if (max_block) {
blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
gd_cluster_off +
res->sr_bit_offset +
res->sr_bits);
trace_ocfs2_cluster_group_search_max_block(
(unsigned long long)blkoff,
(unsigned long long)max_block);
if (blkoff > max_block)
return -ENOSPC;
}
/* ocfs2_block_group_find_clear_bits() might
* return success, but we still want to return
* -ENOSPC unless it found the minimum number
* of bits. */
if (min_bits <= res->sr_bits)
search = 0; /* success */
else if (res->sr_bits) {
/*
* Don't show bits which we'll be returning
* for allocation to the local alloc bitmap.
*/
ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
}
}
return search;
}
static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res)
{
int ret = -ENOSPC;
u64 blkoff;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
BUG_ON(min_bits != 1);
BUG_ON(ocfs2_is_cluster_bitmap(inode));
if (bg->bg_free_bits_count) {
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
le16_to_cpu(bg->bg_bits),
res);
if (!ret && max_block) {
blkoff = le64_to_cpu(bg->bg_blkno) +
res->sr_bit_offset + res->sr_bits;
trace_ocfs2_block_group_search_max_block(
(unsigned long long)blkoff,
(unsigned long long)max_block);
if (blkoff > max_block)
ret = -ENOSPC;
}
}
return ret;
}
int ocfs2_alloc_dinode_update_counts(struct inode *inode,
handle_t *handle,
struct buffer_head *di_bh,
u32 num_bits,
u16 chain)
{
int ret;
u32 tmp_used;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
ocfs2_journal_dirty(handle, di_bh);
out:
return ret;
}
void ocfs2_rollback_alloc_dinode_counts(struct inode *inode,
struct buffer_head *di_bh,
u32 num_bits,
u16 chain)
{
u32 tmp_used;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
struct ocfs2_chain_list *cl;
cl = (struct ocfs2_chain_list *)&di->id2.i_chain;
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(tmp_used - num_bits);
le32_add_cpu(&cl->cl_recs[chain].c_free, num_bits);
}
static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
struct ocfs2_extent_rec *rec,
struct ocfs2_chain_list *cl)
{
unsigned int bpc = le16_to_cpu(cl->cl_bpc);
unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
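/*
* Worked example (hypothetical record): with bpc = 4, e_cpos = 8 and
* e_leaf_clusters = 2, the record covers bits [32, 40). A hit at
* sr_bit_offset = 35 maps to e_blkno + 3, and sr_bits is clamped so
* the allocation never crosses the end of the record.
*/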
if (res->sr_bit_offset < bitoff)
return 0;
if (res->sr_bit_offset >= (bitoff + bitcount))
return 0;
res->sr_blkno = le64_to_cpu(rec->e_blkno) +
(res->sr_bit_offset - bitoff);
if ((res->sr_bit_offset + res->sr_bits) > (bitoff + bitcount))
res->sr_bits = (bitoff + bitcount) - res->sr_bit_offset;
return 1;
}
static void ocfs2_bg_discontig_fix_result(struct ocfs2_alloc_context *ac,
struct ocfs2_group_desc *bg,
struct ocfs2_suballoc_result *res)
{
int i;
u64 bg_blkno = res->sr_bg_blkno; /* Save off */
struct ocfs2_extent_rec *rec;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
struct ocfs2_chain_list *cl = &di->id2.i_chain;
if (ocfs2_is_cluster_bitmap(ac->ac_inode)) {
res->sr_blkno = 0;
return;
}
res->sr_blkno = res->sr_bg_blkno + res->sr_bit_offset;
res->sr_bg_blkno = 0; /* Clear it for contig block groups */
if (!ocfs2_supports_discontig_bg(OCFS2_SB(ac->ac_inode->i_sb)) ||
!bg->bg_list.l_next_free_rec)
return;
for (i = 0; i < le16_to_cpu(bg->bg_list.l_next_free_rec); i++) {
rec = &bg->bg_list.l_recs[i];
if (ocfs2_bg_discontig_fix_by_rec(res, rec, cl)) {
res->sr_bg_blkno = bg_blkno; /* Restore */
break;
}
}
}
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int ret;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *gd;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
struct inode *alloc_inode = ac->ac_inode;
ret = ocfs2_read_group_descriptor(alloc_inode, di,
res->sr_bg_blkno, &group_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
gd = (struct ocfs2_group_desc *) group_bh->b_data;
ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
ac->ac_max_block, res);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
if (!ret)
ocfs2_bg_discontig_fix_result(ac, gd, res);
/*
* sr_bg_blkno might have been changed by
* ocfs2_bg_discontig_fix_result
*/
res->sr_bg_stable_blkno = group_bh->b_blocknr;
if (ac->ac_find_loc_only)
goto out_loc_only;
ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
res->sr_bit_offset, res->sr_bits);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
mlog_errno(ret);
}
out_loc_only:
*bits_left = le16_to_cpu(gd->bg_free_bits_count);
out:
brelse(group_bh);
return ret;
}
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int status;
u16 chain;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
struct buffer_head *group_bh = NULL;
struct buffer_head *prev_group_bh = NULL;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
struct ocfs2_group_desc *bg;
chain = ac->ac_chain;
trace_ocfs2_search_chain_begin(
(unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
bits_wanted, chain);
status = ocfs2_read_group_descriptor(alloc_inode, fe,
le64_to_cpu(cl->cl_recs[chain].c_blkno),
&group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) group_bh->b_data;
status = -ENOSPC;
/* for now, the chain search is a bit simplistic. We just use
* the 1st group with any empty bits. */
while ((status = ac->ac_group_search(alloc_inode, group_bh,
bits_wanted, min_bits,
ac->ac_max_block,
res)) == -ENOSPC) {
if (!bg->bg_next_group)
break;
brelse(prev_group_bh);
prev_group_bh = NULL;
next_group = le64_to_cpu(bg->bg_next_group);
prev_group_bh = group_bh;
group_bh = NULL;
status = ocfs2_read_group_descriptor(alloc_inode, fe,
next_group, &group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) group_bh->b_data;
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
trace_ocfs2_search_chain_succ(
(unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
BUG_ON(res->sr_bits == 0);
if (!status)
ocfs2_bg_discontig_fix_result(ac, bg, res);
/*
* sr_bg_blkno might have been changed by
* ocfs2_bg_discontig_fix_result
*/
res->sr_bg_stable_blkno = group_bh->b_blocknr;
/*
* Keep track of the previous block descriptor read. When
* we find a target, if we have read more than X
* descriptors, and the target is reasonably empty,
* relink it to the top of its chain.
*
* We've read 0 extra blocks and only send one more to
* the transaction, yet the next searcher has a much
* easier time.
*
* Do this *after* figuring out how many bits we're taking out
* of our target group.
*/
if (!ac->ac_disable_chain_relink &&
(prev_group_bh) &&
(ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
status = ocfs2_relink_block_group(handle, alloc_inode,
ac->ac_bh, group_bh,
prev_group_bh, chain);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
if (ac->ac_find_loc_only)
goto out_loc_only;
status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
ac->ac_bh, res->sr_bits,
chain);
if (status) {
mlog_errno(status);
goto bail;
}
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
bg,
group_bh,
res->sr_bit_offset,
res->sr_bits);
if (status < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode,
ac->ac_bh, res->sr_bits, chain);
mlog_errno(status);
goto bail;
}
trace_ocfs2_search_chain_end(
(unsigned long long)le64_to_cpu(fe->i_blkno),
res->sr_bits);
out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
brelse(group_bh);
brelse(prev_group_bh);
if (status)
mlog_errno(status);
return status;
}
/* will give out up to bits_wanted contiguous bits. */
static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res)
{
int status;
u16 victim, i;
u16 bits_left = 0;
u64 hint = ac->ac_last_group;
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
BUG_ON(!ac->ac_bh);
fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
/* The bh was validated by the inode read during
* ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
le32_to_cpu(fe->id1.bitmap1.i_total)) {
status = ocfs2_error(ac->ac_inode->i_sb,
"Chain allocator dinode %llu has %u used bits but only %u total\n",
(unsigned long long)le64_to_cpu(fe->i_blkno),
le32_to_cpu(fe->id1.bitmap1.i_used),
le32_to_cpu(fe->id1.bitmap1.i_total));
goto bail;
}
res->sr_bg_blkno = hint;
if (res->sr_bg_blkno) {
/* Attempt to short-circuit the usual search mechanism
* by jumping straight to the most recently used
* allocation group. This helps us maintain some
* contiguousness across allocations. */
status = ocfs2_search_one_group(ac, handle, bits_wanted,
min_bits, res, &bits_left);
if (!status)
goto set_hint;
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
}
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
if (!status) {
if (ocfs2_is_cluster_bitmap(ac->ac_inode))
hint = res->sr_bg_blkno;
else
hint = ocfs2_group_from_res(res);
goto set_hint;
}
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_claim_suballoc_bits(victim);
/* If we didn't pick a good victim, then just default to
* searching each chain in order. Don't allow chain relinking
* because we only calculate enough journal credits for one
* relink per alloc. */
ac->ac_disable_chain_relink = 1;
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
if (i == victim)
continue;
if (!cl->cl_recs[i].c_free)
continue;
ac->ac_chain = i;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
if (!status) {
hint = ocfs2_group_from_res(res);
break;
}
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
}
set_hint:
if (status != -ENOSPC) {
/* If the next search of this group is not likely to
* yield a suitable extent, then we reset the last
* group hint so as to not waste a disk read */
if (bits_left < min_bits)
ac->ac_last_group = 0;
else
ac->ac_last_group = hint;
}
bail:
if (status)
mlog_errno(status);
return status;
}
int ocfs2_claim_metadata(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bits_wanted,
u64 *suballoc_loc,
u16 *suballoc_bit_start,
unsigned int *num_bits,
u64 *blkno_start)
{
int status;
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
BUG_ON(!ac);
BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
1,
&res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
*suballoc_loc = res.sr_bg_blkno;
*suballoc_bit_start = res.sr_bit_offset;
*blkno_start = res.sr_blkno;
ac->ac_bits_given += res.sr_bits;
*num_bits = res.sr_bits;
status = 0;
bail:
if (status)
mlog_errno(status);
return status;
}
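/*
 * Illustrative sketch (not part of the original source): a typical
 * caller claims metadata blocks inside a running transaction after a
 * successful reservation. The local names here are hypothetical.
 *
 *	u64 suballoc_loc, first_blkno;
 *	u16 bit_start;
 *	unsigned int num_got;
 *
 *	status = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
 *				      &bit_start, &num_got, &first_blkno);
 *	if (status < 0)
 *		mlog_errno(status);
 */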
static void ocfs2_init_inode_ac_group(struct inode *dir,
struct buffer_head *parent_di_bh,
struct ocfs2_alloc_context *ac)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_di_bh->b_data;
	/*
	 * Try to allocate inodes from some specific group.
	 *
	 * If the parent dir has recorded the last group used for allocation,
	 * use it. Otherwise, if the new inode is allocated from the same
	 * slot the parent dir belongs to, use the group the parent dir was
	 * allocated from.
	 *
	 * We are very careful here to avoid the mistake of setting
	 * ac_last_group to a group descriptor from a different (unlocked)
	 * slot.
	 */
if (OCFS2_I(dir)->ip_last_used_group &&
OCFS2_I(dir)->ip_last_used_slot == ac->ac_alloc_slot)
ac->ac_last_group = OCFS2_I(dir)->ip_last_used_group;
else if (le16_to_cpu(di->i_suballoc_slot) == ac->ac_alloc_slot) {
if (di->i_suballoc_loc)
ac->ac_last_group = le64_to_cpu(di->i_suballoc_loc);
else
ac->ac_last_group = ocfs2_which_suballoc_group(
le64_to_cpu(di->i_blkno),
le16_to_cpu(di->i_suballoc_bit));
}
}
static inline void ocfs2_save_inode_ac_group(struct inode *dir,
struct ocfs2_alloc_context *ac)
{
OCFS2_I(dir)->ip_last_used_group = ac->ac_last_group;
OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
}
int ocfs2_find_new_inode_loc(struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
u64 *fe_blkno)
{
int ret;
handle_t *handle = NULL;
struct ocfs2_suballoc_result *res;
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
res = kzalloc(sizeof(*res), GFP_NOFS);
if (res == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
/*
* The handle started here is for chain relink. Alternatively,
* we could just disable relink for these calls.
*/
handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
mlog_errno(ret);
goto out;
}
/*
* This will instruct ocfs2_claim_suballoc_bits and
* ocfs2_search_one_group to search but save actual allocation
* for later.
*/
ac->ac_find_loc_only = 1;
ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ac->ac_find_loc_priv = res;
*fe_blkno = res->sr_blkno;
ocfs2_update_inode_fsync_trans(handle, dir, 0);
out:
if (handle)
ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
if (ret)
kfree(res);
return ret;
}
int ocfs2_claim_new_inode_at_loc(handle_t *handle,
struct inode *dir,
struct ocfs2_alloc_context *ac,
u64 *suballoc_loc,
u16 *suballoc_bit,
u64 di_blkno)
{
int ret;
u16 chain;
struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
struct buffer_head *bg_bh = NULL;
struct ocfs2_group_desc *bg;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
/*
* Since di_blkno is being passed back in, we check for any
* inconsistencies which may have happened between
* calls. These are code bugs as di_blkno is not expected to
* change once returned from ocfs2_find_new_inode_loc()
*/
BUG_ON(res->sr_blkno != di_blkno);
ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
res->sr_bg_stable_blkno, &bg_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
chain = le16_to_cpu(bg->bg_chain);
ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
ac->ac_bh, res->sr_bits,
chain);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_block_group_set_bits(handle,
ac->ac_inode,
bg,
bg_bh,
res->sr_bit_offset,
res->sr_bits);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(ac->ac_inode,
ac->ac_bh, res->sr_bits, chain);
mlog_errno(ret);
goto out;
}
trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
res->sr_bits);
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
BUG_ON(res->sr_bits != 1);
*suballoc_loc = res->sr_bg_blkno;
*suballoc_bit = res->sr_bit_offset;
ac->ac_bits_given++;
ocfs2_save_inode_ac_group(dir, ac);
out:
brelse(bg_bh);
return ret;
}
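/*
 * Illustrative two-phase flow (not part of the original source): a
 * caller first locates a candidate inode slot without allocating, then
 * claims exactly that slot inside its real transaction. Error handling
 * and the transaction setup are elided; the locals are hypothetical.
 *
 *	ret = ocfs2_find_new_inode_loc(dir, parent_fe_bh, inode_ac,
 *				       &fe_blkno);
 *	...
 *	ret = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
 *					   &suballoc_loc, &suballoc_bit,
 *					   fe_blkno);
 */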
int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
u64 *suballoc_loc,
u16 *suballoc_bit,
u64 *fe_blkno)
{
int status;
struct ocfs2_suballoc_result res;
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
status = ocfs2_claim_suballoc_bits(ac,
handle,
1,
1,
&res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
BUG_ON(res.sr_bits != 1);
*suballoc_loc = res.sr_bg_blkno;
*suballoc_bit = res.sr_bit_offset;
*fe_blkno = res.sr_blkno;
ac->ac_bits_given++;
ocfs2_save_inode_ac_group(dir, ac);
status = 0;
bail:
if (status)
mlog_errno(status);
return status;
}
/* Translate a group descriptor's blkno and its bitmap offset into a
 * disk cluster offset. */
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
u64 bg_blkno,
u16 bg_bit_off)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 cluster = 0;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
if (bg_blkno != osb->first_cluster_group_blkno)
cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
cluster += (u32) bg_bit_off;
return cluster;
}
/* Given a cluster offset, calculate which block group it belongs to
 * and return the block number of that group's descriptor. */
u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 group_no;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
group_no = cluster / osb->bitmap_cpg;
if (!group_no)
return osb->first_cluster_group_blkno;
return ocfs2_clusters_to_blocks(inode->i_sb,
group_no * osb->bitmap_cpg);
}
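/*
 * Worked example (illustrative, not part of the original source):
 * assuming bitmap_cpg == 32256 (typical for 4KB blocks), cluster 70000
 * gives group_no = 70000 / 32256 = 2, so the function returns the
 * block number of cluster 2 * 32256 = 64512, where that group's
 * descriptor lives. Cluster 100 has group_no == 0 and maps to
 * first_cluster_group_blkno instead.
 */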
/* given the block number of a cluster start, calculate which cluster
* group and descriptor bitmap offset that corresponds to. */
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
u64 data_blkno,
u64 *bg_blkno,
u16 *bg_bit_off)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
*bg_blkno = ocfs2_which_cluster_group(inode,
data_cluster);
if (*bg_blkno == osb->first_cluster_group_blkno)
*bg_bit_off = (u16) data_cluster;
else
*bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
data_blkno - *bg_blkno);
}
/*
 * min_bits - the minimum contiguous chunk from this total allocation
 * that we can handle. Set it to the original request for a fully
 * contiguous allocation, or to '1' to indicate we can deal with
 * extents of any size.
 */
int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 max_clusters,
u32 *cluster_start,
u32 *num_clusters)
{
int status;
unsigned int bits_wanted = max_clusters;
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
&& ac->ac_which != OCFS2_AC_USE_MAIN);
if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
WARN_ON(min_clusters > 1);
status = ocfs2_claim_local_alloc_bits(osb,
handle,
ac,
bits_wanted,
cluster_start,
num_clusters);
if (!status)
atomic_inc(&osb->alloc_stats.local_data);
} else {
if (min_clusters > (osb->bitmap_cpg - 1)) {
/* The only paths asking for contiguousness
* should know about this already. */
mlog(ML_ERROR, "minimum allocation requested %u exceeds "
"group bitmap size %u!\n", min_clusters,
osb->bitmap_cpg);
status = -ENOSPC;
goto bail;
}
/* clamp the current request down to a realistic size. */
if (bits_wanted > (osb->bitmap_cpg - 1))
bits_wanted = osb->bitmap_cpg - 1;
status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
min_clusters,
&res);
if (!status) {
BUG_ON(res.sr_blkno); /* cluster alloc can't set */
*cluster_start =
ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
res.sr_bg_blkno,
res.sr_bit_offset);
atomic_inc(&osb->alloc_stats.bitmap_data);
*num_clusters = res.sr_bits;
}
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
ac->ac_bits_given += *num_clusters;
bail:
if (status)
mlog_errno(status);
return status;
}
int ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 *cluster_start,
u32 *num_clusters)
{
unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
return __ocfs2_claim_clusters(handle, ac, min_clusters,
bits_wanted, cluster_start, num_clusters);
}
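/*
 * Illustrative sketch (not part of the original source): claiming data
 * clusters with min_clusters == 1, i.e. accepting extents of any size.
 * "data_ac" and "wanted" are hypothetical locals.
 *
 *	u32 bit_off, num_bits;
 *
 *	status = __ocfs2_claim_clusters(handle, data_ac, 1, wanted,
 *					&bit_off, &num_bits);
 *	if (status < 0 && status != -ENOSPC)
 *		mlog_errno(status);
 */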
static int ocfs2_block_group_clear_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits,
void (*undo_fn)(unsigned int bit,
unsigned long *bmap))
{
int status;
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
struct journal_head *jh;
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
group_bh,
undo_fn ?
OCFS2_JOURNAL_ACCESS_UNDO :
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
jh = bh2jh(group_bh);
if (undo_fn) {
spin_lock(&jh->b_state_lock);
undo_bg = (struct ocfs2_group_desc *) jh->b_committed_data;
BUG_ON(!undo_bg);
}
tmp = num_bits;
while(tmp--) {
ocfs2_clear_bit((bit_off + tmp),
(unsigned long *) bg->bg_bitmap);
if (undo_fn)
undo_fn(bit_off + tmp,
(unsigned long *) undo_bg->bg_bitmap);
}
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
if (undo_fn)
spin_unlock(&jh->b_state_lock);
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
le16_to_cpu(bg->bg_free_bits_count),
num_bits);
}
if (undo_fn)
spin_unlock(&jh->b_state_lock);
ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
/*
* expects the suballoc inode to already be locked.
*/
static int _ocfs2_free_suballoc_bits(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *alloc_bh,
unsigned int start_bit,
u64 bg_blkno,
unsigned int count,
void (*undo_fn)(unsigned int bit,
unsigned long *bitmap))
{
int status = 0;
u32 tmp_used;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
struct ocfs2_chain_list *cl = &fe->id2.i_chain;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group;
/* The alloc_bh comes from ocfs2_free_dinode() or
* ocfs2_free_clusters(). The callers have all locked the
* allocator and gotten alloc_bh from the lock call. This
* validates the dinode buffer. Any corruption that has happened
* is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
trace_ocfs2_free_suballoc_bits(
(unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
(unsigned long long)bg_blkno,
start_bit, count);
status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
&group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
group = (struct ocfs2_group_desc *) group_bh->b_data;
BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
status = ocfs2_block_group_clear_bits(handle, alloc_inode,
group, group_bh,
start_bit, count, undo_fn);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
start_bit, count);
goto bail;
}
le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
count);
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
ocfs2_journal_dirty(handle, alloc_bh);
bail:
brelse(group_bh);
return status;
}
int ocfs2_free_suballoc_bits(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *alloc_bh,
unsigned int start_bit,
u64 bg_blkno,
unsigned int count)
{
return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
start_bit, bg_blkno, count, NULL);
}
int ocfs2_free_dinode(handle_t *handle,
struct inode *inode_alloc_inode,
struct buffer_head *inode_alloc_bh,
struct ocfs2_dinode *di)
{
u64 blk = le64_to_cpu(di->i_blkno);
u16 bit = le16_to_cpu(di->i_suballoc_bit);
u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
if (di->i_suballoc_loc)
bg_blkno = le64_to_cpu(di->i_suballoc_loc);
return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
inode_alloc_bh, bit, bg_blkno, 1);
}
static int _ocfs2_free_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters,
void (*undo_fn)(unsigned int bit,
unsigned long *bitmap))
{
int status;
u16 bg_start_bit;
u64 bg_blkno;
	/* A contiguous set of clusters can never be bigger than a
	 * block group bitmap, so we never have to worry about looping
	 * over groups here.
	 * The BUG_ON below is expensive; it can safely be removed once
	 * this code has been tested really well. */
BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb,
ocfs2_blocks_to_clusters(bitmap_inode->i_sb,
start_blk)));
ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
&bg_start_bit);
trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
(unsigned long long)start_blk,
bg_start_bit, num_clusters);
status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
bg_start_bit, bg_blkno,
num_clusters, undo_fn);
if (status < 0) {
mlog_errno(status);
goto out;
}
ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
num_clusters);
out:
return status;
}
int ocfs2_free_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters)
{
return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
start_blk, num_clusters,
_ocfs2_set_bit);
}
/*
* Give never-used clusters back to the global bitmap. We don't need
* to protect these bits in the undo buffer.
*/
int ocfs2_release_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters)
{
return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
start_blk, num_clusters,
_ocfs2_clear_bit);
}
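/*
 * Illustrative note (not part of the original source): the undo_fn
 * passed above is the only difference between the two paths.
 * ocfs2_free_clusters() re-sets the bits in the journal's
 * committed-data (undo) copy so previously used clusters still look
 * allocated until the free commits; ocfs2_release_clusters() clears
 * them there too, since never-used clusters need no such protection.
 * The hypothetical call sites look identical:
 *
 *	ocfs2_free_clusters(handle, gb_inode, gb_bh, blk, n);
 *	ocfs2_release_clusters(handle, gb_inode, gb_bh, blk, n);
 */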
/*
* For a given allocation, determine which allocators will need to be
* accessed, and lock them, reserving the appropriate number of bits.
*
* Sparse file systems call this from ocfs2_write_begin_nolock()
* and ocfs2_allocate_unwritten_extents().
*
* File systems which don't support holes call this from
* ocfs2_extend_allocation().
*/
int ocfs2_lock_allocators(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 clusters_to_add, u32 extents_to_split,
struct ocfs2_alloc_context **data_ac,
struct ocfs2_alloc_context **meta_ac)
{
int ret = 0, num_free_extents;
unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
*meta_ac = NULL;
if (data_ac)
*data_ac = NULL;
BUG_ON(clusters_to_add != 0 && data_ac == NULL);
num_free_extents = ocfs2_num_free_extents(et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
/*
* Sparse allocation file systems need to be more conservative
* with reserving room for expansion - the actual allocation
* happens while we've got a journal handle open so re-taking
* a cluster lock (because we ran out of room for another
* extent) will violate ordering rules.
*
* Most of the time we'll only be seeing this 1 cluster at a time
* anyway.
*
* Always lock for any unwritten extents - we might want to
* add blocks during a split.
*/
if (!num_free_extents ||
(ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
}
if (clusters_to_add == 0)
goto out;
ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
*meta_ac = NULL;
}
		/*
		 * We cannot have an error and a non-NULL *data_ac.
		 */
}
return ret;
}
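/*
 * Illustrative sketch (not part of the original source): reserving
 * allocators before extending a file by one cluster. Both contexts
 * must be freed by the caller once the extend completes; "et" is a
 * hypothetical, already-initialized extent tree.
 *
 *	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
 *
 *	ret = ocfs2_lock_allocators(inode, et, 1, 0, &data_ac, &meta_ac);
 *	if (ret < 0 && ret != -ENOSPC)
 *		mlog_errno(ret);
 */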
/*
* Read the inode specified by blkno to get suballoc_slot and
* suballoc_bit.
*/
static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
u16 *suballoc_slot, u64 *group_blkno,
u16 *suballoc_bit)
{
int status;
struct buffer_head *inode_bh = NULL;
struct ocfs2_dinode *inode_fe;
trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
	/* Dirty read from disk: no cluster lock is held here */
status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
if (status < 0) {
mlog(ML_ERROR, "read block %llu failed %d\n",
(unsigned long long)blkno, status);
goto bail;
}
inode_fe = (struct ocfs2_dinode *) inode_bh->b_data;
if (!OCFS2_IS_VALID_DINODE(inode_fe)) {
mlog(ML_ERROR, "invalid inode %llu requested\n",
(unsigned long long)blkno);
status = -EINVAL;
goto bail;
}
if (le16_to_cpu(inode_fe->i_suballoc_slot) != (u16)OCFS2_INVALID_SLOT &&
(u32)le16_to_cpu(inode_fe->i_suballoc_slot) > osb->max_slots - 1) {
mlog(ML_ERROR, "inode %llu has invalid suballoc slot %u\n",
(unsigned long long)blkno,
(u32)le16_to_cpu(inode_fe->i_suballoc_slot));
status = -EINVAL;
goto bail;
}
if (suballoc_slot)
*suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
if (suballoc_bit)
*suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
if (group_blkno)
*group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
bail:
brelse(inode_bh);
if (status)
mlog_errno(status);
return status;
}
/*
 * Test whether the bit is SET in the allocator bitmap. On success, 0
 * is returned and *res is 1 for SET, 0 otherwise. On failure, a
 * negative errno is returned and *res is meaningless. Call this after
 * you have cluster locked against the suballocator, or you may get a
 * result based on non-up-to-date contents.
 */
static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
struct inode *suballoc,
struct buffer_head *alloc_bh,
u64 group_blkno, u64 blkno,
u16 bit, int *res)
{
struct ocfs2_dinode *alloc_di;
struct ocfs2_group_desc *group;
struct buffer_head *group_bh = NULL;
u64 bg_blkno;
int status;
trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
(unsigned int)bit);
alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
mlog(ML_ERROR, "suballoc bit %u out of range of %u\n",
(unsigned int)bit,
ocfs2_bits_per_group(&alloc_di->id2.i_chain));
status = -EINVAL;
goto bail;
}
bg_blkno = group_blkno ? group_blkno :
ocfs2_which_suballoc_group(blkno, bit);
status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
&group_bh);
if (status < 0) {
mlog(ML_ERROR, "read group %llu failed %d\n",
(unsigned long long)bg_blkno, status);
goto bail;
}
group = (struct ocfs2_group_desc *) group_bh->b_data;
*res = ocfs2_test_bit(bit, (unsigned long *)group->bg_bitmap);
bail:
brelse(group_bh);
if (status)
mlog_errno(status);
return status;
}
/*
* Test if the bit representing this inode (blkno) is set in the
* suballocator.
*
* On success, 0 is returned and *res is 1 for SET; 0 otherwise.
*
* In the event of failure, a negative value is returned and *res is
* meaningless.
*
* Callers must make sure to hold nfs_sync_lock to prevent
* ocfs2_delete_inode() on another node from accessing the same
* suballocator concurrently.
*/
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
{
int status;
u64 group_blkno = 0;
u16 suballoc_bit = 0, suballoc_slot = 0;
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
trace_ocfs2_test_inode_bit((unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
&group_blkno, &suballoc_bit);
if (status < 0) {
mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
goto bail;
}
if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
inode_alloc_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
else
inode_alloc_inode = ocfs2_get_system_file_inode(osb,
INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
if (!inode_alloc_inode) {
/* the error code could be inaccurate, but we are not able to
* get the correct one. */
status = -EINVAL;
mlog(ML_ERROR, "unable to get alloc inode in slot %u\n",
(u32)suballoc_slot);
goto bail;
}
inode_lock(inode_alloc_inode);
status = ocfs2_inode_lock(inode_alloc_inode, &alloc_bh, 0);
if (status < 0) {
inode_unlock(inode_alloc_inode);
iput(inode_alloc_inode);
mlog(ML_ERROR, "lock on alloc inode on slot %u failed %d\n",
(u32)suballoc_slot, status);
goto bail;
}
status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
group_blkno, blkno, suballoc_bit, res);
if (status < 0)
mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
ocfs2_inode_unlock(inode_alloc_inode, 0);
inode_unlock(inode_alloc_inode);
iput(inode_alloc_inode);
brelse(alloc_bh);
bail:
if (status)
mlog_errno(status);
return status;
}
| linux-master | fs/ocfs2/suballoc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* blockcheck.c
*
* Checksum and ECC codes for the OCFS2 userspace library.
*
* Copyright (C) 2006, 2008 Oracle. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crc32.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <asm/byteorder.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "blockcheck.h"
/*
* We use the following conventions:
*
* d = # data bits
* p = # parity bits
* c = # total code bits (d + p)
*/
/*
* Calculate the bit offset in the hamming code buffer based on the bit's
* offset in the data buffer. Since the hamming code reserves all
* power-of-two bits for parity, the data bit number and the code bit
* number are offset by all the parity bits beforehand.
*
* Recall that bit numbers in hamming code are 1-based. This function
* takes the 0-based data bit from the caller.
*
 * An example. Take the first data bit (0-based data bit 0). Code bit 1
 * is a power of two (2^0), so it's a parity bit. Code bit 2 is a power
 * of two (2^1), so it's a parity bit. 3 is not a power of two. So data
 * bit 0 ends up as bit 3 in the code buffer.
 *
 * The caller can pass in *p if it wants to keep track of the most recent
 * number of parity bits added. This allows the function to resume the
 * calculation from where it last left off.
*/
static unsigned int calc_code_bit(unsigned int i, unsigned int *p_cache)
{
unsigned int b, p = 0;
/*
* Data bits are 0-based, but we're talking code bits, which
* are 1-based.
*/
b = i + 1;
/* Use the cache if it is there */
if (p_cache)
p = *p_cache;
b += p;
/*
* For every power of two below our bit number, bump our bit.
*
	 * We compare with (b + 1) because we have to compare with what b
	 * would be _if_ it were bumped up by the parity bit. Capisce?
*
* p is set above.
*/
for (; (1 << p) < (b + 1); p++)
b++;
if (p_cache)
*p_cache = p;
return b;
}
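/*
 * Worked example (illustrative, not part of the original source):
 * since code bits 1, 2, 4, 8, ... are reserved for parity, the first
 * few 0-based data bits map as
 *
 *	data bit 0 -> code bit 3
 *	data bit 1 -> code bit 5
 *	data bit 2 -> code bit 6
 *	data bit 3 -> code bit 7
 *	data bit 4 -> code bit 9
 *
 * matching the bit-3 example in the comment above.
 */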
/*
* This is the low level encoder function. It can be called across
* multiple hunks just like the crc32 code. 'd' is the number of bits
* _in_this_hunk_. nr is the bit offset of this hunk. So, if you had
* two 512B buffers, you would do it like so:
*
* parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0);
* parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8);
*
* If you just have one buffer, use ocfs2_hamming_encode_block().
*/
u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr)
{
unsigned int i, b, p = 0;
BUG_ON(!d);
/*
* b is the hamming code bit number. Hamming code specifies a
* 1-based array, but C uses 0-based. So 'i' is for C, and 'b' is
* for the algorithm.
*
* The i++ in the for loop is so that the start offset passed
* to ocfs2_find_next_bit_set() is one greater than the previously
* found bit.
*/
for (i = 0; (i = ocfs2_find_next_bit(data, d, i)) < d; i++)
{
/*
* i is the offset in this hunk, nr + i is the total bit
* offset.
*/
b = calc_code_bit(nr + i, &p);
/*
* Data bits in the resultant code are checked by
* parity bits that are part of the bit number
* representation. Huh?
*
* <wikipedia href="https://en.wikipedia.org/wiki/Hamming_code">
* In other words, the parity bit at position 2^k
* checks bits in positions having bit k set in
* their binary representation. Conversely, for
* instance, bit 13, i.e. 1101(2), is checked by
* bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1.
* </wikipedia>
*
* Note that 'k' is the _code_ bit number. 'b' in
* our loop.
*/
parity ^= b;
}
/* While the data buffer was treated as little endian, the
* return value is in host endian. */
return parity;
}
u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize)
{
return ocfs2_hamming_encode(0, data, blocksize * 8, 0);
}
/*
* Like ocfs2_hamming_encode(), this can handle hunks. nr is the bit
* offset of the current hunk. If bit to be fixed is not part of the
* current hunk, this does nothing.
*
* If you only have one hunk, use ocfs2_hamming_fix_block().
*/
void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr,
unsigned int fix)
{
unsigned int i, b;
BUG_ON(!d);
/*
* If the bit to fix has an hweight of 1, it's a parity bit. One
* busted parity bit is its own error. Nothing to do here.
*/
if (hweight32(fix) == 1)
return;
	/*
	 * nr + d is the bit right past the data hunk we're looking at.
	 * If the fix is past that, there is nothing to do.
	 */
if (fix >= calc_code_bit(nr + d, NULL))
return;
/*
* nr is the offset in the data hunk we're starting at. Let's
* start b at the offset in the code buffer. See hamming_encode()
* for a more detailed description of 'b'.
*/
b = calc_code_bit(nr, NULL);
/* If the fix is before this hunk, nothing to do */
if (fix < b)
return;
for (i = 0; i < d; i++, b++)
{
/* Skip past parity bits */
while (hweight32(b) == 1)
b++;
/*
* i is the offset in this data hunk.
* nr + i is the offset in the total data buffer.
* b is the offset in the total code buffer.
*
* Thus, when b == fix, bit i in the current hunk needs
* fixing.
*/
if (b == fix)
{
if (ocfs2_test_bit(i, data))
ocfs2_clear_bit(i, data);
else
ocfs2_set_bit(i, data);
break;
}
}
}
void ocfs2_hamming_fix_block(void *data, unsigned int blocksize,
unsigned int fix)
{
ocfs2_hamming_fix(data, blocksize * 8, 0, fix);
}
/*
* Debugfs handling.
*/
#ifdef CONFIG_DEBUG_FS
static int blockcheck_u64_get(void *data, u64 *val)
{
*val = *(u64 *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n");
static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
{
if (stats) {
debugfs_remove_recursive(stats->b_debug_dir);
stats->b_debug_dir = NULL;
}
}
static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
struct dentry *parent)
{
struct dentry *dir;
dir = debugfs_create_dir("blockcheck", parent);
stats->b_debug_dir = dir;
debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir,
&stats->b_check_count, &blockcheck_fops);
debugfs_create_file("checksums_failed", S_IFREG | S_IRUSR, dir,
&stats->b_failure_count, &blockcheck_fops);
debugfs_create_file("ecc_recoveries", S_IFREG | S_IRUSR, dir,
&stats->b_recover_count, &blockcheck_fops);
}
#else
static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
struct dentry *parent)
{
}
static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
{
}
#endif /* CONFIG_DEBUG_FS */
/* Always-called wrappers for starting and stopping the debugfs files */
void ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
struct dentry *parent)
{
ocfs2_blockcheck_debug_install(stats, parent);
}
void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats)
{
ocfs2_blockcheck_debug_remove(stats);
}
static void ocfs2_blockcheck_inc_check(struct ocfs2_blockcheck_stats *stats)
{
u64 new_count;
if (!stats)
return;
spin_lock(&stats->b_lock);
stats->b_check_count++;
new_count = stats->b_check_count;
spin_unlock(&stats->b_lock);
if (!new_count)
mlog(ML_NOTICE, "Block check count has wrapped\n");
}
static void ocfs2_blockcheck_inc_failure(struct ocfs2_blockcheck_stats *stats)
{
u64 new_count;
if (!stats)
return;
spin_lock(&stats->b_lock);
stats->b_failure_count++;
new_count = stats->b_failure_count;
spin_unlock(&stats->b_lock);
if (!new_count)
mlog(ML_NOTICE, "Checksum failure count has wrapped\n");
}
static void ocfs2_blockcheck_inc_recover(struct ocfs2_blockcheck_stats *stats)
{
u64 new_count;
if (!stats)
return;
spin_lock(&stats->b_lock);
stats->b_recover_count++;
new_count = stats->b_recover_count;
spin_unlock(&stats->b_lock);
if (!new_count)
mlog(ML_NOTICE, "ECC recovery count has wrapped\n");
}
/*
* These are the low-level APIs for using the ocfs2_block_check structure.
*/
/*
* This function generates check information for a block.
* data is the block to be checked. bc is a pointer to the
* ocfs2_block_check structure describing the crc32 and the ecc.
*
* bc should be a pointer inside data, as the function will
* take care of zeroing it before calculating the check information. If
* bc does not point inside data, the caller must make sure any inline
* ocfs2_block_check structures are zeroed.
*
* The data buffer must be in on-disk endian (little endian for ocfs2).
* bc will be filled with little-endian values and will be ready to go to
* disk.
*/
void ocfs2_block_check_compute(void *data, size_t blocksize,
struct ocfs2_block_check *bc)
{
u32 crc;
u32 ecc;
memset(bc, 0, sizeof(struct ocfs2_block_check));
crc = crc32_le(~0, data, blocksize);
ecc = ocfs2_hamming_encode_block(data, blocksize);
/*
* No ecc'd ocfs2 structure is larger than 4K, so ecc will be no
* larger than 16 bits.
*/
BUG_ON(ecc > USHRT_MAX);
bc->bc_crc32e = cpu_to_le32(crc);
bc->bc_ecc = cpu_to_le16((u16)ecc);
}
/*
* This function validates existing check information. Like _compute,
* the function will take care of zeroing bc before calculating check codes.
* If bc is not a pointer inside data, the caller must have zeroed any
* inline ocfs2_block_check structures.
*
* Again, the data passed in should be the on-disk endian.
*/
int ocfs2_block_check_validate(void *data, size_t blocksize,
struct ocfs2_block_check *bc,
struct ocfs2_blockcheck_stats *stats)
{
int rc = 0;
u32 bc_crc32e;
u16 bc_ecc;
u32 crc, ecc;
ocfs2_blockcheck_inc_check(stats);
bc_crc32e = le32_to_cpu(bc->bc_crc32e);
bc_ecc = le16_to_cpu(bc->bc_ecc);
memset(bc, 0, sizeof(struct ocfs2_block_check));
/* Fast path - if the crc32 validates, we're good to go */
crc = crc32_le(~0, data, blocksize);
if (crc == bc_crc32e)
goto out;
ocfs2_blockcheck_inc_failure(stats);
mlog(ML_ERROR,
"CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
(unsigned int)bc_crc32e, (unsigned int)crc);
/* Ok, try ECC fixups */
ecc = ocfs2_hamming_encode_block(data, blocksize);
ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);
/* And check the crc32 again */
crc = crc32_le(~0, data, blocksize);
if (crc == bc_crc32e) {
ocfs2_blockcheck_inc_recover(stats);
goto out;
}
mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
(unsigned int)bc_crc32e, (unsigned int)crc);
rc = -EIO;
out:
bc->bc_crc32e = cpu_to_le32(bc_crc32e);
bc->bc_ecc = cpu_to_le16(bc_ecc);
return rc;
}
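/*
 * Illustrative round trip (not part of the original source): check
 * info is computed before a block goes to disk and validated after it
 * is read back. "bc" points at the structure embedded in the block;
 * "stats" may be NULL when no accounting is wanted.
 *
 *	ocfs2_block_check_compute(bh->b_data, bh->b_size, bc);
 *	...write the block, later read it back...
 *	rc = ocfs2_block_check_validate(bh->b_data, bh->b_size, bc,
 *					stats);
 */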
/*
* This function generates check information for a list of buffer_heads.
 * bhs holds the blocks to be checked. bc is a pointer to the
* ocfs2_block_check structure describing the crc32 and the ecc.
*
* bc should be a pointer inside data, as the function will
* take care of zeroing it before calculating the check information. If
* bc does not point inside data, the caller must make sure any inline
* ocfs2_block_check structures are zeroed.
*
* The data buffer must be in on-disk endian (little endian for ocfs2).
* bc will be filled with little-endian values and will be ready to go to
* disk.
*/
void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr,
struct ocfs2_block_check *bc)
{
int i;
u32 crc, ecc;
BUG_ON(nr < 0);
if (!nr)
return;
memset(bc, 0, sizeof(struct ocfs2_block_check));
for (i = 0, crc = ~0, ecc = 0; i < nr; i++) {
crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
/*
* The number of bits in a buffer is obviously b_size*8.
* The offset of this buffer is b_size*i, so the bit offset
* of this buffer is b_size*8*i.
*/
ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data,
bhs[i]->b_size * 8,
bhs[i]->b_size * 8 * i);
}
/*
* No ecc'd ocfs2 structure is larger than 4K, so ecc will be no
* larger than 16 bits.
*/
BUG_ON(ecc > USHRT_MAX);
bc->bc_crc32e = cpu_to_le32(crc);
bc->bc_ecc = cpu_to_le16((u16)ecc);
}
/*
* This function validates existing check information on a list of
* buffer_heads. Like _compute_bhs, the function will take care of
* zeroing bc before calculating check codes. If bc is not a pointer
* inside data, the caller must have zeroed any inline
* ocfs2_block_check structures.
*
* Again, the data passed in should be the on-disk endian.
*/
int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
struct ocfs2_block_check *bc,
struct ocfs2_blockcheck_stats *stats)
{
int i, rc = 0;
u32 bc_crc32e;
u16 bc_ecc;
u32 crc, ecc, fix;
BUG_ON(nr < 0);
if (!nr)
return 0;
ocfs2_blockcheck_inc_check(stats);
bc_crc32e = le32_to_cpu(bc->bc_crc32e);
bc_ecc = le16_to_cpu(bc->bc_ecc);
memset(bc, 0, sizeof(struct ocfs2_block_check));
/* Fast path - if the crc32 validates, we're good to go */
for (i = 0, crc = ~0; i < nr; i++)
crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
if (crc == bc_crc32e)
goto out;
ocfs2_blockcheck_inc_failure(stats);
mlog(ML_ERROR,
"CRC32 failed: stored: %u, computed %u. Applying ECC.\n",
(unsigned int)bc_crc32e, (unsigned int)crc);
/* Ok, try ECC fixups */
for (i = 0, ecc = 0; i < nr; i++) {
/*
* The number of bits in a buffer is obviously b_size*8.
* The offset of this buffer is b_size*i, so the bit offset
* of this buffer is b_size*8*i.
*/
ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data,
bhs[i]->b_size * 8,
bhs[i]->b_size * 8 * i);
}
fix = ecc ^ bc_ecc;
for (i = 0; i < nr; i++) {
/*
* Try the fix against each buffer. It will only affect
* one of them.
*/
ocfs2_hamming_fix(bhs[i]->b_data, bhs[i]->b_size * 8,
bhs[i]->b_size * 8 * i, fix);
}
/* And check the crc32 again */
for (i = 0, crc = ~0; i < nr; i++)
crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
if (crc == bc_crc32e) {
ocfs2_blockcheck_inc_recover(stats);
goto out;
}
mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
(unsigned int)bc_crc32e, (unsigned int)crc);
rc = -EIO;
out:
bc->bc_crc32e = cpu_to_le32(bc_crc32e);
bc->bc_ecc = cpu_to_le16(bc_ecc);
return rc;
}
/*
* These are the main API. They check the superblock flag before
* calling the underlying operations.
*
* They expect the buffer(s) to be in disk format.
*/
void ocfs2_compute_meta_ecc(struct super_block *sb, void *data,
struct ocfs2_block_check *bc)
{
if (ocfs2_meta_ecc(OCFS2_SB(sb)))
ocfs2_block_check_compute(data, sb->s_blocksize, bc);
}
int ocfs2_validate_meta_ecc(struct super_block *sb, void *data,
struct ocfs2_block_check *bc)
{
int rc = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_meta_ecc(osb))
rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc,
&osb->osb_ecc_stats);
return rc;
}
void ocfs2_compute_meta_ecc_bhs(struct super_block *sb,
struct buffer_head **bhs, int nr,
struct ocfs2_block_check *bc)
{
if (ocfs2_meta_ecc(OCFS2_SB(sb)))
ocfs2_block_check_compute_bhs(bhs, nr, bc);
}
int ocfs2_validate_meta_ecc_bhs(struct super_block *sb,
struct buffer_head **bhs, int nr,
struct ocfs2_block_check *bc)
{
int rc = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_meta_ecc(osb))
rc = ocfs2_block_check_validate_bhs(bhs, nr, bc,
&osb->osb_ecc_stats);
return rc;
}
| linux-master | fs/ocfs2/blockcheck.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* filecheck.c
*
* Code which implements online file check.
*
* Copyright (C) 2016 SuSE. All rights reserved.
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/sysctl.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_fs.h"
#include "stackglue.h"
#include "inode.h"
#include "filecheck.h"
/* File check error strings. These must correspond to the error
 * numbers in the header file.
 */
static const char * const ocfs2_filecheck_errs[] = {
"SUCCESS",
"FAILED",
"INPROGRESS",
"READONLY",
"INJBD",
"INVALIDINO",
"BLOCKECC",
"BLOCKNO",
"VALIDFLAG",
"GENERATION",
"UNSUPPORTED"
};
struct ocfs2_filecheck_entry {
struct list_head fe_list;
unsigned long fe_ino;
unsigned int fe_type;
unsigned int fe_done:1;
unsigned int fe_status:31;
};
struct ocfs2_filecheck_args {
unsigned int fa_type;
union {
unsigned long fa_ino;
unsigned int fa_len;
};
};
static const char *
ocfs2_filecheck_error(int errno)
{
if (!errno)
return ocfs2_filecheck_errs[errno];
BUG_ON(errno < OCFS2_FILECHECK_ERR_START ||
errno > OCFS2_FILECHECK_ERR_END);
return ocfs2_filecheck_errs[errno - OCFS2_FILECHECK_ERR_START + 1];
}
static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf);
static ssize_t ocfs2_filecheck_attr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count);
static struct kobj_attribute ocfs2_filecheck_attr_chk =
__ATTR(check, S_IRUSR | S_IWUSR,
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct kobj_attribute ocfs2_filecheck_attr_fix =
__ATTR(fix, S_IRUSR | S_IWUSR,
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct kobj_attribute ocfs2_filecheck_attr_set =
__ATTR(set, S_IRUSR | S_IWUSR,
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct attribute *ocfs2_filecheck_attrs[] = {
&ocfs2_filecheck_attr_chk.attr,
&ocfs2_filecheck_attr_fix.attr,
&ocfs2_filecheck_attr_set.attr,
NULL
};
ATTRIBUTE_GROUPS(ocfs2_filecheck);
static void ocfs2_filecheck_release(struct kobject *kobj)
{
struct ocfs2_filecheck_sysfs_entry *entry = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
complete(&entry->fs_kobj_unregister);
}
static ssize_t
ocfs2_filecheck_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
ssize_t ret = -EIO;
struct kobj_attribute *kattr = container_of(attr,
struct kobj_attribute, attr);
kobject_get(kobj);
if (kattr->show)
ret = kattr->show(kobj, kattr, buf);
kobject_put(kobj);
return ret;
}
static ssize_t
ocfs2_filecheck_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
ssize_t ret = -EIO;
struct kobj_attribute *kattr = container_of(attr,
struct kobj_attribute, attr);
kobject_get(kobj);
if (kattr->store)
ret = kattr->store(kobj, kattr, buf, count);
kobject_put(kobj);
return ret;
}
static const struct sysfs_ops ocfs2_filecheck_ops = {
.show = ocfs2_filecheck_show,
.store = ocfs2_filecheck_store,
};
static struct kobj_type ocfs2_ktype_filecheck = {
.default_groups = ocfs2_filecheck_groups,
.sysfs_ops = &ocfs2_filecheck_ops,
.release = ocfs2_filecheck_release,
};
static void
ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry)
{
struct ocfs2_filecheck_entry *p;
spin_lock(&entry->fs_fcheck->fc_lock);
while (!list_empty(&entry->fs_fcheck->fc_head)) {
p = list_first_entry(&entry->fs_fcheck->fc_head,
struct ocfs2_filecheck_entry, fe_list);
list_del(&p->fe_list);
		BUG_ON(!p->fe_done); /* freeing an unfinished check entry is a bug */
kfree(p);
}
spin_unlock(&entry->fs_fcheck->fc_lock);
kfree(entry->fs_fcheck);
entry->fs_fcheck = NULL;
}
int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
{
int ret;
struct ocfs2_filecheck *fcheck;
struct ocfs2_filecheck_sysfs_entry *entry = &osb->osb_fc_ent;
fcheck = kmalloc(sizeof(struct ocfs2_filecheck), GFP_NOFS);
if (!fcheck)
return -ENOMEM;
INIT_LIST_HEAD(&fcheck->fc_head);
spin_lock_init(&fcheck->fc_lock);
fcheck->fc_max = OCFS2_FILECHECK_MINSIZE;
fcheck->fc_size = 0;
fcheck->fc_done = 0;
entry->fs_kobj.kset = osb->osb_dev_kset;
init_completion(&entry->fs_kobj_unregister);
ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
NULL, "filecheck");
if (ret) {
kobject_put(&entry->fs_kobj);
kfree(fcheck);
return ret;
}
entry->fs_fcheck = fcheck;
return 0;
}
void ocfs2_filecheck_remove_sysfs(struct ocfs2_super *osb)
{
if (!osb->osb_fc_ent.fs_fcheck)
return;
kobject_del(&osb->osb_fc_ent.fs_kobj);
kobject_put(&osb->osb_fc_ent.fs_kobj);
wait_for_completion(&osb->osb_fc_ent.fs_kobj_unregister);
ocfs2_filecheck_sysfs_free(&osb->osb_fc_ent);
}
static int
ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent,
unsigned int count);
static int
ocfs2_filecheck_adjust_max(struct ocfs2_filecheck_sysfs_entry *ent,
unsigned int len)
{
int ret;
if ((len < OCFS2_FILECHECK_MINSIZE) || (len > OCFS2_FILECHECK_MAXSIZE))
return -EINVAL;
spin_lock(&ent->fs_fcheck->fc_lock);
if (len < (ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done)) {
mlog(ML_NOTICE,
"Cannot set online file check maximum entry number "
"to %u due to too many pending entries(%u)\n",
len, ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done);
ret = -EBUSY;
} else {
if (len < ent->fs_fcheck->fc_size)
BUG_ON(!ocfs2_filecheck_erase_entries(ent,
ent->fs_fcheck->fc_size - len));
ent->fs_fcheck->fc_max = len;
ret = 0;
}
spin_unlock(&ent->fs_fcheck->fc_lock);
return ret;
}
#define OCFS2_FILECHECK_ARGS_LEN 24
static int
ocfs2_filecheck_args_get_long(const char *buf, size_t count,
unsigned long *val)
{
char buffer[OCFS2_FILECHECK_ARGS_LEN];
memcpy(buffer, buf, count);
buffer[count] = '\0';
if (kstrtoul(buffer, 0, val))
return 1;
return 0;
}
static int
ocfs2_filecheck_type_parse(const char *name, unsigned int *type)
{
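	/*
	 * Editorial note: the strncmp() lengths include each literal's
	 * trailing NUL ("fix" compares 4 bytes, "check" 6, "set" 4),
	 * so these behave as exact, whole-string matches against the
	 * attribute name rather than prefix matches.
	 */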
if (!strncmp(name, "fix", 4))
*type = OCFS2_FILECHECK_TYPE_FIX;
else if (!strncmp(name, "check", 6))
*type = OCFS2_FILECHECK_TYPE_CHK;
else if (!strncmp(name, "set", 4))
*type = OCFS2_FILECHECK_TYPE_SET;
else
return 1;
return 0;
}
static int
ocfs2_filecheck_args_parse(const char *name, const char *buf, size_t count,
struct ocfs2_filecheck_args *args)
{
unsigned long val = 0;
unsigned int type;
	/* argument length is too short or too long */
if ((count < 1) || (count >= OCFS2_FILECHECK_ARGS_LEN))
return 1;
if (ocfs2_filecheck_type_parse(name, &type))
return 1;
if (ocfs2_filecheck_args_get_long(buf, count, &val))
return 1;
if (val <= 0)
return 1;
args->fa_type = type;
if (type == OCFS2_FILECHECK_TYPE_SET)
args->fa_len = (unsigned int)val;
else
args->fa_ino = val;
return 0;
}
static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
unsigned int type;
struct ocfs2_filecheck_entry *p;
struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
if (ocfs2_filecheck_type_parse(attr->attr.name, &type))
return -EINVAL;
if (type == OCFS2_FILECHECK_TYPE_SET) {
spin_lock(&ent->fs_fcheck->fc_lock);
total = snprintf(buf, remain, "%u\n", ent->fs_fcheck->fc_max);
spin_unlock(&ent->fs_fcheck->fc_lock);
goto exit;
}
ret = snprintf(buf, remain, "INO\t\tDONE\tERROR\n");
total += ret;
remain -= ret;
spin_lock(&ent->fs_fcheck->fc_lock);
list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
if (p->fe_type != type)
continue;
ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n",
p->fe_ino, p->fe_done,
ocfs2_filecheck_error(p->fe_status));
if (ret >= remain) {
/* snprintf() didn't fit */
total = -E2BIG;
break;
}
total += ret;
remain -= ret;
}
spin_unlock(&ent->fs_fcheck->fc_lock);
exit:
return total;
}
static inline int
ocfs2_filecheck_is_dup_entry(struct ocfs2_filecheck_sysfs_entry *ent,
unsigned long ino)
{
struct ocfs2_filecheck_entry *p;
list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
if (!p->fe_done) {
if (p->fe_ino == ino)
return 1;
}
}
return 0;
}
static inline int
ocfs2_filecheck_erase_entry(struct ocfs2_filecheck_sysfs_entry *ent)
{
struct ocfs2_filecheck_entry *p;
list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
if (p->fe_done) {
list_del(&p->fe_list);
kfree(p);
ent->fs_fcheck->fc_size--;
ent->fs_fcheck->fc_done--;
return 1;
}
}
return 0;
}
static int
ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent,
unsigned int count)
{
unsigned int i = 0;
unsigned int ret = 0;
while (i++ < count) {
if (ocfs2_filecheck_erase_entry(ent))
ret++;
else
break;
}
return (ret == count ? 1 : 0);
}
static void
ocfs2_filecheck_done_entry(struct ocfs2_filecheck_sysfs_entry *ent,
struct ocfs2_filecheck_entry *entry)
{
spin_lock(&ent->fs_fcheck->fc_lock);
entry->fe_done = 1;
ent->fs_fcheck->fc_done++;
spin_unlock(&ent->fs_fcheck->fc_lock);
}
static unsigned int
ocfs2_filecheck_handle(struct ocfs2_super *osb,
unsigned long ino, unsigned int flags)
{
unsigned int ret = OCFS2_FILECHECK_ERR_SUCCESS;
struct inode *inode = NULL;
int rc;
inode = ocfs2_iget(osb, ino, flags, 0);
if (IS_ERR(inode)) {
		rc = (int)(-PTR_ERR(inode));
if (rc >= OCFS2_FILECHECK_ERR_START &&
rc < OCFS2_FILECHECK_ERR_END)
ret = rc;
else
ret = OCFS2_FILECHECK_ERR_FAILED;
} else
iput(inode);
return ret;
}
static void
ocfs2_filecheck_handle_entry(struct ocfs2_filecheck_sysfs_entry *ent,
struct ocfs2_filecheck_entry *entry)
{
struct ocfs2_super *osb = container_of(ent, struct ocfs2_super,
osb_fc_ent);
if (entry->fe_type == OCFS2_FILECHECK_TYPE_CHK)
entry->fe_status = ocfs2_filecheck_handle(osb,
entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_CHK);
else if (entry->fe_type == OCFS2_FILECHECK_TYPE_FIX)
entry->fe_status = ocfs2_filecheck_handle(osb,
entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_FIX);
else
entry->fe_status = OCFS2_FILECHECK_ERR_UNSUPPORTED;
ocfs2_filecheck_done_entry(ent, entry);
}
static ssize_t ocfs2_filecheck_attr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t ret = 0;
struct ocfs2_filecheck_args args;
struct ocfs2_filecheck_entry *entry;
struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
if (count == 0)
return count;
if (ocfs2_filecheck_args_parse(attr->attr.name, buf, count, &args))
return -EINVAL;
if (args.fa_type == OCFS2_FILECHECK_TYPE_SET) {
ret = ocfs2_filecheck_adjust_max(ent, args.fa_len);
goto exit;
}
entry = kmalloc(sizeof(struct ocfs2_filecheck_entry), GFP_NOFS);
if (!entry) {
ret = -ENOMEM;
goto exit;
}
spin_lock(&ent->fs_fcheck->fc_lock);
if (ocfs2_filecheck_is_dup_entry(ent, args.fa_ino)) {
ret = -EEXIST;
kfree(entry);
} else if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) &&
(ent->fs_fcheck->fc_done == 0)) {
mlog(ML_NOTICE,
"Cannot do more file check "
"since file check queue(%u) is full now\n",
ent->fs_fcheck->fc_max);
ret = -EAGAIN;
kfree(entry);
} else {
if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) &&
(ent->fs_fcheck->fc_done > 0)) {
			/* Delete the oldest completed entry to make
			 * sure the number of entries in the list does
			 * not exceed the maximum.
			 */
BUG_ON(!ocfs2_filecheck_erase_entry(ent));
}
entry->fe_ino = args.fa_ino;
entry->fe_type = args.fa_type;
entry->fe_done = 0;
entry->fe_status = OCFS2_FILECHECK_ERR_INPROGRESS;
list_add_tail(&entry->fe_list, &ent->fs_fcheck->fc_head);
ent->fs_fcheck->fc_size++;
}
spin_unlock(&ent->fs_fcheck->fc_lock);
if (!ret)
ocfs2_filecheck_handle_entry(ent, entry);
exit:
return (!ret ? count : ret);
}
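/*
 * Illustrative usage from userspace (hedged sketch, not part of the
 * original source), following the sysfs layout created in
 * ocfs2_filecheck_create_sysfs() above:
 *
 *	# echo <ino> > /sys/fs/ocfs2/<devname>/filecheck/check
 *	# cat /sys/fs/ocfs2/<devname>/filecheck/check
 *	# echo <len> > /sys/fs/ocfs2/<devname>/filecheck/set
 */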
| linux-master | fs/ocfs2/filecheck.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* resize.c
*
* volume resize.
* Inspired by ext3/resize.c.
*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"
/*
 * Check whether any new backup superblocks exist in the last group.
 * If there are some, mark or clear them in the bitmap.
 *
 * Return how many backups we found in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
struct ocfs2_group_desc *gd,
u16 cl_cpg,
u16 old_bg_clusters,
int set)
{
int i;
u16 backups = 0;
u32 cluster, lgd_cluster;
u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);
for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
gd_blkno = ocfs2_which_cluster_group(inode, cluster);
if (gd_blkno < lgd_blkno)
continue;
else if (gd_blkno > lgd_blkno)
break;
		/* check whether this backup super has already been handled */
lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
lgd_cluster += old_bg_clusters;
if (lgd_cluster >= cluster)
continue;
if (set)
ocfs2_set_bit(cluster % cl_cpg,
(unsigned long *)gd->bg_bitmap);
else
ocfs2_clear_bit(cluster % cl_cpg,
(unsigned long *)gd->bg_bitmap);
backups++;
}
return backups;
}
static int ocfs2_update_last_group_and_inode(handle_t *handle,
struct inode *bm_inode,
struct buffer_head *bm_bh,
struct buffer_head *group_bh,
u32 first_new_cluster,
int new_clusters)
{
int ret = 0;
struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
struct ocfs2_chain_list *cl = &fe->id2.i_chain;
struct ocfs2_chain_rec *cr;
struct ocfs2_group_desc *group;
u16 chain, num_bits, backups = 0;
u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
u16 old_bg_clusters;
trace_ocfs2_update_last_group_and_inode(new_clusters,
first_new_cluster);
ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
group = (struct ocfs2_group_desc *)group_bh->b_data;
old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
/* update the group first. */
num_bits = new_clusters * cl_bpc;
le16_add_cpu(&group->bg_bits, num_bits);
le16_add_cpu(&group->bg_free_bits_count, num_bits);
	/*
	 * Check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
backups = ocfs2_calc_new_backup_super(bm_inode,
group,
cl_cpg, old_bg_clusters, 1);
le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
}
ocfs2_journal_dirty(handle, group_bh);
/* update the inode accordingly. */
ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out_rollback;
}
chain = le16_to_cpu(group->bg_chain);
cr = (&cl->cl_recs[chain]);
le32_add_cpu(&cr->c_total, num_bits);
le32_add_cpu(&cr->c_free, num_bits);
le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
le32_add_cpu(&fe->i_clusters, new_clusters);
if (backups) {
le32_add_cpu(&cr->c_free, -1 * backups);
le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
}
spin_lock(&OCFS2_I(bm_inode)->ip_lock);
OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
i_size_write(bm_inode, le64_to_cpu(fe->i_size));
ocfs2_journal_dirty(handle, bm_bh);
out_rollback:
if (ret < 0) {
ocfs2_calc_new_backup_super(bm_inode,
group,
cl_cpg, old_bg_clusters, 0);
le16_add_cpu(&group->bg_free_bits_count, backups);
le16_add_cpu(&group->bg_bits, -1 * num_bits);
le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
}
out:
if (ret)
mlog_errno(ret);
return ret;
}
static int update_backups(struct inode * inode, u32 clusters, char *data)
{
int i, ret = 0;
u32 cluster;
u64 blkno;
struct buffer_head *backup = NULL;
struct ocfs2_dinode *backup_di = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
/* calculate the real backups we need to update. */
for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
if (cluster >= clusters)
break;
ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
if (ret < 0) {
mlog_errno(ret);
break;
}
memcpy(backup->b_data, data, inode->i_sb->s_blocksize);
backup_di = (struct ocfs2_dinode *)backup->b_data;
backup_di->i_blkno = cpu_to_le64(blkno);
ret = ocfs2_write_super_or_backup(osb, backup);
brelse(backup);
backup = NULL;
if (ret < 0) {
mlog_errno(ret);
break;
}
}
return ret;
}
static void ocfs2_update_super_and_backups(struct inode *inode,
int new_clusters)
{
int ret;
u32 clusters = 0;
struct buffer_head *super_bh = NULL;
struct ocfs2_dinode *super_di = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	/*
	 * Update the superblock (and its backups) last in the resize.
	 * It doesn't matter if the write fails; we only warn below.
	 */
ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
&super_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
super_di = (struct ocfs2_dinode *)super_bh->b_data;
le32_add_cpu(&super_di->i_clusters, new_clusters);
clusters = le32_to_cpu(super_di->i_clusters);
ret = ocfs2_write_super_or_backup(osb, super_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
ret = update_backups(inode, clusters, super_bh->b_data);
out:
brelse(super_bh);
if (ret)
printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
" during fs resize. This condition is not fatal,"
" but fsck.ocfs2 should be run to fix it\n",
osb->dev_str);
return;
}
/*
* Extend the filesystem to the new number of clusters specified. This entry
* point is only used to extend the current filesystem to the end of the last
* existing group.
*/
int ocfs2_group_extend(struct inode *inode, int new_clusters)
{
int ret;
handle_t *handle;
struct buffer_head *main_bm_bh = NULL;
struct buffer_head *group_bh = NULL;
struct inode *main_bm_inode = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_group_desc *group = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u16 cl_bpc;
u32 first_new_cluster;
u64 lgd_blkno;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
if (new_clusters < 0)
return -EINVAL;
else if (new_clusters == 0)
return 0;
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
}
inode_lock(main_bm_inode);
ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
if (ret < 0) {
mlog_errno(ret);
goto out_mutex;
}
fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
* so any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
ocfs2_group_bitmap_size(osb->sb, 0,
osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "An offline resize is required.");
ret = -EINVAL;
goto out_unlock;
}
first_new_cluster = le32_to_cpu(fe->i_clusters);
lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
first_new_cluster - 1);
ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
&group_bh);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
group = (struct ocfs2_group_desc *)group_bh->b_data;
cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
ret = -EINVAL;
goto out_unlock;
}
trace_ocfs2_group_extend(
(unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
if (IS_ERR(handle)) {
mlog_errno(PTR_ERR(handle));
ret = -EINVAL;
goto out_unlock;
}
/* update the last group descriptor and inode. */
ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
main_bm_bh, group_bh,
first_new_cluster,
new_clusters);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ocfs2_update_super_and_backups(main_bm_inode, new_clusters);
out_commit:
ocfs2_commit_trans(osb, handle);
out_unlock:
brelse(group_bh);
brelse(main_bm_bh);
ocfs2_inode_unlock(main_bm_inode, 1);
out_mutex:
inode_unlock(main_bm_inode);
iput(main_bm_inode);
out:
return ret;
}
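/*
 * Sanity-check a new group descriptor against the caller-supplied
 * ocfs2_new_group_input: the descriptor itself must validate, and its
 * chain, total bit count and free bit count must match what userspace
 * claims to have written.
 */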
static int ocfs2_check_new_group(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_new_group_input *input,
struct buffer_head *group_bh)
{
int ret;
struct ocfs2_group_desc *gd =
(struct ocfs2_group_desc *)group_bh->b_data;
u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);
ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
if (ret)
goto out;
ret = -EINVAL;
if (le16_to_cpu(gd->bg_chain) != input->chain)
mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
"while input has %u set.\n",
(unsigned long long)le64_to_cpu(gd->bg_blkno),
le16_to_cpu(gd->bg_chain), input->chain);
else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
"input has %u clusters set\n",
(unsigned long long)le64_to_cpu(gd->bg_blkno),
le16_to_cpu(gd->bg_bits), input->clusters);
else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
"but it should have %u set\n",
(unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
input->frees * cl_bpc);
else
ret = 0;
out:
return ret;
}
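/*
 * Validate an ocfs2_group_add() request before touching the journal:
 * the new group must lie past the current volume end, target a valid
 * chain, not overflow the cluster count, and sit at the block number
 * the cluster group layout dictates. The descriptor itself is checked
 * last by ocfs2_check_new_group().
 */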
static int ocfs2_verify_group_and_input(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_new_group_input *input,
struct buffer_head *group_bh)
{
u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
u32 total_clusters = le32_to_cpu(di->i_clusters);
int ret = -EINVAL;
	if (cluster < total_clusters)
		mlog(ML_ERROR, "new group lies inside the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the new group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "new group's cluster count overflows.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster count exceeds the maximum for a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster count exceeds the total cluster count\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;
return ret;
}
/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
int ret;
handle_t *handle;
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group = NULL;
struct ocfs2_chain_list *cl;
struct ocfs2_chain_rec *cr;
u16 cl_bpc;
u64 bg_ptr;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
}
inode_lock(main_bm_inode);
ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
if (ret < 0) {
mlog_errno(ret);
goto out_mutex;
}
fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
ocfs2_group_bitmap_size(osb->sb, 0,
osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "An offline resize is required.");
ret = -EINVAL;
goto out_unlock;
}
ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
if (ret < 0) {
mlog(ML_ERROR, "Can't read the group descriptor # %llu "
"from the device.", (unsigned long long)input->group);
goto out_unlock;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);
ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
if (ret) {
mlog_errno(ret);
goto out_free_group_bh;
}
trace_ocfs2_group_add((unsigned long long)input->group,
input->chain, input->clusters, input->frees);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
if (IS_ERR(handle)) {
mlog_errno(PTR_ERR(handle));
ret = -EINVAL;
goto out_free_group_bh;
}
cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
cl = &fe->id2.i_chain;
cr = &cl->cl_recs[input->chain];
ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out_commit;
}
group = (struct ocfs2_group_desc *)group_bh->b_data;
bg_ptr = le64_to_cpu(group->bg_next_group);
group->bg_next_group = cr->c_blkno;
ocfs2_journal_dirty(handle, group_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
group->bg_next_group = cpu_to_le64(bg_ptr);
mlog_errno(ret);
goto out_commit;
}
if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
le16_add_cpu(&cl->cl_next_free_rec, 1);
memset(cr, 0, sizeof(struct ocfs2_chain_rec));
}
cr->c_blkno = cpu_to_le64(input->group);
le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
le32_add_cpu(&cr->c_free, input->frees * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
le32_add_cpu(&fe->id1.bitmap1.i_used,
(input->clusters - input->frees) * cl_bpc);
le32_add_cpu(&fe->i_clusters, input->clusters);
ocfs2_journal_dirty(handle, main_bm_bh);
spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));
ocfs2_update_super_and_backups(main_bm_inode, input->clusters);
out_commit:
ocfs2_commit_trans(osb, handle);
out_free_group_bh:
brelse(group_bh);
out_unlock:
brelse(main_bm_bh);
ocfs2_inode_unlock(main_bm_inode, 1);
out_mutex:
inode_unlock(main_bm_inode);
iput(main_bm_inode);
out:
return ret;
}
| linux-master | fs/ocfs2/resize.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* extent_map.c
*
* Block/Cluster mapping functions
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fiemap.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "super.h"
#include "symlink.h"
#include "aops.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
/*
* The extent caching implementation is intentionally trivial.
*
* We only cache a small number of extents stored directly on the
* inode, so linear order operations are acceptable. If we ever want
* to increase the size of the extent map, then these algorithms must
* get smarter.
*/
void ocfs2_extent_map_init(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
oi->ip_extent_map.em_num_items = 0;
INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
}
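/*
 * Linear scan of the extent map list. On a hit, move the item to the
 * front of the list so that hot extents stay cheap to find (the list
 * is kept in most-recently-used order).
 */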
static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
unsigned int cpos,
struct ocfs2_extent_map_item **ret_emi)
{
unsigned int range;
struct ocfs2_extent_map_item *emi;
*ret_emi = NULL;
list_for_each_entry(emi, &em->em_list, ei_list) {
range = emi->ei_cpos + emi->ei_clusters;
if (cpos >= emi->ei_cpos && cpos < range) {
list_move(&emi->ei_list, &em->em_list);
*ret_emi = emi;
break;
}
}
}
static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
unsigned int *phys, unsigned int *len,
unsigned int *flags)
{
unsigned int coff;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_extent_map_item *emi;
spin_lock(&oi->ip_lock);
__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
if (emi) {
coff = cpos - emi->ei_cpos;
*phys = emi->ei_phys + coff;
if (len)
*len = emi->ei_clusters - coff;
if (flags)
*flags = emi->ei_flags;
}
spin_unlock(&oi->ip_lock);
if (emi == NULL)
return -ENOENT;
return 0;
}
/*
* Forget about all clusters equal to or greater than cpos.
*/
void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
{
struct ocfs2_extent_map_item *emi, *n;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_extent_map *em = &oi->ip_extent_map;
LIST_HEAD(tmp_list);
unsigned int range;
spin_lock(&oi->ip_lock);
list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
if (emi->ei_cpos >= cpos) {
/* Full truncate of this record. */
list_move(&emi->ei_list, &tmp_list);
BUG_ON(em->em_num_items == 0);
em->em_num_items--;
continue;
}
range = emi->ei_cpos + emi->ei_clusters;
if (range > cpos) {
/* Partial truncate */
emi->ei_clusters = cpos - emi->ei_cpos;
}
}
spin_unlock(&oi->ip_lock);
list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
list_del(&emi->ei_list);
kfree(emi);
}
}
/*
 * Is any part of emi2 contained within emi1?
 */
static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
struct ocfs2_extent_map_item *emi2)
{
unsigned int range1, range2;
/*
* Check if logical start of emi2 is inside emi1
*/
range1 = emi1->ei_cpos + emi1->ei_clusters;
if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
return 1;
/*
* Check if logical end of emi2 is inside emi1
*/
range2 = emi2->ei_cpos + emi2->ei_clusters;
if (range2 > emi1->ei_cpos && range2 <= range1)
return 1;
return 0;
}
static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
struct ocfs2_extent_map_item *src)
{
dest->ei_cpos = src->ei_cpos;
dest->ei_phys = src->ei_phys;
dest->ei_clusters = src->ei_clusters;
dest->ei_flags = src->ei_flags;
}
/*
* Try to merge emi with ins. Returns 1 if merge succeeds, zero
* otherwise.
*/
static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
struct ocfs2_extent_map_item *ins)
{
/*
* Handle contiguousness
*/
if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
ins->ei_flags == emi->ei_flags) {
emi->ei_clusters += ins->ei_clusters;
return 1;
} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
(ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
ins->ei_flags == emi->ei_flags) {
emi->ei_phys = ins->ei_phys;
emi->ei_cpos = ins->ei_cpos;
emi->ei_clusters += ins->ei_clusters;
return 1;
}
/*
* Overlapping extents - this shouldn't happen unless we've
	 * split an extent to change its flags. That is exceedingly
* rare, so there's no sense in trying to optimize it yet.
*/
if (ocfs2_ei_is_contained(emi, ins) ||
ocfs2_ei_is_contained(ins, emi)) {
ocfs2_copy_emi_fields(emi, ins);
return 1;
}
/* No merge was possible. */
return 0;
}
/*
* In order to reduce complexity on the caller, this insert function
* is intentionally liberal in what it will accept.
*
* The only rule is that the truncate call *must* be used whenever
* records have been deleted. This avoids inserting overlapping
* records with different physical mappings.
*/
void ocfs2_extent_map_insert_rec(struct inode *inode,
struct ocfs2_extent_rec *rec)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_extent_map *em = &oi->ip_extent_map;
struct ocfs2_extent_map_item *emi, *new_emi = NULL;
struct ocfs2_extent_map_item ins;
ins.ei_cpos = le32_to_cpu(rec->e_cpos);
ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
le64_to_cpu(rec->e_blkno));
ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
ins.ei_flags = rec->e_flags;
search:
spin_lock(&oi->ip_lock);
list_for_each_entry(emi, &em->em_list, ei_list) {
if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
list_move(&emi->ei_list, &em->em_list);
spin_unlock(&oi->ip_lock);
goto out;
}
}
/*
* No item could be merged.
*
	 * Either allocate and add a new item, or overwrite the least
	 * recently inserted one.
*/
if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
if (new_emi == NULL) {
spin_unlock(&oi->ip_lock);
new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
if (new_emi == NULL)
goto out;
goto search;
}
ocfs2_copy_emi_fields(new_emi, &ins);
list_add(&new_emi->ei_list, &em->em_list);
em->em_num_items++;
new_emi = NULL;
} else {
BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
emi = list_entry(em->em_list.prev,
struct ocfs2_extent_map_item, ei_list);
list_move(&emi->ei_list, &em->em_list);
ocfs2_copy_emi_fields(emi, &ins);
}
spin_unlock(&oi->ip_lock);
out:
kfree(new_emi);
}
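/*
 * Check whether the right-most leaf extent block of @di holds any
 * extents. Returns 1 if it is empty (no records, or only the empty
 * sentinel record), 0 if it has extents, and a negative errno on
 * error.
 */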
static int ocfs2_last_eb_is_empty(struct inode *inode,
struct ocfs2_dinode *di)
{
int ret, next_free;
u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
struct buffer_head *eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in leaf block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
ret = -EROFS;
goto out;
}
next_free = le16_to_cpu(el->l_next_free_rec);
if (next_free == 0 ||
(next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
ret = 1;
out:
brelse(eb_bh);
return ret;
}
/*
* Return the 1st index within el which contains an extent start
* larger than v_cluster.
*/
static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
u32 v_cluster)
{
int i;
struct ocfs2_extent_rec *rec;
for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
if (v_cluster < le32_to_cpu(rec->e_cpos))
break;
}
return i;
}
/*
* Figure out the size of a hole which starts at v_cluster within the given
* extent list.
*
* If there is no more allocation past v_cluster, we return the maximum
* cluster size minus v_cluster.
*
* If we have in-inode extents, then el points to the dinode list and
* eb_bh is NULL. Otherwise, eb_bh should point to the extent block
* containing el.
*/
int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
struct ocfs2_extent_list *el,
struct buffer_head *eb_bh,
u32 v_cluster,
u32 *num_clusters)
{
int ret, i;
struct buffer_head *next_eb_bh = NULL;
struct ocfs2_extent_block *eb, *next_eb;
i = ocfs2_search_for_hole_index(el, v_cluster);
if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
eb = (struct ocfs2_extent_block *)eb_bh->b_data;
/*
* Check the next leaf for any extents.
*/
if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
goto no_more_extents;
ret = ocfs2_read_extent_block(ci,
le64_to_cpu(eb->h_next_leaf_blk),
&next_eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
el = &next_eb->h_list;
i = ocfs2_search_for_hole_index(el, v_cluster);
}
no_more_extents:
if (i == le16_to_cpu(el->l_next_free_rec)) {
/*
* We're at the end of our existing allocation. Just
* return the maximum number of clusters we could
* possibly allocate.
*/
*num_clusters = UINT_MAX - v_cluster;
} else {
*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
}
ret = 0;
out:
brelse(next_eb_bh);
return ret;
}
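/*
 * Walk the on-disk extent tree for @v_cluster without consulting the
 * in-memory extent map. On success *ret_rec holds the record covering
 * @v_cluster, or a zeroed record (e_blkno == 0) with *hole_len set if
 * @v_cluster falls in a hole. *is_last, if requested, reports whether
 * the record is the last allocated extent of the file.
 */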
static int ocfs2_get_clusters_nocache(struct inode *inode,
struct buffer_head *di_bh,
u32 v_cluster, unsigned int *hole_len,
struct ocfs2_extent_rec *ret_rec,
unsigned int *is_last)
{
int i, ret, tree_height, len;
struct ocfs2_dinode *di;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
struct buffer_head *eb_bh = NULL;
memset(ret_rec, 0, sizeof(*ret_rec));
if (is_last)
*is_last = 0;
di = (struct ocfs2_dinode *) di_bh->b_data;
el = &di->id2.i_list;
tree_height = le16_to_cpu(el->l_tree_depth);
if (tree_height > 0) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
&eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in leaf block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
ret = -EROFS;
goto out;
}
}
i = ocfs2_search_extent_list(el, v_cluster);
if (i == -1) {
/*
* Holes can be larger than the maximum size of an
* extent, so we return their lengths in a separate
* field.
*/
if (hole_len) {
ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
el, eb_bh,
v_cluster, &len);
if (ret) {
mlog_errno(ret);
goto out;
}
*hole_len = len;
}
goto out_hole;
}
rec = &el->l_recs[i];
BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
if (!rec->e_blkno) {
ocfs2_error(inode->i_sb,
"Inode %lu has bad extent record (%u, %u, 0)\n",
inode->i_ino,
le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
ret = -EROFS;
goto out;
}
*ret_rec = *rec;
/*
* Checking for last extent is potentially expensive - we
* might have to look at the next leaf over to see if it's
* empty.
*
	 * The first two checks are to see whether the caller even
	 * cares about this information, and whether the extent is at
	 * least the last in its list.
*
* If those hold true, then the extent is last if any of the
* additional conditions hold true:
* - Extent list is in-inode
* - Extent list is right-most
* - Extent list is 2nd to rightmost, with empty right-most
*/
if (is_last) {
if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
if (tree_height == 0)
*is_last = 1;
else if (eb->h_blkno == di->i_last_eb_blk)
*is_last = 1;
else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
ret = ocfs2_last_eb_is_empty(inode, di);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
if (ret == 1)
*is_last = 1;
}
}
}
out_hole:
ret = 0;
out:
brelse(eb_bh);
return ret;
}
static void ocfs2_relative_extent_offsets(struct super_block *sb,
u32 v_cluster,
struct ocfs2_extent_rec *rec,
u32 *p_cluster, u32 *num_clusters)
{
u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);
*p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
*p_cluster = *p_cluster + coff;
if (num_clusters)
*num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
}
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
struct ocfs2_extent_list *el,
unsigned int *extent_flags)
{
int ret = 0, i;
struct buffer_head *eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_rec *rec;
u32 coff;
if (el->l_tree_depth) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
&eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in xattr leaf block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
ret = -EROFS;
goto out;
}
}
i = ocfs2_search_extent_list(el, v_cluster);
if (i == -1) {
ret = -EROFS;
mlog_errno(ret);
goto out;
} else {
rec = &el->l_recs[i];
BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
if (!rec->e_blkno) {
ocfs2_error(inode->i_sb,
"Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
inode->i_ino,
le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
ret = -EROFS;
goto out;
}
coff = v_cluster - le32_to_cpu(rec->e_cpos);
*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
le64_to_cpu(rec->e_blkno));
*p_cluster = *p_cluster + coff;
if (num_clusters)
*num_clusters = ocfs2_rec_clusters(el, rec) - coff;
if (extent_flags)
*extent_flags = rec->e_flags;
}
out:
brelse(eb_bh);
return ret;
}
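/*
 * Cached variant of the cluster lookup: try the in-memory extent map
 * first and fall back to walking the on-disk tree, inserting the
 * freshly read record into the map for next time.
 */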
int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
unsigned int *extent_flags)
{
int ret;
unsigned int hole_len, flags = 0;
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = -ERANGE;
mlog_errno(ret);
goto out;
}
ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
num_clusters, extent_flags);
if (ret == 0)
goto out;
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
&rec, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
if (rec.e_blkno == 0ULL) {
/*
* A hole was found. Return some canned values that
* callers can key on. If asked for, num_clusters will
* be populated with the size of the hole.
*/
*p_cluster = 0;
if (num_clusters) {
*num_clusters = hole_len;
}
} else {
ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
p_cluster, num_clusters);
flags = rec.e_flags;
ocfs2_extent_map_insert_rec(inode, &rec);
}
if (extent_flags)
*extent_flags = flags;
out:
brelse(di_bh);
return ret;
}
/*
* This expects alloc_sem to be held. The allocation cannot change at
* all while the map is in the process of being updated.
*/
int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
u64 *ret_count, unsigned int *extent_flags)
{
int ret;
int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
u32 cpos, num_clusters, p_cluster;
u64 boff = 0;
cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);
ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
extent_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* p_cluster == 0 indicates a hole.
*/
if (p_cluster) {
boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
boff += (v_blkno & (u64)(bpc - 1));
}
*p_blkno = boff;
if (ret_count) {
*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
*ret_count -= v_blkno & (u64)(bpc - 1);
}
out:
return ret;
}
/*
 * The name ocfs2_fiemap_inline() may be a little misleading: it not
 * only handles fiemap for inline-data files, but also for fast
 * symlinks, because the two are identical as far as extent mapping
 * is concerned.
 */
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
struct fiemap_extent_info *fieinfo,
u64 map_start)
{
int ret;
unsigned int id_count;
struct ocfs2_dinode *di;
u64 phys;
u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
di = (struct ocfs2_dinode *)di_bh->b_data;
if (ocfs2_inode_is_fast_symlink(inode))
id_count = ocfs2_fast_symlink_chars(inode->i_sb);
else
id_count = le16_to_cpu(di->id2.i_data.id_count);
if (map_start < id_count) {
phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
if (ocfs2_inode_is_fast_symlink(inode))
phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
else
phys += offsetof(struct ocfs2_dinode,
id2.i_data.id_data);
ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
flags);
if (ret < 0)
return ret;
}
return 0;
}
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len)
{
int ret, is_last;
u32 mapping_end, cpos;
unsigned int hole_size;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u64 len_bytes, phys_bytes, virt_bytes;
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0);
if (ret)
return ret;
ret = ocfs2_inode_lock(inode, &di_bh, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
down_read(&OCFS2_I(inode)->ip_alloc_sem);
/*
* Handle inline-data and fast symlink separately.
*/
if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
ocfs2_inode_is_fast_symlink(inode)) {
ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
goto out_unlock;
}
cpos = map_start >> osb->s_clustersize_bits;
mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
map_start + map_len);
is_last = 0;
while (cpos < mapping_end && !is_last) {
u32 fe_flags;
ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
&hole_size, &rec, &is_last);
if (ret) {
mlog_errno(ret);
goto out_unlock;
}
if (rec.e_blkno == 0ULL) {
cpos += hole_size;
continue;
}
fe_flags = 0;
if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
fe_flags |= FIEMAP_EXTENT_SHARED;
if (is_last)
fe_flags |= FIEMAP_EXTENT_LAST;
len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
len_bytes, fe_flags);
if (ret)
break;
		cpos = le32_to_cpu(rec.e_cpos) + le16_to_cpu(rec.e_leaf_clusters);
}
if (ret > 0)
ret = 0;
out_unlock:
brelse(di_bh);
up_read(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_inode_unlock(inode, 0);
out:
return ret;
}
/* Is IO overwriting allocated blocks? */
int ocfs2_overwrite_io(struct inode *inode, struct buffer_head *di_bh,
u64 map_start, u64 map_len)
{
int ret = 0, is_last;
u32 mapping_end, cpos;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_rec rec;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
if (ocfs2_size_fits_inline_data(di_bh, map_start + map_len))
return ret;
else
return -EAGAIN;
}
cpos = map_start >> osb->s_clustersize_bits;
mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
map_start + map_len);
is_last = 0;
while (cpos < mapping_end && !is_last) {
ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
NULL, &rec, &is_last);
if (ret) {
mlog_errno(ret);
goto out;
}
if (rec.e_blkno == 0ULL)
break;
if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
break;
cpos = le32_to_cpu(rec.e_cpos) +
le16_to_cpu(rec.e_leaf_clusters);
}
if (cpos < mapping_end)
ret = -EAGAIN;
out:
return ret;
}
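/*
 * Back-end for SEEK_DATA/SEEK_HOLE. Walks the extent list from
 * *offset, treating unwritten extents as holes, and returns the byte
 * offset of the next region of the requested kind in *offset (i_size
 * always terminates a trailing hole). Returns -ENXIO when no such
 * region exists before end of file.
 */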
int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret;
unsigned int is_last = 0, is_data = 0;
u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
u32 cpos, cend, clen, hole_size;
u64 extoff, extlen;
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
BUG_ON(whence != SEEK_DATA && whence != SEEK_HOLE);
ret = ocfs2_inode_lock(inode, &di_bh, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
down_read(&OCFS2_I(inode)->ip_alloc_sem);
if (*offset >= i_size_read(inode)) {
ret = -ENXIO;
goto out_unlock;
}
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
if (whence == SEEK_HOLE)
*offset = i_size_read(inode);
goto out_unlock;
}
clen = 0;
cpos = *offset >> cs_bits;
cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
while (cpos < cend && !is_last) {
ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
&rec, &is_last);
if (ret) {
mlog_errno(ret);
goto out_unlock;
}
extoff = cpos;
extoff <<= cs_bits;
if (rec.e_blkno == 0ULL) {
clen = hole_size;
is_data = 0;
} else {
clen = le16_to_cpu(rec.e_leaf_clusters) -
(cpos - le32_to_cpu(rec.e_cpos));
is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
}
if ((!is_data && whence == SEEK_HOLE) ||
(is_data && whence == SEEK_DATA)) {
if (extoff > *offset)
*offset = extoff;
goto out_unlock;
}
if (!is_last)
cpos += clen;
}
if (whence == SEEK_HOLE) {
extoff = cpos;
extoff <<= cs_bits;
extlen = clen;
extlen <<= cs_bits;
if ((extoff + extlen) > i_size_read(inode))
extlen = i_size_read(inode) - extoff;
extoff += extlen;
if (extoff > *offset)
*offset = extoff;
goto out_unlock;
}
ret = -ENXIO;
out_unlock:
brelse(di_bh);
up_read(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_inode_unlock(inode, 0);
out:
return ret;
}
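/*
 * Read @nr blocks starting at virtual block @v_block, mapping them to
 * physical blocks one contiguous run at a time. Holes are an error
 * here (-EIO); a read past i_size is only tolerated for readahead.
 */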
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
struct buffer_head *bh))
{
int rc = 0;
u64 p_block, p_count;
int i, count, done = 0;
trace_ocfs2_read_virt_blocks(
inode, (unsigned long long)v_block, nr, bhs, flags,
validate);
if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
i_size_read(inode)) {
BUG_ON(!(flags & OCFS2_BH_READAHEAD));
goto out;
}
while (done < nr) {
down_read(&OCFS2_I(inode)->ip_alloc_sem);
rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
&p_block, &p_count, NULL);
up_read(&OCFS2_I(inode)->ip_alloc_sem);
if (rc) {
mlog_errno(rc);
break;
}
if (!p_block) {
rc = -EIO;
mlog(ML_ERROR,
"Inode #%llu contains a hole at offset %llu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)(v_block + done) <<
inode->i_sb->s_blocksize_bits);
break;
}
count = nr - done;
if (p_count < count)
count = p_count;
/*
* If the caller passed us bhs, they should have come
* from a previous readahead call to this function. Thus,
* they should have the right b_blocknr.
*/
for (i = 0; i < count; i++) {
if (!bhs[done + i])
continue;
BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
}
rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
bhs + done, flags, validate);
if (rc) {
mlog_errno(rc);
break;
}
done += count;
}
out:
return rc;
}
| linux-master | fs/ocfs2/extent_map.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xattr.c
*
* Copyright (C) 2004, 2008 Oracle. All rights reserved.
*
* CREDITS:
 * Lots of code in this file is copied from linux/fs/ext3/xattr.c.
* Copyright (C) 2001-2003 Andreas Gruenbacher, <[email protected]>
*/
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "symlink.h"
#include "sysfile.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "suballoc.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
#include "ocfs2_trace.h"
struct ocfs2_xattr_def_value_root {
struct ocfs2_xattr_value_root xv;
struct ocfs2_extent_rec er;
};
struct ocfs2_xattr_bucket {
/* The inode these xattrs are associated with */
struct inode *bu_inode;
/* The actual buffers that make up the bucket */
struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];
/* How many blocks make up one bucket for this filesystem */
int bu_blocks;
};
struct ocfs2_xattr_set_ctxt {
handle_t *handle;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_alloc_context *data_ac;
struct ocfs2_cached_dealloc_ctxt dealloc;
int set_abort;
};
#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE 80
#define OCFS2_XATTR_HEADER_GAP 4
#define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \
- sizeof(struct ocfs2_xattr_header) \
- OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr) ((ptr)->i_sb->s_blocksize \
- sizeof(struct ocfs2_xattr_block) \
- sizeof(struct ocfs2_xattr_header) \
- OCFS2_XATTR_HEADER_GAP)
static struct ocfs2_xattr_def_value_root def_xv = {
.xv.xr_list.l_count = cpu_to_le16(1),
};
const struct xattr_handler *ocfs2_xattr_handlers[] = {
&ocfs2_xattr_user_handler,
&ocfs2_xattr_trusted_handler,
&ocfs2_xattr_security_handler,
NULL
};
static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
[OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler,
[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
[OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler,
[OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler,
};
struct ocfs2_xattr_info {
int xi_name_index;
const char *xi_name;
int xi_name_len;
const void *xi_value;
size_t xi_value_len;
};
struct ocfs2_xattr_search {
struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the buffer head of the block that holds the
	 * extended attributes. When the extended attributes live in the
	 * inode itself, xattr_bh is equal to inode_bh.
	 */
struct buffer_head *xattr_bh;
struct ocfs2_xattr_header *header;
struct ocfs2_xattr_bucket *bucket;
void *base;
void *end;
struct ocfs2_xattr_entry *here;
int not_found;
};
/* Operations on struct ocfs2_xa_entry */
struct ocfs2_xa_loc;
struct ocfs2_xa_loc_operations {
/*
* Journal functions
*/
int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
int type);
void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
/*
* Return a pointer to the appropriate buffer in loc->xl_storage
* at the given offset from loc->xl_header.
*/
void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
/* Can we reuse the existing entry for the new value? */
int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi);
/* How much space is needed for the new value? */
int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi);
/*
* Return the offset of the first name+value pair. This is
* the start of our downward-filling free space.
*/
int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
/*
* Remove the name+value at this location. Do whatever is
* appropriate with the remaining name+value pairs.
*/
void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
/* Fill xl_entry with a new entry */
void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
/* Add name+value storage to an entry */
void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
/*
* Initialize the value buf's access and bh fields for this entry.
* ocfs2_xa_fill_value_buf() will handle the xv pointer.
*/
void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_value_buf *vb);
};
/*
* Describes an xattr entry location. This is a memory structure
* tracking the on-disk structure.
*/
struct ocfs2_xa_loc {
/* This xattr belongs to this inode */
struct inode *xl_inode;
/* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
struct ocfs2_xattr_header *xl_header;
/* Bytes from xl_header to the end of the storage */
int xl_size;
/*
* The ocfs2_xattr_entry this location describes. If this is
* NULL, this location describes the on-disk structure where it
* would have been.
*/
struct ocfs2_xattr_entry *xl_entry;
/*
* Internal housekeeping
*/
/* Buffer(s) containing this entry */
void *xl_storage;
/* Operations on the storage backing this location */
const struct ocfs2_xa_loc_operations *xl_ops;
};
/*
* Convenience functions to calculate how much space is needed for a
* given name+value pair
*/
static int namevalue_size(int name_len, uint64_t value_len)
{
if (value_len > OCFS2_XATTR_INLINE_SIZE)
return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
else
return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
}
static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
{
return namevalue_size(xi->xi_name_len, xi->xi_value_len);
}
static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
{
u64 value_len = le64_to_cpu(xe->xe_value_size);
BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
ocfs2_xattr_is_local(xe));
return namevalue_size(xe->xe_name_len, value_len);
}
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
int *block_off,
int *new_offset);
static int ocfs2_xattr_block_find(struct inode *inode,
int name_index,
const char *name,
struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_index_block_find(struct inode *inode,
struct buffer_head *root_bh,
int name_index,
const char *name,
struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
struct buffer_head *blk_bh,
char *buffer,
size_t buffer_size);
static int ocfs2_xattr_create_index_block(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt);
static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt);
typedef int (xattr_tree_rec_func)(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno, u32 cpos, u32 len, void *para);
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
struct buffer_head *root_bh,
xattr_tree_rec_func *rec_func,
void *para);
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para);
static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
u32 cpos,
u32 len,
void *para);
static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
u64 src_blk, u64 last_blk, u64 to_blk,
unsigned int start_bucket,
u32 *first_hash);
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
struct ocfs2_refcount_tree **ref_tree,
int *meta_need,
int *credits);
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
struct ocfs2_xattr_bucket *bucket,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **bh);
static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
}
static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
{
return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
{
struct ocfs2_xattr_bucket *bucket;
int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);
bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
if (bucket) {
bucket->bu_inode = inode;
bucket->bu_blocks = blks;
}
return bucket;
}
static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
{
int i;
for (i = 0; i < bucket->bu_blocks; i++) {
brelse(bucket->bu_bhs[i]);
bucket->bu_bhs[i] = NULL;
}
}
static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
{
if (bucket) {
ocfs2_xattr_bucket_relse(bucket);
bucket->bu_inode = NULL;
kfree(bucket);
}
}
/*
* A bucket that has never been written to disk doesn't need to be
* read. We just need the buffer_heads. Don't call this for
* buckets that are already on disk. ocfs2_read_xattr_bucket() initializes
* them fully.
*/
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
u64 xb_blkno, int new)
{
int i, rc = 0;
for (i = 0; i < bucket->bu_blocks; i++) {
bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
xb_blkno + i);
if (!bucket->bu_bhs[i]) {
rc = -ENOMEM;
mlog_errno(rc);
break;
}
if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i])) {
if (new)
ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i]);
else {
set_buffer_uptodate(bucket->bu_bhs[i]);
ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i]);
}
}
}
if (rc)
ocfs2_xattr_bucket_relse(bucket);
return rc;
}
/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
u64 xb_blkno)
{
int rc;
rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
bucket->bu_blocks, bucket->bu_bhs, 0,
NULL);
if (!rc) {
spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
bucket->bu_bhs,
bucket->bu_blocks,
&bucket_xh(bucket)->xh_check);
spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
if (rc)
mlog_errno(rc);
}
if (rc)
ocfs2_xattr_bucket_relse(bucket);
return rc;
}
static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
struct ocfs2_xattr_bucket *bucket,
int type)
{
int i, rc = 0;
for (i = 0; i < bucket->bu_blocks; i++) {
rc = ocfs2_journal_access(handle,
INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i], type);
if (rc) {
mlog_errno(rc);
break;
}
}
return rc;
}
static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
struct ocfs2_xattr_bucket *bucket)
{
int i;
spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
bucket->bu_bhs, bucket->bu_blocks,
&bucket_xh(bucket)->xh_check);
spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
for (i = 0; i < bucket->bu_blocks; i++)
ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}
static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
struct ocfs2_xattr_bucket *src)
{
int i;
int blocksize = src->bu_inode->i_sb->s_blocksize;
BUG_ON(dest->bu_blocks != src->bu_blocks);
BUG_ON(dest->bu_inode != src->bu_inode);
for (i = 0; i < src->bu_blocks; i++) {
memcpy(bucket_block(dest, i), bucket_block(src, i),
blocksize);
}
}
static int ocfs2_validate_xattr_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)bh->b_data;
trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
if (rc)
return rc;
/*
* Errors after here are fatal
*/
if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
return ocfs2_error(sb,
"Extended attribute block #%llu has bad signature %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
xb->xb_signature);
}
if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
return ocfs2_error(sb,
"Extended attribute block #%llu has an invalid xb_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(xb->xb_blkno));
}
if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
return ocfs2_error(sb,
"Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(xb->xb_fs_generation));
}
return 0;
}
static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
ocfs2_validate_xattr_block);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
static inline const char *ocfs2_xattr_prefix(int name_index)
{
const struct xattr_handler *handler = NULL;
if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
handler = ocfs2_xattr_handler_map[name_index];
return handler ? xattr_prefix(handler) : NULL;
}
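/*
 * Hash an xattr name, folding each byte into a rotating hash that is
 * seeded with the volume UUID hash so that values differ between
 * filesystems.
 */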
static u32 ocfs2_xattr_name_hash(struct inode *inode,
const char *name,
int name_len)
{
/* Get hash value of uuid from super block */
u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
int i;
/* hash extended attribute name */
for (i = 0; i < name_len; i++) {
hash = (hash << OCFS2_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
*name++;
}
return hash;
}
static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
return namevalue_size(name_len, value_len) +
sizeof(struct ocfs2_xattr_entry);
}
static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
{
return namevalue_size_xi(xi) +
sizeof(struct ocfs2_xattr_entry);
}
static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
{
return namevalue_size_xe(xe) +
sizeof(struct ocfs2_xattr_entry);
}
int ocfs2_calc_security_init(struct inode *dir,
struct ocfs2_security_xattr_info *si,
int *want_clusters,
int *xattr_credits,
struct ocfs2_alloc_context **xattr_ac)
{
int ret = 0;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
si->value_len);
	/*
	 * The maximum inline space a security xattr can take is
	 * 256 (name) + 80 (value) + 16 (entry) = 352 bytes,
	 * so reserving one metadata block for it is enough.
	 */
if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
s_size > OCFS2_XATTR_FREE_IN_IBODY) {
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
if (ret) {
mlog_errno(ret);
return ret;
}
*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
}
	/* Reserve clusters for an xattr value stored outside, in a B-tree. */
if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
si->value_len);
*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
new_clusters);
*want_clusters += new_clusters;
}
return ret;
}
int ocfs2_calc_xattr_init(struct inode *dir,
struct buffer_head *dir_bh,
umode_t mode,
struct ocfs2_security_xattr_info *si,
int *want_clusters,
int *xattr_credits,
int *want_meta)
{
int ret = 0;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
int s_size = 0, a_size = 0, acl_len = 0, new_clusters;
if (si->enable)
s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
si->value_len);
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
down_read(&OCFS2_I(dir)->ip_xattr_sem);
acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
"", NULL, 0);
up_read(&OCFS2_I(dir)->ip_xattr_sem);
if (acl_len > 0) {
a_size = ocfs2_xattr_entry_real_size(0, acl_len);
if (S_ISDIR(mode))
a_size <<= 1;
} else if (acl_len != 0 && acl_len != -ENODATA) {
ret = acl_len;
mlog_errno(ret);
return ret;
}
}
if (!(s_size + a_size))
return ret;
	/*
	 * The maximum inline space a security xattr can take is
	 * 256 (name) + 80 (value) + 16 (entry) = 352 bytes, and the
	 * maximum inline space ACL xattrs can take is
	 * 80 (value) + 16 (entry) * 2 (for a directory) = 192 bytes.
	 * When blocksize = 512 we may need to reserve one more cluster
	 * for an xattr bucket; otherwise reserving one metadata block
	 * for them is enough.
	 * If this is a new directory with inline data, we choose to
	 * reserve the entire inline area for directory contents and
	 * force an external xattr block.
	 */
if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
(S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
(s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
*want_meta = *want_meta + 1;
*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
}
if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
(s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
*want_clusters += 1;
*xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
}
	/*
	 * Reserve credits and clusters for xattrs whose values are too
	 * large to be stored inline and must be set outside the inode.
	 */
if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
si->value_len);
*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
new_clusters);
*want_clusters += new_clusters;
}
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
acl_len > OCFS2_XATTR_INLINE_SIZE) {
		/* a directory has both DEFAULT and ACCESS ACLs */
new_clusters = (S_ISDIR(mode) ? 2 : 1) *
ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
new_clusters);
*want_clusters += new_clusters;
}
return ret;
}
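/*
 * Grow the value tree behind @vb by @clusters_to_add clusters. The
 * b-tree code may ask us to restart (e.g. when the transaction runs
 * out of room), in which case we extend the handle's credits and loop
 * until the full allocation is done.
 */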
static int ocfs2_xattr_extend_allocation(struct inode *inode,
u32 clusters_to_add,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int status = 0, credits;
handle_t *handle = ctxt->handle;
enum ocfs2_alloc_restarted why;
u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
struct ocfs2_extent_tree et;
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
while (clusters_to_add) {
trace_ocfs2_xattr_extend_allocation(clusters_to_add);
status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
break;
}
prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
status = ocfs2_add_clusters_in_btree(handle,
&et,
&logical_start,
clusters_to_add,
0,
ctxt->data_ac,
ctxt->meta_ac,
&why);
if ((status < 0) && (status != -EAGAIN)) {
if (status != -ENOSPC)
mlog_errno(status);
break;
}
ocfs2_journal_dirty(handle, vb->vb_bh);
clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
prev_clusters;
if (why != RESTART_NONE && clusters_to_add) {
/*
* We can only fail in case the alloc file doesn't give
* up enough clusters.
*/
BUG_ON(why == RESTART_META);
credits = ocfs2_calc_extend_credits(inode->i_sb,
&vb->vb_xv->xr_list);
status = ocfs2_extend_trans(handle, credits);
if (status < 0) {
status = -ENOMEM;
mlog_errno(status);
break;
}
}
}
return status;
}
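/*
 * Punch @len clusters starting at @cpos out of the value tree behind
 * @vb. Refcounted extents get their refcount dropped; ordinary
 * extents are queued on the context's dealloc list for later freeing.
 */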
static int __ocfs2_remove_xattr_range(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
u32 cpos, u32 phys_cpos, u32 len,
unsigned int ext_flags,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
handle_t *handle = ctxt->handle;
struct ocfs2_extent_tree et;
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
&ctxt->dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
ocfs2_journal_dirty(handle, vb->vb_bh);
if (ext_flags & OCFS2_EXT_REFCOUNTED)
ret = ocfs2_decrease_refcount(inode, handle,
ocfs2_blocks_to_clusters(inode->i_sb,
phys_blkno),
len, ctxt->meta_ac, &ctxt->dealloc, 1);
else
ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
phys_blkno, len);
if (ret)
mlog_errno(ret);
out:
return ret;
}
static int ocfs2_xattr_shrink_size(struct inode *inode,
u32 old_clusters,
u32 new_clusters,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret = 0;
unsigned int ext_flags;
u32 trunc_len, cpos, phys_cpos, alloc_size;
u64 block;
if (old_clusters <= new_clusters)
return 0;
cpos = new_clusters;
trunc_len = old_clusters - new_clusters;
while (trunc_len) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
&alloc_size,
&vb->vb_xv->xr_list, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
if (alloc_size > trunc_len)
alloc_size = trunc_len;
ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
phys_cpos, alloc_size,
ext_flags, ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
block, alloc_size);
cpos += alloc_size;
trunc_len -= alloc_size;
}
out:
return ret;
}
static int ocfs2_xattr_value_truncate(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
int len,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
if (new_clusters == old_clusters)
return 0;
if (new_clusters > old_clusters)
ret = ocfs2_xattr_extend_allocation(inode,
new_clusters - old_clusters,
vb, ctxt);
else
ret = ocfs2_xattr_shrink_size(inode,
old_clusters, new_clusters,
vb, ctxt);
return ret;
}
static int ocfs2_xattr_list_entry(struct super_block *sb,
char *buffer, size_t size,
size_t *result, int type,
const char *name, int name_len)
{
char *p = buffer + *result;
const char *prefix;
int prefix_len;
int total_len;
switch(type) {
case OCFS2_XATTR_INDEX_USER:
if (OCFS2_SB(sb)->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return 0;
break;
case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
if (!(sb->s_flags & SB_POSIXACL))
return 0;
break;
case OCFS2_XATTR_INDEX_TRUSTED:
if (!capable(CAP_SYS_ADMIN))
return 0;
break;
}
prefix = ocfs2_xattr_prefix(type);
if (!prefix)
return 0;
prefix_len = strlen(prefix);
total_len = prefix_len + name_len + 1;
*result += total_len;
/* we are just looking for how big our buffer needs to be */
if (!size)
return 0;
if (*result > size)
return -ERANGE;
memcpy(p, prefix, prefix_len);
memcpy(p + prefix_len, name, name_len);
p[prefix_len + name_len] = '\0';
return 0;
}
static int ocfs2_xattr_list_entries(struct inode *inode,
struct ocfs2_xattr_header *header,
char *buffer, size_t buffer_size)
{
size_t result = 0;
int i, type, ret;
const char *name;
for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
type = ocfs2_xattr_get_type(entry);
name = (const char *)header +
le16_to_cpu(entry->xe_name_offset);
ret = ocfs2_xattr_list_entry(inode->i_sb,
buffer, buffer_size,
&result, type, name,
entry->xe_name_len);
if (ret)
return ret;
}
return result;
}
int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
struct ocfs2_dinode *di)
{
struct ocfs2_xattr_header *xh;
int i;
xh = (struct ocfs2_xattr_header *)
((void *)di + inode->i_sb->s_blocksize -
le16_to_cpu(di->i_xattr_inline_size));
for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
return 1;
return 0;
}
static int ocfs2_xattr_ibody_list(struct inode *inode,
struct ocfs2_dinode *di,
char *buffer,
size_t buffer_size)
{
struct ocfs2_xattr_header *header = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
return ret;
header = (struct ocfs2_xattr_header *)
((void *)di + inode->i_sb->s_blocksize -
le16_to_cpu(di->i_xattr_inline_size));
ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);
return ret;
}
static int ocfs2_xattr_block_list(struct inode *inode,
struct ocfs2_dinode *di,
char *buffer,
size_t buffer_size)
{
struct buffer_head *blk_bh = NULL;
struct ocfs2_xattr_block *xb;
int ret = 0;
if (!di->i_xattr_loc)
return ret;
ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
&blk_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
ret = ocfs2_xattr_list_entries(inode, header,
buffer, buffer_size);
} else
ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
buffer, buffer_size);
brelse(blk_bh);
return ret;
}
ssize_t ocfs2_listxattr(struct dentry *dentry,
char *buffer,
size_t size)
{
int ret = 0, i_ret = 0, b_ret = 0;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry));
if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
return -EOPNOTSUPP;
if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
return ret;
ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
down_read(&oi->ip_xattr_sem);
i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size);
if (i_ret < 0)
b_ret = 0;
else {
if (buffer) {
buffer += i_ret;
size -= i_ret;
}
b_ret = ocfs2_xattr_block_list(d_inode(dentry), di,
buffer, size);
if (b_ret < 0)
i_ret = 0;
}
up_read(&oi->ip_xattr_sem);
ocfs2_inode_unlock(d_inode(dentry), 0);
brelse(di_bh);
return i_ret + b_ret;
}
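/*
 * Linear search of the entry array at xs->header for the given
 * name_index/name pair, comparing type, then name length, then the
 * name bytes themselves. On a match xs->here points at the entry and
 * 0 is returned; otherwise -ENODATA.
 */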
static int ocfs2_xattr_find_entry(int name_index,
const char *name,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_xattr_entry *entry;
size_t name_len;
int i, cmp = 1;
if (name == NULL)
return -EINVAL;
name_len = strlen(name);
entry = xs->here;
for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
cmp = name_index - ocfs2_xattr_get_type(entry);
if (!cmp)
cmp = name_len - entry->xe_name_len;
if (!cmp)
cmp = memcmp(name, (xs->base +
le16_to_cpu(entry->xe_name_offset)),
name_len);
if (cmp == 0)
break;
entry += 1;
}
xs->here = entry;
return cmp ? -ENODATA : 0;
}
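/*
 * Copy an xattr value that lives in its own extent tree into @buffer:
 * walk the value's extent list one cluster run at a time and copy the
 * data out block by block, up to @len bytes.
 */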
static int ocfs2_xattr_get_value_outside(struct inode *inode,
struct ocfs2_xattr_value_root *xv,
void *buffer,
size_t len)
{
u32 cpos, p_cluster, num_clusters, bpc, clusters;
u64 blkno;
int i, ret = 0;
size_t cplen, blocksize;
struct buffer_head *bh = NULL;
struct ocfs2_extent_list *el;
el = &xv->xr_list;
clusters = le32_to_cpu(xv->xr_clusters);
bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
blocksize = inode->i_sb->s_blocksize;
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
&num_clusters, el, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
/* Copy ocfs2_xattr_value */
for (i = 0; i < num_clusters * bpc; i++, blkno++) {
ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
&bh, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
cplen = len >= blocksize ? blocksize : len;
memcpy(buffer, bh->b_data, cplen);
len -= cplen;
buffer += cplen;
brelse(bh);
bh = NULL;
if (len == 0)
break;
}
cpos += num_clusters;
}
out:
return ret;
}
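/*
 * Look up an xattr in the inode's inline area and copy its value into
 * @buffer. Returns the value size on success, -ENODATA if the attribute
 * is not stored in the inode body, or -ERANGE if the buffer is too
 * small.
 */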
static int ocfs2_xattr_ibody_get(struct inode *inode,
int name_index,
const char *name,
void *buffer,
size_t buffer_size,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
struct ocfs2_xattr_value_root *xv;
size_t size;
int ret = 0;
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
return -ENODATA;
xs->end = (void *)di + inode->i_sb->s_blocksize;
xs->header = (struct ocfs2_xattr_header *)
(xs->end - le16_to_cpu(di->i_xattr_inline_size));
xs->base = (void *)xs->header;
xs->here = xs->header->xh_entries;
ret = ocfs2_xattr_find_entry(name_index, name, xs);
if (ret)
return ret;
size = le64_to_cpu(xs->here->xe_value_size);
if (buffer) {
if (size > buffer_size)
return -ERANGE;
if (ocfs2_xattr_is_local(xs->here)) {
memcpy(buffer, (void *)xs->base +
le16_to_cpu(xs->here->xe_name_offset) +
OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
} else {
xv = (struct ocfs2_xattr_value_root *)
(xs->base + le16_to_cpu(
xs->here->xe_name_offset) +
OCFS2_XATTR_SIZE(xs->here->xe_name_len));
ret = ocfs2_xattr_get_value_outside(inode, xv,
buffer, size);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
}
}
return size;
}
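/*
 * Look up an xattr in the external xattr block and copy its value into
 * @buffer. For an indexed block, the name+value pair is read from the
 * bucket located by ocfs2_xattr_block_find(). Returns the value size on
 * success.
 */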
static int ocfs2_xattr_block_get(struct inode *inode,
int name_index,
const char *name,
void *buffer,
size_t buffer_size,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_xattr_block *xb;
struct ocfs2_xattr_value_root *xv;
size_t size;
int ret = -ENODATA, name_offset, name_len, i;
int block_off;
xs->bucket = ocfs2_xattr_bucket_new(inode);
if (!xs->bucket) {
ret = -ENOMEM;
mlog_errno(ret);
goto cleanup;
}
ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
if (xs->not_found) {
ret = -ENODATA;
goto cleanup;
}
xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
size = le64_to_cpu(xs->here->xe_value_size);
if (buffer) {
ret = -ERANGE;
if (size > buffer_size)
goto cleanup;
name_offset = le16_to_cpu(xs->here->xe_name_offset);
name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
i = xs->here - xs->header->xh_entries;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(xs->bucket),
i,
&block_off,
&name_offset);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
xs->base = bucket_block(xs->bucket, block_off);
}
if (ocfs2_xattr_is_local(xs->here)) {
memcpy(buffer, (void *)xs->base +
name_offset + name_len, size);
} else {
xv = (struct ocfs2_xattr_value_root *)
(xs->base + name_offset + name_len);
ret = ocfs2_xattr_get_value_outside(inode, xv,
buffer, size);
if (ret < 0) {
mlog_errno(ret);
goto cleanup;
}
}
}
ret = size;
cleanup:
ocfs2_xattr_bucket_free(xs->bucket);
brelse(xs->xattr_bh);
xs->xattr_bh = NULL;
return ret;
}
int ocfs2_xattr_get_nolock(struct inode *inode,
struct buffer_head *di_bh,
int name_index,
const char *name,
void *buffer,
size_t buffer_size)
{
int ret;
struct ocfs2_dinode *di = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_xattr_search xis = {
.not_found = -ENODATA,
};
struct ocfs2_xattr_search xbs = {
.not_found = -ENODATA,
};
if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
return -ENODATA;
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;
ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
buffer_size, &xis);
if (ret == -ENODATA && di->i_xattr_loc)
ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
buffer_size, &xbs);
return ret;
}
/* ocfs2_xattr_get()
*
* Copy an extended attribute into the buffer provided.
 * Pass a NULL buffer to compute the size of the buffer required.
*/
static int ocfs2_xattr_get(struct inode *inode,
int name_index,
const char *name,
void *buffer,
size_t buffer_size)
{
int ret, had_lock;
struct buffer_head *di_bh = NULL;
struct ocfs2_lock_holder oh;
had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
if (had_lock < 0) {
mlog_errno(had_lock);
return had_lock;
}
down_read(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
name, buffer, buffer_size);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
brelse(di_bh);
return ret;
}
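/*
 * Write @value into the clusters already allocated to the value tree,
 * zeroing the tail of the last block. The caller must supply a handle
 * with enough credits for every data block touched.
 */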
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
handle_t *handle,
struct ocfs2_xattr_value_buf *vb,
const void *value,
int value_len)
{
int ret = 0, i, cp_len;
u16 blocksize = inode->i_sb->s_blocksize;
u32 p_cluster, num_clusters;
u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
u64 blkno;
struct buffer_head *bh = NULL;
unsigned int ext_flags;
struct ocfs2_xattr_value_root *xv = vb->vb_xv;
BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
&num_clusters, &xv->xr_list,
&ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
for (i = 0; i < num_clusters * bpc; i++, blkno++) {
ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
&bh, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access(handle,
INODE_CACHE(inode),
bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
cp_len = value_len > blocksize ? blocksize : value_len;
memcpy(bh->b_data, value, cp_len);
value_len -= cp_len;
value += cp_len;
if (cp_len < blocksize)
memset(bh->b_data + cp_len, 0,
blocksize - cp_len);
ocfs2_journal_dirty(handle, bh);
brelse(bh);
bh = NULL;
/*
* XXX: do we need to empty all the following
* blocks in this cluster?
*/
if (!value_len)
break;
}
cpos += num_clusters;
}
out:
brelse(bh);
return ret;
}
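/*
 * Check whether @needed_space bytes fit between the end of the entry
 * array and the downward-growing name+value region, while preserving
 * OCFS2_XATTR_HEADER_GAP bytes of slack. Returns -ENOSPC if they do
 * not, or -EIO if the header is already inconsistent.
 */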
static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
int num_entries)
{
int free_space;
if (!needed_space)
return 0;
free_space = free_start -
sizeof(struct ocfs2_xattr_header) -
(num_entries * sizeof(struct ocfs2_xattr_entry)) -
OCFS2_XATTR_HEADER_GAP;
if (free_space < 0)
return -EIO;
if (free_space < needed_space)
return -ENOSPC;
return 0;
}
static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
int type)
{
return loc->xl_ops->xlo_journal_access(handle, loc, type);
}
static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
{
loc->xl_ops->xlo_journal_dirty(handle, loc);
}
/* Give a pointer into the storage for the given offset */
static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
{
BUG_ON(offset >= loc->xl_size);
return loc->xl_ops->xlo_offset_pointer(loc, offset);
}
/*
* Wipe the name+value pair and allow the storage to reclaim it. This
* must be followed by either removal of the entry or a call to
* ocfs2_xa_add_namevalue().
*/
static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
loc->xl_ops->xlo_wipe_namevalue(loc);
}
/*
* Find lowest offset to a name+value pair. This is the start of our
* downward-growing free space.
*/
static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
return loc->xl_ops->xlo_get_free_start(loc);
}
/* Can we reuse loc->xl_entry for xi? */
static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
return loc->xl_ops->xlo_can_reuse(loc, xi);
}
/* How much free space is needed to set the new value */
static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
return loc->xl_ops->xlo_check_space(loc, xi);
}
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
loc->xl_ops->xlo_add_entry(loc, name_hash);
loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
/*
* We can't leave the new entry's xe_name_offset at zero or
* add_namevalue() will go nuts. We set it to the size of our
* storage so that it can never be less than any other entry.
*/
loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
int size = namevalue_size_xi(xi);
int nameval_offset;
char *nameval_buf;
loc->xl_ops->xlo_add_namevalue(loc, size);
loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
loc->xl_entry->xe_name_len = xi->xi_name_len;
ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
ocfs2_xattr_set_local(loc->xl_entry,
xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
memset(nameval_buf, 0, size);
memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
}
static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_value_buf *vb)
{
int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
/* Value bufs are for value trees */
BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
BUG_ON(namevalue_size_xe(loc->xl_entry) !=
(name_size + OCFS2_XATTR_ROOT_SIZE));
loc->xl_ops->xlo_fill_value_buf(loc, vb);
vb->vb_xv =
(struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
nameval_offset +
name_size);
}
static int ocfs2_xa_block_journal_access(handle_t *handle,
struct ocfs2_xa_loc *loc, int type)
{
struct buffer_head *bh = loc->xl_storage;
ocfs2_journal_access_func access;
if (loc->xl_size == (bh->b_size -
offsetof(struct ocfs2_xattr_block,
xb_attrs.xb_header)))
access = ocfs2_journal_access_xb;
else
access = ocfs2_journal_access_di;
return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
}
static void ocfs2_xa_block_journal_dirty(handle_t *handle,
struct ocfs2_xa_loc *loc)
{
struct buffer_head *bh = loc->xl_storage;
ocfs2_journal_dirty(handle, bh);
}
static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
int offset)
{
return (char *)loc->xl_header + offset;
}
static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
/*
* Block storage is strict. If the sizes aren't exact, we will
* remove the old one and reinsert the new.
*/
return namevalue_size_xe(loc->xl_entry) ==
namevalue_size_xi(xi);
}
static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
{
struct ocfs2_xattr_header *xh = loc->xl_header;
int i, count = le16_to_cpu(xh->xh_count);
int offset, free_start = loc->xl_size;
for (i = 0; i < count; i++) {
offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
if (offset < free_start)
free_start = offset;
}
return free_start;
}
static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
int count = le16_to_cpu(loc->xl_header->xh_count);
int free_start = ocfs2_xa_get_free_start(loc);
int needed_space = ocfs2_xi_entry_usage(xi);
/*
* Block storage will reclaim the original entry before inserting
* the new value, so we only need the difference. If the new
* entry is smaller than the old one, we don't need anything.
*/
if (loc->xl_entry) {
/* Don't need space if we're reusing! */
if (ocfs2_xa_can_reuse_entry(loc, xi))
needed_space = 0;
else
needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
}
if (needed_space < 0)
needed_space = 0;
return ocfs2_xa_check_space_helper(needed_space, free_start, count);
}
/*
* Block storage for xattrs keeps the name+value pairs compacted. When
* we remove one, we have to shift any that preceded it towards the end.
*/
static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
int i, offset;
int namevalue_offset, first_namevalue_offset, namevalue_size;
struct ocfs2_xattr_entry *entry = loc->xl_entry;
struct ocfs2_xattr_header *xh = loc->xl_header;
int count = le16_to_cpu(xh->xh_count);
namevalue_offset = le16_to_cpu(entry->xe_name_offset);
namevalue_size = namevalue_size_xe(entry);
first_namevalue_offset = ocfs2_xa_get_free_start(loc);
/* Shift the name+value pairs */
memmove((char *)xh + first_namevalue_offset + namevalue_size,
(char *)xh + first_namevalue_offset,
namevalue_offset - first_namevalue_offset);
memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
/* Now tell xh->xh_entries about it */
for (i = 0; i < count; i++) {
offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
if (offset <= namevalue_offset)
le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
namevalue_size);
}
/*
* Note that we don't update xh_free_start or xh_name_value_len
* because they're not used in block-stored xattrs.
*/
}
static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
int count = le16_to_cpu(loc->xl_header->xh_count);
loc->xl_entry = &(loc->xl_header->xh_entries[count]);
le16_add_cpu(&loc->xl_header->xh_count, 1);
memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
}
static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
{
int free_start = ocfs2_xa_get_free_start(loc);
loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
}
static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_value_buf *vb)
{
struct buffer_head *bh = loc->xl_storage;
if (loc->xl_size == (bh->b_size -
offsetof(struct ocfs2_xattr_block,
xb_attrs.xb_header)))
vb->vb_access = ocfs2_journal_access_xb;
else
vb->vb_access = ocfs2_journal_access_di;
vb->vb_bh = bh;
}
/*
* Operations for xattrs stored in blocks. This includes inline inode
* storage and unindexed ocfs2_xattr_blocks.
*/
static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
.xlo_journal_access = ocfs2_xa_block_journal_access,
.xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
.xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
.xlo_check_space = ocfs2_xa_block_check_space,
.xlo_can_reuse = ocfs2_xa_block_can_reuse,
.xlo_get_free_start = ocfs2_xa_block_get_free_start,
.xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
.xlo_add_entry = ocfs2_xa_block_add_entry,
.xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
.xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
};
static int ocfs2_xa_bucket_journal_access(handle_t *handle,
struct ocfs2_xa_loc *loc, int type)
{
struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
}
static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
struct ocfs2_xa_loc *loc)
{
struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
ocfs2_xattr_bucket_journal_dirty(handle, bucket);
}
static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
int offset)
{
struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
int block, block_offset;
/* The header is at the front of the bucket */
block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
return bucket_block(bucket, block) + block_offset;
}
static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
return namevalue_size_xe(loc->xl_entry) >=
namevalue_size_xi(xi);
}
static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
{
struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
}
static int ocfs2_bucket_align_free_start(struct super_block *sb,
int free_start, int size)
{
/*
* We need to make sure that the name+value pair fits within
* one block.
*/
if (((free_start - size) >> sb->s_blocksize_bits) !=
((free_start - 1) >> sb->s_blocksize_bits))
free_start -= free_start % sb->s_blocksize;
return free_start;
}
static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
int rc;
int count = le16_to_cpu(loc->xl_header->xh_count);
int free_start = ocfs2_xa_get_free_start(loc);
int needed_space = ocfs2_xi_entry_usage(xi);
int size = namevalue_size_xi(xi);
struct super_block *sb = loc->xl_inode->i_sb;
/*
* Bucket storage does not reclaim name+value pairs it cannot
* reuse. They live as holes until the bucket fills, and then
* the bucket is defragmented. However, the bucket can reclaim
* the ocfs2_xattr_entry.
*/
if (loc->xl_entry) {
/* Don't need space if we're reusing! */
if (ocfs2_xa_can_reuse_entry(loc, xi))
needed_space = 0;
else
needed_space -= sizeof(struct ocfs2_xattr_entry);
}
BUG_ON(needed_space < 0);
if (free_start < size) {
if (needed_space)
return -ENOSPC;
} else {
/*
* First we check if it would fit in the first place.
* Below, we align the free start to a block. This may
* slide us below the minimum gap. By checking unaligned
* first, we avoid that error.
*/
rc = ocfs2_xa_check_space_helper(needed_space, free_start,
count);
if (rc)
return rc;
free_start = ocfs2_bucket_align_free_start(sb, free_start,
size);
}
return ocfs2_xa_check_space_helper(needed_space, free_start, count);
}
static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
le16_add_cpu(&loc->xl_header->xh_name_value_len,
-namevalue_size_xe(loc->xl_entry));
}
static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
struct ocfs2_xattr_header *xh = loc->xl_header;
int count = le16_to_cpu(xh->xh_count);
int low = 0, high = count - 1, tmp;
struct ocfs2_xattr_entry *tmp_xe;
/*
* We keep buckets sorted by name_hash, so we need to find
* our insert place.
*/
while (low <= high && count) {
tmp = (low + high) / 2;
tmp_xe = &xh->xh_entries[tmp];
if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
low = tmp + 1;
else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
high = tmp - 1;
else {
low = tmp;
break;
}
}
if (low != count)
memmove(&xh->xh_entries[low + 1],
&xh->xh_entries[low],
((count - low) * sizeof(struct ocfs2_xattr_entry)));
le16_add_cpu(&xh->xh_count, 1);
loc->xl_entry = &xh->xh_entries[low];
memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
}
static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
{
int free_start = ocfs2_xa_get_free_start(loc);
struct ocfs2_xattr_header *xh = loc->xl_header;
struct super_block *sb = loc->xl_inode->i_sb;
int nameval_offset;
free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
nameval_offset = free_start - size;
loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
xh->xh_free_start = cpu_to_le16(nameval_offset);
le16_add_cpu(&xh->xh_name_value_len, size);
}
static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_value_buf *vb)
{
struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
struct super_block *sb = loc->xl_inode->i_sb;
int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
int size = namevalue_size_xe(loc->xl_entry);
int block_offset = nameval_offset >> sb->s_blocksize_bits;
/* Values are not allowed to straddle block boundaries */
BUG_ON(block_offset !=
((nameval_offset + size - 1) >> sb->s_blocksize_bits));
/* We expect the bucket to be filled in */
BUG_ON(!bucket->bu_bhs[block_offset]);
vb->vb_access = ocfs2_journal_access;
vb->vb_bh = bucket->bu_bhs[block_offset];
}
/* Operations for xattrs stored in buckets. */
static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
.xlo_journal_access = ocfs2_xa_bucket_journal_access,
.xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
.xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
.xlo_check_space = ocfs2_xa_bucket_check_space,
.xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
.xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
.xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
.xlo_add_entry = ocfs2_xa_bucket_add_entry,
.xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
.xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
};
static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
{
struct ocfs2_xattr_value_buf vb;
if (ocfs2_xattr_is_local(loc->xl_entry))
return 0;
ocfs2_xa_fill_value_buf(loc, &vb);
return le32_to_cpu(vb.vb_xv->xr_clusters);
}
static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int trunc_rc, access_rc;
struct ocfs2_xattr_value_buf vb;
ocfs2_xa_fill_value_buf(loc, &vb);
trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
ctxt);
/*
* The caller of ocfs2_xa_value_truncate() has already called
	 * ocfs2_xa_journal_access on the loc. However, the truncate code
* calls ocfs2_extend_trans(). This may commit the previous
* transaction and open a new one. If this is a bucket, truncate
* could leave only vb->vb_bh set up for journaling. Meanwhile,
* the caller is expecting to dirty the entire bucket. So we must
* reset the journal work. We do this even if truncate has failed,
* as it could have failed after committing the extend.
*/
access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
OCFS2_JOURNAL_ACCESS_WRITE);
/* Errors in truncate take precedence */
return trunc_rc ? trunc_rc : access_rc;
}
static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
{
int index, count;
struct ocfs2_xattr_header *xh = loc->xl_header;
struct ocfs2_xattr_entry *entry = loc->xl_entry;
ocfs2_xa_wipe_namevalue(loc);
loc->xl_entry = NULL;
le16_add_cpu(&xh->xh_count, -1);
count = le16_to_cpu(xh->xh_count);
/*
* Only zero out the entry if there are more remaining. This is
* important for an empty bucket, as it keeps track of the
* bucket's hash value. It doesn't hurt empty block storage.
*/
if (count) {
index = ((char *)entry - (char *)&xh->xh_entries) /
sizeof(struct ocfs2_xattr_entry);
memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
(count - index) * sizeof(struct ocfs2_xattr_entry));
memset(&xh->xh_entries[count], 0,
sizeof(struct ocfs2_xattr_entry));
}
}
/*
* If we have a problem adjusting the size of an external value during
* ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
* in an intermediate state. For example, the value may be partially
* truncated.
*
* If the value tree hasn't changed, the extend/truncate went nowhere.
* We have nothing to do. The caller can treat it as a straight error.
*
* If the value tree got partially truncated, we now have a corrupted
* extended attribute. We're going to wipe its entry and leak the
* clusters. Better to leak some storage than leave a corrupt entry.
*
* If the value tree grew, it obviously didn't grow enough for the
* new entry. We're not going to try and reclaim those clusters either.
* If there was already an external value there (orig_clusters != 0),
* the new clusters are attached safely and we can just leave the old
* value in place. If there was no external value there, we remove
* the entry.
*
* This way, the xattr block we store in the journal will be consistent.
* If the size change broke because of the journal, no changes will hit
* disk anyway.
*/
static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
const char *what,
unsigned int orig_clusters)
{
unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
char *nameval_buf = ocfs2_xa_offset_pointer(loc,
le16_to_cpu(loc->xl_entry->xe_name_offset));
if (new_clusters < orig_clusters) {
mlog(ML_ERROR,
"Partial truncate while %s xattr %.*s. Leaking "
"%u clusters and removing the entry\n",
what, loc->xl_entry->xe_name_len, nameval_buf,
orig_clusters - new_clusters);
ocfs2_xa_remove_entry(loc);
} else if (!orig_clusters) {
mlog(ML_ERROR,
"Unable to allocate an external value for xattr "
"%.*s safely. Leaking %u clusters and removing the "
"entry\n",
loc->xl_entry->xe_name_len, nameval_buf,
new_clusters - orig_clusters);
ocfs2_xa_remove_entry(loc);
} else if (new_clusters > orig_clusters)
mlog(ML_ERROR,
"Unable to grow xattr %.*s safely. %u new clusters "
"have been added, but the value will not be "
"modified\n",
loc->xl_entry->xe_name_len, nameval_buf,
new_clusters - orig_clusters);
}
static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int rc = 0;
unsigned int orig_clusters;
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
/*
* Since this is remove, we can return 0 if
* ocfs2_xa_cleanup_value_truncate() is going to
* wipe the entry anyway. So we check the
* cluster count as well.
*/
if (orig_clusters != ocfs2_xa_value_clusters(loc))
rc = 0;
ocfs2_xa_cleanup_value_truncate(loc, "removing",
orig_clusters);
if (rc)
goto out;
}
}
ocfs2_xa_remove_entry(loc);
out:
return rc;
}
static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
{
int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
char *nameval_buf;
nameval_buf = ocfs2_xa_offset_pointer(loc,
le16_to_cpu(loc->xl_entry->xe_name_offset));
memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
}
/*
* Take an existing entry and make it ready for the new value. This
* won't allocate space, but it may free space. It should be ready for
* ocfs2_xa_prepare_entry() to finish the work.
*/
static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int rc = 0;
int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
unsigned int orig_clusters;
char *nameval_buf;
int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
name_size);
nameval_buf = ocfs2_xa_offset_pointer(loc,
le16_to_cpu(loc->xl_entry->xe_name_offset));
if (xe_local) {
memset(nameval_buf + name_size, 0,
namevalue_size_xe(loc->xl_entry) - name_size);
if (!xi_local)
ocfs2_xa_install_value_root(loc);
} else {
orig_clusters = ocfs2_xa_value_clusters(loc);
if (xi_local) {
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc < 0)
mlog_errno(rc);
else
memset(nameval_buf + name_size, 0,
namevalue_size_xe(loc->xl_entry) -
name_size);
} else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
xi->xi_value_len) {
rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
ctxt);
if (rc < 0)
mlog_errno(rc);
}
if (rc) {
ocfs2_xa_cleanup_value_truncate(loc, "reusing",
orig_clusters);
goto out;
}
}
loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
ocfs2_xattr_set_local(loc->xl_entry, xi_local);
out:
return rc;
}
/*
* Prepares loc->xl_entry to receive the new xattr. This includes
* properly setting up the name+value pair region. If loc->xl_entry
* already exists, it will take care of modifying it appropriately.
*
* Note that this modifies the data. You did journal_access already,
* right?
*/
static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi,
u32 name_hash,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int rc = 0;
unsigned int orig_clusters;
__le64 orig_value_size = 0;
rc = ocfs2_xa_check_space(loc, xi);
if (rc)
goto out;
if (loc->xl_entry) {
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
orig_value_size = loc->xl_entry->xe_value_size;
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
if (rc)
goto out;
goto alloc_value;
}
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
ocfs2_xa_cleanup_value_truncate(loc,
"overwriting",
orig_clusters);
goto out;
}
}
ocfs2_xa_wipe_namevalue(loc);
} else
ocfs2_xa_add_entry(loc, name_hash);
/*
* If we get here, we have a blank entry. Fill it. We grow our
* name+value pair back from the end.
*/
ocfs2_xa_add_namevalue(loc, xi);
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
ocfs2_xa_install_value_root(loc);
alloc_value:
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
if (rc < 0) {
ctxt->set_abort = 1;
ocfs2_xa_cleanup_value_truncate(loc, "growing",
orig_clusters);
/*
* If we were growing an existing value,
* ocfs2_xa_cleanup_value_truncate() won't remove
* the entry. We need to restore the original value
* size.
*/
if (loc->xl_entry) {
BUG_ON(!orig_value_size);
loc->xl_entry->xe_value_size = orig_value_size;
}
mlog_errno(rc);
}
}
out:
return rc;
}
/*
* Store the value portion of the name+value pair. This will skip
* values that are stored externally. Their tree roots were set up
* by ocfs2_xa_prepare_entry().
*/
static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int rc = 0;
int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
char *nameval_buf;
struct ocfs2_xattr_value_buf vb;
nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
ocfs2_xa_fill_value_buf(loc, &vb);
rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
ctxt->handle, &vb,
xi->xi_value,
xi->xi_value_len);
} else
memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
return rc;
}
static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
xi->xi_name_len);
ret = ocfs2_xa_journal_access(ctxt->handle, loc,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* From here on out, everything is going to modify the buffer a
* little. Errors are going to leave the xattr header in a
* sane state. Thus, even with errors we dirty the sucker.
*/
/* Don't worry, we are never called with !xi_value and !xl_entry */
if (!xi->xi_value) {
ret = ocfs2_xa_remove(loc, ctxt);
goto out_dirty;
}
ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out_dirty;
}
ret = ocfs2_xa_store_value(loc, xi, ctxt);
if (ret)
mlog_errno(ret);
out_dirty:
ocfs2_xa_journal_dirty(ctxt->handle, loc);
out:
return ret;
}
static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
struct inode *inode,
struct buffer_head *bh,
struct ocfs2_xattr_entry *entry)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
loc->xl_inode = inode;
loc->xl_ops = &ocfs2_xa_block_loc_ops;
loc->xl_storage = bh;
loc->xl_entry = entry;
loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
loc->xl_header =
(struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
loc->xl_size);
}
static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
struct inode *inode,
struct buffer_head *bh,
struct ocfs2_xattr_entry *entry)
{
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)bh->b_data;
BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
loc->xl_inode = inode;
loc->xl_ops = &ocfs2_xa_block_loc_ops;
loc->xl_storage = bh;
loc->xl_header = &(xb->xb_attrs.xb_header);
loc->xl_entry = entry;
loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
xb_attrs.xb_header);
}
static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_bucket *bucket,
struct ocfs2_xattr_entry *entry)
{
loc->xl_inode = bucket->bu_inode;
loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
loc->xl_storage = bucket;
loc->xl_header = bucket_xh(bucket);
loc->xl_entry = entry;
loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
}
/*
 * When removing an xattr whose value is stored outside and refcounted,
 * we may have to split the refcount tree, so we need the allocators.
*/
static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
struct ocfs2_xattr_value_root *xv,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_alloc_context **meta_ac,
int *ref_credits)
{
int ret, meta_add = 0;
u32 p_cluster, num_clusters;
unsigned int ext_flags;
*ref_credits = 0;
ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
&num_clusters,
&xv->xr_list,
&ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
ref_root_bh, xv,
&meta_add, ref_credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
meta_add, meta_ac);
if (ret)
mlog_errno(ret);
out:
return ret;
}
static int ocfs2_remove_value_outside(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_xattr_header *header,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh)
{
int ret = 0, i, ref_credits;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
void *val;
ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
if (ocfs2_xattr_is_local(entry))
continue;
val = (void *)header +
le16_to_cpu(entry->xe_name_offset);
vb->vb_xv = (struct ocfs2_xattr_value_root *)
(val + OCFS2_XATTR_SIZE(entry->xe_name_len));
		ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
							 ref_ci, ref_root_bh,
							 &ctxt.meta_ac,
							 &ref_credits);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
ctxt.handle = ocfs2_start_trans(osb, ref_credits +
ocfs2_remove_extent_credits(osb->sb));
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
break;
}
ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
ocfs2_commit_trans(osb, ctxt.handle);
if (ctxt.meta_ac) {
ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt.meta_ac = NULL;
}
if (ret < 0) {
mlog_errno(ret);
break;
}
}
if (ctxt.meta_ac)
ocfs2_free_alloc_context(ctxt.meta_ac);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
return ret;
}
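/*
 * Free the externally stored values of all xattrs kept in the inode's
 * inline area.
 */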
static int ocfs2_xattr_ibody_remove(struct inode *inode,
struct buffer_head *di_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_xattr_header *header;
int ret;
struct ocfs2_xattr_value_buf vb = {
.vb_bh = di_bh,
.vb_access = ocfs2_journal_access_di,
};
header = (struct ocfs2_xattr_header *)
((void *)di + inode->i_sb->s_blocksize -
le16_to_cpu(di->i_xattr_inline_size));
ret = ocfs2_remove_value_outside(inode, &vb, header,
ref_ci, ref_root_bh);
return ret;
}
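/*
 * Per-iteration arguments handed to ocfs2_rm_xattr_cluster() when
 * walking an indexed xattr tree for removal.
 */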
struct ocfs2_rm_xattr_bucket_para {
struct ocfs2_caching_info *ref_ci;
struct buffer_head *ref_root_bh;
};
static int ocfs2_xattr_block_remove(struct inode *inode,
struct buffer_head *blk_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh)
{
struct ocfs2_xattr_block *xb;
int ret = 0;
struct ocfs2_xattr_value_buf vb = {
.vb_bh = blk_bh,
.vb_access = ocfs2_journal_access_xb,
};
struct ocfs2_rm_xattr_bucket_para args = {
.ref_ci = ref_ci,
.ref_root_bh = ref_root_bh,
};
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
ret = ocfs2_remove_value_outside(inode, &vb, header,
ref_ci, ref_root_bh);
} else
ret = ocfs2_iterate_xattr_index_block(inode,
blk_bh,
ocfs2_rm_xattr_cluster,
&args);
return ret;
}
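/*
 * Remove all xattrs stored in the external block, then return the block
 * itself to its suballocator.
 */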
static int ocfs2_xattr_free_block(struct inode *inode,
u64 block,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh)
{
struct inode *xb_alloc_inode;
struct buffer_head *xb_alloc_bh = NULL;
struct buffer_head *blk_bh = NULL;
struct ocfs2_xattr_block *xb;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle;
int ret = 0;
u64 blk, bg_blkno;
u16 bit;
ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
blk = le64_to_cpu(xb->xb_blkno);
bit = le16_to_cpu(xb->xb_suballoc_bit);
if (xb->xb_suballoc_loc)
bg_blkno = le64_to_cpu(xb->xb_suballoc_loc);
else
bg_blkno = ocfs2_which_suballoc_group(blk, bit);
xb_alloc_inode = ocfs2_get_system_file_inode(osb,
EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(xb->xb_suballoc_slot));
if (!xb_alloc_inode) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
inode_lock(xb_alloc_inode);
ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
if (ret < 0) {
mlog_errno(ret);
goto out_mutex;
}
handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock;
}
ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
bit, bg_blkno, 1);
if (ret < 0)
mlog_errno(ret);
ocfs2_commit_trans(osb, handle);
out_unlock:
ocfs2_inode_unlock(xb_alloc_inode, 1);
brelse(xb_alloc_bh);
out_mutex:
inode_unlock(xb_alloc_inode);
iput(xb_alloc_inode);
out:
brelse(blk_bh);
return ret;
}
/*
* ocfs2_xattr_remove()
*
* Free extended attribute resources associated with this inode.
*/
int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_refcount_tree *ref_tree = NULL;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_caching_info *ref_ci = NULL;
handle_t *handle;
int ret;
if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
return 0;
if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
return 0;
if (ocfs2_is_refcount_inode(inode)) {
ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
le64_to_cpu(di->i_refcount_loc),
1, &ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ref_ci = &ref_tree->rf_ci;
}
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
ret = ocfs2_xattr_ibody_remove(inode, di_bh,
ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
if (di->i_xattr_loc) {
ret = ocfs2_xattr_free_block(inode,
le64_to_cpu(di->i_xattr_loc),
ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
di->i_xattr_loc = 0;
spin_lock(&oi->ip_lock);
oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
if (ref_tree)
ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
brelse(ref_root_bh);
return ret;
}
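/*
 * Check whether the tail of the inode block (after the inline data,
 * fast symlink target, or extent list) has room for the inline xattr
 * area. Returns 1 if it fits.
 */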
static int ocfs2_xattr_has_space_inline(struct inode *inode,
struct ocfs2_dinode *di)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
int free;
if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
return 0;
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
struct ocfs2_inline_data *idata = &di->id2.i_data;
free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
} else if (ocfs2_inode_is_fast_symlink(inode)) {
free = ocfs2_fast_symlink_chars(inode->i_sb) -
le64_to_cpu(di->i_size);
} else {
struct ocfs2_extent_list *el = &di->id2.i_list;
free = (le16_to_cpu(el->l_count) -
le16_to_cpu(el->l_next_free_rec)) *
sizeof(struct ocfs2_extent_rec);
}
if (free >= xattrsize)
return 1;
return 0;
}
/*
* ocfs2_xattr_ibody_find()
*
 * Find an extended attribute in the inode block and fill the search
 * info into struct ocfs2_xattr_search.
*/
static int ocfs2_xattr_ibody_find(struct inode *inode,
int name_index,
const char *name,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
int ret;
int has_space = 0;
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
return 0;
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
down_read(&oi->ip_alloc_sem);
has_space = ocfs2_xattr_has_space_inline(inode, di);
up_read(&oi->ip_alloc_sem);
if (!has_space)
return 0;
}
xs->xattr_bh = xs->inode_bh;
xs->end = (void *)di + inode->i_sb->s_blocksize;
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
xs->header = (struct ocfs2_xattr_header *)
(xs->end - le16_to_cpu(di->i_xattr_inline_size));
else
xs->header = (struct ocfs2_xattr_header *)
(xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
xs->base = (void *)xs->header;
xs->here = xs->header->xh_entries;
/* Find the named attribute. */
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
ret = ocfs2_xattr_find_entry(name_index, name, xs);
if (ret && ret != -ENODATA)
return ret;
xs->not_found = ret;
}
return 0;
}
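/*
 * Carve the inline xattr area out of the tail of the inode block by
 * shrinking the inline data size or the extent list, then record
 * i_xattr_inline_size and set the dynamic feature flags.
 */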
static int ocfs2_xattr_ibody_init(struct inode *inode,
struct buffer_head *di_bh,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int xattrsize = osb->s_xattr_inline_size;
if (!ocfs2_xattr_has_space_inline(inode, di)) {
ret = -ENOSPC;
goto out;
}
ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
	 * Adjust the extent record count or the inline data size
	 * to reserve space for the extended attribute.
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
struct ocfs2_inline_data *idata = &di->id2.i_data;
le16_add_cpu(&idata->id_count, -xattrsize);
} else if (!(ocfs2_inode_is_fast_symlink(inode))) {
struct ocfs2_extent_list *el = &di->id2.i_list;
le16_add_cpu(&el->l_count, -(xattrsize /
sizeof(struct ocfs2_extent_rec)));
}
di->i_xattr_inline_size = cpu_to_le16(xattrsize);
spin_lock(&oi->ip_lock);
oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
ocfs2_journal_dirty(ctxt->handle, di_bh);
out:
return ret;
}
/*
* ocfs2_xattr_ibody_set()
*
 * Set, replace or remove an extended attribute in the inode block.
*
*/
static int ocfs2_xattr_ibody_set(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_xa_loc loc;
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
return -ENOSPC;
down_write(&oi->ip_alloc_sem);
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
}
ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
xs->not_found ? NULL : xs->here);
ret = ocfs2_xa_set(&loc, xi, ctxt);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
xs->here = loc.xl_entry;
out:
up_write(&oi->ip_alloc_sem);
return ret;
}
/*
* ocfs2_xattr_block_find()
*
 * Find an extended attribute in the external block and fill the search
 * info into struct ocfs2_xattr_search.
*/
static int ocfs2_xattr_block_find(struct inode *inode,
int name_index,
const char *name,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
struct buffer_head *blk_bh = NULL;
struct ocfs2_xattr_block *xb;
int ret = 0;
if (!di->i_xattr_loc)
return ret;
ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
&blk_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
xs->xattr_bh = blk_bh;
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
xs->header = &xb->xb_attrs.xb_header;
xs->base = (void *)xs->header;
xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
xs->here = xs->header->xh_entries;
ret = ocfs2_xattr_find_entry(name_index, name, xs);
} else
ret = ocfs2_xattr_index_block_find(inode, blk_bh,
name_index,
name, xs);
if (ret && ret != -ENODATA) {
xs->xattr_bh = NULL;
goto cleanup;
}
xs->not_found = ret;
return 0;
cleanup:
brelse(blk_bh);
return ret;
}
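/*
 * Allocate and initialize a new external xattr block, optionally
 * formatted as an indexed tree root, and point di->i_xattr_loc at it.
 */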
static int ocfs2_create_xattr_block(struct inode *inode,
struct buffer_head *inode_bh,
struct ocfs2_xattr_set_ctxt *ctxt,
int indexed,
struct buffer_head **ret_bh)
{
int ret;
u16 suballoc_bit_start;
u32 num_got;
u64 suballoc_loc, first_blkno;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk;
ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
&suballoc_loc, &suballoc_bit_start,
&num_got, &first_blkno);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
new_bh = sb_getblk(inode->i_sb, first_blkno);
if (!new_bh) {
ret = -ENOMEM;
mlog_errno(ret);
goto end;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
/* Initialize ocfs2_xattr_block */
xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
memset(xblk, 0, inode->i_sb->s_blocksize);
strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc);
xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
xblk->xb_fs_generation =
cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation);
xblk->xb_blkno = cpu_to_le64(first_blkno);
if (indexed) {
struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
xr->xt_clusters = cpu_to_le32(1);
xr->xt_last_eb_blk = 0;
xr->xt_list.l_tree_depth = 0;
xr->xt_list.l_count = cpu_to_le16(
ocfs2_xattr_recs_per_xb(inode->i_sb));
xr->xt_list.l_next_free_rec = cpu_to_le16(1);
xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
}
ocfs2_journal_dirty(ctxt->handle, new_bh);
/* Add it to the inode */
di->i_xattr_loc = cpu_to_le64(first_blkno);
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
spin_unlock(&OCFS2_I(inode)->ip_lock);
ocfs2_journal_dirty(ctxt->handle, inode_bh);
*ret_bh = new_bh;
new_bh = NULL;
end:
brelse(new_bh);
return ret;
}
/*
* ocfs2_xattr_block_set()
*
 * Set, replace or remove an extended attribute in an external block.
*
*/
static int ocfs2_xattr_block_set(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk = NULL;
int ret;
struct ocfs2_xa_loc loc;
if (!xs->xattr_bh) {
ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
0, &new_bh);
if (ret) {
mlog_errno(ret);
goto end;
}
xs->xattr_bh = new_bh;
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
xs->header = &xblk->xb_attrs.xb_header;
xs->base = (void *)xs->header;
xs->end = (void *)xblk + inode->i_sb->s_blocksize;
xs->here = xs->header->xh_entries;
} else
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
xs->not_found ? NULL : xs->here);
ret = ocfs2_xa_set(&loc, xi, ctxt);
if (!ret)
xs->here = loc.xl_entry;
else if ((ret != -ENOSPC) || ctxt->set_abort)
goto end;
else {
ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
if (ret)
goto end;
}
}
if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
end:
return ret;
}
/* Check whether the new xattr can be inserted into the inode. */
static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_xattr_entry *last;
int free, i;
size_t min_offs = xs->end - xs->base;
if (!xs->header)
return 0;
last = xs->header->xh_entries;
for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
size_t offs = le16_to_cpu(last->xe_name_offset);
if (offs < min_offs)
min_offs = offs;
last += 1;
}
free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
if (free < 0)
return 0;
BUG_ON(!xs->not_found);
if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
return 1;
return 0;
}
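/*
 * Estimate what an xattr set will consume: data clusters for an outside
 * value, metadata blocks for growing value trees or the index tree, and
 * journal credits for everything we may dirty. The caller uses these
 * numbers to reserve allocators and start the transaction.
 */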
static int ocfs2_calc_xattr_set_need(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
int *clusters_need,
int *meta_need,
int *credits_need)
{
int ret = 0, old_in_xb = 0;
int clusters_add = 0, meta_add = 0, credits = 0;
struct buffer_head *bh = NULL;
struct ocfs2_xattr_block *xb = NULL;
struct ocfs2_xattr_entry *xe = NULL;
struct ocfs2_xattr_value_root *xv = NULL;
char *base = NULL;
int name_offset, name_len = 0;
u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
xi->xi_value_len);
u64 value_size;
/*
* Calculate the clusters we need to write.
	 * Whether we replace an old xattr or add a new one, we need
	 * credits to write the value out.
*/
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
credits += new_clusters *
ocfs2_clusters_to_blocks(inode->i_sb, 1);
if (xis->not_found && xbs->not_found) {
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
clusters_add += new_clusters;
credits += ocfs2_calc_extend_credits(inode->i_sb,
&def_xv.xv.xr_list);
}
goto meta_guess;
}
if (!xis->not_found) {
xe = xis->here;
name_offset = le16_to_cpu(xe->xe_name_offset);
name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
base = xis->base;
credits += OCFS2_INODE_UPDATE_CREDITS;
} else {
int i, block_off = 0;
xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
xe = xbs->here;
name_offset = le16_to_cpu(xe->xe_name_offset);
name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
i = xbs->here - xbs->header->xh_entries;
old_in_xb = 1;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(xbs->bucket),
i, &block_off,
&name_offset);
base = bucket_block(xbs->bucket, block_off);
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
} else {
base = xbs->base;
credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
}
}
/*
	 * Deleting an xattr doesn't need metadata or cluster allocation,
	 * so just calculate the credits and return.
*
* The credits for removing the value tree will be extended
* by ocfs2_remove_extent itself.
*/
if (!xi->xi_value) {
if (!ocfs2_xattr_is_local(xe))
credits += ocfs2_remove_extent_credits(inode->i_sb);
goto out;
}
/* do cluster allocation guess first. */
value_size = le64_to_cpu(xe->xe_value_size);
if (old_in_xb) {
/*
		 * During an xattr set we always try to store the entry in
		 * the inode first. If it fits there, the old entry is
		 * removed from the xattr block and the xattr is inserted
		 * into the inode as a new entry.
*/
if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
clusters_add += new_clusters;
credits += ocfs2_remove_extent_credits(inode->i_sb) +
OCFS2_INODE_UPDATE_CREDITS;
if (!ocfs2_xattr_is_local(xe))
credits += ocfs2_calc_extend_credits(
inode->i_sb,
&def_xv.xv.xr_list);
goto out;
}
}
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
		/* The new value will be stored outside. */
u32 old_clusters = 0;
if (!ocfs2_xattr_is_local(xe)) {
old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
value_size);
xv = (struct ocfs2_xattr_value_root *)
(base + name_offset + name_len);
value_size = OCFS2_XATTR_ROOT_SIZE;
} else
xv = &def_xv.xv;
if (old_clusters >= new_clusters) {
credits += ocfs2_remove_extent_credits(inode->i_sb);
goto out;
} else {
meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
clusters_add += new_clusters - old_clusters;
credits += ocfs2_calc_extend_credits(inode->i_sb,
&xv->xr_list);
if (value_size >= OCFS2_XATTR_ROOT_SIZE)
goto out;
}
} else {
/*
		 * Now the new value will be stored inline. If the new
		 * value is no larger than the value root or the old
		 * value, we don't need any allocation; otherwise we have
		 * to guess the metadata allocation.
*/
if ((ocfs2_xattr_is_local(xe) &&
(value_size >= xi->xi_value_len)) ||
(!ocfs2_xattr_is_local(xe) &&
OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
goto out;
}
meta_guess:
/* calculate metadata allocation. */
if (di->i_xattr_loc) {
if (!xbs->xattr_bh) {
ret = ocfs2_read_xattr_block(inode,
le64_to_cpu(di->i_xattr_loc),
&bh);
if (ret) {
mlog_errno(ret);
goto out;
}
xb = (struct ocfs2_xattr_block *)bh->b_data;
} else
xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
/*
		 * If there is already an xattr tree, good, we can calculate
		 * like other b-trees. Otherwise we may have to create a
		 * tree; the credit calculation is borrowed from
		 * ocfs2_calc_extend_credits() with root_el = NULL. The new
		 * tree will be cluster based, so no metadata is needed.
*/
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
struct ocfs2_extent_list *el =
&xb->xb_attrs.xb_root.xt_list;
meta_add += ocfs2_extend_meta_needed(el);
credits += ocfs2_calc_extend_credits(inode->i_sb,
el);
} else
credits += OCFS2_SUBALLOC_ALLOC + 1;
/*
		 * This cluster will be used either for a new bucket or for
		 * a new xattr block.
		 * If the cluster size is the same as the bucket size, one
		 * more cluster is needed since we may also have to extend
		 * the bucket.
*/
clusters_add += 1;
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
if (OCFS2_XATTR_BUCKET_SIZE ==
OCFS2_SB(inode->i_sb)->s_clustersize) {
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
clusters_add += 1;
}
} else {
credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
struct ocfs2_extent_list *el = &def_xv.xv.xr_list;
meta_add += ocfs2_extend_meta_needed(el);
credits += ocfs2_calc_extend_credits(inode->i_sb,
el);
} else {
meta_add += 1;
}
}
out:
if (clusters_need)
*clusters_need = clusters_add;
if (meta_need)
*meta_need = meta_add;
if (credits_need)
*credits_need = credits;
brelse(bh);
return ret;
}
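/*
 * Prepare the set context: compute the clusters, metadata and credits
 * the operation needs, then reserve the metadata and cluster
 * allocators. On error, a reserved metadata allocator is released.
 */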
static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
struct ocfs2_xattr_set_ctxt *ctxt,
int extra_meta,
int *credits)
{
int clusters_add, meta_add, ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));
ocfs2_init_dealloc_ctxt(&ctxt->dealloc);
ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
&clusters_add, &meta_add, credits);
if (ret) {
mlog_errno(ret);
return ret;
}
meta_add += extra_meta;
trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
&ctxt->meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (clusters_add) {
ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
if (ret)
mlog_errno(ret);
}
out:
if (ret) {
if (ctxt->meta_ac) {
ocfs2_free_alloc_context(ctxt->meta_ac);
ctxt->meta_ac = NULL;
}
/*
		 * We cannot have an error and a non-NULL ctxt->data_ac.
*/
}
return ret;
}
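/*
 * Do the actual set inside the caller's transaction. The inode body is
 * preferred; on -ENOSPC we fall back to the external block. When the
 * attribute moves between the two, the stale copy is removed after
 * extending the transaction with the credits that removal needs.
 */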
static int __ocfs2_xattr_set_handle(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret = 0, credits, old_found;
if (!xi->xi_value) {
/* Remove existing extended attribute */
if (!xis->not_found)
ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
else if (!xbs->not_found)
ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
} else {
		/* We always try to set the extended attribute in the inode first */
ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
if (!ret && !xbs->not_found) {
/*
			 * If that succeeded and the extended attribute exists
			 * in the external block, remove it from there.
*/
xi->xi_value = NULL;
xi->xi_value_len = 0;
old_found = xis->not_found;
xis->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
xi,
xis,
xbs,
NULL,
NULL,
&credits);
xis->not_found = old_found;
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
} else if ((ret == -ENOSPC) && !ctxt->set_abort) {
if (di->i_xattr_loc && !xbs->xattr_bh) {
ret = ocfs2_xattr_block_find(inode,
xi->xi_name_index,
xi->xi_name, xbs);
if (ret)
goto out;
old_found = xis->not_found;
xis->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
xi,
xis,
xbs,
NULL,
NULL,
&credits);
xis->not_found = old_found;
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
			 * If there is no space in the inode, set the extended
			 * attribute in the external block.
*/
ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
if (ret)
goto out;
if (!xis->not_found) {
/*
				 * If that succeeded and the extended attribute
				 * exists in the inode, remove it from there.
*/
xi->xi_value = NULL;
xi->xi_value_len = 0;
xbs->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
xi,
xis,
xbs,
NULL,
NULL,
&credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_ibody_set(inode, xi,
xis, ctxt);
}
}
}
if (!ret) {
/* Update inode ctime. */
ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
xis->inode_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
inode_set_ctime_current(inode);
di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
}
out:
return ret;
}
/*
* This function is only called during inode creation, to initialize
* the security/ACL xattrs of the new inode.
* All transaction credits have been reserved in mknod.
*/
int ocfs2_xattr_set_handle(handle_t *handle,
struct inode *inode,
struct buffer_head *di_bh,
int name_index,
const char *name,
const void *value,
size_t value_len,
int flags,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac)
{
struct ocfs2_dinode *di;
int ret;
struct ocfs2_xattr_info xi = {
.xi_name_index = name_index,
.xi_name = name,
.xi_name_len = strlen(name),
.xi_value = value,
.xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
.not_found = -ENODATA,
};
struct ocfs2_xattr_search xbs = {
.not_found = -ENODATA,
};
struct ocfs2_xattr_set_ctxt ctxt = {
.handle = handle,
.meta_ac = meta_ac,
.data_ac = data_ac,
};
if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
/*
* In extreme cases, an xattr bucket may be needed when the
* block size is too small. The credits for the bucket have
* already been reserved in mknod.
*/
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
xbs.bucket = ocfs2_xattr_bucket_new(inode);
if (!xbs.bucket) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
}
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;
down_write(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
if (ret)
goto cleanup;
if (xis.not_found) {
ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
if (ret)
goto cleanup;
}
ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
cleanup:
up_write(&OCFS2_I(inode)->ip_xattr_sem);
brelse(xbs.xattr_bh);
ocfs2_xattr_bucket_free(xbs.bucket);
return ret;
}
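/*
* A minimal, hypothetical usage sketch (si, xattr_ac and data_ac are
* assumed to have been prepared by the caller, e.g. during mknod):
*
*	ret = ocfs2_xattr_set_handle(handle, inode, di_bh,
*				     OCFS2_XATTR_INDEX_SECURITY,
*				     si->name, si->value, si->value_len,
*				     XATTR_CREATE, xattr_ac, data_ac);
*
* All transaction credits must already be reserved on @handle.
*/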
/*
* ocfs2_xattr_set()
*
* Set, replace or remove an extended attribute for this inode.
* value is NULL to remove an existing extended attribute, else either
* create or replace an extended attribute.
*/
int ocfs2_xattr_set(struct inode *inode,
int name_index,
const char *name,
const void *value,
size_t value_len,
int flags)
{
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_lock_holder oh;
struct ocfs2_xattr_info xi = {
.xi_name_index = name_index,
.xi_name = name,
.xi_name_len = strlen(name),
.xi_value = value,
.xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
.not_found = -ENODATA,
};
struct ocfs2_xattr_search xbs = {
.not_found = -ENODATA,
};
if (!ocfs2_supports_xattr(osb))
return -EOPNOTSUPP;
/*
* Only xbs will be used on indexed trees. xis doesn't need a
* bucket.
*/
xbs.bucket = ocfs2_xattr_bucket_new(inode);
if (!xbs.bucket) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
if (had_lock < 0) {
ret = had_lock;
mlog_errno(ret);
goto cleanup_nolock;
}
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;
down_write(&OCFS2_I(inode)->ip_xattr_sem);
/*
* Scan the inode and the external block for an extended
* attribute with the same name and collect the search information.
*/
ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
if (ret)
goto cleanup;
if (xis.not_found) {
ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
if (ret)
goto cleanup;
}
if (xis.not_found && xbs.not_found) {
ret = -ENODATA;
if (flags & XATTR_REPLACE)
goto cleanup;
ret = 0;
if (!value)
goto cleanup;
} else {
ret = -EEXIST;
if (flags & XATTR_CREATE)
goto cleanup;
}
/* Check whether the value is refcounted and do some preparation. */
if (ocfs2_is_refcount_inode(inode) &&
(!xis.not_found || !xbs.not_found)) {
ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
&xis, &xbs, &ref_tree,
&ref_meta, &ref_credits);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
}
inode_lock(tl_inode);
if (ocfs2_truncate_log_needs_flush(osb)) {
ret = __ocfs2_flush_truncate_log(osb);
if (ret < 0) {
inode_unlock(tl_inode);
mlog_errno(ret);
goto cleanup;
}
}
inode_unlock(tl_inode);
ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
&xbs, &ctxt, ref_meta, &credits);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
/* We need to update the inode's ctime field, so add credits for it. */
credits += OCFS2_INODE_UPDATE_CREDITS;
ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
goto out_free_ac;
}
ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0);
ocfs2_commit_trans(osb, ctxt.handle);
out_free_ac:
if (ctxt.data_ac)
ocfs2_free_alloc_context(ctxt.data_ac);
if (ctxt.meta_ac)
ocfs2_free_alloc_context(ctxt.meta_ac);
if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
cleanup:
if (ref_tree)
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
up_write(&OCFS2_I(inode)->ip_xattr_sem);
if (!value && !ret) {
ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
if (ret)
mlog_errno(ret);
}
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
cleanup_nolock:
brelse(di_bh);
brelse(xbs.xattr_bh);
ocfs2_xattr_bucket_free(xbs.bucket);
return ret;
}
/*
* Find the xattr extent rec which may contain name_hash.
* e_cpos will be the first name hash of the xattr rec.
* el must be the ocfs2_xattr_block.xb_attrs.xb_root.xt_list.
*/
static int ocfs2_xattr_get_rec(struct inode *inode,
u32 name_hash,
u64 *p_blkno,
u32 *e_cpos,
u32 *num_clusters,
struct ocfs2_extent_list *el)
{
int ret = 0, i;
struct buffer_head *eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_rec *rec = NULL;
u64 e_blkno = 0;
if (el->l_tree_depth) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
&eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ret = ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in xattr tree block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
goto out;
}
}
for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
rec = &el->l_recs[i];
if (le32_to_cpu(rec->e_cpos) <= name_hash) {
e_blkno = le64_to_cpu(rec->e_blkno);
break;
}
}
if (!e_blkno) {
ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
inode->i_ino,
le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
goto out;
}
*p_blkno = le64_to_cpu(rec->e_blkno);
*num_clusters = le16_to_cpu(rec->e_leaf_clusters);
if (e_cpos)
*e_cpos = le32_to_cpu(rec->e_cpos);
out:
brelse(eb_bh);
return ret;
}
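/*
* For illustration, assume the leaf has records with e_cpos values
* {0, 100, 500}. A lookup with name_hash == 300 scans from the last
* record down and stops at e_cpos == 100, the largest first hash that
* is <= 300, so that record's blkno and cluster count are returned.
*/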
typedef int (xattr_bucket_func)(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para);
static int ocfs2_find_xe_in_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
int name_index,
const char *name,
u32 name_hash,
u16 *xe_index,
int *found)
{
int i, ret = 0, cmp = 1, block_off, new_offset;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
size_t name_len = strlen(name);
struct ocfs2_xattr_entry *xe = NULL;
char *xe_name;
/*
* We don't use binary search in the bucket because there
* may be multiple entries with the same name hash.
*/
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
if (name_hash > le32_to_cpu(xe->xe_name_hash))
continue;
else if (name_hash < le32_to_cpu(xe->xe_name_hash))
break;
cmp = name_index - ocfs2_xattr_get_type(xe);
if (!cmp)
cmp = name_len - xe->xe_name_len;
if (cmp)
continue;
ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
xh,
i,
&block_off,
&new_offset);
if (ret) {
mlog_errno(ret);
break;
}
xe_name = bucket_block(bucket, block_off) + new_offset;
if (!memcmp(name, xe_name, name_len)) {
*xe_index = i;
*found = 1;
ret = 0;
break;
}
}
return ret;
}
/*
* Find the specified xattr entry in a series of buckets.
* This series starts from p_blkno and lasts for num_clusters clusters.
* The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
* the number of valid buckets.
*
* Return the bucket this xattr should reside in. If the xattr's
* hash falls in the gap between 2 buckets, return the lower bucket.
*/
static int ocfs2_xattr_bucket_find(struct inode *inode,
int name_index,
const char *name,
u32 name_hash,
u64 p_blkno,
u32 first_hash,
u32 num_clusters,
struct ocfs2_xattr_search *xs)
{
int ret, found = 0;
struct ocfs2_xattr_header *xh = NULL;
struct ocfs2_xattr_entry *xe = NULL;
u16 index = 0;
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
if (!search) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(search, p_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
xh = bucket_xh(search);
high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
while (low_bucket <= high_bucket) {
ocfs2_xattr_bucket_relse(search);
bucket = (low_bucket + high_bucket) / 2;
blkno = p_blkno + bucket * blk_per_bucket;
ret = ocfs2_read_xattr_bucket(search, blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
xh = bucket_xh(search);
xe = &xh->xh_entries[0];
if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
high_bucket = bucket - 1;
continue;
}
/*
* Check whether the hash of the last entry in our
* bucket is larger than the search one. For an empty
* bucket, the last one is also the first one.
*/
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
low_bucket = bucket + 1;
continue;
}
/* The searched xattr should reside in this bucket if it exists. */
ret = ocfs2_find_xe_in_bucket(inode, search,
name_index, name, name_hash,
&index, &found);
if (ret) {
mlog_errno(ret);
goto out;
}
break;
}
/*
* Record the bucket we have found.
* When the xattr's hash value is in the gap of 2 buckets, we will
* always set it to the previous bucket.
*/
if (!lower_blkno)
lower_blkno = p_blkno;
/* This should be in cache - we just read it during the search */
ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
xs->header = bucket_xh(xs->bucket);
xs->base = bucket_block(xs->bucket, 0);
xs->end = xs->base + inode->i_sb->s_blocksize;
if (found) {
xs->here = &xs->header->xh_entries[index];
trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
name, name_index, name_hash,
(unsigned long long)bucket_blkno(xs->bucket),
index);
} else
ret = -ENODATA;
out:
ocfs2_xattr_bucket_free(search);
return ret;
}
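/*
* For illustration, assume 4 buckets whose (first, last) entry hashes
* are (10, 35), (40, 69), (70, 85), (90, 99), and a search for hash 75:
* the probe of bucket 1 sees 75 > 69, so low moves past it; the probe
* of bucket 2 sees 70 <= 75 <= 85, so the entry, if it exists, must
* live in bucket 2 and ocfs2_find_xe_in_bucket() scans that bucket.
*/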
static int ocfs2_xattr_index_block_find(struct inode *inode,
struct buffer_head *root_bh,
int name_index,
const char *name,
struct ocfs2_xattr_search *xs)
{
int ret;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)root_bh->b_data;
struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
struct ocfs2_extent_list *el = &xb_root->xt_list;
u64 p_blkno = 0;
u32 first_hash, num_clusters = 0;
u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
if (le16_to_cpu(el->l_next_free_rec) == 0)
return -ENODATA;
trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
name, name_index, name_hash,
(unsigned long long)root_bh->b_blocknr,
-1);
ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
&num_clusters, el);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
name, name_index, first_hash,
(unsigned long long)p_blkno,
num_clusters);
ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
p_blkno, first_hash, num_clusters, xs);
out:
return ret;
}
static int ocfs2_iterate_xattr_buckets(struct inode *inode,
u64 blkno,
u32 clusters,
xattr_bucket_func *func,
void *para)
{
int i, ret = 0;
u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
u32 num_buckets = clusters * bpc;
struct ocfs2_xattr_bucket *bucket;
bucket = ocfs2_xattr_bucket_new(inode);
if (!bucket) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
trace_ocfs2_iterate_xattr_buckets(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)blkno, clusters);
for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
ret = ocfs2_read_xattr_bucket(bucket, blkno);
if (ret) {
mlog_errno(ret);
break;
}
/*
* The real number of buckets in this series of blocks is stored
* in the 1st bucket.
*/
if (i == 0)
num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
if (func) {
ret = func(inode, bucket, para);
if (ret && ret != -ERANGE)
mlog_errno(ret);
/* Fall through to bucket_relse() */
}
ocfs2_xattr_bucket_relse(bucket);
if (ret)
break;
}
ocfs2_xattr_bucket_free(bucket);
return ret;
}
struct ocfs2_xattr_tree_list {
char *buffer;
size_t buffer_size;
size_t result;
};
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
int *block_off,
int *new_offset)
{
u16 name_offset;
if (index < 0 || index >= le16_to_cpu(xh->xh_count))
return -EINVAL;
name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
*block_off = name_offset >> sb->s_blocksize_bits;
*new_offset = name_offset % sb->s_blocksize;
return 0;
}
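/*
* For illustration, with a 512-byte block size, an entry whose
* xe_name_offset is 1300 (relative to the bucket start) lives in
* bucket block 1300 >> 9 == 2, at offset 1300 % 512 == 276 within
* that block.
*/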
static int ocfs2_list_xattr_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para)
{
int ret = 0, type;
struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
int i, block_off, new_offset;
const char *name;
for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
type = ocfs2_xattr_get_type(entry);
ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(bucket),
i,
&block_off,
&new_offset);
if (ret)
break;
name = (const char *)bucket_block(bucket, block_off) +
new_offset;
ret = ocfs2_xattr_list_entry(inode->i_sb,
xl->buffer,
xl->buffer_size,
&xl->result,
type, name,
entry->xe_name_len);
if (ret)
break;
}
return ret;
}
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
struct buffer_head *blk_bh,
xattr_tree_rec_func *rec_func,
void *para)
{
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)blk_bh->b_data;
struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
int ret = 0;
u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
u64 p_blkno = 0;
if (!el->l_next_free_rec || !rec_func)
return 0;
while (name_hash > 0) {
ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
&e_cpos, &num_clusters, el);
if (ret) {
mlog_errno(ret);
break;
}
ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
num_clusters, para);
if (ret) {
if (ret != -ERANGE)
mlog_errno(ret);
break;
}
if (e_cpos == 0)
break;
name_hash = e_cpos - 1;
}
return ret;
}
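/*
* For illustration, with extent records at e_cpos {0, 100, 500}, the
* walk starts at name_hash == UINT_MAX and visits the e_cpos == 500
* record first, then sets name_hash to 499 and visits the e_cpos == 100
* record, then name_hash 99 for the e_cpos == 0 record, whose e_cpos of
* 0 ends the loop. Records are thus visited from the highest hash range
* down to the lowest.
*/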
static int ocfs2_list_xattr_tree_rec(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno, u32 cpos, u32 len, void *para)
{
return ocfs2_iterate_xattr_buckets(inode, blkno, len,
ocfs2_list_xattr_bucket, para);
}
static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
struct buffer_head *blk_bh,
char *buffer,
size_t buffer_size)
{
int ret;
struct ocfs2_xattr_tree_list xl = {
.buffer = buffer,
.buffer_size = buffer_size,
.result = 0,
};
ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
ocfs2_list_xattr_tree_rec, &xl);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = xl.result;
out:
return ret;
}
static int cmp_xe(const void *a, const void *b)
{
const struct ocfs2_xattr_entry *l = a, *r = b;
u32 l_hash = le32_to_cpu(l->xe_name_hash);
u32 r_hash = le32_to_cpu(r->xe_name_hash);
if (l_hash > r_hash)
return 1;
if (l_hash < r_hash)
return -1;
return 0;
}
static void swap_xe(void *a, void *b, int size)
{
struct ocfs2_xattr_entry *l = a, *r = b, tmp;
tmp = *l;
memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
}
/*
* When the ocfs2_xattr_block is filled up, a new bucket will be created
* and all the xattr entries will be moved into the new bucket.
* The header goes at the start of the bucket, and the names+values are
* filled from the end. This is why *target starts as the last buffer.
* Note: we need to sort the entries since they are not saved in order
* in the ocfs2_xattr_block.
*/
static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
struct buffer_head *xb_bh,
struct ocfs2_xattr_bucket *bucket)
{
int i, blocksize = inode->i_sb->s_blocksize;
int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
u16 offset, size, off_change;
struct ocfs2_xattr_entry *xe;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)xb_bh->b_data;
struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
u16 count = le16_to_cpu(xb_xh->xh_count);
char *src = xb_bh->b_data;
char *target = bucket_block(bucket, blks - 1);
trace_ocfs2_cp_xattr_block_to_bucket_begin(
(unsigned long long)xb_bh->b_blocknr,
(unsigned long long)bucket_blkno(bucket));
for (i = 0; i < blks; i++)
memset(bucket_block(bucket, i), 0, blocksize);
/*
* Since the xe_name_offset is based on ocfs2_xattr_header,
* there is an offset change corresponding to the change of
* ocfs2_xattr_header's position.
*/
off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
xe = &xb_xh->xh_entries[count - 1];
offset = le16_to_cpu(xe->xe_name_offset) + off_change;
size = blocksize - offset;
/* copy all the names and values. */
memcpy(target + offset, src + offset, size);
/* Init new header now. */
xh->xh_count = xb_xh->xh_count;
xh->xh_num_buckets = cpu_to_le16(1);
xh->xh_name_value_len = cpu_to_le16(size);
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);
/* copy all the entries. */
target = bucket_block(bucket, 0);
offset = offsetof(struct ocfs2_xattr_header, xh_entries);
size = count * sizeof(struct ocfs2_xattr_entry);
memcpy(target + offset, (char *)xb_xh + offset, size);
/* Change the xe offset for all the xe because of the move. */
off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
for (i = 0; i < count; i++)
le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
cmp_xe, swap_xe);
}
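/*
* For illustration, when the block size equals OCFS2_XATTR_BUCKET_SIZE
* the bucket is a single block and off_change reduces to
* offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header): every stored
* offset simply grows by the block header that no longer precedes the
* ocfs2_xattr_header, since offsets are now relative to the bucket
* start.
*/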
/*
* After we move xattrs from a block to the index btree, we have to
* update ocfs2_xattr_search to point at the new xe and base.
*
* When the entry is in an xattr block, xattr_bh indicates the storage
* place; when the entry is in the index b-tree, "bucket" indicates the
* real location of the xattr.
*/
static void ocfs2_xattr_update_xattr_search(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct buffer_head *old_bh)
{
char *buf = old_bh->b_data;
struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
int i;
xs->header = bucket_xh(xs->bucket);
xs->base = bucket_block(xs->bucket, 0);
xs->end = xs->base + inode->i_sb->s_blocksize;
if (xs->not_found)
return;
i = xs->here - old_xh->xh_entries;
xs->here = &xs->header->xh_entries[i];
}
static int ocfs2_xattr_create_index_block(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
u32 bit_off, len;
u64 blkno;
handle_t *handle = ctxt->handle;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct buffer_head *xb_bh = xs->xattr_bh;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)xb_bh->b_data;
struct ocfs2_xattr_tree_root *xr;
u16 xb_flags = le16_to_cpu(xb->xb_flags);
trace_ocfs2_xattr_create_index_block_begin(
(unsigned long long)xb_bh->b_blocknr);
BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
BUG_ON(!xs->bucket);
/*
* XXX:
* We can use this lock for now, and maybe move to a dedicated mutex
* if performance becomes a problem later.
*/
down_write(&oi->ip_alloc_sem);
ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
1, 1, &bit_off, &len);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The bucket may span many blocks, and
* we will only touch the 1st block and the last block
* in the whole bucket (one for the entries and one for the data).
*/
blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);
/* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
offsetof(struct ocfs2_xattr_block, xb_attrs));
xr = &xb->xb_attrs.xb_root;
xr->xt_clusters = cpu_to_le32(1);
xr->xt_last_eb_blk = 0;
xr->xt_list.l_tree_depth = 0;
xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
xr->xt_list.l_next_free_rec = cpu_to_le16(1);
xr->xt_list.l_recs[0].e_cpos = 0;
xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);
ocfs2_journal_dirty(handle, xb_bh);
out:
up_write(&oi->ip_alloc_sem);
return ret;
}
static int cmp_xe_offset(const void *a, const void *b)
{
const struct ocfs2_xattr_entry *l = a, *r = b;
u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
if (l_name_offset < r_name_offset)
return 1;
if (l_name_offset > r_name_offset)
return -1;
return 0;
}
/*
* Defragment an xattr bucket if we find that the bucket has some
* holes between name/value pairs.
* We will move all the name/value pairs to the end of the bucket
* so that we can spare some space for insertion.
*/
static int ocfs2_defrag_xattr_bucket(struct inode *inode,
handle_t *handle,
struct ocfs2_xattr_bucket *bucket)
{
int ret, i;
size_t end, offset, len;
struct ocfs2_xattr_header *xh;
char *entries, *buf, *bucket_buf = NULL;
u64 blkno = bucket_blkno(bucket);
u16 xh_free_start;
size_t blocksize = inode->i_sb->s_blocksize;
struct ocfs2_xattr_entry *xe;
/*
* To make the operation more efficient and generic,
* we copy all the blocks into one contiguous buffer and do the
* defragmentation there, so if anything goes wrong, we will not
* have touched the real blocks.
*/
bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
if (!bucket_buf) {
ret = -ENOMEM;
goto out;
}
buf = bucket_buf;
for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
memcpy(buf, bucket_block(bucket, i), blocksize);
ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
xh = (struct ocfs2_xattr_header *)bucket_buf;
entries = (char *)xh->xh_entries;
xh_free_start = le16_to_cpu(xh->xh_free_start);
trace_ocfs2_defrag_xattr_bucket(
(unsigned long long)blkno, le16_to_cpu(xh->xh_count),
xh_free_start, le16_to_cpu(xh->xh_name_value_len));
/*
* Sort all the entries by their offset.
* The largest will be first, so that we can
* move them to the end one by one.
*/
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
cmp_xe_offset, swap_xe);
/* Move all name/values to the end of the bucket. */
xe = xh->xh_entries;
end = OCFS2_XATTR_BUCKET_SIZE;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
offset = le16_to_cpu(xe->xe_name_offset);
len = namevalue_size_xe(xe);
/*
* We must make sure that the name/value pair
* lives entirely within one block. So adjust end to
* the previous block boundary if needed.
*/
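/*
* For illustration, with a 512-byte block size, end == 1030 and
* len == 100: bytes 930..1029 would straddle the block boundary
* at 1024, so end is pulled back to 1024 and the pair is placed
* at 924..1023 instead.
*/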
if (((end - len) / blocksize !=
(end - 1) / blocksize))
end = end - end % blocksize;
if (end > offset + len) {
memmove(bucket_buf + end - len,
bucket_buf + offset, len);
xe->xe_name_offset = cpu_to_le16(end - len);
}
mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
"bucket %llu\n", (unsigned long long)blkno);
end -= len;
}
mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
"bucket %llu\n", (unsigned long long)blkno);
if (xh_free_start == end)
goto out;
memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
xh->xh_free_start = cpu_to_le16(end);
/* sort the entries by their name_hash. */
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
cmp_xe, swap_xe);
buf = bucket_buf;
for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
memcpy(bucket_block(bucket, i), buf, blocksize);
ocfs2_xattr_bucket_journal_dirty(handle, bucket);
out:
kfree(bucket_buf);
return ret;
}
/*
* prev_blkno points to the start of an existing extent. new_blkno
* points to a newly allocated extent. Because we know each of our
* clusters contains more than one bucket, we can easily split one cluster
* at a bucket boundary. So we take the last cluster of the existing
* extent and split it down the middle. We move the last half of the
* buckets in the last cluster of the existing extent over to the new
* extent.
*
* first_bh is the buffer at prev_blkno so we can update the existing
* extent's bucket count. header_bh is the bucket where we were hoping
* to insert our xattr. If the bucket move places the target in the new
* extent, we'll update first_bh and header_bh after modifying the old
* extent.
*
* first_hash will be set to the 1st xe's name_hash in the new extent.
*/
static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
handle_t *handle,
struct ocfs2_xattr_bucket *first,
struct ocfs2_xattr_bucket *target,
u64 new_blkno,
u32 num_clusters,
u32 *first_hash)
{
int ret;
struct super_block *sb = inode->i_sb;
int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
int to_move = num_buckets / 2;
u64 src_blkno;
u64 last_cluster_blkno = bucket_blkno(first) +
((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));
BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
trace_ocfs2_mv_xattr_bucket_cross_cluster(
(unsigned long long)last_cluster_blkno,
(unsigned long long)new_blkno);
ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
last_cluster_blkno, new_blkno,
to_move, first_hash);
if (ret) {
mlog_errno(ret);
goto out;
}
/* This is the first bucket that got moved */
src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);
/*
* If the target bucket was part of the moved buckets, we need to
* update first and target.
*/
if (bucket_blkno(target) >= src_blkno) {
/* Find the block for the new target bucket */
src_blkno = new_blkno +
(bucket_blkno(target) - src_blkno);
ocfs2_xattr_bucket_relse(first);
ocfs2_xattr_bucket_relse(target);
/*
* These shouldn't fail - the buffers are in the
* journal from ocfs2_cp_xattr_bucket().
*/
ret = ocfs2_read_xattr_bucket(first, new_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(target, src_blkno);
if (ret)
mlog_errno(ret);
}
out:
return ret;
}
/*
* Find a suitable position at which to divide a bucket into 2.
* We have to make sure the xattrs with the same hash value exist
* in the same bucket.
*
* If this ocfs2_xattr_header covers more than one hash value, find a
* place where the hash value changes. Try to find the most even split.
* The most common case is that all entries have different hash values,
* and the first check we make will find a place to split.
*/
static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
{
struct ocfs2_xattr_entry *entries = xh->xh_entries;
int count = le16_to_cpu(xh->xh_count);
int delta, middle = count / 2;
/*
* We start at the middle. Each step gets farther away in both
* directions. We therefore hit the change in hash value
* nearest to the middle. Note that this loop does not execute for
* count < 2.
*/
for (delta = 0; delta < middle; delta++) {
/* Let's check delta earlier than middle */
if (cmp_xe(&entries[middle - delta - 1],
&entries[middle - delta]))
return middle - delta;
/* For even counts, don't walk off the end */
if ((middle + delta + 1) == count)
continue;
/* Now try delta past middle */
if (cmp_xe(&entries[middle + delta],
&entries[middle + delta + 1]))
return middle + delta + 1;
}
/* Every entry had the same hash */
return count;
}
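/*
* For illustration, with entry hashes {5, 5, 7, 9}: count == 4,
* middle == 2, and at delta == 0 entries[1] (hash 5) and entries[2]
* (hash 7) differ, so the split position is 2. The entries with
* hashes {7, 9} then go to the new bucket, whose first_hash becomes 7.
*/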
/*
* Move some xattrs from the old bucket (blk) to the new bucket (new_blk).
* first_hash will record the 1st hash of the new bucket.
*
* Normally half of the xattrs will be moved. But we have to make
* sure that the xattrs with the same hash value are stored in the
* same bucket. If all the xattrs in this bucket have the same hash
* value, the new bucket will be initialized as an empty one and the
* first_hash will be initialized to (hash_value+1).
*/
static int ocfs2_divide_xattr_bucket(struct inode *inode,
handle_t *handle,
u64 blk,
u64 new_blk,
u32 *first_hash,
int new_bucket_head)
{
int ret, i;
int count, start, len, name_value_len = 0, name_offset = 0;
struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
struct ocfs2_xattr_header *xh;
struct ocfs2_xattr_entry *xe;
int blocksize = inode->i_sb->s_blocksize;
trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
(unsigned long long)new_blk);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
if (!s_bucket || !t_bucket) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(s_bucket, blk);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Even if !new_bucket_head, we're overwriting t_bucket. Thus,
* there's no need to read it.
*/
ret = ocfs2_init_xattr_bucket(t_bucket, new_blk, new_bucket_head);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Hey, if we're overwriting t_bucket, what difference does
* ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the
* same part of ocfs2_cp_xattr_bucket().
*/
ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
new_bucket_head ?
OCFS2_JOURNAL_ACCESS_CREATE :
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
xh = bucket_xh(s_bucket);
count = le16_to_cpu(xh->xh_count);
start = ocfs2_xattr_find_divide_pos(xh);
if (start == count) {
xe = &xh->xh_entries[start-1];
/*
* Initialize a new empty bucket here.
* The hash value is set to one larger than
* that of the last entry in the previous bucket.
*/
for (i = 0; i < t_bucket->bu_blocks; i++)
memset(bucket_block(t_bucket, i), 0, blocksize);
xh = bucket_xh(t_bucket);
xh->xh_free_start = cpu_to_le16(blocksize);
xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
goto set_num_buckets;
}
/* Copy the whole bucket to the new one first. */
ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
/* update the new bucket. */
xh = bucket_xh(t_bucket);
/*
* Calculate the total name/value len and xh_free_start for
* the old bucket first.
*/
name_offset = OCFS2_XATTR_BUCKET_SIZE;
name_value_len = 0;
for (i = 0; i < start; i++) {
xe = &xh->xh_entries[i];
name_value_len += namevalue_size_xe(xe);
if (le16_to_cpu(xe->xe_name_offset) < name_offset)
name_offset = le16_to_cpu(xe->xe_name_offset);
}
/*
* Now begin the modification to the new bucket.
*
* In the new bucket, we just move the xattr entries to the beginning
* and don't touch the name/values. So there will be some holes in the
* bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
* called.
*/
xe = &xh->xh_entries[start];
len = sizeof(struct ocfs2_xattr_entry) * (count - start);
trace_ocfs2_divide_xattr_bucket_move(len,
(int)((char *)xe - (char *)xh),
(int)((char *)xh->xh_entries - (char *)xh));
memmove((char *)xh->xh_entries, (char *)xe, len);
xe = &xh->xh_entries[count - start];
len = sizeof(struct ocfs2_xattr_entry) * start;
memset((char *)xe, 0, len);
le16_add_cpu(&xh->xh_count, -start);
le16_add_cpu(&xh->xh_name_value_len, -name_value_len);
/* Calculate xh_free_start for the new bucket. */
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
if (le16_to_cpu(xe->xe_name_offset) <
le16_to_cpu(xh->xh_free_start))
xh->xh_free_start = xe->xe_name_offset;
}
set_num_buckets:
/* set xh->xh_num_buckets for the new xh. */
if (new_bucket_head)
xh->xh_num_buckets = cpu_to_le16(1);
else
xh->xh_num_buckets = 0;
ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
/* store the first_hash of the new bucket. */
if (first_hash)
*first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);
/*
* Now only update the 1st block of the old bucket. If we
* just added a new empty bucket, there is no need to modify
* it.
*/
if (start == count)
goto out;
xh = bucket_xh(s_bucket);
memset(&xh->xh_entries[start], 0,
sizeof(struct ocfs2_xattr_entry) * (count - start));
xh->xh_count = cpu_to_le16(start);
xh->xh_free_start = cpu_to_le16(name_offset);
xh->xh_name_value_len = cpu_to_le16(name_value_len);
ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);
out:
ocfs2_xattr_bucket_free(s_bucket);
ocfs2_xattr_bucket_free(t_bucket);
return ret;
}
/*
* Copy the xattrs from one bucket to another bucket.
*
* The caller must make sure that the journal transaction
* has enough space for journaling.
*/
static int ocfs2_cp_xattr_bucket(struct inode *inode,
handle_t *handle,
u64 s_blkno,
u64 t_blkno,
int t_is_new)
{
int ret;
struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
BUG_ON(s_blkno == t_blkno);
trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
(unsigned long long)t_blkno,
t_is_new);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
if (!s_bucket || !t_bucket) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
if (ret)
goto out;
/*
* Even if !t_is_new, we're overwriting t_bucket. Thus,
* there's no need to read it.
*/
ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno, t_is_new);
if (ret)
goto out;
/*
* Hey, if we're overwriting t_bucket, what difference does
* ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new
* cluster to fill, we came here from
* ocfs2_mv_xattr_buckets(), and it is really new -
* ACCESS_CREATE is required. But we also might have moved data
* out of t_bucket before extending back into it.
* ocfs2_add_new_xattr_bucket() can do this - its call to
* ocfs2_add_new_xattr_cluster() may have created a new extent
* and copied out the end of the old extent. Then it re-extends
* the old extent back to create space for new xattrs. That's
* how we get here, and the bucket isn't really new.
*/
ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
t_is_new ?
OCFS2_JOURNAL_ACCESS_CREATE :
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret)
goto out;
ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
out:
ocfs2_xattr_bucket_free(t_bucket);
ocfs2_xattr_bucket_free(s_bucket);
return ret;
}
/*
* src_blk points to the start of an existing extent. last_blk points to
* last cluster in that extent. to_blk points to a newly allocated
* extent. We copy the buckets from the cluster at last_blk to the new
* extent. If start_bucket is non-zero, we skip that many buckets before
* we start copying. The new extent's xh_num_buckets gets set to the
* number of buckets we copied. The old extent's xh_num_buckets shrinks
* by the same amount.
*/
static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
u64 src_blk, u64 last_blk, u64 to_blk,
unsigned int start_bucket,
u32 *first_hash)
{
int i, ret, credits;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
struct ocfs2_xattr_bucket *old_first, *new_first;
trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
(unsigned long long)to_blk);
BUG_ON(start_bucket >= num_buckets);
if (start_bucket) {
num_buckets -= start_bucket;
last_blk += (start_bucket * blks_per_bucket);
}
/* The first bucket of the original extent */
old_first = ocfs2_xattr_bucket_new(inode);
/* The first bucket of the new extent */
new_first = ocfs2_xattr_bucket_new(inode);
if (!old_first || !new_first) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(old_first, src_blk);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We need to update the first bucket of the old extent and all
* the buckets going to the new extent.
*/
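/*
* For illustration, with 4KB blocks (one block per bucket) and 64KB
* clusters (16 buckets per cluster), moving a whole cluster extends
* the transaction by (16 + 1) * 1 == 17 credits: one per destination
* bucket block plus the first bucket of the old extent.
*/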
credits = ((num_buckets + 1) * blks_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
for (i = 0; i < num_buckets; i++) {
ret = ocfs2_cp_xattr_bucket(inode, handle,
last_blk + (i * blks_per_bucket),
to_blk + (i * blks_per_bucket),
1);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* Get the new bucket ready before we dirty anything
* (This actually shouldn't fail, because we already dirtied
* it once in ocfs2_cp_xattr_bucket()).
*/
ret = ocfs2_read_xattr_bucket(new_first, to_blk);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/* Now update the headers */
le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
ocfs2_xattr_bucket_journal_dirty(handle, old_first);
bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
ocfs2_xattr_bucket_journal_dirty(handle, new_first);
if (first_hash)
*first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);
out:
ocfs2_xattr_bucket_free(new_first);
ocfs2_xattr_bucket_free(old_first);
return ret;
}
/*
* Move some xattrs in this cluster to the new cluster.
* This function should only be called when bucket size == cluster size.
* Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
*/
static int ocfs2_divide_xattr_cluster(struct inode *inode,
handle_t *handle,
u64 prev_blk,
u64 new_blk,
u32 *first_hash)
{
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int ret, credits = 2 * blk_per_bucket;
BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
return ret;
}
/* Move half of the xattrs in prev_blk to the new bucket. */
return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
new_blk, first_hash, 1);
}
/*
* Move some xattrs from the old cluster to the new one since they are not
* contiguous in ocfs2 xattr tree.
*
* new_blk starts a new separate cluster, and we will move some xattrs from
* prev_blk to it. v_start will be set to the first name hash value in this
* new cluster so that it can be used as e_cpos during tree insertion and
* won't collide with our original b-tree operations. first_bh and header_bh
* will also be updated since they will be used in ocfs2_extend_xattr_bucket
* to extend the insert bucket.
*
* The problem is how many xattrs we should move to the new one and when we
* should update first_bh and header_bh.
* 1. If cluster size > bucket size, that means the previous cluster has more
* than 1 bucket, so just move half of the buckets into the new cluster and
* update the first_bh and header_bh if the insert bucket has been moved
* to the new cluster.
* 2. If cluster_size == bucket_size:
* a) If the previous extent rec has more than one cluster and the insert
* place isn't in the last cluster, copy the entire last cluster to the
* new one. This time, we don't need to update the first_bh and header_bh
* since they will not be moved into the new cluster.
* b) Otherwise, move the bottom half of the xattrs in the last cluster into
* the new one. And we set the extend flag to zero if the insert place is
* moved into the newly allocated cluster since no extend is needed.
*/
static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
handle_t *handle,
struct ocfs2_xattr_bucket *first,
struct ocfs2_xattr_bucket *target,
u64 new_blk,
u32 prev_clusters,
u32 *v_start,
int *extend)
{
int ret;
trace_ocfs2_adjust_xattr_cross_cluster(
(unsigned long long)bucket_blkno(first),
(unsigned long long)new_blk, prev_clusters);
if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
handle,
first, target,
new_blk,
prev_clusters,
v_start);
if (ret)
mlog_errno(ret);
} else {
/* The start of the last cluster in the first extent */
u64 last_blk = bucket_blkno(first) +
((prev_clusters - 1) *
ocfs2_clusters_to_blocks(inode->i_sb, 1));
if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
ret = ocfs2_mv_xattr_buckets(inode, handle,
bucket_blkno(first),
last_blk, new_blk, 0,
v_start);
if (ret)
mlog_errno(ret);
} else {
ret = ocfs2_divide_xattr_cluster(inode, handle,
last_blk, new_blk,
v_start);
if (ret)
mlog_errno(ret);
if ((bucket_blkno(target) == last_blk) && extend)
*extend = 0;
}
}
return ret;
}
/*
* Add a new cluster for xattr storage.
*
* If the new cluster is contiguous with the previous one, it will be
* appended to the same extent record, and num_clusters will be updated.
* If not, we will insert a new extent for it and move some xattrs in
* the last cluster into the newly allocated one.
* We also need to limit the maximum size of a btree leaf, otherwise we'll
* lose the benefits of hashing because we'll have to search large leaves.
* So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE (or clustersize,
* if it's bigger).
*
* first_bh is the first block of the previous extent rec and header_bh
* indicates the bucket where we will insert the new xattrs. They will be updated
* when the header_bh is moved into the new cluster.
*/
static int ocfs2_add_new_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
struct ocfs2_xattr_bucket *first,
struct ocfs2_xattr_bucket *target,
u32 *num_clusters,
u32 prev_cpos,
int *extend,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
u32 prev_clusters = *num_clusters;
u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
u64 block;
handle_t *handle = ctxt->handle;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
trace_ocfs2_add_new_xattr_cluster_begin(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)bucket_blkno(first),
prev_cpos, prev_clusters);
ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto leave;
}
ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
clusters_to_add, &bit_off, &num_bits);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto leave;
}
BUG_ON(num_bits > clusters_to_add);
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
(prev_clusters + num_bits) << osb->s_clustersize_bits <=
OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
/*
* If this cluster is contiguous with the old one and,
* after adding this new cluster, we don't surpass the limit of
* OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
* initialized and used like the other buckets in the previous
* cluster.
* So add it as a contiguous one. The caller will handle
* its init process.
*/
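/*
* For illustration, assuming 4KB clusters and a 64KB
* OCFS2_MAX_XATTR_TREE_LEAF_SIZE: a leaf can hold at most 16
* clusters, so prev_clusters == 15 plus one new bit gives
* 16 << 12 == 64KB, which still fits and is appended here.
*/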
v_start = prev_cpos + prev_clusters;
*num_clusters = prev_clusters + num_bits;
} else {
ret = ocfs2_adjust_xattr_cross_cluster(inode,
handle,
first,
target,
block,
prev_clusters,
&v_start,
extend);
if (ret) {
mlog_errno(ret);
goto leave;
}
}
trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
v_start, num_bits);
ret = ocfs2_insert_extent(handle, &et, v_start, block,
num_bits, 0, ctxt->meta_ac);
if (ret < 0) {
mlog_errno(ret);
goto leave;
}
ocfs2_journal_dirty(handle, root_bh);
leave:
return ret;
}
/*
* We are given an extent. 'first' is the bucket at the very front of
* the extent. The extent has space for an additional bucket past
* bucket_xh(first)->xh_num_buckets. 'target_blk' is the block number
* of the target bucket. We wish to shift every bucket past the target
* down one, filling in that additional space. When we get back to the
* target, we split the target between itself and the now-empty bucket
* at target+1 (aka, target_blk + blks_per_bucket).
*/
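/*
* For illustration, with existing buckets [A B C D] and target B:
* D is copied into the empty slot E, then C into D's old slot, and
* finally B is split between itself and the now-empty slot where C
* used to be.
*/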
static int ocfs2_extend_xattr_bucket(struct inode *inode,
handle_t *handle,
struct ocfs2_xattr_bucket *first,
u64 target_blk,
u32 num_clusters)
{
int ret, credits;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
u64 end_blk;
u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
(unsigned long long)bucket_blkno(first),
num_clusters, new_bucket);
/* The extent must have room for an additional bucket */
BUG_ON(new_bucket >=
(num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));
/* end_blk points to the last existing bucket */
end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);
/*
* end_blk is the start of the last existing bucket.
* Thus, (end_blk - target_blk) covers the target bucket and
* every bucket after it up to, but not including, the last
* existing bucket. Then we add the last existing bucket, the
* new bucket, and the first bucket (3 * blk_per_bucket).
*/
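/*
* For illustration, with 8 blocks per bucket (512-byte blocks),
* the target at bucket index 2 and the last existing bucket at
* index 5: end_blk - target_blk == 3 * 8 == 24 blocks, plus
* 3 * blk_per_bucket == 24 more, for 48 credits in total.
*/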
credits = (end_blk - target_blk) + (3 * blk_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(handle, first,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
while (end_blk != target_blk) {
ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
end_blk + blk_per_bucket, 0);
if (ret)
goto out;
end_blk -= blk_per_bucket;
}
/* Move half of the xattrs in target_blk to the next bucket. */
ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
target_blk + blk_per_bucket, NULL, 0);
le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
ocfs2_xattr_bucket_journal_dirty(handle, first);
out:
return ret;
}
/*
* Add a new xattr bucket to an extent record and adjust the buckets
* accordingly. xb_bh is the ocfs2_xattr_block, and target is the
* bucket we want to insert into.
*
* In the easy case, we will move all the buckets after target down by
* one. Half of target's xattrs will be moved to the next bucket.
*
* If the current cluster is full, we'll allocate a new one. This may not
* be contiguous. The underlying calls will make sure that there is
* space for the insert, shifting buckets around if necessary.
* 'target' may be moved by those calls.
*/
static int ocfs2_add_new_xattr_bucket(struct inode *inode,
struct buffer_head *xb_bh,
struct ocfs2_xattr_bucket *target,
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)xb_bh->b_data;
struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
struct ocfs2_extent_list *el = &xb_root->xt_list;
u32 name_hash =
le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int ret, num_buckets, extend = 1;
u64 p_blkno;
u32 e_cpos, num_clusters;
/* The bucket at the front of the extent */
struct ocfs2_xattr_bucket *first;
trace_ocfs2_add_new_xattr_bucket(
(unsigned long long)bucket_blkno(target));
/* The first bucket of the original extent */
first = ocfs2_xattr_bucket_new(inode);
if (!first) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
&num_clusters, el);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_xattr_bucket(first, p_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
/*
* This can move first+target if the target bucket moves
* to the new extent.
*/
ret = ocfs2_add_new_xattr_cluster(inode,
xb_bh,
first,
target,
&num_clusters,
e_cpos,
&extend,
ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (extend) {
ret = ocfs2_extend_xattr_bucket(inode,
ctxt->handle,
first,
bucket_blkno(target),
num_clusters);
if (ret)
mlog_errno(ret);
}
out:
ocfs2_xattr_bucket_free(first);
return ret;
}
/*
* Truncate the value of the entry at xe_off in the given xattr bucket
* to len bytes.
* Both the ocfs2_xattr_value_root and the entry will be updated here.
*/
static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
int xe_off,
int len,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret, offset;
u64 value_blk;
struct ocfs2_xattr_entry *xe;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
size_t blocksize = inode->i_sb->s_blocksize;
struct ocfs2_xattr_value_buf vb = {
.vb_access = ocfs2_journal_access,
};
xe = &xh->xh_entries[xe_off];
BUG_ON(!xe || ocfs2_xattr_is_local(xe));
offset = le16_to_cpu(xe->xe_name_offset) +
OCFS2_XATTR_SIZE(xe->xe_name_len);
value_blk = offset / blocksize;
/* We don't allow the ocfs2_xattr_value root to span blocks. */
BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);
vb.vb_bh = bucket->bu_bhs[value_blk];
BUG_ON(!vb.vb_bh);
vb.vb_xv = (struct ocfs2_xattr_value_root *)
(vb.vb_bh->b_data + offset % blocksize);
/*
* From here on out we have to dirty the bucket. The generic
* value calls only modify one of the bucket's bhs, but we need
* to journal the bucket as a whole. So if they error, they *could* have
* modified something. We have to assume they did, and dirty
* the whole bucket. This leaves us in a consistent state.
*/
trace_ocfs2_xattr_bucket_value_truncate(
(unsigned long long)bucket_blkno(bucket), xe_off, len);
ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
xe->xe_value_size = cpu_to_le64(len);
ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
out:
return ret;
}
static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
u32 cpos,
u32 len,
void *para)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
handle_t *handle;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)root_bh->b_data;
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_extent_tree et;
ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
ocfs2_delete_xattr_in_bucket, para);
if (ret) {
mlog_errno(ret);
return ret;
}
ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
trace_ocfs2_rm_xattr_cluster(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)blkno, cpos, len);
ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
len);
ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
if (ret) {
mlog_errno(ret);
return ret;
}
inode_lock(tl_inode);
if (ocfs2_truncate_log_needs_flush(osb)) {
ret = __ocfs2_flush_truncate_log(osb);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
&dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
ocfs2_journal_dirty(handle, root_bh);
ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
if (ret)
mlog_errno(ret);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
ocfs2_schedule_truncate_log_flush(osb, 1);
inode_unlock(tl_inode);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
ocfs2_run_deallocs(osb, &dealloc);
return ret;
}
/*
* Check whether the xattr bucket is filled up with entries of the same
* hash value.
* If we want to insert an xattr with that same hash, return -ENOSPC.
* If we want to insert an xattr with a different hash value, go ahead:
* ocfs2_divide_xattr_bucket will handle this.
*/
static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
const char *name)
{
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
return 0;
if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
xh->xh_entries[0].xe_name_hash) {
mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, "
"hash = %u\n",
(unsigned long long)bucket_blkno(bucket),
le32_to_cpu(xh->xh_entries[0].xe_name_hash));
return -ENOSPC;
}
return 0;
}
/*
* Try to set the entry in the current bucket. If we fail, the caller
* will handle getting us another bucket.
*/
static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
struct ocfs2_xa_loc loc;
trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
xs->not_found ? NULL : xs->here);
ret = ocfs2_xa_set(&loc, xi, ctxt);
if (!ret) {
xs->here = loc.xl_entry;
goto out;
}
if (ret != -ENOSPC) {
mlog_errno(ret);
goto out;
}
/* Ok, we need space. Let's try defragmenting the bucket. */
ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
xs->bucket);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xa_set(&loc, xi, ctxt);
if (!ret) {
xs->here = loc.xl_entry;
goto out;
}
if (ret != -ENOSPC)
mlog_errno(ret);
out:
return ret;
}
static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
if (!ret)
goto out;
if (ret != -ENOSPC) {
mlog_errno(ret);
goto out;
}
/* Ack, need more space. Let's try to get another bucket! */
/*
* We do not allow hash ranges to overlap between buckets, so
* the maximum number of collisions we can tolerate is one
* bucket's worth. Check here whether we need to
* add a new bucket for the insert.
*/
ret = ocfs2_check_xattr_bucket_collision(inode,
xs->bucket,
xi->xi_name);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_add_new_xattr_bucket(inode,
xs->xattr_bh,
xs->bucket,
ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* ocfs2_add_new_xattr_bucket() will have updated
* xs->bucket if it moved, but it will not have updated
* any of the other search fields. Thus, we drop it and
* re-search. Everything should be cached, so it'll be
* quick.
*/
ocfs2_xattr_bucket_relse(xs->bucket);
ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
xi->xi_name_index,
xi->xi_name, xs);
if (ret && ret != -ENODATA)
goto out;
xs->not_found = ret;
/* Ok, we have a new bucket, let's try again */
ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
if (ret && (ret != -ENOSPC))
mlog_errno(ret);
out:
return ret;
}
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para)
{
int ret = 0, ref_credits;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
u16 i;
struct ocfs2_xattr_entry *xe;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
int credits = ocfs2_remove_extent_credits(osb->sb) +
ocfs2_blocks_per_xattr_bucket(inode->i_sb);
struct ocfs2_xattr_value_root *xv;
struct ocfs2_rm_xattr_bucket_para *args =
(struct ocfs2_rm_xattr_bucket_para *)para;
ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
if (ocfs2_xattr_is_local(xe))
continue;
ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
i, &xv, NULL);
if (ret) {
mlog_errno(ret);
break;
}
ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
args->ref_ci,
args->ref_root_bh,
&ctxt.meta_ac,
&ref_credits);
if (ret) {
mlog_errno(ret);
break;
}
ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
break;
}
ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
i, 0, &ctxt);
ocfs2_commit_trans(osb, ctxt.handle);
if (ctxt.meta_ac) {
ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt.meta_ac = NULL;
}
if (ret) {
mlog_errno(ret);
break;
}
}
if (ctxt.meta_ac)
ocfs2_free_alloc_context(ctxt.meta_ac);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
return ret;
}
/*
 * Whenever we modify an xattr value root in the bucket (e.g., CoW
 * or changing an extent record flag), we need to recalculate
 * the metaecc for the whole bucket, so it is done here.
 *
 * Note:
 * The caller must supply the extra credits for this.
 */
static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
handle_t *handle,
void *para)
{
int ret;
struct ocfs2_xattr_bucket *bucket =
(struct ocfs2_xattr_bucket *)para;
ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
return ret;
}
ocfs2_xattr_bucket_journal_dirty(handle, bucket);
return 0;
}
/*
 * Special actions we need if the xattr value is refcounted.
 *
 * 1. If the xattr is refcounted, lock the refcount tree.
 * 2. CoW the xattr if we are setting a new value and the value
 *    will be stored outside.
 * 3. Otherwise, decrease_refcount will work for us, so just
 *    locking the refcount tree and calculating the metadata and
 *    credits is enough.
 *
 * We have to do the CoW before ocfs2_init_xattr_set_ctxt since the
 * CoW currently runs as a complete transaction of its own, while
 * ocfs2_init_xattr_set_ctxt also locks the allocators, which could
 * deadlock us. So we CoW the whole xattr value up front.
 */
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
struct ocfs2_refcount_tree **ref_tree,
int *meta_add,
int *credits)
{
int ret = 0;
struct ocfs2_xattr_block *xb;
struct ocfs2_xattr_entry *xe;
char *base;
u32 p_cluster, num_clusters;
unsigned int ext_flags;
int name_offset, name_len;
struct ocfs2_xattr_value_buf vb;
struct ocfs2_xattr_bucket *bucket = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_post_refcount refcount;
struct ocfs2_post_refcount *p = NULL;
struct buffer_head *ref_root_bh = NULL;
if (!xis->not_found) {
xe = xis->here;
name_offset = le16_to_cpu(xe->xe_name_offset);
name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
base = xis->base;
vb.vb_bh = xis->inode_bh;
vb.vb_access = ocfs2_journal_access_di;
} else {
int i, block_off = 0;
xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
xe = xbs->here;
name_offset = le16_to_cpu(xe->xe_name_offset);
name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
i = xbs->here - xbs->header->xh_entries;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(xbs->bucket),
i, &block_off,
&name_offset);
if (ret) {
mlog_errno(ret);
goto out;
}
base = bucket_block(xbs->bucket, block_off);
vb.vb_bh = xbs->bucket->bu_bhs[block_off];
vb.vb_access = ocfs2_journal_access;
if (ocfs2_meta_ecc(osb)) {
				/* Create parameters for ocfs2_post_refcount. */
bucket = xbs->bucket;
refcount.credits = bucket->bu_blocks;
refcount.para = bucket;
refcount.func =
ocfs2_xattr_bucket_post_refcount;
p = &refcount;
}
} else {
base = xbs->base;
vb.vb_bh = xbs->xattr_bh;
vb.vb_access = ocfs2_journal_access_xb;
}
}
if (ocfs2_xattr_is_local(xe))
goto out;
vb.vb_xv = (struct ocfs2_xattr_value_root *)
(base + name_offset + name_len);
ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
&num_clusters, &vb.vb_xv->xr_list,
&ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
	/*
	 * We just need to check the 1st extent record, since we always
	 * CoW the whole xattr. So there shouldn't be an xattr with
	 * REFCOUNTED extent recs after the 1st one.
	 */
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
1, ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
	/*
	 * If we are deleting the xattr or the new value will be stored
	 * inline, leave the clusters alone; the xattr truncate process will
	 * remove them for us (it still needs the refcount tree lock and the
	 * metadata and credit reservations). The worst case is that
	 * truncating each cluster splits the refcount tree and turns the
	 * original extent record into 3, so we will need at most
	 * 2 * clusters extra extent recs.
	 */
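	/*
	 * Worked example (illustrative numbers): deleting a 4-cluster
	 * refcounted value can split the refcount tree at every cluster
	 * boundary, turning the original extent rec into as many as
	 * 1 + 2 * 4 = 9 recs; ocfs2_refcounted_xattr_delete_need() sizes
	 * its reservation for that bound.
	 */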
if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
ret = ocfs2_refcounted_xattr_delete_need(inode,
&(*ref_tree)->rf_ci,
ref_root_bh, vb.vb_xv,
meta_add, credits);
if (ret)
mlog_errno(ret);
goto out;
}
ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
*ref_tree, ref_root_bh, 0,
le32_to_cpu(vb.vb_xv->xr_clusters), p);
if (ret)
mlog_errno(ret);
out:
brelse(ref_root_bh);
return ret;
}
/*
 * Add the REFCOUNTED flag to all the extent recs in an
 * ocfs2_xattr_value_root. The physical clusters will be added to the
 * refcount tree.
 */
static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
struct ocfs2_xattr_value_root *xv,
struct ocfs2_extent_tree *value_et,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_post_refcount *refcount)
{
int ret = 0;
u32 clusters = le32_to_cpu(xv->xr_clusters);
u32 cpos, p_cluster, num_clusters;
struct ocfs2_extent_list *el = &xv->xr_list;
unsigned int ext_flags;
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
&num_clusters, el, &ext_flags);
if (ret) {
mlog_errno(ret);
break;
}
cpos += num_clusters;
if ((ext_flags & OCFS2_EXT_REFCOUNTED))
continue;
BUG_ON(!p_cluster);
ret = ocfs2_add_refcount_flag(inode, value_et,
ref_ci, ref_root_bh,
cpos - num_clusters,
p_cluster, num_clusters,
dealloc, refcount);
if (ret) {
mlog_errno(ret);
break;
}
}
return ret;
}
/*
 * Given a normal ocfs2_xattr_header, refcount all the entries which
 * have their value stored outside.
 * Used for xattrs stored in the inode and in an ocfs2_xattr_block.
 */
static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_xattr_header *header,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
struct ocfs2_xattr_entry *xe;
struct ocfs2_xattr_value_root *xv;
struct ocfs2_extent_tree et;
int i, ret = 0;
for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
xe = &header->xh_entries[i];
if (ocfs2_xattr_is_local(xe))
continue;
xv = (struct ocfs2_xattr_value_root *)((void *)header +
le16_to_cpu(xe->xe_name_offset) +
OCFS2_XATTR_SIZE(xe->xe_name_len));
vb->vb_xv = xv;
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
ref_ci, ref_root_bh,
dealloc, NULL);
if (ret) {
mlog_errno(ret);
break;
}
}
return ret;
}
static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
(fe_bh->b_data + inode->i_sb->s_blocksize -
le16_to_cpu(di->i_xattr_inline_size));
struct ocfs2_xattr_value_buf vb = {
.vb_bh = fe_bh,
.vb_access = ocfs2_journal_access_di,
};
return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
ref_ci, ref_root_bh, dealloc);
}
struct ocfs2_xattr_tree_value_refcount_para {
struct ocfs2_caching_info *ref_ci;
struct buffer_head *ref_root_bh;
struct ocfs2_cached_dealloc_ctxt *dealloc;
};
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
struct ocfs2_xattr_bucket *bucket,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **bh)
{
int ret, block_off, name_offset;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
void *base;
ret = ocfs2_xattr_bucket_get_name_value(sb,
bucket_xh(bucket),
offset,
&block_off,
&name_offset);
if (ret) {
mlog_errno(ret);
goto out;
}
base = bucket_block(bucket, block_off);
*xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
OCFS2_XATTR_SIZE(xe->xe_name_len));
if (bh)
*bh = bucket->bu_bhs[block_off];
out:
return ret;
}
/*
 * For a given xattr bucket, refcount all the entries which
 * have their value stored outside.
 */
static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para)
{
int i, ret = 0;
struct ocfs2_extent_tree et;
struct ocfs2_xattr_tree_value_refcount_para *ref =
(struct ocfs2_xattr_tree_value_refcount_para *)para;
struct ocfs2_xattr_header *xh =
(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
struct ocfs2_xattr_entry *xe;
struct ocfs2_xattr_value_buf vb = {
.vb_access = ocfs2_journal_access,
};
struct ocfs2_post_refcount refcount = {
.credits = bucket->bu_blocks,
.para = bucket,
.func = ocfs2_xattr_bucket_post_refcount,
};
struct ocfs2_post_refcount *p = NULL;
/* We only need post_refcount if we support metaecc. */
if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
p = &refcount;
trace_ocfs2_xattr_bucket_value_refcount(
(unsigned long long)bucket_blkno(bucket),
le16_to_cpu(xh->xh_count));
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
if (ocfs2_xattr_is_local(xe))
continue;
ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
&vb.vb_xv, &vb.vb_bh);
if (ret) {
mlog_errno(ret);
break;
}
ocfs2_init_xattr_value_extent_tree(&et,
INODE_CACHE(inode), &vb);
ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
&et, ref->ref_ci,
ref->ref_root_bh,
ref->dealloc, p);
if (ret) {
mlog_errno(ret);
break;
}
}
return ret;
}
static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno, u32 cpos, u32 len, void *para)
{
return ocfs2_iterate_xattr_buckets(inode, blkno, len,
ocfs2_xattr_bucket_value_refcount,
para);
}
static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
struct buffer_head *blk_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)blk_bh->b_data;
if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
struct ocfs2_xattr_value_buf vb = {
.vb_bh = blk_bh,
.vb_access = ocfs2_journal_access_xb,
};
ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
ref_ci, ref_root_bh,
dealloc);
} else {
struct ocfs2_xattr_tree_value_refcount_para para = {
.ref_ci = ref_ci,
.ref_root_bh = ref_root_bh,
.dealloc = dealloc,
};
ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
ocfs2_refcount_xattr_tree_rec,
¶);
}
return ret;
}
int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
struct buffer_head *blk_bh = NULL;
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
ref_ci, ref_root_bh,
dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (!di->i_xattr_loc)
goto out;
ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
&blk_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
ref_root_bh, dealloc);
if (ret)
mlog_errno(ret);
brelse(blk_bh);
out:
return ret;
}
typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
/*
 * Store the information we need for xattr reflink.
 * old_bh and new_bh are the inode bhs for the old and new inodes.
 */
struct ocfs2_xattr_reflink {
struct inode *old_inode;
struct inode *new_inode;
struct buffer_head *old_bh;
struct buffer_head *new_bh;
struct ocfs2_caching_info *ref_ci;
struct buffer_head *ref_root_bh;
struct ocfs2_cached_dealloc_ctxt *dealloc;
should_xattr_reflinked *xattr_reflinked;
};
/*
 * Given an xattr header and an xe offset,
 * return the proper xv and the corresponding bh.
 * xattrs in an inode, a block and an xattr tree have different
 * implementations.
 */
typedef int (get_xattr_value_root)(struct super_block *sb,
struct buffer_head *bh,
struct ocfs2_xattr_header *xh,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **ret_bh,
void *para);
/*
 * Calculate all the xattr value root metadata stored in this xattr header
 * and the credits we need if we create them from scratch.
 * We use get_xattr_value_root so that all types of xattr containers can
 * use it.
 */
static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
struct buffer_head *bh,
struct ocfs2_xattr_header *xh,
int *metas, int *credits,
int *num_recs,
get_xattr_value_root *func,
void *para)
{
int i, ret = 0;
struct ocfs2_xattr_value_root *xv;
struct ocfs2_xattr_entry *xe;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
if (ocfs2_xattr_is_local(xe))
continue;
ret = func(sb, bh, xh, i, &xv, NULL, para);
if (ret) {
mlog_errno(ret);
break;
}
*metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
le16_to_cpu(xv->xr_list.l_next_free_rec);
*credits += ocfs2_calc_extend_credits(sb,
&def_xv.xv.xr_list);
		/*
		 * If the value is a tree with depth > 0, we don't descend
		 * into the extent blocks, so just calculate a maximum
		 * record count.
		 */
if (!xv->xr_list.l_tree_depth)
*num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
else
*num_recs += ocfs2_clusters_for_bytes(sb,
XATTR_SIZE_MAX);
}
return ret;
}
/* Used by xattr inode and block to return the right xv and buffer_head. */
static int ocfs2_get_xattr_value_root(struct super_block *sb,
struct buffer_head *bh,
struct ocfs2_xattr_header *xh,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **ret_bh,
void *para)
{
struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
*xv = (struct ocfs2_xattr_value_root *)((void *)xh +
le16_to_cpu(xe->xe_name_offset) +
OCFS2_XATTR_SIZE(xe->xe_name_len));
if (ret_bh)
*ret_bh = bh;
return 0;
}
/*
 * Lock the meta_ac and calculate how many credits we need for reflinking
 * xattrs. It is only used for inline xattrs and xattr blocks.
 */
static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
struct ocfs2_xattr_header *xh,
struct buffer_head *ref_root_bh,
int *credits,
struct ocfs2_alloc_context **meta_ac)
{
int ret, meta_add = 0, num_recs = 0;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
*credits = 0;
ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
&meta_add, credits, &num_recs,
ocfs2_get_xattr_value_root,
NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
	/*
	 * We need to add/modify num_recs in the refcount tree, so just
	 * calculate an approximate number we need for the refcount tree
	 * change. Sometimes we need to split the tree, and after a split,
	 * half of the recs will be moved to the new block, so a new block
	 * can only provide half a block's worth of recs. Hence we multiply
	 * the number of new blocks by 2.
	 */
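	/*
	 * Worked example (illustrative numbers): with num_recs = 100 and
	 * 50 recs per refcount block, the line below reserves
	 * 100 / 50 * 2 = 4 extra metadata blocks, and the credits grow by
	 * 4 + 4 * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS.
	 */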
num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
meta_add += num_recs;
*credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
else
*credits += 1;
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
if (ret)
mlog_errno(ret);
out:
return ret;
}
/*
 * Given an xattr header, reflink all the xattrs in this container.
 * It can be used for an inode, a block and a bucket.
 *
 * NOTE:
 * Before calling this function, the caller must have memcpy'd the
 * xattrs from old_xh to new_xh.
 *
 * If args.xattr_reflinked is set, call it to decide whether the xe should
 * be reflinked or not. If not, remove it from the new xattr header.
 */
static int ocfs2_reflink_xattr_header(handle_t *handle,
struct ocfs2_xattr_reflink *args,
struct buffer_head *old_bh,
struct ocfs2_xattr_header *xh,
struct buffer_head *new_bh,
struct ocfs2_xattr_header *new_xh,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_alloc_context *meta_ac,
get_xattr_value_root *func,
void *para)
{
int ret = 0, i, j;
struct super_block *sb = args->old_inode->i_sb;
struct buffer_head *value_bh;
struct ocfs2_xattr_entry *xe, *last;
struct ocfs2_xattr_value_root *xv, *new_xv;
struct ocfs2_extent_tree data_et;
u32 clusters, cpos, p_cluster, num_clusters;
unsigned int ext_flags = 0;
trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
le16_to_cpu(xh->xh_count));
last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
xe = &xh->xh_entries[i];
if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
xe = &new_xh->xh_entries[j];
le16_add_cpu(&new_xh->xh_count, -1);
if (new_xh->xh_count) {
memmove(xe, xe + 1,
(void *)last - (void *)xe);
memset(last, 0,
sizeof(struct ocfs2_xattr_entry));
}
			/*
			 * We don't want j to advance in the next round,
			 * since the next entry has already been moved
			 * into slot j.
			 */
j--;
continue;
}
if (ocfs2_xattr_is_local(xe))
continue;
ret = func(sb, old_bh, xh, i, &xv, NULL, para);
if (ret) {
mlog_errno(ret);
break;
}
ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
if (ret) {
mlog_errno(ret);
break;
}
		/*
		 * For an xattr with l_tree_depth = 0, all the extent
		 * recs have already been copied to the new xh with the
		 * appropriate OCFS2_EXT_REFCOUNTED flag; we just need to
		 * increase the refcount in the refcount tree.
		 *
		 * For an xattr with l_tree_depth > 0, we need
		 * to initialize it to the empty default value root,
		 * and then insert the extents one by one.
		 */
if (xv->xr_list.l_tree_depth) {
memcpy(new_xv, &def_xv, OCFS2_XATTR_ROOT_SIZE);
vb->vb_xv = new_xv;
vb->vb_bh = value_bh;
ocfs2_init_xattr_value_extent_tree(&data_et,
INODE_CACHE(args->new_inode), vb);
}
clusters = le32_to_cpu(xv->xr_clusters);
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(args->old_inode,
cpos,
&p_cluster,
&num_clusters,
&xv->xr_list,
&ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(!p_cluster);
if (xv->xr_list.l_tree_depth) {
ret = ocfs2_insert_extent(handle,
&data_et, cpos,
ocfs2_clusters_to_blocks(
args->old_inode->i_sb,
p_cluster),
num_clusters, ext_flags,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_increase_refcount(handle, args->ref_ci,
args->ref_root_bh,
p_cluster, num_clusters,
meta_ac, args->dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
cpos += num_clusters;
}
}
out:
return ret;
}
static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
{
int ret = 0, credits = 0;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
int inline_size = le16_to_cpu(di->i_xattr_inline_size);
int header_off = osb->sb->s_blocksize - inline_size;
struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
(args->old_bh->b_data + header_off);
struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
(args->new_bh->b_data + header_off);
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_inode_info *new_oi;
struct ocfs2_dinode *new_di;
struct ocfs2_xattr_value_buf vb = {
.vb_bh = args->new_bh,
.vb_access = ocfs2_journal_access_di,
};
ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
&credits, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
memcpy(args->new_bh->b_data + header_off,
args->old_bh->b_data + header_off, inline_size);
new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
new_di->i_xattr_inline_size = cpu_to_le16(inline_size);
ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
args->new_bh, new_xh, &vb, meta_ac,
ocfs2_get_xattr_value_root, NULL);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
new_oi = OCFS2_I(args->new_inode);
	/*
	 * Adjust the extent record count to reserve space for the extended
	 * attributes. The inline data count was already adjusted in
	 * ocfs2_duplicate_inline_data().
	 */
if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
!(ocfs2_inode_is_fast_symlink(args->new_inode))) {
struct ocfs2_extent_list *el = &new_di->id2.i_list;
le16_add_cpu(&el->l_count, -(inline_size /
sizeof(struct ocfs2_extent_rec)));
}
spin_lock(&new_oi->ip_lock);
new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
spin_unlock(&new_oi->ip_lock);
ocfs2_journal_dirty(handle, args->new_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
return ret;
}
static int ocfs2_create_empty_xattr_block(struct inode *inode,
struct buffer_head *fe_bh,
struct buffer_head **ret_bh,
int indexed)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_xattr_set_ctxt ctxt;
memset(&ctxt, 0, sizeof(ctxt));
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
goto out;
}
trace_ocfs2_create_empty_xattr_block(
(unsigned long long)fe_bh->b_blocknr, indexed);
ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
ret_bh);
if (ret)
mlog_errno(ret);
ocfs2_commit_trans(osb, ctxt.handle);
out:
ocfs2_free_alloc_context(ctxt.meta_ac);
return ret;
}
static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
struct buffer_head *blk_bh,
struct buffer_head *new_blk_bh)
{
int ret = 0, credits = 0;
handle_t *handle;
struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
struct ocfs2_dinode *new_di;
struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)blk_bh->b_data;
struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
struct ocfs2_xattr_block *new_xb =
(struct ocfs2_xattr_block *)new_blk_bh->b_data;
struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_xattr_value_buf vb = {
.vb_bh = new_blk_bh,
.vb_access = ocfs2_journal_access_xb,
};
ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
&credits, &meta_ac);
if (ret) {
mlog_errno(ret);
return ret;
}
	/* One more credit in case we need to add xattr flags in the new inode. */
handle = ocfs2_start_trans(osb, credits + 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
ret = ocfs2_journal_access_di(handle,
INODE_CACHE(args->new_inode),
args->new_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
}
ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
osb->sb->s_blocksize - header_off);
ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
new_blk_bh, new_xh, &vb, meta_ac,
ocfs2_get_xattr_value_root, NULL);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ocfs2_journal_dirty(handle, new_blk_bh);
if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
spin_lock(&new_oi->ip_lock);
new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
spin_unlock(&new_oi->ip_lock);
ocfs2_journal_dirty(handle, args->new_bh);
}
out_commit:
ocfs2_commit_trans(osb, handle);
out:
ocfs2_free_alloc_context(meta_ac);
return ret;
}
struct ocfs2_reflink_xattr_tree_args {
struct ocfs2_xattr_reflink *reflink;
struct buffer_head *old_blk_bh;
struct buffer_head *new_blk_bh;
struct ocfs2_xattr_bucket *old_bucket;
struct ocfs2_xattr_bucket *new_bucket;
};
/*
 * NOTE:
 * We have to handle the case where both the old bucket and the new
 * bucket call this function and need the right ret_bh.
 * So the caller must give us the right bh.
 */
static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
struct buffer_head *bh,
struct ocfs2_xattr_header *xh,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **ret_bh,
void *para)
{
struct ocfs2_reflink_xattr_tree_args *args =
(struct ocfs2_reflink_xattr_tree_args *)para;
struct ocfs2_xattr_bucket *bucket;
if (bh == args->old_bucket->bu_bhs[0])
bucket = args->old_bucket;
else
bucket = args->new_bucket;
return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
xv, ret_bh);
}
struct ocfs2_value_tree_metas {
int num_metas;
int credits;
int num_recs;
};
static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
struct buffer_head *bh,
struct ocfs2_xattr_header *xh,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **ret_bh,
void *para)
{
struct ocfs2_xattr_bucket *bucket =
(struct ocfs2_xattr_bucket *)para;
return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
xv, ret_bh);
}
static int ocfs2_calc_value_tree_metas(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para)
{
struct ocfs2_value_tree_metas *metas =
(struct ocfs2_value_tree_metas *)para;
struct ocfs2_xattr_header *xh =
(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
/* Add the credits for this bucket first. */
metas->credits += bucket->bu_blocks;
return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
xh, &metas->num_metas,
&metas->credits, &metas->num_recs,
ocfs2_value_tree_metas_in_bucket,
bucket);
}
/*
 * Given an xattr extent rec starting at blkno and covering len clusters,
 * iterate over all the buckets, calculate how much metadata we need for
 * reflinking all the ocfs2_xattr_value_roots, and lock the allocators
 * accordingly.
 */
static int ocfs2_lock_reflink_xattr_rec_allocators(
struct ocfs2_reflink_xattr_tree_args *args,
struct ocfs2_extent_tree *xt_et,
u64 blkno, u32 len, int *credits,
struct ocfs2_alloc_context **meta_ac,
struct ocfs2_alloc_context **data_ac)
{
int ret, num_free_extents;
struct ocfs2_value_tree_metas metas;
struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
struct ocfs2_refcount_block *rb;
memset(&metas, 0, sizeof(metas));
ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
ocfs2_calc_value_tree_metas, &metas);
if (ret) {
mlog_errno(ret);
goto out;
}
*credits = metas.credits;
	/*
	 * Calculate what we need for the refcount tree change.
	 *
	 * We need to add/modify num_recs in the refcount tree, so just
	 * calculate an approximate number we need for the refcount tree
	 * change. Sometimes we need to split the tree, and after a split,
	 * half of the recs will be moved to the new block, so a new block
	 * can only provide half a block's worth of recs. Hence we multiply
	 * the number of new blocks by 2. In the end, we also have to add
	 * credits for modifying the already existing refcount block.
	 */
rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
metas.num_recs =
(metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
ocfs2_refcount_recs_per_rb(osb->sb) * 2;
metas.num_metas += metas.num_recs;
*credits += metas.num_recs +
metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
else
*credits += 1;
/* count in the xattr tree change. */
num_free_extents = ocfs2_num_free_extents(xt_et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
if (num_free_extents < len)
metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
*credits += ocfs2_calc_extend_credits(osb->sb,
xt_et->et_root_el);
if (metas.num_metas) {
ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (len) {
ret = ocfs2_reserve_clusters(osb, len, data_ac);
if (ret)
mlog_errno(ret);
}
out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
*meta_ac = NULL;
}
}
return ret;
}
static int ocfs2_reflink_xattr_bucket(handle_t *handle,
u64 blkno, u64 new_blkno, u32 clusters,
u32 *cpos, int num_buckets,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_reflink_xattr_tree_args *args)
{
int i, j, ret = 0;
struct super_block *sb = args->reflink->old_inode->i_sb;
int bpb = args->old_bucket->bu_blocks;
struct ocfs2_xattr_value_buf vb = {
.vb_access = ocfs2_journal_access,
};
for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
if (ret) {
mlog_errno(ret);
break;
}
ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno, 1);
if (ret) {
mlog_errno(ret);
break;
}
ret = ocfs2_xattr_bucket_journal_access(handle,
args->new_bucket,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
break;
}
for (j = 0; j < bpb; j++)
memcpy(bucket_block(args->new_bucket, j),
bucket_block(args->old_bucket, j),
sb->s_blocksize);
		/*
		 * Record the start cpos so that we can use it to initialize
		 * our xattr tree. We also set xh_num_buckets for the new
		 * bucket.
		 */
if (i == 0) {
*cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
xh_entries[0].xe_name_hash);
bucket_xh(args->new_bucket)->xh_num_buckets =
cpu_to_le16(num_buckets);
}
ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
ret = ocfs2_reflink_xattr_header(handle, args->reflink,
args->old_bucket->bu_bhs[0],
bucket_xh(args->old_bucket),
args->new_bucket->bu_bhs[0],
bucket_xh(args->new_bucket),
&vb, meta_ac,
ocfs2_get_reflink_xattr_value_root,
args);
if (ret) {
mlog_errno(ret);
break;
}
		/*
		 * Re-access and dirty the bucket to calculate the metaecc,
		 * because we may have extended the transaction in
		 * ocfs2_reflink_xattr_header, which can invalidate the
		 * journal access we already obtained for the blocks.
		 */
ret = ocfs2_xattr_bucket_journal_access(handle,
args->new_bucket,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
break;
}
ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
ocfs2_xattr_bucket_relse(args->old_bucket);
ocfs2_xattr_bucket_relse(args->new_bucket);
}
ocfs2_xattr_bucket_relse(args->old_bucket);
ocfs2_xattr_bucket_relse(args->new_bucket);
return ret;
}
static int ocfs2_reflink_xattr_buckets(handle_t *handle,
struct inode *inode,
struct ocfs2_reflink_xattr_tree_args *args,
struct ocfs2_extent_tree *et,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac,
u64 blkno, u32 cpos, u32 len)
{
int ret, first_inserted = 0;
u32 p_cluster, num_clusters, reflink_cpos = 0;
u64 new_blkno;
unsigned int num_buckets, reflink_buckets;
unsigned int bpc =
ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
ocfs2_xattr_bucket_relse(args->old_bucket);
while (len && num_buckets) {
ret = ocfs2_claim_clusters(handle, data_ac,
1, &p_cluster, &num_clusters);
if (ret) {
mlog_errno(ret);
goto out;
}
new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
reflink_buckets = min(num_buckets, bpc * num_clusters);
ret = ocfs2_reflink_xattr_bucket(handle, blkno,
new_blkno, num_clusters,
&reflink_cpos, reflink_buckets,
meta_ac, data_ac, args);
if (ret) {
mlog_errno(ret);
goto out;
}
		/*
		 * For the 1st allocated cluster, we make it use the same
		 * cpos so that the xattr tree looks the same as the
		 * original one in most cases.
		 */
if (!first_inserted) {
reflink_cpos = cpos;
first_inserted = 1;
}
ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
num_clusters, 0, meta_ac);
if (ret)
mlog_errno(ret);
trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
num_clusters, reflink_cpos);
len -= num_clusters;
blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
num_buckets -= reflink_buckets;
}
out:
return ret;
}
/*
* Create the same xattr extent record in the new inode's xattr tree.
*/
static int ocfs2_reflink_xattr_rec(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
u32 cpos,
u32 len,
void *para)
{
int ret, credits = 0;
handle_t *handle;
struct ocfs2_reflink_xattr_tree_args *args =
(struct ocfs2_reflink_xattr_tree_args *)para;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_extent_tree et;
trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
ocfs2_init_xattr_tree_extent_tree(&et,
INODE_CACHE(args->reflink->new_inode),
args->new_blk_bh);
ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
len, &credits,
&meta_ac, &data_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
meta_ac, data_ac,
blkno, cpos, len);
if (ret)
mlog_errno(ret);
ocfs2_commit_trans(osb, handle);
out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
return ret;
}
/*
 * Create reflinked xattr buckets.
 * We add the buckets one by one, and refcount all the xattrs in each
 * bucket if their values are stored outside.
 */
static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
struct buffer_head *blk_bh,
struct buffer_head *new_blk_bh)
{
int ret;
struct ocfs2_reflink_xattr_tree_args para;
memset(¶, 0, sizeof(para));
para.reflink = args;
para.old_blk_bh = blk_bh;
para.new_blk_bh = new_blk_bh;
para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
if (!para.old_bucket) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
if (!para.new_bucket) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
ocfs2_reflink_xattr_rec,
¶);
if (ret)
mlog_errno(ret);
out:
ocfs2_xattr_bucket_free(para.old_bucket);
ocfs2_xattr_bucket_free(para.new_bucket);
return ret;
}
static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
struct buffer_head *blk_bh)
{
int ret, indexed = 0;
struct buffer_head *new_blk_bh = NULL;
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)blk_bh->b_data;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
indexed = 1;
ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
&new_blk_bh, indexed);
if (ret) {
mlog_errno(ret);
goto out;
}
if (!indexed)
ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
else
ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
if (ret)
mlog_errno(ret);
out:
brelse(new_blk_bh);
return ret;
}
static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
{
int type = ocfs2_xattr_get_type(xe);
return type != OCFS2_XATTR_INDEX_SECURITY &&
type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
}
int ocfs2_reflink_xattrs(struct inode *old_inode,
struct buffer_head *old_bh,
struct inode *new_inode,
struct buffer_head *new_bh,
bool preserve_security)
{
int ret;
struct ocfs2_xattr_reflink args;
struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
struct buffer_head *blk_bh = NULL;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_refcount_tree *ref_tree;
struct buffer_head *ref_root_bh = NULL;
ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
le64_to_cpu(di->i_refcount_loc),
1, &ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_init_dealloc_ctxt(&dealloc);
args.old_inode = old_inode;
args.new_inode = new_inode;
args.old_bh = old_bh;
args.new_bh = new_bh;
args.ref_ci = &ref_tree->rf_ci;
args.ref_root_bh = ref_root_bh;
args.dealloc = &dealloc;
if (preserve_security)
args.xattr_reflinked = NULL;
else
args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
ret = ocfs2_reflink_xattr_inline(&args);
if (ret) {
mlog_errno(ret);
goto out_unlock;
}
}
if (!di->i_xattr_loc)
goto out_unlock;
ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
&blk_bh);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
if (ret)
mlog_errno(ret);
brelse(blk_bh);
out_unlock:
ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
ref_tree, 1);
brelse(ref_root_bh);
if (ocfs2_dealloc_has_cluster(&dealloc)) {
ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
}
out:
return ret;
}
/*
 * Initialize security and ACLs for an already created inode.
 * Used when reflinking a non-preserve-security file.
 *
 * It uses common APIs like ocfs2_xattr_set, so the caller
 * must not hold any lock except i_rwsem.
 */
int ocfs2_init_security_and_acl(struct inode *dir,
struct inode *inode,
const struct qstr *qstr)
{
int ret = 0;
struct buffer_head *dir_bh = NULL;
ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (ret) {
mlog_errno(ret);
goto leave;
}
ret = ocfs2_inode_lock(dir, &dir_bh, 0);
if (ret) {
mlog_errno(ret);
goto leave;
}
ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
if (ret)
mlog_errno(ret);
ocfs2_inode_unlock(dir, 0);
brelse(dir_bh);
leave:
return ret;
}
/*
* 'security' attributes support
*/
static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY,
name, buffer, size);
}
static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
struct ocfs2_security_xattr_info *si = fs_info;
const struct xattr *xattr;
int err = 0;
if (si) {
si->value = kmemdup(xattr_array->value, xattr_array->value_len,
GFP_KERNEL);
if (!si->value)
return -ENOMEM;
si->name = xattr_array->name;
si->value_len = xattr_array->value_len;
return 0;
}
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
xattr->name, xattr->value,
xattr->value_len, XATTR_CREATE);
if (err)
break;
}
return err;
}
int ocfs2_init_security_get(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
struct ocfs2_security_xattr_info *si)
{
int ret;
	/* check whether ocfs2 supports the xattr feature */
if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
return -EOPNOTSUPP;
if (si) {
ret = security_inode_init_security(inode, dir, qstr,
&ocfs2_initxattrs, si);
		/*
		 * security_inode_init_security() does not return -EOPNOTSUPP,
		 * so we have to check the xattr ourselves.
		 */
if (!ret && !si->name)
si->enable = 0;
return ret;
}
return security_inode_init_security(inode, dir, qstr,
&ocfs2_initxattrs, NULL);
}
int ocfs2_init_security_set(handle_t *handle,
struct inode *inode,
struct buffer_head *di_bh,
struct ocfs2_security_xattr_info *si,
struct ocfs2_alloc_context *xattr_ac,
struct ocfs2_alloc_context *data_ac)
{
return ocfs2_xattr_set_handle(handle, inode, di_bh,
OCFS2_XATTR_INDEX_SECURITY,
si->name, si->value, si->value_len, 0,
xattr_ac, data_ac);
}
const struct xattr_handler ocfs2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = ocfs2_xattr_security_get,
.set = ocfs2_xattr_security_set,
};
/*
* 'trusted' attributes support
*/
static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED,
name, buffer, size);
}
static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED,
name, value, size, flags);
}
const struct xattr_handler ocfs2_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = ocfs2_xattr_trusted_get,
.set = ocfs2_xattr_trusted_set,
};
/*
* 'user' attributes support
*/
static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name,
buffer, size);
}
static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
name, value, size, flags);
}
const struct xattr_handler ocfs2_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.get = ocfs2_xattr_user_get,
.set = ocfs2_xattr_user_set,
};
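/*
 * The handlers above are normally collected into a NULL-terminated
 * array that the superblock points at via sb->s_xattr, which is how
 * the VFS dispatches getxattr(2)/setxattr(2) calls by name prefix.
 * A minimal sketch (the array name is illustrative; ocfs2 defines its
 * own handler list elsewhere in this file):
 */
#if 0	/* illustrative sketch, not built */
static const struct xattr_handler *example_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};
/* At mount time: sb->s_xattr = example_xattr_handlers; */
#endif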
| linux-master | fs/ocfs2/xattr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* mmap.c
*
* Code to deal with the mess that is clustered mmap.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/signal.h>
#include <linux/rbtree.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "aops.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "mmap.h"
#include "super.h"
#include "ocfs2_trace.h"
static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
sigset_t oldset;
vm_fault_t ret;
ocfs2_block_signals(&oldset);
ret = filemap_fault(vmf);
ocfs2_unblock_signals(&oldset);
trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
vma, vmf->page, vmf->pgoff);
return ret;
}
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
struct buffer_head *di_bh, struct page *page)
{
int err;
vm_fault_t ret = VM_FAULT_NOPAGE;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
unsigned int len = PAGE_SIZE;
pgoff_t last_index;
struct page *locked_page = NULL;
void *fsdata;
loff_t size = i_size_read(inode);
last_index = (size - 1) >> PAGE_SHIFT;
	/*
	 * There are cases that lead to the page no longer belonging to the
	 * mapping:
	 * 1) the pagecache truncates locally due to memory pressure.
	 * 2) the pagecache truncates when another node is taking an EX lock
	 *    against the inode lock. See ocfs2_data_convert_worker.
	 *
	 * The i_size check doesn't catch the case where another node
	 * truncated and then re-extended the file. We'll re-check the page
	 * mapping after taking the page lock inside of
	 * ocfs2_write_begin_nolock().
	 *
	 * Let the VM retry in these cases.
	 */
if ((page->mapping != inode->i_mapping) ||
(!PageUptodate(page)) ||
(page_offset(page) >= size))
goto out;
/*
* Call ocfs2_write_begin() and ocfs2_write_end() to take
* advantage of the allocation code there. We pass a write
* length of the whole page (chopped to i_size) to make sure
* the whole thing is allocated.
*
* Since we know the page is up to date, we don't have to
* worry about ocfs2_write_begin() skipping some buffer reads
* because the "write" would invalidate their data.
*/
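	/*
	 * Worked example (illustrative numbers): with PAGE_SIZE = 4096 and
	 * i_size = 5000, last_index is 1, so a fault on the final page is
	 * written with len = ((5000 - 1) & ~PAGE_MASK) + 1 = 904 bytes.
	 */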
if (page->index == last_index)
len = ((size - 1) & ~PAGE_MASK) + 1;
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
&locked_page, &fsdata, di_bh, page);
if (err) {
if (err != -ENOSPC)
mlog_errno(err);
ret = vmf_error(err);
goto out;
}
if (!locked_page) {
ret = VM_FAULT_NOPAGE;
goto out;
}
err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
BUG_ON(err != len);
ret = VM_FAULT_LOCKED;
out:
return ret;
}
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
int err;
vm_fault_t ret;
sb_start_pagefault(inode->i_sb);
ocfs2_block_signals(&oldset);
/*
* The cluster locks taken will block a truncate from another
* node. Taking the data lock will also ensure that we don't
* attempt page truncation as part of a downconvert.
*/
err = ocfs2_inode_lock(inode, &di_bh, 1);
if (err < 0) {
mlog_errno(err);
ret = vmf_error(err);
goto out;
}
/*
* The alloc sem should be enough to serialize with
* ocfs2_truncate_file() changing i_size as well as any thread
* modifying the inode btree.
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
brelse(di_bh);
ocfs2_inode_unlock(inode, 1);
out:
ocfs2_unblock_signals(&oldset);
sb_end_pagefault(inode->i_sb);
return ret;
}
static const struct vm_operations_struct ocfs2_file_vm_ops = {
.fault = ocfs2_fault,
.page_mkwrite = ocfs2_page_mkwrite,
};
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = 0, lock_level = 0;
ret = ocfs2_inode_lock_atime(file_inode(file),
file->f_path.mnt, &lock_level, 1);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ocfs2_inode_unlock(file_inode(file), lock_level);
out:
vma->vm_ops = &ocfs2_file_vm_ops;
return 0;
}
| linux-master | fs/ocfs2/mmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* stack_user.c
*
* Code which interfaces ocfs2 with fs/dlm and a userspace stack.
*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "stackglue.h"
#include <linux/dlm_plock.h>
/*
* The control protocol starts with a handshake. Until the handshake
* is complete, the control device will fail all write(2)s.
*
* The handshake is simple. First, the client reads until EOF. Each line
* of output is a supported protocol tag. All protocol tags are a single
* character followed by a two hex digit version number. Currently the
 * only thing supported is T01, for "Text-based version 0x01". Next, the
* client writes the version they would like to use, including the newline.
* Thus, the protocol tag is 'T01\n'. If the version tag written is
* unknown, -EINVAL is returned. Once the negotiation is complete, the
* client can start sending messages.
*
* The T01 protocol has three messages. First is the "SETN" message.
* It has the following syntax:
*
* SETN<space><8-char-hex-nodenum><newline>
*
* This is 14 characters.
*
* The "SETN" message must be the first message following the protocol.
* It tells ocfs2_control the local node number.
*
* Next comes the "SETV" message. It has the following syntax:
*
* SETV<space><2-char-hex-major><space><2-char-hex-minor><newline>
*
* This is 11 characters.
*
* The "SETV" message sets the filesystem locking protocol version as
* negotiated by the client. The client negotiates based on the maximum
* version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
* number from the "SETV" message must match
* ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number
 * must be less than or equal to ...sp_max_proto.pv_minor.
*
* Once this information has been set, mounts will be allowed. From this
* point on, the "DOWN" message can be sent for node down notification.
* It has the following syntax:
*
* DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline>
*
* eg:
*
* DOWN 632A924FDD844190BDA93C0DF6B94899 00000001\n
*
* This is 47 characters.
*/
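/*
 * A minimal userspace sketch of the T01 exchange described above,
 * assuming /dev/ocfs2_control exists; error handling is elided and
 * the node numbers and uuid are illustrative.
 */
#if 0	/* userspace example, not part of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char protos[64];
	int fd = open("/dev/ocfs2_control", O_RDWR);

	if (fd < 0)
		return 1;

	/* Read the advertised protocol tags until EOF ("T01\n"). */
	while (read(fd, protos, sizeof(protos)) > 0)
		;

	/* Select the text protocol, then set node number and version. */
	write(fd, "T01\n", 4);
	write(fd, "SETN 00000001\n", 14);	/* this node is 1 */
	write(fd, "SETV 01 00\n", 11);		/* locking protocol 1.0 */

	/* Later: report that node 2 went down on a given filesystem. */
	write(fd, "DOWN 632A924FDD844190BDA93C0DF6B94899 00000002\n", 47);

	close(fd);
	return 0;
}
#endif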
/*
* Whether or not the client has done the handshake.
* For now, we have just one protocol version.
*/
#define OCFS2_CONTROL_PROTO "T01\n"
#define OCFS2_CONTROL_PROTO_LEN 4
/* Handshake states */
#define OCFS2_CONTROL_HANDSHAKE_INVALID (0)
#define OCFS2_CONTROL_HANDSHAKE_READ (1)
#define OCFS2_CONTROL_HANDSHAKE_PROTOCOL (2)
#define OCFS2_CONTROL_HANDSHAKE_VALID (3)
/* Messages */
#define OCFS2_CONTROL_MESSAGE_OP_LEN 4
#define OCFS2_CONTROL_MESSAGE_SETNODE_OP "SETN"
#define OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN 14
#define OCFS2_CONTROL_MESSAGE_SETVERSION_OP "SETV"
#define OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN 11
#define OCFS2_CONTROL_MESSAGE_DOWN_OP "DOWN"
#define OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN 47
#define OCFS2_TEXT_UUID_LEN 32
#define OCFS2_CONTROL_MESSAGE_VERNUM_LEN 2
#define OCFS2_CONTROL_MESSAGE_NODENUM_LEN 8
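/*
 * Sanity arithmetic for the total lengths above:
 * SETN: 4 (op) + 1 (space) + 8 (nodenum) + 1 (newline) = 14
 * SETV: 4 (op) + 1 + 2 (major) + 1 + 2 (minor) + 1 (newline) = 11
 * DOWN: 4 (op) + 1 + 32 (uuid) + 1 + 8 (nodenum) + 1 (newline) = 47
 */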
#define VERSION_LOCK "version_lock"
enum ocfs2_connection_type {
WITH_CONTROLD,
NO_CONTROLD
};
/*
* ocfs2_live_connection is refcounted because the filesystem and
* miscdevice sides can detach in different order. Let's just be safe.
*/
struct ocfs2_live_connection {
struct list_head oc_list;
struct ocfs2_cluster_connection *oc_conn;
enum ocfs2_connection_type oc_type;
atomic_t oc_this_node;
int oc_our_slot;
struct dlm_lksb oc_version_lksb;
char oc_lvb[DLM_LVB_LEN];
struct completion oc_sync_wait;
wait_queue_head_t oc_wait;
};
struct ocfs2_control_private {
struct list_head op_list;
int op_state;
int op_this_node;
struct ocfs2_protocol_version op_proto;
};
/* SETN<space><8-char-hex-nodenum><newline> */
struct ocfs2_control_message_setn {
char tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
char space;
char nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
char newline;
};
/* SETV<space><2-char-hex-major><space><2-char-hex-minor><newline> */
struct ocfs2_control_message_setv {
char tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
char space1;
char major[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
char space2;
char minor[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
char newline;
};
/* DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline> */
struct ocfs2_control_message_down {
char tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
char space1;
char uuid[OCFS2_TEXT_UUID_LEN];
char space2;
char nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
char newline;
};
union ocfs2_control_message {
char tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
struct ocfs2_control_message_setn u_setn;
struct ocfs2_control_message_setv u_setv;
struct ocfs2_control_message_down u_down;
};
static struct ocfs2_stack_plugin ocfs2_user_plugin;
static atomic_t ocfs2_control_opened;
static int ocfs2_control_this_node = -1;
static struct ocfs2_protocol_version running_proto;
static LIST_HEAD(ocfs2_live_connection_list);
static LIST_HEAD(ocfs2_control_private_list);
static DEFINE_MUTEX(ocfs2_control_lock);
static inline void ocfs2_control_set_handshake_state(struct file *file,
int state)
{
struct ocfs2_control_private *p = file->private_data;
p->op_state = state;
}
static inline int ocfs2_control_get_handshake_state(struct file *file)
{
struct ocfs2_control_private *p = file->private_data;
return p->op_state;
}
static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
{
size_t len = strlen(name);
struct ocfs2_live_connection *c;
BUG_ON(!mutex_is_locked(&ocfs2_control_lock));
list_for_each_entry(c, &ocfs2_live_connection_list, oc_list) {
if ((c->oc_conn->cc_namelen == len) &&
!strncmp(c->oc_conn->cc_name, name, len))
return c;
}
return NULL;
}
/*
* ocfs2_live_connection structures are created underneath the ocfs2
* mount path. Since the VFS prevents multiple calls to
* fill_super(), we can't get dupes here.
*/
static int ocfs2_live_connection_attach(struct ocfs2_cluster_connection *conn,
struct ocfs2_live_connection *c)
{
int rc = 0;
mutex_lock(&ocfs2_control_lock);
c->oc_conn = conn;
if ((c->oc_type == NO_CONTROLD) || atomic_read(&ocfs2_control_opened))
list_add(&c->oc_list, &ocfs2_live_connection_list);
else {
printk(KERN_ERR
"ocfs2: Userspace control daemon is not present\n");
rc = -ESRCH;
}
mutex_unlock(&ocfs2_control_lock);
return rc;
}
/*
* This function disconnects the cluster connection from ocfs2_control.
* Afterwards, userspace can't affect the cluster connection.
*/
static void ocfs2_live_connection_drop(struct ocfs2_live_connection *c)
{
mutex_lock(&ocfs2_control_lock);
list_del_init(&c->oc_list);
c->oc_conn = NULL;
mutex_unlock(&ocfs2_control_lock);
kfree(c);
}
static int ocfs2_control_cfu(void *target, size_t target_len,
const char __user *buf, size_t count)
{
	/* The T01 protocol expects write(2) calls to carry exactly one command */
if ((count != target_len) ||
(count > sizeof(union ocfs2_control_message)))
return -EINVAL;
if (copy_from_user(target, buf, target_len))
return -EFAULT;
return 0;
}
static ssize_t ocfs2_control_validate_protocol(struct file *file,
const char __user *buf,
size_t count)
{
ssize_t ret;
char kbuf[OCFS2_CONTROL_PROTO_LEN];
ret = ocfs2_control_cfu(kbuf, OCFS2_CONTROL_PROTO_LEN,
buf, count);
if (ret)
return ret;
if (strncmp(kbuf, OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN))
return -EINVAL;
ocfs2_control_set_handshake_state(file,
OCFS2_CONTROL_HANDSHAKE_PROTOCOL);
return count;
}
static void ocfs2_control_send_down(const char *uuid,
int nodenum)
{
struct ocfs2_live_connection *c;
mutex_lock(&ocfs2_control_lock);
c = ocfs2_connection_find(uuid);
if (c) {
BUG_ON(c->oc_conn == NULL);
c->oc_conn->cc_recovery_handler(nodenum,
c->oc_conn->cc_recovery_data);
}
mutex_unlock(&ocfs2_control_lock);
}
/*
* Called whenever configuration elements are sent to /dev/ocfs2_control.
* If all configuration elements are present, try to set the global
* values. If there is a problem, return an error. Skip any missing
* elements, and only bump ocfs2_control_opened when we have all elements
* and are successful.
*/
static int ocfs2_control_install_private(struct file *file)
{
int rc = 0;
int set_p = 1;
struct ocfs2_control_private *p = file->private_data;
BUG_ON(p->op_state != OCFS2_CONTROL_HANDSHAKE_PROTOCOL);
mutex_lock(&ocfs2_control_lock);
if (p->op_this_node < 0) {
set_p = 0;
} else if ((ocfs2_control_this_node >= 0) &&
(ocfs2_control_this_node != p->op_this_node)) {
rc = -EINVAL;
goto out_unlock;
}
if (!p->op_proto.pv_major) {
set_p = 0;
} else if (!list_empty(&ocfs2_live_connection_list) &&
((running_proto.pv_major != p->op_proto.pv_major) ||
(running_proto.pv_minor != p->op_proto.pv_minor))) {
rc = -EINVAL;
goto out_unlock;
}
if (set_p) {
ocfs2_control_this_node = p->op_this_node;
running_proto.pv_major = p->op_proto.pv_major;
running_proto.pv_minor = p->op_proto.pv_minor;
}
out_unlock:
mutex_unlock(&ocfs2_control_lock);
if (!rc && set_p) {
/* We set the global values successfully */
atomic_inc(&ocfs2_control_opened);
ocfs2_control_set_handshake_state(file,
OCFS2_CONTROL_HANDSHAKE_VALID);
}
return rc;
}
static int ocfs2_control_get_this_node(void)
{
int rc;
mutex_lock(&ocfs2_control_lock);
if (ocfs2_control_this_node < 0)
rc = -EINVAL;
else
rc = ocfs2_control_this_node;
mutex_unlock(&ocfs2_control_lock);
return rc;
}
static int ocfs2_control_do_setnode_msg(struct file *file,
struct ocfs2_control_message_setn *msg)
{
long nodenum;
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
return -EINVAL;
if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
return -EINVAL;
if ((msg->space != ' ') || (msg->newline != '\n'))
return -EINVAL;
msg->space = msg->newline = '\0';
nodenum = simple_strtol(msg->nodestr, &ptr, 16);
if (!ptr || *ptr)
return -EINVAL;
if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) ||
(nodenum > INT_MAX) || (nodenum < 0))
return -ERANGE;
p->op_this_node = nodenum;
return ocfs2_control_install_private(file);
}
static int ocfs2_control_do_setversion_msg(struct file *file,
struct ocfs2_control_message_setv *msg)
{
long major, minor;
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
struct ocfs2_protocol_version *max =
&ocfs2_user_plugin.sp_max_proto;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
return -EINVAL;
if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
return -EINVAL;
if ((msg->space1 != ' ') || (msg->space2 != ' ') ||
(msg->newline != '\n'))
return -EINVAL;
msg->space1 = msg->space2 = msg->newline = '\0';
major = simple_strtol(msg->major, &ptr, 16);
if (!ptr || *ptr)
return -EINVAL;
minor = simple_strtol(msg->minor, &ptr, 16);
if (!ptr || *ptr)
return -EINVAL;
/*
* The major must be between 1 and 255, inclusive. The minor
* must be between 0 and 255, inclusive. The version passed in
* must be within the maximum version supported by the filesystem.
*/
if ((major == LONG_MIN) || (major == LONG_MAX) ||
(major > (u8)-1) || (major < 1))
return -ERANGE;
if ((minor == LONG_MIN) || (minor == LONG_MAX) ||
(minor > (u8)-1) || (minor < 0))
return -ERANGE;
if ((major != max->pv_major) ||
(minor > max->pv_minor))
return -EINVAL;
p->op_proto.pv_major = major;
p->op_proto.pv_minor = minor;
return ocfs2_control_install_private(file);
}
static int ocfs2_control_do_down_msg(struct file *file,
struct ocfs2_control_message_down *msg)
{
long nodenum;
char *p = NULL;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_VALID)
return -EINVAL;
if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_DOWN_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
return -EINVAL;
if ((msg->space1 != ' ') || (msg->space2 != ' ') ||
(msg->newline != '\n'))
return -EINVAL;
msg->space1 = msg->space2 = msg->newline = '\0';
nodenum = simple_strtol(msg->nodestr, &p, 16);
if (!p || *p)
return -EINVAL;
if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) ||
(nodenum > INT_MAX) || (nodenum < 0))
return -ERANGE;
ocfs2_control_send_down(msg->uuid, nodenum);
return 0;
}
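/*
 * Demux one complete control message.  Both the total write length
 * and the leading tag must match one of the three known messages;
 * anything else is rejected with -EINVAL.  On success the full count
 * is reported as consumed.
 */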
static ssize_t ocfs2_control_message(struct file *file,
const char __user *buf,
size_t count)
{
ssize_t ret;
union ocfs2_control_message msg;
/* Try to catch padding issues */
WARN_ON(offsetof(struct ocfs2_control_message_down, uuid) !=
(sizeof(msg.u_down.tag) + sizeof(msg.u_down.space1)));
memset(&msg, 0, sizeof(union ocfs2_control_message));
ret = ocfs2_control_cfu(&msg, count, buf, count);
if (ret)
goto out;
if ((count == OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN) &&
!strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
ret = ocfs2_control_do_setnode_msg(file, &msg.u_setn);
else if ((count == OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN) &&
!strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
ret = ocfs2_control_do_setversion_msg(file, &msg.u_setv);
else if ((count == OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN) &&
!strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_DOWN_OP,
OCFS2_CONTROL_MESSAGE_OP_LEN))
ret = ocfs2_control_do_down_msg(file, &msg.u_down);
else
ret = -EINVAL;
out:
return ret ? ret : count;
}
static ssize_t ocfs2_control_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
ssize_t ret;
switch (ocfs2_control_get_handshake_state(file)) {
case OCFS2_CONTROL_HANDSHAKE_INVALID:
ret = -EINVAL;
break;
case OCFS2_CONTROL_HANDSHAKE_READ:
ret = ocfs2_control_validate_protocol(file, buf,
count);
break;
case OCFS2_CONTROL_HANDSHAKE_PROTOCOL:
case OCFS2_CONTROL_HANDSHAKE_VALID:
ret = ocfs2_control_message(file, buf, count);
break;
default:
BUG();
ret = -EIO;
break;
}
return ret;
}
/*
* This is a naive version. If we ever have a new protocol, we'll expand
* it. Probably using seq_file.
*/
static ssize_t ocfs2_control_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
ssize_t ret;
ret = simple_read_from_buffer(buf, count, ppos,
OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN);
/* Have we read the whole protocol list? */
if (ret > 0 && *ppos >= OCFS2_CONTROL_PROTO_LEN)
ocfs2_control_set_handshake_state(file,
OCFS2_CONTROL_HANDSHAKE_READ);
return ret;
}
static int ocfs2_control_release(struct inode *inode, struct file *file)
{
struct ocfs2_control_private *p = file->private_data;
mutex_lock(&ocfs2_control_lock);
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_VALID)
goto out;
if (atomic_dec_and_test(&ocfs2_control_opened)) {
if (!list_empty(&ocfs2_live_connection_list)) {
/* XXX: Do bad things! */
printk(KERN_ERR
"ocfs2: Unexpected release of ocfs2_control!\n"
" Loss of cluster connection requires "
"an emergency restart!\n");
emergency_restart();
}
/*
* Last valid close clears the node number and resets
* the locking protocol version
*/
ocfs2_control_this_node = -1;
running_proto.pv_major = 0;
running_proto.pv_minor = 0;
}
out:
list_del_init(&p->op_list);
file->private_data = NULL;
mutex_unlock(&ocfs2_control_lock);
kfree(p);
return 0;
}
static int ocfs2_control_open(struct inode *inode, struct file *file)
{
struct ocfs2_control_private *p;
p = kzalloc(sizeof(struct ocfs2_control_private), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->op_this_node = -1;
mutex_lock(&ocfs2_control_lock);
file->private_data = p;
list_add(&p->op_list, &ocfs2_control_private_list);
mutex_unlock(&ocfs2_control_lock);
return 0;
}
static const struct file_operations ocfs2_control_fops = {
.open = ocfs2_control_open,
.release = ocfs2_control_release,
.read = ocfs2_control_read,
.write = ocfs2_control_write,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static struct miscdevice ocfs2_control_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ocfs2_control",
.fops = &ocfs2_control_fops,
};
static int ocfs2_control_init(void)
{
int rc;
atomic_set(&ocfs2_control_opened, 0);
rc = misc_register(&ocfs2_control_device);
if (rc)
printk(KERN_ERR
"ocfs2: Unable to register ocfs2_control device "
"(errno %d)\n",
-rc);
return rc;
}
static void ocfs2_control_exit(void)
{
misc_deregister(&ocfs2_control_device);
}
static void fsdlm_lock_ast_wrapper(void *astarg)
{
struct ocfs2_dlm_lksb *lksb = astarg;
int status = lksb->lksb_fsdlm.sb_status;
/*
* For now we're punting on the issue of other non-standard errors
* where we can't tell if the unlock_ast or lock_ast should be called.
* The main "other error" that's possible is EINVAL which means the
* function was called with invalid args, which shouldn't be possible
* since the caller here is under our control. Other non-standard
* errors probably fall into the same category, or otherwise are fatal
* which means we can't carry on anyway.
*/
if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0);
else
lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
{
struct ocfs2_dlm_lksb *lksb = astarg;
lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
unsigned int namelen)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
sizeof(struct dlm_lksb);
return dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
flags|DLM_LKF_NODLCKWT, name, namelen, 0,
fsdlm_lock_ast_wrapper, lksb,
fsdlm_blocking_ast_wrapper);
}
static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
struct ocfs2_dlm_lksb *lksb,
u32 flags)
{
return dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
flags, &lksb->lksb_fsdlm, lksb);
}
static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return lksb->lksb_fsdlm.sb_status;
}
static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
return !invalid;
}
static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
sizeof(struct dlm_lksb);
return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
}
static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
}
static int user_plock(struct ocfs2_cluster_connection *conn,
u64 ino,
struct file *file,
int cmd,
struct file_lock *fl)
{
/*
* This more or less just demuxes the plock request into any
	 * one of four dlm calls.
*
* Internally, fs/dlm will pass these to a misc device, which
* a userspace daemon will read and write to.
*/
if (cmd == F_CANCELLK)
return dlm_posix_cancel(conn->cc_lockspace, ino, file, fl);
else if (IS_GETLK(cmd))
return dlm_posix_get(conn->cc_lockspace, ino, file, fl);
else if (fl->fl_type == F_UNLCK)
return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl);
else
return dlm_posix_lock(conn->cc_lockspace, ino, file, cmd, fl);
}
/*
* Compare a requested locking protocol version against the current one.
*
* If the major numbers are different, they are incompatible.
* If the current minor is greater than the request, they are incompatible.
* If the current minor is less than or equal to the request, they are
* compatible, and the requester should run at the current minor version.
*/
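/*
 * For example, with an existing protocol of 1.2, a request for 1.4 is
 * compatible and is clamped down to 1.2, while a request for 1.1 is
 * rejected because the existing minor exceeds it.  Any request with a
 * different major (say 2.0) fails outright.
 */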
static int fs_protocol_compare(struct ocfs2_protocol_version *existing,
struct ocfs2_protocol_version *request)
{
if (existing->pv_major != request->pv_major)
return 1;
if (existing->pv_minor > request->pv_minor)
return 1;
if (existing->pv_minor < request->pv_minor)
request->pv_minor = existing->pv_minor;
return 0;
}
static void lvb_to_version(char *lvb, struct ocfs2_protocol_version *ver)
{
struct ocfs2_protocol_version *pv =
(struct ocfs2_protocol_version *)lvb;
/*
* ocfs2_protocol_version has two u8 variables, so we don't
* need any endian conversion.
*/
ver->pv_major = pv->pv_major;
ver->pv_minor = pv->pv_minor;
}
static void version_to_lvb(struct ocfs2_protocol_version *ver, char *lvb)
{
struct ocfs2_protocol_version *pv =
(struct ocfs2_protocol_version *)lvb;
/*
* ocfs2_protocol_version has two u8 variables, so we don't
* need any endian conversion.
*/
pv->pv_major = ver->pv_major;
pv->pv_minor = ver->pv_minor;
}
static void sync_wait_cb(void *arg)
{
struct ocfs2_cluster_connection *conn = arg;
struct ocfs2_live_connection *lc = conn->cc_private;
complete(&lc->oc_sync_wait);
}
static int sync_unlock(struct ocfs2_cluster_connection *conn,
struct dlm_lksb *lksb, char *name)
{
int error;
struct ocfs2_live_connection *lc = conn->cc_private;
error = dlm_unlock(conn->cc_lockspace, lksb->sb_lkid, 0, lksb, conn);
if (error) {
printk(KERN_ERR "%s lkid %x error %d\n",
name, lksb->sb_lkid, error);
return error;
}
wait_for_completion(&lc->oc_sync_wait);
if (lksb->sb_status != -DLM_EUNLOCK) {
printk(KERN_ERR "%s lkid %x status %d\n",
name, lksb->sb_lkid, lksb->sb_status);
return -1;
}
return 0;
}
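/*
 * Take a DLM lock and wait synchronously for its AST.  dlm_lock()
 * only queues the request; the final status lands in the lksb and is
 * signalled through sync_wait_cb().  -EAGAIN is an expected status
 * for DLM_LKF_NOQUEUE requests and is left for the caller to handle.
 */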
static int sync_lock(struct ocfs2_cluster_connection *conn,
int mode, uint32_t flags,
struct dlm_lksb *lksb, char *name)
{
int error, status;
struct ocfs2_live_connection *lc = conn->cc_private;
error = dlm_lock(conn->cc_lockspace, mode, lksb, flags,
name, strlen(name),
0, sync_wait_cb, conn, NULL);
if (error) {
printk(KERN_ERR "%s lkid %x flags %x mode %d error %d\n",
name, lksb->sb_lkid, flags, mode, error);
return error;
}
wait_for_completion(&lc->oc_sync_wait);
status = lksb->sb_status;
if (status && status != -EAGAIN) {
printk(KERN_ERR "%s lkid %x flags %x mode %d status %d\n",
name, lksb->sb_lkid, flags, mode, status);
}
return status;
}
static int version_lock(struct ocfs2_cluster_connection *conn, int mode,
int flags)
{
struct ocfs2_live_connection *lc = conn->cc_private;
return sync_lock(conn, mode, flags,
&lc->oc_version_lksb, VERSION_LOCK);
}
static int version_unlock(struct ocfs2_cluster_connection *conn)
{
struct ocfs2_live_connection *lc = conn->cc_private;
return sync_unlock(conn, &lc->oc_version_lksb, VERSION_LOCK);
}
/* get_protocol_version()
*
* To exchange ocfs2 versioning, we use the LVB of the version dlm lock.
* The algorithm is:
* 1. Attempt to take the lock in EX mode (non-blocking).
* 2. If successful (which means it is the first mount), write the
* version number and downconvert to PR lock.
* 3. If unsuccessful (returns -EAGAIN), read the version from the LVB after
* taking the PR lock.
*/
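/*
 * Concretely: the first mount in the cluster wins the EX lock, stamps
 * its maximum supported version into the LVB, and downconverts to PR.
 * A later mount gets -EAGAIN on the EX attempt, takes the PR lock and
 * reads the version back out of the LVB; if the stamped major differs
 * from its own, or the stamped minor is higher than it supports, the
 * connect fails with -EINVAL.
 */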
static int get_protocol_version(struct ocfs2_cluster_connection *conn)
{
int ret;
struct ocfs2_live_connection *lc = conn->cc_private;
struct ocfs2_protocol_version pv;
running_proto.pv_major =
ocfs2_user_plugin.sp_max_proto.pv_major;
running_proto.pv_minor =
ocfs2_user_plugin.sp_max_proto.pv_minor;
lc->oc_version_lksb.sb_lvbptr = lc->oc_lvb;
ret = version_lock(conn, DLM_LOCK_EX,
DLM_LKF_VALBLK|DLM_LKF_NOQUEUE);
if (!ret) {
conn->cc_version.pv_major = running_proto.pv_major;
conn->cc_version.pv_minor = running_proto.pv_minor;
version_to_lvb(&running_proto, lc->oc_lvb);
version_lock(conn, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
} else if (ret == -EAGAIN) {
ret = version_lock(conn, DLM_LOCK_PR, DLM_LKF_VALBLK);
if (ret)
goto out;
lvb_to_version(lc->oc_lvb, &pv);
if ((pv.pv_major != running_proto.pv_major) ||
(pv.pv_minor > running_proto.pv_minor)) {
ret = -EINVAL;
goto out;
}
conn->cc_version.pv_major = pv.pv_major;
conn->cc_version.pv_minor = pv.pv_minor;
}
out:
return ret;
}
static void user_recover_prep(void *arg)
{
}
static void user_recover_slot(void *arg, struct dlm_slot *slot)
{
struct ocfs2_cluster_connection *conn = arg;
printk(KERN_INFO "ocfs2: Node %d/%d down. Initiating recovery.\n",
slot->nodeid, slot->slot);
conn->cc_recovery_handler(slot->nodeid, conn->cc_recovery_data);
}
static void user_recover_done(void *arg, struct dlm_slot *slots,
int num_slots, int our_slot,
uint32_t generation)
{
struct ocfs2_cluster_connection *conn = arg;
struct ocfs2_live_connection *lc = conn->cc_private;
int i;
for (i = 0; i < num_slots; i++)
if (slots[i].slot == our_slot) {
atomic_set(&lc->oc_this_node, slots[i].nodeid);
break;
}
lc->oc_our_slot = our_slot;
wake_up(&lc->oc_wait);
}
static const struct dlm_lockspace_ops ocfs2_ls_ops = {
.recover_prep = user_recover_prep,
.recover_slot = user_recover_slot,
.recover_done = user_recover_done,
};
static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn)
{
version_unlock(conn);
dlm_release_lockspace(conn->cc_lockspace, 2);
conn->cc_lockspace = NULL;
ocfs2_live_connection_drop(conn->cc_private);
conn->cc_private = NULL;
return 0;
}
static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
{
dlm_lockspace_t *fsdlm;
struct ocfs2_live_connection *lc;
int rc, ops_rv;
BUG_ON(conn == NULL);
lc = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
if (!lc)
return -ENOMEM;
init_waitqueue_head(&lc->oc_wait);
init_completion(&lc->oc_sync_wait);
atomic_set(&lc->oc_this_node, 0);
conn->cc_private = lc;
lc->oc_type = NO_CONTROLD;
rc = dlm_new_lockspace(conn->cc_name, conn->cc_cluster_name,
DLM_LSFL_NEWEXCL, DLM_LVB_LEN,
&ocfs2_ls_ops, conn, &ops_rv, &fsdlm);
if (rc) {
if (rc == -EEXIST || rc == -EPROTO)
printk(KERN_ERR "ocfs2: Unable to create the "
"lockspace %s (%d), because a ocfs2-tools "
"program is running on this file system "
"with the same name lockspace\n",
conn->cc_name, rc);
goto out;
}
if (ops_rv == -EOPNOTSUPP) {
lc->oc_type = WITH_CONTROLD;
printk(KERN_NOTICE "ocfs2: You seem to be using an older "
"version of dlm_controld and/or ocfs2-tools."
" Please consider upgrading.\n");
} else if (ops_rv) {
rc = ops_rv;
goto out;
}
conn->cc_lockspace = fsdlm;
rc = ocfs2_live_connection_attach(conn, lc);
if (rc)
goto out;
if (lc->oc_type == NO_CONTROLD) {
rc = get_protocol_version(conn);
if (rc) {
printk(KERN_ERR "ocfs2: Could not determine"
" locking version\n");
user_cluster_disconnect(conn);
goto out;
}
wait_event(lc->oc_wait, (atomic_read(&lc->oc_this_node) > 0));
}
/*
* running_proto must have been set before we allowed any mounts
* to proceed.
*/
if (fs_protocol_compare(&running_proto, &conn->cc_version)) {
printk(KERN_ERR
"Unable to mount with fs locking protocol version "
"%u.%u because negotiated protocol is %u.%u\n",
conn->cc_version.pv_major, conn->cc_version.pv_minor,
running_proto.pv_major, running_proto.pv_minor);
rc = -EPROTO;
ocfs2_live_connection_drop(lc);
lc = NULL;
}
out:
if (rc)
kfree(lc);
return rc;
}
static int user_cluster_this_node(struct ocfs2_cluster_connection *conn,
unsigned int *this_node)
{
int rc;
struct ocfs2_live_connection *lc = conn->cc_private;
if (lc->oc_type == WITH_CONTROLD)
rc = ocfs2_control_get_this_node();
else if (lc->oc_type == NO_CONTROLD)
rc = atomic_read(&lc->oc_this_node);
else
rc = -EINVAL;
if (rc < 0)
return rc;
*this_node = rc;
return 0;
}
static struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
.connect = user_cluster_connect,
.disconnect = user_cluster_disconnect,
.this_node = user_cluster_this_node,
.dlm_lock = user_dlm_lock,
.dlm_unlock = user_dlm_unlock,
.lock_status = user_dlm_lock_status,
.lvb_valid = user_dlm_lvb_valid,
.lock_lvb = user_dlm_lvb,
.plock = user_plock,
.dump_lksb = user_dlm_dump_lksb,
};
static struct ocfs2_stack_plugin ocfs2_user_plugin = {
.sp_name = "user",
.sp_ops = &ocfs2_user_plugin_ops,
.sp_owner = THIS_MODULE,
};
static int __init ocfs2_user_plugin_init(void)
{
int rc;
rc = ocfs2_control_init();
if (!rc) {
rc = ocfs2_stack_glue_register(&ocfs2_user_plugin);
if (rc)
ocfs2_control_exit();
}
return rc;
}
static void __exit ocfs2_user_plugin_exit(void)
{
ocfs2_stack_glue_unregister(&ocfs2_user_plugin);
ocfs2_control_exit();
}
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("ocfs2 driver for userspace cluster stacks");
MODULE_LICENSE("GPL");
module_init(ocfs2_user_plugin_init);
module_exit(ocfs2_user_plugin_exit);
| linux-master | fs/ocfs2/stack_user.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* uptodate.c
*
* Tracking the up-to-date-ness of a local buffer_head with respect to
* the cluster.
*
* Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved.
*
* Standard buffer head caching flags (uptodate, etc) are insufficient
* in a clustered environment - a buffer may be marked up to date on
* our local node but could have been modified by another cluster
 * member. As a result, an additional (and performant) caching scheme
* is required. A further requirement is that we consume as little
* memory as possible - we never pin buffer_head structures in order
* to cache them.
*
* We track the existence of up to date buffers on the inodes which
* are associated with them. Because we don't want to pin
* buffer_heads, this is only a (strong) hint and several other checks
* are made in the I/O path to ensure that we don't use a stale or
* invalid buffer without going to disk:
* - buffer_jbd is used liberally - if a bh is in the journal on
* this node then it *must* be up to date.
* - the standard buffer_uptodate() macro is used to detect buffers
* which may be invalid (even if we have an up to date tracking
* item for them)
*
* For a full understanding of how this code works together, one
* should read the callers in dlmglue.c, the I/O functions in
* buffer_head_io.c and ocfs2_journal_access in journal.c
*/
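/*
 * Concretely, each cache starts life as a small inline array of block
 * numbers (OCFS2_CACHE_INFO_MAX_ARRAY entries).  Once the array fills
 * up, it is promoted to an rb-tree of separately allocated
 * ocfs2_meta_cache_item structures; it is never demoted back to the
 * array form short of a full purge.
 */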
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
struct ocfs2_meta_cache_item {
struct rb_node c_node;
sector_t c_block;
};
static struct kmem_cache *ocfs2_uptodate_cachep;
u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
return ci->ci_ops->co_owner(ci);
}
struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
return ci->ci_ops->co_get_super(ci);
}
static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
ci->ci_ops->co_cache_lock(ci);
}
static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
ci->ci_ops->co_cache_unlock(ci);
}
void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
ci->ci_ops->co_io_lock(ci);
}
void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
{
BUG_ON(!ci || !ci->ci_ops);
ci->ci_ops->co_io_unlock(ci);
}
static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
int clear)
{
ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
ci->ci_num_cached = 0;
if (clear) {
ci->ci_created_trans = 0;
ci->ci_last_trans = 0;
}
}
void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
const struct ocfs2_caching_operations *ops)
{
BUG_ON(!ops);
ci->ci_ops = ops;
ocfs2_metadata_cache_reset(ci, 1);
}
void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
{
ocfs2_metadata_cache_purge(ci);
ocfs2_metadata_cache_reset(ci, 1);
}
/* No lock taken here as 'root' is not expected to be visible to other
* processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
unsigned int purged = 0;
struct rb_node *node;
struct ocfs2_meta_cache_item *item;
while ((node = rb_last(root)) != NULL) {
item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
trace_ocfs2_purge_copied_metadata_tree(
(unsigned long long) item->c_block);
rb_erase(&item->c_node, root);
kmem_cache_free(ocfs2_uptodate_cachep, item);
purged++;
}
return purged;
}
/* Called from locking and called from ocfs2_clear_inode. Dump the
* cache for a given inode.
*
 * This function is a few lines longer than necessary due to some
* accounting done here, but I think it's worth tracking down those
* bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
unsigned int tree, to_purge, purged;
struct rb_root root = RB_ROOT;
BUG_ON(!ci || !ci->ci_ops);
ocfs2_metadata_cache_lock(ci);
tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
to_purge = ci->ci_num_cached;
trace_ocfs2_metadata_cache_purge(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
to_purge, tree);
/* If we're a tree, save off the root so that we can safely
* initialize the cache. We do the work to free tree members
* without the spinlock. */
if (tree)
root = ci->ci_cache.ci_tree;
ocfs2_metadata_cache_reset(ci, 0);
ocfs2_metadata_cache_unlock(ci);
purged = ocfs2_purge_copied_metadata_tree(&root);
/* If possible, track the number wiped so that we can more
* easily detect counting errors. Unfortunately, this is only
* meaningful for trees. */
if (tree && purged != to_purge)
mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
to_purge, purged);
}
/* Returns the index in the cache array, -1 if not found.
* Requires ip_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
sector_t item)
{
int i;
for (i = 0; i < ci->ci_num_cached; i++) {
if (item == ci->ci_cache.ci_array[i])
return i;
}
return -1;
}
/* Returns the cache item if found, otherwise NULL.
* Requires ip_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
sector_t block)
{
	struct rb_node *n = ci->ci_cache.ci_tree.rb_node;
struct ocfs2_meta_cache_item *item = NULL;
while (n) {
item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);
if (block < item->c_block)
n = n->rb_left;
else if (block > item->c_block)
n = n->rb_right;
else
return item;
}
return NULL;
}
static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
int index = -1;
struct ocfs2_meta_cache_item *item = NULL;
ocfs2_metadata_cache_lock(ci);
trace_ocfs2_buffer_cached_begin(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long) bh->b_blocknr,
!!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
index = ocfs2_search_cache_array(ci, bh->b_blocknr);
else
item = ocfs2_search_cache_tree(ci, bh->b_blocknr);
ocfs2_metadata_cache_unlock(ci);
trace_ocfs2_buffer_cached_end(index, item);
return (index != -1) || (item != NULL);
}
/* Warning: even if it returns true, this does *not* guarantee that
* the block is stored in our inode metadata cache.
*
* This can be called under lock_buffer()
*/
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
/* Doesn't matter if the bh is in our cache or not -- if it's
* not marked uptodate then we know it can't have correct
* data. */
if (!buffer_uptodate(bh))
return 0;
/* OCFS2 does not allow multiple nodes to be changing the same
* block at the same time. */
if (buffer_jbd(bh))
return 1;
/* Ok, locally the buffer is marked as up to date, now search
* our cache to see if we can trust that. */
return ocfs2_buffer_cached(ci, bh);
}
/*
* Determine whether a buffer is currently out on a read-ahead request.
* ci_io_sem should be held to serialize submitters with the logic here.
*/
int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh);
}
/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
sector_t block)
{
BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
trace_ocfs2_append_cache_array(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)block, ci->ci_num_cached);
ci->ci_cache.ci_array[ci->ci_num_cached] = block;
ci->ci_num_cached++;
}
/* By now the caller should have checked that the item does *not*
* exist in the tree.
* Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *new)
{
sector_t block = new->c_block;
struct rb_node *parent = NULL;
struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
struct ocfs2_meta_cache_item *tmp;
trace_ocfs2_insert_cache_tree(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)block, ci->ci_num_cached);
	while (*p) {
parent = *p;
tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);
if (block < tmp->c_block)
p = &(*p)->rb_left;
else if (block > tmp->c_block)
p = &(*p)->rb_right;
else {
/* This should never happen! */
mlog(ML_ERROR, "Duplicate block %llu cached!\n",
(unsigned long long) block);
BUG();
}
}
rb_link_node(&new->c_node, parent, p);
rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
ci->ci_num_cached++;
}
/* co_cache_lock() must be held */
static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
{
return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}
/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
* pointers in tree after we use them - this allows caller to detect
* when to free in case of error.
*
* The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item **tree)
{
int i;
mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
"Owner %llu, num cached = %u, should be %u\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
"Owner %llu not marked as inline anymore!\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci));
/* Be careful to initialize the tree members *first* because
* once the ci_tree is used, the array is junk... */
for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
tree[i]->c_block = ci->ci_cache.ci_array[i];
ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
ci->ci_cache.ci_tree = RB_ROOT;
/* this will be set again by __ocfs2_insert_cache_tree */
ci->ci_num_cached = 0;
for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
__ocfs2_insert_cache_tree(ci, tree[i]);
tree[i] = NULL;
}
trace_ocfs2_expand_cache(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
ci->ci_flags, ci->ci_num_cached);
}
/* Slow path function - memory allocation is necessary. See the
* comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
sector_t block,
int expand_tree)
{
int i;
struct ocfs2_meta_cache_item *new = NULL;
struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
{ NULL, };
trace_ocfs2_set_buffer_uptodate(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)block, expand_tree);
new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
mlog_errno(-ENOMEM);
return;
}
new->c_block = block;
if (expand_tree) {
/* Do *not* allocate an array here - the removal code
* has no way of tracking that. */
for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
GFP_NOFS);
if (!tree[i]) {
mlog_errno(-ENOMEM);
goto out_free;
}
/* These are initialized in ocfs2_expand_cache! */
}
}
ocfs2_metadata_cache_lock(ci);
if (ocfs2_insert_can_use_array(ci)) {
/* Ok, items were removed from the cache in between
* locks. Detect this and revert back to the fast path */
ocfs2_append_cache_array(ci, block);
ocfs2_metadata_cache_unlock(ci);
goto out_free;
}
if (expand_tree)
ocfs2_expand_cache(ci, tree);
__ocfs2_insert_cache_tree(ci, new);
ocfs2_metadata_cache_unlock(ci);
new = NULL;
out_free:
if (new)
kmem_cache_free(ocfs2_uptodate_cachep, new);
/* If these were used, then ocfs2_expand_cache re-set them to
* NULL for us. */
if (tree[0]) {
for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
if (tree[i])
kmem_cache_free(ocfs2_uptodate_cachep,
tree[i]);
}
}
/* Item insertion is guarded by co_io_lock(), so the insertion path takes
* advantage of this by not rechecking for a duplicate insert during
* the slow case. Additionally, if the cache needs to be bumped up to
* a tree, the code will not recheck after acquiring the lock --
* multiple paths cannot be expanding to a tree at the same time.
*
* The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory. In those cases, it reverts to the fast
* path.
*
* Note that this function may actually fail to insert the block if
* memory cannot be allocated. This is not fatal however (but may
 * result in a performance penalty).
*
* Readahead buffers can be passed in here before the I/O request is
* completed.
*/
void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
int expand;
/* The block may very well exist in our cache already, so avoid
* doing any more work in that case. */
if (ocfs2_buffer_cached(ci, bh))
return;
trace_ocfs2_set_buffer_uptodate_begin(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)bh->b_blocknr);
/* No need to recheck under spinlock - insertion is guarded by
* co_io_lock() */
ocfs2_metadata_cache_lock(ci);
if (ocfs2_insert_can_use_array(ci)) {
/* Fast case - it's an array and there's a free
* spot. */
ocfs2_append_cache_array(ci, bh->b_blocknr);
ocfs2_metadata_cache_unlock(ci);
return;
}
expand = 0;
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
/* We need to bump things up to a tree. */
expand = 1;
}
ocfs2_metadata_cache_unlock(ci);
__ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
}
/* Called against a newly allocated buffer. Most likely nobody should
* be able to read this sort of metadata while it's still being
* allocated, but this is careful to take co_io_lock() anyway. */
void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
/* This should definitely *not* exist in our cache */
BUG_ON(ocfs2_buffer_cached(ci, bh));
set_buffer_uptodate(bh);
ocfs2_metadata_cache_io_lock(ci);
ocfs2_set_buffer_uptodate(ci, bh);
ocfs2_metadata_cache_io_unlock(ci);
}
/* Requires ip_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
int index)
{
sector_t *array = ci->ci_cache.ci_array;
int bytes;
BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
BUG_ON(index >= ci->ci_num_cached);
BUG_ON(!ci->ci_num_cached);
trace_ocfs2_remove_metadata_array(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
index, ci->ci_num_cached);
ci->ci_num_cached--;
/* don't need to copy if the array is now empty, or if we
* removed at the tail */
if (ci->ci_num_cached && index < ci->ci_num_cached) {
bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
memmove(&array[index], &array[index + 1], bytes);
}
}
/* Requires ip_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *item)
{
trace_ocfs2_remove_metadata_tree(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)item->c_block);
rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
ci->ci_num_cached--;
}
static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
sector_t block)
{
int index;
struct ocfs2_meta_cache_item *item = NULL;
ocfs2_metadata_cache_lock(ci);
trace_ocfs2_remove_block_from_cache(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long) block, ci->ci_num_cached,
ci->ci_flags);
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
index = ocfs2_search_cache_array(ci, block);
if (index != -1)
ocfs2_remove_metadata_array(ci, index);
} else {
item = ocfs2_search_cache_tree(ci, block);
if (item)
ocfs2_remove_metadata_tree(ci, item);
}
ocfs2_metadata_cache_unlock(ci);
if (item)
kmem_cache_free(ocfs2_uptodate_cachep, item);
}
/*
* Called when we remove a chunk of metadata from an inode. We don't
* bother reverting things to an inlined array in the case of a remove
* which moves us back under the limit.
*/
void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
sector_t block = bh->b_blocknr;
ocfs2_remove_block_from_cache(ci, block);
}
/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
sector_t block,
u32 c_len)
{
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;
for (i = 0; i < b_len; i++, block++)
ocfs2_remove_block_from_cache(ci, block);
}
int __init init_ocfs2_uptodate_cache(void)
{
ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
sizeof(struct ocfs2_meta_cache_item),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!ocfs2_uptodate_cachep)
return -ENOMEM;
return 0;
}
void exit_ocfs2_uptodate_cache(void)
{
kmem_cache_destroy(ocfs2_uptodate_cachep);
}
| linux-master | fs/ocfs2/uptodate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* refcounttree.c
*
* Copyright (C) 2009 Oracle. All rights reserved.
*/
#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
struct ocfs2_cow_context {
struct inode *inode;
u32 cow_start;
u32 cow_len;
struct ocfs2_extent_tree data_et;
struct ocfs2_refcount_tree *ref_tree;
struct buffer_head *ref_root_bh;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_alloc_context *data_ac;
struct ocfs2_cached_dealloc_ctxt dealloc;
void *cow_object;
struct ocfs2_post_refcount *post_refcount;
int extra_credits;
int (*get_clusters)(struct ocfs2_cow_context *context,
u32 v_cluster, u32 *p_cluster,
u32 *num_clusters,
unsigned int *extent_flags);
int (*cow_duplicate_clusters)(handle_t *handle,
struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
};
static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}
static int ocfs2_validate_refcount_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)bh->b_data;
trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
if (rc) {
mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
(unsigned long long)bh->b_blocknr);
return rc;
}
if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
rc = ocfs2_error(sb,
"Refcount block #%llu has bad signature %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
rb->rf_signature);
goto out;
}
if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
rc = ocfs2_error(sb,
"Refcount block #%llu has an invalid rf_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(rb->rf_blkno));
goto out;
}
if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
rc = ocfs2_error(sb,
"Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(rb->rf_fs_generation));
goto out;
}
out:
return rc;
}
static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
u64 rb_blkno,
struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_block(ci, rb_blkno, &tmp,
ocfs2_validate_refcount_block);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
return rf->rf_blkno;
}
static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
return rf->rf_sb;
}
static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
__acquires(&rf->rf_lock)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
spin_lock(&rf->rf_lock);
}
static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
__releases(&rf->rf_lock)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
spin_unlock(&rf->rf_lock);
}
static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
mutex_lock(&rf->rf_io_mutex);
}
static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
mutex_unlock(&rf->rf_io_mutex);
}
static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
.co_owner = ocfs2_refcount_cache_owner,
.co_get_super = ocfs2_refcount_cache_get_super,
.co_cache_lock = ocfs2_refcount_cache_lock,
.co_cache_unlock = ocfs2_refcount_cache_unlock,
.co_io_lock = ocfs2_refcount_cache_io_lock,
.co_io_unlock = ocfs2_refcount_cache_io_unlock,
};
static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
struct ocfs2_refcount_tree *tree = NULL;
while (n) {
tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
if (blkno < tree->rf_blkno)
n = n->rb_left;
else if (blkno > tree->rf_blkno)
n = n->rb_right;
else
return tree;
}
return NULL;
}
/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *new)
{
u64 rf_blkno = new->rf_blkno;
struct rb_node *parent = NULL;
struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
struct ocfs2_refcount_tree *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct ocfs2_refcount_tree,
rf_node);
if (rf_blkno < tmp->rf_blkno)
p = &(*p)->rb_left;
else if (rf_blkno > tmp->rf_blkno)
p = &(*p)->rb_right;
else {
/* This should never happen! */
mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
(unsigned long long)rf_blkno);
BUG();
}
}
rb_link_node(&new->rf_node, parent, p);
rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}
static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
ocfs2_metadata_cache_exit(&tree->rf_ci);
ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
ocfs2_lock_res_free(&tree->rf_lockres);
kfree(tree);
}
static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *tree)
{
rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
osb->osb_ref_tree_lru = NULL;
}
static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *tree)
{
spin_lock(&osb->osb_lock);
ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
spin_unlock(&osb->osb_lock);
}
static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
struct ocfs2_refcount_tree *tree =
container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
ocfs2_free_refcount_tree(tree);
}
static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
kref_get(&tree->rf_getcnt);
}
static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}
static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
struct super_block *sb)
{
ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
mutex_init(&new->rf_io_mutex);
new->rf_sb = sb;
spin_lock_init(&new->rf_lock);
}
static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *new,
u64 rf_blkno, u32 generation)
{
init_rwsem(&new->rf_sem);
ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
rf_blkno, generation);
}
static struct ocfs2_refcount_tree *
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
struct ocfs2_refcount_tree *new;
new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
if (!new)
return NULL;
new->rf_blkno = rf_blkno;
kref_init(&new->rf_getcnt);
ocfs2_init_refcount_tree_ci(new, osb->sb);
return new;
}
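/*
 * Look up, or create and insert, the in-memory refcount tree object
 * for rf_blkno.  A one-entry cache (osb_ref_tree_lru) short-circuits
 * the rb-tree walk for the common case of repeated access to the
 * same tree.
 */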
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
struct ocfs2_refcount_tree **ret_tree)
{
int ret = 0;
struct ocfs2_refcount_tree *tree, *new = NULL;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_block *ref_rb;
spin_lock(&osb->osb_lock);
if (osb->osb_ref_tree_lru &&
osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
tree = osb->osb_ref_tree_lru;
else
tree = ocfs2_find_refcount_tree(osb, rf_blkno);
if (tree)
goto out;
spin_unlock(&osb->osb_lock);
new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
if (!new) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
/*
	 * We need the generation to create the refcount tree lock, and since
	 * it is not changed during tree modification, we are safe to read it
	 * here without protection.
	 * We also have to purge the cache after we create the lock, since the
	 * refcount block may contain stale data. It can only be trusted when
	 * we hold the refcount lock.
*/
ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
if (ret) {
mlog_errno(ret);
ocfs2_metadata_cache_exit(&new->rf_ci);
kfree(new);
return ret;
}
ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
new->rf_generation);
ocfs2_metadata_cache_purge(&new->rf_ci);
spin_lock(&osb->osb_lock);
tree = ocfs2_find_refcount_tree(osb, rf_blkno);
if (tree)
goto out;
ocfs2_insert_refcount_tree(osb, new);
tree = new;
new = NULL;
out:
*ret_tree = tree;
osb->osb_ref_tree_lru = tree;
spin_unlock(&osb->osb_lock);
if (new)
ocfs2_free_refcount_tree(new);
brelse(ref_root_bh);
return ret;
}
static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
int ret;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(!ocfs2_is_refcount_inode(inode));
di = (struct ocfs2_dinode *)di_bh->b_data;
*ref_blkno = le64_to_cpu(di->i_refcount_loc);
brelse(di_bh);
out:
return ret;
}
static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *tree, int rw)
{
int ret;
ret = ocfs2_refcount_lock(tree, rw);
if (ret) {
mlog_errno(ret);
goto out;
}
if (rw)
down_write(&tree->rf_sem);
else
down_read(&tree->rf_sem);
out:
return ret;
}
/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and read the refcount block, so
 * read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, we free the
 * old one and re-create it.
*/
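/*
 * The rw argument selects write (1) or read (0) access: the cluster
 * lock and the local rf_sem are both taken in the matching mode, and
 * the same value must be passed to ocfs2_unlock_refcount_tree().
 */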
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
u64 ref_blkno, int rw,
struct ocfs2_refcount_tree **ret_tree,
struct buffer_head **ref_bh)
{
int ret, delete_tree = 0;
struct ocfs2_refcount_tree *tree = NULL;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_block *rb;
again:
ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
if (ret) {
mlog_errno(ret);
return ret;
}
ocfs2_refcount_tree_get(tree);
ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
if (ret) {
mlog_errno(ret);
ocfs2_refcount_tree_put(tree);
goto out;
}
ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
&ref_root_bh);
if (ret) {
mlog_errno(ret);
ocfs2_unlock_refcount_tree(osb, tree, rw);
goto out;
}
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
/*
* If the refcount block has been freed and re-created, we may need
* to recreate the refcount tree also.
*
* Here we just remove the tree from the rb-tree, and the last
* kref holder will unlock and delete this refcount_tree.
* Then we goto "again" and ocfs2_get_refcount_tree will create
* the new refcount tree for us.
*/
if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
if (!tree->rf_removed) {
ocfs2_erase_refcount_tree_from_list(osb, tree);
tree->rf_removed = 1;
delete_tree = 1;
}
ocfs2_unlock_refcount_tree(osb, tree, rw);
/*
* We get an extra reference when we create the refcount
* tree, so another put will destroy it.
*/
if (delete_tree)
ocfs2_refcount_tree_put(tree);
brelse(ref_root_bh);
ref_root_bh = NULL;
goto again;
}
*ret_tree = tree;
if (ref_bh) {
*ref_bh = ref_root_bh;
ref_root_bh = NULL;
}
out:
brelse(ref_root_bh);
return ret;
}
void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *tree, int rw)
{
if (rw)
up_write(&tree->rf_sem);
else
up_read(&tree->rf_sem);
ocfs2_refcount_unlock(tree, rw);
ocfs2_refcount_tree_put(tree);
}
void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
struct rb_node *node;
struct ocfs2_refcount_tree *tree;
struct rb_root *root = &osb->osb_rf_lock_tree;
while ((node = rb_last(root)) != NULL) {
tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
trace_ocfs2_purge_refcount_trees(
(unsigned long long) tree->rf_blkno);
rb_erase(&tree->rf_node, root);
ocfs2_free_refcount_tree(tree);
}
}
/*
* Create a refcount tree for an inode.
* We take for granted that the inode is already locked.
*/
static int ocfs2_create_refcount_tree(struct inode *inode,
struct buffer_head *di_bh)
{
int ret;
handle_t *handle = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *new_bh = NULL;
struct ocfs2_refcount_block *rb;
struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
u16 suballoc_bit_start;
u32 num_got;
u64 suballoc_loc, first_blkno;
BUG_ON(ocfs2_is_refcount_inode(inode));
trace_ocfs2_create_refcount_tree(
(unsigned long long)oi->ip_blkno);
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
if (!new_tree) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit;
}
new_bh = sb_getblk(inode->i_sb, first_blkno);
if (!new_bh) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit;
}
ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/* Initialize ocfs2_refcount_block. */
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
rb->rf_count = cpu_to_le32(1);
rb->rf_records.rl_count =
cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
spin_lock(&osb->osb_lock);
rb->rf_generation = osb->s_next_generation++;
spin_unlock(&osb->osb_lock);
ocfs2_journal_dirty(handle, new_bh);
spin_lock(&oi->ip_lock);
oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
di->i_refcount_loc = cpu_to_le64(first_blkno);
spin_unlock(&oi->ip_lock);
trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
ocfs2_journal_dirty(handle, di_bh);
/*
* We have to init the tree lock here since it will use
* the generation number to create it.
*/
new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
new_tree->rf_generation);
spin_lock(&osb->osb_lock);
tree = ocfs2_find_refcount_tree(osb, first_blkno);
/*
* We've just created a new refcount tree in this block. If
* we found a refcount tree on the ocfs2_super, it must be
* one we just deleted. We free the old tree before
* inserting the new tree.
*/
BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
if (tree)
ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
ocfs2_insert_refcount_tree(osb, new_tree);
spin_unlock(&osb->osb_lock);
new_tree = NULL;
if (tree)
ocfs2_refcount_tree_put(tree);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (new_tree) {
ocfs2_metadata_cache_exit(&new_tree->rf_ci);
kfree(new_tree);
}
brelse(new_bh);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
return ret;
}
static int ocfs2_set_refcount_tree(struct inode *inode,
struct buffer_head *di_bh,
u64 refcount_loc)
{
int ret;
handle_t *handle = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_block *rb;
struct ocfs2_refcount_tree *ref_tree;
BUG_ON(ocfs2_is_refcount_inode(inode));
ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
&ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
return ret;
}
handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
le32_add_cpu(&rb->rf_count, 1);
ocfs2_journal_dirty(handle, ref_root_bh);
spin_lock(&oi->ip_lock);
oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
di->i_refcount_loc = cpu_to_le64(refcount_loc);
spin_unlock(&oi->ip_lock);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
return ret;
}
int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
int ret, delete_tree = 0;
handle_t *handle = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_refcount_block *rb;
struct inode *alloc_inode = NULL;
struct buffer_head *alloc_bh = NULL;
struct buffer_head *blk_bh = NULL;
struct ocfs2_refcount_tree *ref_tree;
int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
u16 bit = 0;
if (!ocfs2_is_refcount_inode(inode))
return 0;
BUG_ON(!ref_blkno);
ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
if (ret) {
mlog_errno(ret);
return ret;
}
rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
/*
* If we are the last user, we need to free the block.
* So lock the allocator ahead.
*/
if (le32_to_cpu(rb->rf_count) == 1) {
blk = le64_to_cpu(rb->rf_blkno);
bit = le16_to_cpu(rb->rf_suballoc_bit);
if (rb->rf_suballoc_loc)
bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
else
bg_blkno = ocfs2_which_suballoc_group(blk, bit);
alloc_inode = ocfs2_get_system_file_inode(osb,
EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(rb->rf_suballoc_slot));
if (!alloc_inode) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
inode_lock(alloc_inode);
ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
if (ret) {
mlog_errno(ret);
goto out_mutex;
}
credits += OCFS2_SUBALLOC_FREE;
}
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
spin_lock(&oi->ip_lock);
oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
di->i_refcount_loc = 0;
spin_unlock(&oi->ip_lock);
ocfs2_journal_dirty(handle, di_bh);
	le32_add_cpu(&rb->rf_count, -1);
ocfs2_journal_dirty(handle, blk_bh);
if (!rb->rf_count) {
delete_tree = 1;
ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
alloc_bh, bit, bg_blkno, 1);
if (ret)
mlog_errno(ret);
}
out_commit:
ocfs2_commit_trans(osb, handle);
out_unlock:
if (alloc_inode) {
ocfs2_inode_unlock(alloc_inode, 1);
brelse(alloc_bh);
}
out_mutex:
if (alloc_inode) {
inode_unlock(alloc_inode);
iput(alloc_inode);
}
out:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
if (delete_tree)
ocfs2_refcount_tree_put(ref_tree);
brelse(blk_bh);
return ret;
}
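/*
 * Scan the record list of one refcount block for the record covering
 * cpos.  If cpos lands in a hole, fabricate a record with
 * r_refcount = 0 spanning from cpos to the start of the next real
 * record, clipped to len clusters.  *index is set to the slot the
 * record occupies (or would occupy) in the list.
 */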
static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
struct buffer_head *ref_leaf_bh,
u64 cpos, unsigned int len,
struct ocfs2_refcount_rec *ret_rec,
int *index)
{
int i = 0;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_rec *rec = NULL;
for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
rec = &rb->rf_records.rl_recs[i];
if (le64_to_cpu(rec->r_cpos) +
le32_to_cpu(rec->r_clusters) <= cpos)
continue;
else if (le64_to_cpu(rec->r_cpos) > cpos)
break;
		/* ok, cpos falls in this rec. Just return. */
if (ret_rec)
*ret_rec = *rec;
goto out;
}
if (ret_rec) {
		/* We've hit a hole here, so fake the rec. */
ret_rec->r_cpos = cpu_to_le64(cpos);
ret_rec->r_refcount = 0;
if (i < le16_to_cpu(rb->rf_records.rl_used) &&
le64_to_cpu(rec->r_cpos) < cpos + len)
ret_rec->r_clusters =
cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
else
ret_rec->r_clusters = cpu_to_le32(len);
}
out:
*index = i;
}
/*
* Try to remove refcount tree. The mechanism is:
* 1) Check whether i_clusters == 0, if no, exit.
* 2) check whether we have i_xattr_loc in dinode. if yes, exit.
* 3) Check whether we have inline xattr stored outside, if yes, exit.
* 4) Remove the tree.
*/
int ocfs2_try_remove_refcount_tree(struct inode *inode,
struct buffer_head *di_bh)
{
int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
down_write(&oi->ip_xattr_sem);
down_write(&oi->ip_alloc_sem);
if (oi->ip_clusters)
goto out;
if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
goto out;
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
ocfs2_has_inline_xattr_value_outside(inode, di))
goto out;
ret = ocfs2_remove_refcount_tree(inode, di_bh);
if (ret)
mlog_errno(ret);
out:
up_write(&oi->ip_alloc_sem);
up_write(&oi->ip_xattr_sem);
return 0;
}
/*
* Find the end range for a leaf refcount block indicated by
* el->l_recs[index].e_blkno.
*/
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct ocfs2_extent_block *eb,
struct ocfs2_extent_list *el,
int index, u32 *cpos_end)
{
int ret, i, subtree_root;
u32 cpos;
u64 blkno;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_path *left_path = NULL, *right_path = NULL;
struct ocfs2_extent_tree et;
struct ocfs2_extent_list *tmp_el;
if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
/*
		 * We have an extent rec after index, so just use the e_cpos
* of the next extent rec.
*/
*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
return 0;
}
if (!eb || !eb->h_next_leaf_blk) {
/*
		 * We are the last extent rec, so any higher cpos should
* be stored in this leaf refcount block.
*/
*cpos_end = UINT_MAX;
return 0;
}
/*
* If the extent block isn't the last one, we have to find
* the subtree root between this extent block and the next
* leaf extent block and get the corresponding e_cpos from
* the subroot. Otherwise we may corrupt the b-tree.
*/
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
left_path = ocfs2_new_path_from_et(&et);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
ret = ocfs2_find_path(ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
right_path = ocfs2_new_path_from_path(left_path);
if (!right_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(ci, right_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
subtree_root = ocfs2_find_subtree_root(&et, left_path,
right_path);
tmp_el = left_path->p_node[subtree_root].el;
blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
break;
}
}
BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
out:
ocfs2_free_path(left_path);
ocfs2_free_path(right_path);
return ret;
}
/*
* Given a cpos and len, try to find the refcount record which contains cpos.
* 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts from cpos
 * and ends at the smaller of cpos+len and the start of the next record.
* This fake record has r_refcount = 0.
*/
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
u64 cpos, unsigned int len,
struct ocfs2_refcount_rec *ret_rec,
int *index,
struct buffer_head **ret_bh)
{
int ret = 0, i, found;
u32 low_cpos, cpos_end;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec = NULL;
struct ocfs2_extent_block *eb = NULL;
struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
ret_rec, index);
*ret_bh = ref_root_bh;
get_bh(ref_root_bh);
return 0;
}
el = &rb->rf_list;
low_cpos = cpos & OCFS2_32BIT_POS_MASK;
if (el->l_tree_depth) {
ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ret = ocfs2_error(sb,
"refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)eb_bh->b_blocknr);
goto out;
}
}
found = 0;
for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
rec = &el->l_recs[i];
if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
found = 1;
break;
}
}
if (found) {
ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
eb, el, i, &cpos_end);
if (ret) {
mlog_errno(ret);
goto out;
}
if (cpos_end < low_cpos + len)
len = cpos_end - low_cpos;
}
ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
&ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
ret_rec, index);
*ret_bh = ref_leaf_bh;
out:
brelse(eb_bh);
return ret;
}
enum ocfs2_ref_rec_contig {
REF_CONTIG_NONE = 0,
REF_CONTIG_LEFT,
REF_CONTIG_RIGHT,
REF_CONTIG_LEFTRIGHT,
};
static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
int index)
{
if ((rb->rf_records.rl_recs[index].r_refcount ==
rb->rf_records.rl_recs[index + 1].r_refcount) &&
(le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
return REF_CONTIG_RIGHT;
return REF_CONTIG_NONE;
}
static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
int index)
{
enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
ret = ocfs2_refcount_rec_adjacent(rb, index);
if (index > 0) {
enum ocfs2_ref_rec_contig tmp;
tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
if (tmp == REF_CONTIG_RIGHT) {
if (ret == REF_CONTIG_RIGHT)
ret = REF_CONTIG_LEFTRIGHT;
else
ret = REF_CONTIG_LEFT;
}
}
return ret;
}
static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
int index)
{
BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
rb->rf_records.rl_recs[index+1].r_refcount);
le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
memmove(&rb->rf_records.rl_recs[index + 1],
&rb->rf_records.rl_recs[index + 2],
sizeof(struct ocfs2_refcount_rec) *
(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
0, sizeof(struct ocfs2_refcount_rec));
le16_add_cpu(&rb->rf_records.rl_used, -1);
}
/*
* Merge the refcount rec if we are contiguous with the adjacent recs.
*/
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
int index)
{
enum ocfs2_ref_rec_contig contig =
ocfs2_refcount_rec_contig(rb, index);
if (contig == REF_CONTIG_NONE)
return;
if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
BUG_ON(index == 0);
index--;
}
ocfs2_rotate_refcount_rec_left(rb, index);
if (contig == REF_CONTIG_LEFTRIGHT)
ocfs2_rotate_refcount_rec_left(rb, index);
}
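/*
 * Merge example (illustrative, hypothetical values): with recs
 * [{0, 4, rc=2}, {4, 4, rc=2}, {8, 4, rc=2}] a merge at index 1 is
 * REF_CONTIG_LEFTRIGHT; the left rotation is applied twice starting
 * at index 0, leaving the single rec {0, 12, rc=2}.
 */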
/*
* Change the refcount indexed by "index" in ref_bh.
* If refcount reaches 0, remove it.
*/
static int ocfs2_change_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_leaf_bh,
int index, int merge, int change)
{
int ret;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_list *rl = &rb->rf_records;
struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_change_refcount_rec(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
index, le32_to_cpu(rec->r_refcount), change);
le32_add_cpu(&rec->r_refcount, change);
if (!rec->r_refcount) {
if (index != le16_to_cpu(rl->rl_used) - 1) {
memmove(rec, rec + 1,
(le16_to_cpu(rl->rl_used) - index - 1) *
sizeof(struct ocfs2_refcount_rec));
memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
0, sizeof(struct ocfs2_refcount_rec));
}
le16_add_cpu(&rl->rl_used, -1);
} else if (merge)
ocfs2_refcount_rec_merge(rb, index);
ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
return ret;
}
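/*
 * Example (illustrative, hypothetical values): with recs
 * [{0, 4, rc=1}, {4, 4, rc=1}] and change = -1 at index 0, the first
 * rec's refcount reaches 0, so it is removed and the list compacts to
 * [{4, 4, rc=1}]; with change = +1 instead, the neighbouring
 * refcounts no longer match, so no merge happens.
 */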
static int ocfs2_expand_inline_ref_root(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head **ref_leaf_bh,
struct ocfs2_alloc_context *meta_ac)
{
int ret;
u16 suballoc_bit_start;
u32 num_got;
u64 suballoc_loc, blkno;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct buffer_head *new_bh = NULL;
struct ocfs2_refcount_block *new_rb;
struct ocfs2_refcount_block *root_rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
new_bh = sb_getblk(sb, blkno);
if (new_bh == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ocfs2_set_new_buffer_uptodate(ci, new_bh);
ret = ocfs2_journal_access_rb(handle, ci, new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Initialize ocfs2_refcount_block.
 * It should contain the same information as the old root,
 * so just memcpy it and change the corresponding fields.
*/
memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_cpos = cpu_to_le32(0);
new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
ocfs2_journal_dirty(handle, new_bh);
/* Now change the root. */
memset(&root_rb->rf_list, 0, sb->s_blocksize -
offsetof(struct ocfs2_refcount_block, rf_list));
root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
root_rb->rf_clusters = cpu_to_le32(1);
root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
ocfs2_journal_dirty(handle, ref_root_bh);
trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
le16_to_cpu(new_rb->rf_records.rl_used));
*ref_leaf_bh = new_bh;
new_bh = NULL;
out:
brelse(new_bh);
return ret;
}
static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
struct ocfs2_refcount_rec *next)
{
if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
ocfs2_get_ref_rec_low_cpos(next))
return 1;
return 0;
}
static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
const struct ocfs2_refcount_rec *l = a, *r = b;
u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
if (l_cpos > r_cpos)
return 1;
if (l_cpos < r_cpos)
return -1;
return 0;
}
static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
const struct ocfs2_refcount_rec *l = a, *r = b;
u64 l_cpos = le64_to_cpu(l->r_cpos);
u64 r_cpos = le64_to_cpu(r->r_cpos);
if (l_cpos > r_cpos)
return 1;
if (l_cpos < r_cpos)
return -1;
return 0;
}
static void swap_refcount_rec(void *a, void *b, int size)
{
struct ocfs2_refcount_rec *l = a, *r = b;
swap(*l, *r);
}
/*
 * The refcount recs are ordered by their 64 bit cpos, but we use the
 * low 32 bits as the e_cpos in the b-tree, so we need to make sure
 * that the chosen position does not intersect with the others.
 *
 * Note: the refcount block is already sorted by low 32 bit cpos, so
 * just try the middle position first; we exit as soon as we find a
 * good position.
*/
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
u32 *split_pos, int *split_index)
{
int num_used = le16_to_cpu(rl->rl_used);
int delta, middle = num_used / 2;
for (delta = 0; delta < middle; delta++) {
/* Let's check delta earlier than middle */
if (ocfs2_refcount_rec_no_intersect(
&rl->rl_recs[middle - delta - 1],
&rl->rl_recs[middle - delta])) {
*split_index = middle - delta;
break;
}
/* For even counts, don't walk off the end */
if ((middle + delta + 1) == num_used)
continue;
/* Now try delta past middle */
if (ocfs2_refcount_rec_no_intersect(
&rl->rl_recs[middle + delta],
&rl->rl_recs[middle + delta + 1])) {
*split_index = middle + delta + 1;
break;
}
}
if (delta >= middle)
return -ENOSPC;
*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
return 0;
}
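/*
 * Split-pos example (illustrative): with rl_used = 6, the search
 * starts at middle = 3 and walks outward, probing the pairs (2,3),
 * (3,4), (1,2), (4,5), (0,1), ... The first adjacent pair whose low
 * 32 bit cpos ranges do not intersect yields split_index, so records
 * sharing a low cpos stay together in one leaf. If every pair
 * intersects, -ENOSPC is returned.
 */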
static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
struct buffer_head *new_bh,
u32 *split_cpos)
{
int split_index = 0, num_moved, ret;
u32 cpos = 0;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_list *rl = &rb->rf_records;
struct ocfs2_refcount_block *new_rb =
(struct ocfs2_refcount_block *)new_bh->b_data;
struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
trace_ocfs2_divide_leaf_refcount_block(
(unsigned long long)ref_leaf_bh->b_blocknr,
le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
/*
 * XXX: Improvement later.
 * If we know all the high 32 bit cpos values are the same, there is
 * no need to sort.
*
* In order to make the whole process safe, we do:
* 1. sort the entries by their low 32 bit cpos first so that we can
* find the split cpos easily.
* 2. call ocfs2_insert_extent to insert the new refcount block.
* 3. move the refcount rec to the new block.
* 4. sort the entries by their 64 bit cpos.
* 5. dirty the new_rb and rb.
*/
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
if (ret) {
mlog_errno(ret);
return ret;
}
new_rb->rf_cpos = cpu_to_le32(cpos);
/* move refcount records starting from split_index to the new block. */
num_moved = le16_to_cpu(rl->rl_used) - split_index;
memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
num_moved * sizeof(struct ocfs2_refcount_rec));
/* OK, remove the entries we just moved over to the other block. */
memset(&rl->rl_recs[split_index], 0,
num_moved * sizeof(struct ocfs2_refcount_rec));
/* change old and new rl_used accordingly. */
le16_add_cpu(&rl->rl_used, -num_moved);
new_rl->rl_used = cpu_to_le16(num_moved);
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
cmp_refcount_rec_by_cpos, swap_refcount_rec);
sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
cmp_refcount_rec_by_cpos, swap_refcount_rec);
*split_cpos = cpos;
return 0;
}
static int ocfs2_new_leaf_refcount_block(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_alloc_context *meta_ac)
{
int ret;
u16 suballoc_bit_start;
u32 num_got, new_cpos;
u64 suballoc_loc, blkno;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *root_rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
struct buffer_head *new_bh = NULL;
struct ocfs2_refcount_block *new_rb;
struct ocfs2_extent_tree ref_et;
BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
new_bh = sb_getblk(sb, blkno);
if (new_bh == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ocfs2_set_new_buffer_uptodate(ci, new_bh);
ret = ocfs2_journal_access_rb(handle, ci, new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out;
}
/* Initialize ocfs2_refcount_block. */
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(new_rb, 0, sb->s_blocksize);
strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
new_rb->rf_records.rl_count =
cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
new_rb->rf_generation = root_rb->rf_generation;
ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_journal_dirty(handle, ref_leaf_bh);
ocfs2_journal_dirty(handle, new_bh);
ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
trace_ocfs2_new_leaf_refcount_block(
(unsigned long long)new_bh->b_blocknr, new_cpos);
/* Insert the new leaf block with the specific offset cpos. */
ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1, 0, meta_ac);
if (ret)
mlog_errno(ret);
out:
brelse(new_bh);
return ret;
}
static int ocfs2_expand_refcount_tree(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_alloc_context *meta_ac)
{
int ret;
struct buffer_head *expand_bh = NULL;
if (ref_root_bh == ref_leaf_bh) {
/*
* the old root bh hasn't been expanded to a b-tree,
* so expand it first.
*/
ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
&expand_bh, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
expand_bh = ref_leaf_bh;
get_bh(expand_bh);
}
/* Now add a new refcount block into the tree. */
ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
expand_bh, meta_ac);
if (ret)
mlog_errno(ret);
out:
brelse(expand_bh);
return ret;
}
/*
* Adjust the extent rec in b-tree representing ref_leaf_bh.
*
* Only called when we have inserted a new refcount rec at index 0
* which means ocfs2_extent_rec.e_cpos may need some change.
*/
static int ocfs2_adjust_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_refcount_rec *rec)
{
int ret = 0, i;
u32 new_cpos, old_cpos;
struct ocfs2_path *path = NULL;
struct ocfs2_extent_tree et;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_root_bh->b_data;
struct ocfs2_extent_list *el;
if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
goto out;
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
old_cpos = le32_to_cpu(rb->rf_cpos);
new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
if (old_cpos <= new_cpos)
goto out;
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
path = ocfs2_new_path_from_et(&et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(ci, path, old_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
 * 2 more credits: one for the leaf refcount block, one for
 * the extent block that contains the extent rec.
*/
ret = ocfs2_extend_trans(handle, 2);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
/* change the leaf extent block first. */
el = path_leaf_el(path);
for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
break;
BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
/* change the r_cpos in the leaf block. */
rb->rf_cpos = cpu_to_le32(new_cpos);
ocfs2_journal_dirty(handle, path_leaf_bh(path));
ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
ocfs2_free_path(path);
return ret;
}
static int ocfs2_insert_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_refcount_rec *rec,
int index, int merge,
struct ocfs2_alloc_context *meta_ac)
{
int ret;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_list *rf_list = &rb->rf_records;
struct buffer_head *new_bh = NULL;
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
if (rf_list->rl_used == rf_list->rl_count) {
u64 cpos = le64_to_cpu(rec->r_cpos);
u32 len = le32_to_cpu(rec->r_clusters);
ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
ref_leaf_bh, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, len, NULL, &index,
&new_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ref_leaf_bh = new_bh;
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rf_list = &rb->rf_records;
}
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
if (index < le16_to_cpu(rf_list->rl_used))
memmove(&rf_list->rl_recs[index + 1],
&rf_list->rl_recs[index],
(le16_to_cpu(rf_list->rl_used) - index) *
sizeof(struct ocfs2_refcount_rec));
trace_ocfs2_insert_refcount_rec(
(unsigned long long)ref_leaf_bh->b_blocknr, index,
(unsigned long long)le64_to_cpu(rec->r_cpos),
le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
rf_list->rl_recs[index] = *rec;
le16_add_cpu(&rf_list->rl_used, 1);
if (merge)
ocfs2_refcount_rec_merge(rb, index);
ocfs2_journal_dirty(handle, ref_leaf_bh);
if (index == 0) {
ret = ocfs2_adjust_refcount_rec(handle, ci,
ref_root_bh,
ref_leaf_bh, rec);
if (ret)
mlog_errno(ret);
}
out:
brelse(new_bh);
return ret;
}
/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
 * increase a refcount or decrease a refcount to non-zero).
 * If split_rec->r_refcount == 0, we are punching a hole in the current
 * refcount rec (in case we decrease a refcount to zero).
*/
static int ocfs2_split_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_refcount_rec *split_rec,
int index, int merge,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, recs_need;
u32 len;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_list *rf_list = &rb->rf_records;
struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
struct ocfs2_refcount_rec *tail_rec = NULL;
struct buffer_head *new_bh = NULL;
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
le32_to_cpu(orig_rec->r_clusters),
le32_to_cpu(orig_rec->r_refcount),
le64_to_cpu(split_rec->r_cpos),
le32_to_cpu(split_rec->r_clusters),
le32_to_cpu(split_rec->r_refcount));
/*
 * If we just need to split off the head or tail clusters,
 * no extra recs are needed; a plain split is enough.
 * Otherwise we need at least one new rec.
*/
if (!split_rec->r_refcount &&
(split_rec->r_cpos == orig_rec->r_cpos ||
le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters) ==
le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
recs_need = 0;
else
recs_need = 1;
/*
 * We need one more rec if we split in the middle and the new rec has
 * some refcount in it.
*/
if (split_rec->r_refcount &&
(split_rec->r_cpos != orig_rec->r_cpos &&
le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters) !=
le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
recs_need++;
/* If the leaf block doesn't have enough records, expand it. */
if (le16_to_cpu(rf_list->rl_used) + recs_need >
le16_to_cpu(rf_list->rl_count)) {
struct ocfs2_refcount_rec tmp_rec;
u64 cpos = le64_to_cpu(orig_rec->r_cpos);
len = le32_to_cpu(orig_rec->r_clusters);
ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
ref_leaf_bh, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We have to re-get it since now cpos may be moved to
* another leaf block.
*/
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, len, &tmp_rec, &index,
&new_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ref_leaf_bh = new_bh;
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rf_list = &rb->rf_records;
orig_rec = &rf_list->rl_recs[index];
}
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
 * We have calculated how many new records we need and stored the
 * count in recs_need, so make enough space first by moving the
 * records after "index" to the end.
*/
if (index != le16_to_cpu(rf_list->rl_used) - 1)
memmove(&rf_list->rl_recs[index + 1 + recs_need],
&rf_list->rl_recs[index + 1],
(le16_to_cpu(rf_list->rl_used) - index - 1) *
sizeof(struct ocfs2_refcount_rec));
len = (le64_to_cpu(orig_rec->r_cpos) +
le32_to_cpu(orig_rec->r_clusters)) -
(le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters));
/*
 * If "len" is non-zero, we split off the tail and move it
 * to the end of the space we have just spared.
*/
if (len) {
tail_rec = &rf_list->rl_recs[index + recs_need];
memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
le64_add_cpu(&tail_rec->r_cpos,
le32_to_cpu(tail_rec->r_clusters) - len);
tail_rec->r_clusters = cpu_to_le32(len);
}
/*
 * If the split pos isn't the same as the original one, we need to
 * split at the head.
 *
 * Note: it can happen that split_rec.r_refcount = 0, recs_need = 0
 * and len > 0, which means we just cut the head off the orig_rec.
 * In that case orig_rec was already modified above (it became the
 * tail), so the r_cpos check alone is not reliable; hence the extra
 * tail_rec != orig_rec test.
*/
if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
len = le64_to_cpu(split_rec->r_cpos) -
le64_to_cpu(orig_rec->r_cpos);
orig_rec->r_clusters = cpu_to_le32(len);
index++;
}
le16_add_cpu(&rf_list->rl_used, recs_need);
if (split_rec->r_refcount) {
rf_list->rl_recs[index] = *split_rec;
trace_ocfs2_split_refcount_rec_insert(
(unsigned long long)ref_leaf_bh->b_blocknr, index,
(unsigned long long)le64_to_cpu(split_rec->r_cpos),
le32_to_cpu(split_rec->r_clusters),
le32_to_cpu(split_rec->r_refcount));
if (merge)
ocfs2_refcount_rec_merge(rb, index);
}
ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
brelse(new_bh);
return ret;
}
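/*
 * Middle-split example (illustrative, hypothetical values): splitting
 * orig_rec {r_cpos=0, r_clusters=10, rc=1} by split_rec {r_cpos=4,
 * r_clusters=2, rc=2} needs recs_need = 2 and leaves
 *   {0, 4, rc=1}, {4, 2, rc=2}, {6, 4, rc=1}:
 * a shortened head, the inserted split rec, and a tail rec moved past
 * the spared space.
 */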
static int __ocfs2_increase_refcount(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
u64 cpos, u32 len, int merge,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0, index;
struct buffer_head *ref_leaf_bh = NULL;
struct ocfs2_refcount_rec rec;
unsigned int set_len = 0;
trace_ocfs2_increase_refcount_begin(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)cpos, len);
while (len) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, len, &rec, &index,
&ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
set_len = le32_to_cpu(rec.r_clusters);
/*
 * Here we may encounter 3 situations:
 *
 * 1. If we find an already existing record and the length
 * is the same, we just need to increase the r_refcount.
 * 2. If we find a hole, just insert a record with r_refcount = 1.
 * 3. If we are in the middle of one extent record, split it.
*/
if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
set_len <= len) {
trace_ocfs2_increase_refcount_change(
(unsigned long long)cpos, set_len,
le32_to_cpu(rec.r_refcount));
ret = ocfs2_change_refcount_rec(handle, ci,
ref_leaf_bh, index,
merge, 1);
if (ret) {
mlog_errno(ret);
goto out;
}
} else if (!rec.r_refcount) {
rec.r_refcount = cpu_to_le32(1);
trace_ocfs2_increase_refcount_insert(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len);
ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
ref_leaf_bh,
&rec, index,
merge, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
set_len = min((u64)(cpos + len),
le64_to_cpu(rec.r_cpos) + set_len) - cpos;
rec.r_cpos = cpu_to_le64(cpos);
rec.r_clusters = cpu_to_le32(set_len);
le32_add_cpu(&rec.r_refcount, 1);
trace_ocfs2_increase_refcount_split(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len, le32_to_cpu(rec.r_refcount));
ret = ocfs2_split_refcount_rec(handle, ci,
ref_root_bh, ref_leaf_bh,
&rec, index, merge,
meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
}
cpos += set_len;
len -= set_len;
brelse(ref_leaf_bh);
ref_leaf_bh = NULL;
}
out:
brelse(ref_leaf_bh);
return ret;
}
static int ocfs2_remove_refcount_extent(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_extent_tree et;
BUG_ON(rb->rf_records.rl_used);
trace_ocfs2_remove_refcount_extent(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)ref_leaf_bh->b_blocknr,
le32_to_cpu(rb->rf_cpos));
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
1, meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_remove_from_cache(ci, ref_leaf_bh);
/*
 * Add the freed block to the dealloc context so that it will be
 * freed when we run the dealloc.
*/
ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(rb->rf_suballoc_slot),
le64_to_cpu(rb->rf_suballoc_loc),
le64_to_cpu(rb->rf_blkno),
le16_to_cpu(rb->rf_suballoc_bit));
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
le32_add_cpu(&rb->rf_clusters, -1);
/*
 * Check whether we need to restore the root refcount block if
 * there is no leaf extent block at all.
*/
if (!rb->rf_list.l_next_free_rec) {
BUG_ON(rb->rf_clusters);
trace_ocfs2_restore_refcount_block(
(unsigned long long)ref_root_bh->b_blocknr);
rb->rf_flags = 0;
rb->rf_parent = 0;
rb->rf_cpos = 0;
memset(&rb->rf_records, 0, sb->s_blocksize -
offsetof(struct ocfs2_refcount_block, rf_records));
rb->rf_records.rl_count =
cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
}
ocfs2_journal_dirty(handle, ref_root_bh);
out:
return ret;
}
int ocfs2_increase_refcount(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
u64 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
cpos, len, 1,
meta_ac, dealloc);
}
static int ocfs2_decrease_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
int index, u64 cpos, unsigned int len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
BUG_ON(cpos + len >
le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
trace_ocfs2_decrease_refcount_rec(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)cpos, len);
if (cpos == le64_to_cpu(rec->r_cpos) &&
len == le32_to_cpu(rec->r_clusters))
ret = ocfs2_change_refcount_rec(handle, ci,
ref_leaf_bh, index, 1, -1);
else {
struct ocfs2_refcount_rec split = *rec;
split.r_cpos = cpu_to_le64(cpos);
split.r_clusters = cpu_to_le32(len);
le32_add_cpu(&split.r_refcount, -1);
ret = ocfs2_split_refcount_rec(handle, ci,
ref_root_bh, ref_leaf_bh,
&split, index, 1,
meta_ac, dealloc);
}
if (ret) {
mlog_errno(ret);
goto out;
}
/* Remove the leaf refcount block if it contains no refcount record. */
if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
ref_leaf_bh, meta_ac,
dealloc);
if (ret)
mlog_errno(ret);
}
out:
return ret;
}
static int __ocfs2_decrease_refcount(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
u64 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int delete)
{
int ret = 0, index = 0;
struct ocfs2_refcount_rec rec;
unsigned int r_count = 0, r_len;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct buffer_head *ref_leaf_bh = NULL;
trace_ocfs2_decrease_refcount(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)cpos, len, delete);
while (len) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, len, &rec, &index,
&ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
r_count = le32_to_cpu(rec.r_refcount);
BUG_ON(r_count == 0);
if (!delete)
BUG_ON(r_count > 1);
r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
ref_leaf_bh, index,
cpos, r_len,
meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
ret = ocfs2_cache_cluster_dealloc(dealloc,
ocfs2_clusters_to_blocks(sb, cpos),
r_len);
if (ret) {
mlog_errno(ret);
goto out;
}
}
cpos += r_len;
len -= r_len;
brelse(ref_leaf_bh);
ref_leaf_bh = NULL;
}
out:
brelse(ref_leaf_bh);
return ret;
}
/* Caller must hold refcount tree lock. */
int ocfs2_decrease_refcount(struct inode *inode,
handle_t *handle, u32 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int delete)
{
int ret;
u64 ref_blkno;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *tree;
BUG_ON(!ocfs2_is_refcount_inode(inode));
ret = ocfs2_get_refcount_block(inode, &ref_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
&ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
cpos, len, meta_ac, dealloc, delete);
if (ret)
mlog_errno(ret);
out:
brelse(ref_root_bh);
return ret;
}
/*
* Mark the already-existing extent at cpos as refcounted for len clusters.
* This adds the refcount extent flag.
*
* If the existing extent is larger than the request, initiate a
* split. An attempt will be made at merging with adjacent extents.
*
* The caller is responsible for passing down meta_ac if we'll need it.
*/
static int ocfs2_mark_extent_refcounted(struct inode *inode,
struct ocfs2_extent_tree *et,
handle_t *handle, u32 cpos,
u32 len, u32 phys,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret;
trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
cpos, len, phys);
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
inode->i_ino);
goto out;
}
ret = ocfs2_change_extent_flag(handle, et, cpos,
len, phys, meta_ac, dealloc,
OCFS2_EXT_REFCOUNTED, 0);
if (ret)
mlog_errno(ret);
out:
return ret;
}
/*
* Given some contiguous physical clusters, calculate what we need
* for modifying their refcount.
*/
static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
u64 start_cpos,
u32 clusters,
int *meta_add,
int *credits)
{
int ret = 0, index, ref_blocks = 0, recs_add = 0;
u64 cpos = start_cpos;
struct ocfs2_refcount_block *rb;
struct ocfs2_refcount_rec rec;
struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
u32 len;
while (clusters) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, clusters, &rec,
&index, &ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
if (ref_leaf_bh != prev_bh) {
/*
* Now we encounter a new leaf block, so calculate
* whether we need to extend the old leaf.
*/
if (prev_bh) {
rb = (struct ocfs2_refcount_block *)
prev_bh->b_data;
if (le16_to_cpu(rb->rf_records.rl_used) +
recs_add >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
}
recs_add = 0;
*credits += 1;
brelse(prev_bh);
prev_bh = ref_leaf_bh;
get_bh(prev_bh);
}
trace_ocfs2_calc_refcount_meta_credits_iterate(
recs_add, (unsigned long long)cpos, clusters,
(unsigned long long)le64_to_cpu(rec.r_cpos),
le32_to_cpu(rec.r_clusters),
le32_to_cpu(rec.r_refcount), index);
len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
/*
 * We record all the records which will be inserted into the
 * same refcount block, so that we can tell exactly whether
 * we need a new refcount block or not.
 *
 * If we insert a new one, this is easy and only happens
 * while adding the refcounted flag to the extent, so we don't
 * have a chance of splitting. We just need one record.
 *
 * If the refcount rec already exists, it is a little more
 * complicated. We may have to:
 * 1) split at the beginning if the start pos isn't aligned;
 * we need 1 more record in this case.
 * 2) split at the end if the end pos isn't aligned;
 * we need 1 more record in this case.
 * 3) split in the middle because of file system fragmentation;
 * we need 2 more records in this case (we can't detect this
 * beforehand, so always assume the worst case).
*/
if (rec.r_refcount) {
recs_add += 2;
/* Check whether we need a split at the beginning. */
if (cpos == start_cpos &&
cpos != le64_to_cpu(rec.r_cpos))
recs_add++;
/* Check whether we need a split at the end. */
if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters))
recs_add++;
} else
recs_add++;
brelse(ref_leaf_bh);
ref_leaf_bh = NULL;
clusters -= len;
cpos += len;
}
if (prev_bh) {
rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
*credits += 1;
}
if (!ref_blocks)
goto out;
*meta_add += ref_blocks;
*credits += ref_blocks;
/*
 * So we may need ref_blocks new blocks to insert into the tree.
 * That also means we need to change the b-tree and add that number
 * of records, since we never merge them.
 * We need one more block for expansion in case the newly created
 * leaf block is also full and needs a split.
*/
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
struct ocfs2_extent_tree et;
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
*credits += ocfs2_calc_extend_credits(sb,
et.et_root_el);
} else {
*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
*meta_add += 1;
}
out:
trace_ocfs2_calc_refcount_meta_credits(
(unsigned long long)start_cpos, clusters,
*meta_add, *credits);
brelse(ref_leaf_bh);
brelse(prev_bh);
return ret;
}
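/*
 * Budget example (illustrative): an existing rec with r_refcount > 0
 * is charged the worst case of 2 extra records for a possible middle
 * split, plus 1 if the start is unaligned (cpos == start_cpos but
 * cpos != r_cpos) and 1 if the end is unaligned, i.e. up to 4 new
 * records per rec touched.
 */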
/*
 * For the refcount tree, we will decrease the refcount of some
 * contiguous clusters, so just walk through them to see how many
 * blocks we are going to touch and whether we need to create new
 * blocks.
 *
 * Normally the refcount blocks storing these refcounts should be
 * contiguous as well, so we can get the number easily.
 * We will at most add 2 split refcount records and 2 more
 * refcount blocks, so just estimate it in a rough way.
*
* Caller must hold refcount tree lock.
*/
int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
u64 refcount_loc,
u64 phys_blkno,
u32 clusters,
int *credits,
int *ref_blocks)
{
int ret;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *tree;
u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
inode->i_ino);
goto out;
}
BUG_ON(!ocfs2_is_refcount_inode(inode));
ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
refcount_loc, &tree);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
&ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
&tree->rf_ci,
ref_root_bh,
start_cpos, clusters,
ref_blocks, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
out:
brelse(ref_root_bh);
return ret;
}
#define MAX_CONTIG_BYTES 1048576
static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
{
return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
}
static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
{
return ~(ocfs2_cow_contig_clusters(sb) - 1);
}
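/*
 * Arithmetic example (illustrative, assuming a 4K cluster size):
 * ocfs2_cow_contig_clusters() is 1048576 / 4096 = 256, so
 * ocfs2_cow_contig_mask() is ~255 and CoW boundaries fall on
 * 256-cluster (1MB) multiples.
 */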
/*
* Given an extent that starts at 'start' and an I/O that starts at 'cpos',
* find an offset (start + (n * contig_clusters)) that is closest to cpos
* while still being less than or equal to it.
*
* The goal is to break the extent at a multiple of contig_clusters.
*/
static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
unsigned int start,
unsigned int cpos)
{
BUG_ON(start > cpos);
return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
}
/*
* Given a cluster count of len, pad it out so that it is a multiple
* of contig_clusters.
*/
static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
unsigned int len)
{
unsigned int padded =
(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
ocfs2_cow_contig_mask(sb);
/* Did we wrap? */
if (padded < len)
padded = UINT_MAX;
return padded;
}
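/*
 * Alignment example (illustrative, with the 256-cluster contig size
 * assumed above):
 * ocfs2_cow_align_start(sb, 100, 700) = 100 + ((700 - 100) & ~255)
 *                                     = 100 + 512 = 612;
 * ocfs2_cow_align_length(sb, 300)     = (300 + 255) & ~255 = 512.
 */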
/*
 * Calculate the start and number of virtual clusters we need to CoW.
 *
 * cpos is the virtual start cluster position where we want to do CoW
 * in a file and write_len is the cluster length.
 * max_cpos is the place where we want to stop CoW intentionally.
 *
 * Normally we start CoW from the beginning of the extent record
 * containing cpos. We try to break up extents on boundaries of
 * MAX_CONTIG_BYTES so that we get good I/O from the resulting extent
 * tree.
*/
static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
struct ocfs2_extent_list *el,
u32 cpos,
u32 write_len,
u32 max_cpos,
u32 *cow_start,
u32 *cow_len)
{
int ret = 0;
int tree_height = le16_to_cpu(el->l_tree_depth), i;
struct buffer_head *eb_bh = NULL;
struct ocfs2_extent_block *eb = NULL;
struct ocfs2_extent_rec *rec;
unsigned int want_clusters, rec_end = 0;
int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
int leaf_clusters;
BUG_ON(cpos + write_len > max_cpos);
if (tree_height > 0) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ret = ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in leaf block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
goto out;
}
}
*cow_len = 0;
for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
if (ocfs2_is_empty_extent(rec)) {
mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
"index %d\n", inode->i_ino, i);
continue;
}
if (le32_to_cpu(rec->e_cpos) +
le16_to_cpu(rec->e_leaf_clusters) <= cpos)
continue;
if (*cow_len == 0) {
/*
* We should find a refcounted record in the
* first pass.
*/
BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
*cow_start = le32_to_cpu(rec->e_cpos);
}
/*
* If we encounter a hole, a non-refcounted record or
* pass the max_cpos, stop the search.
*/
if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
(*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
(max_cpos <= le32_to_cpu(rec->e_cpos)))
break;
leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
if (rec_end > max_cpos) {
rec_end = max_cpos;
leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
}
/*
 * How many clusters do we actually need from
 * this extent? First we see how many we need
 * to complete the write. If that's smaller
 * than contig_clusters, we try for contig_clusters.
*/
if (!*cow_len)
want_clusters = write_len;
else
want_clusters = (cpos + write_len) -
(*cow_start + *cow_len);
if (want_clusters < contig_clusters)
want_clusters = contig_clusters;
/*
* If the write does not cover the whole extent, we
* need to calculate how we're going to split the extent.
* We try to do it on contig_clusters boundaries.
*
* Any extent smaller than contig_clusters will be
* CoWed in its entirety.
*/
if (leaf_clusters <= contig_clusters)
*cow_len += leaf_clusters;
else if (*cow_len || (*cow_start == cpos)) {
/*
* This extent needs to be CoW'd from its
* beginning, so all we have to do is compute
* how many clusters to grab. We align
* want_clusters to the edge of contig_clusters
* to get better I/O.
*/
want_clusters = ocfs2_cow_align_length(inode->i_sb,
want_clusters);
if (leaf_clusters < want_clusters)
*cow_len += leaf_clusters;
else
*cow_len += want_clusters;
} else if ((*cow_start + contig_clusters) >=
(cpos + write_len)) {
/*
* Breaking off contig_clusters at the front
* of the extent will cover our write. That's
* easy.
*/
*cow_len = contig_clusters;
} else if ((rec_end - cpos) <= contig_clusters) {
/*
* Breaking off contig_clusters at the tail of
* this extent will cover cpos.
*/
*cow_start = rec_end - contig_clusters;
*cow_len = contig_clusters;
} else if ((rec_end - cpos) <= want_clusters) {
/*
* While we can't fit the entire write in this
* extent, we know that the write goes from cpos
* to the end of the extent. Break that off.
* We try to break it at some multiple of
* contig_clusters from the front of the extent.
 * Failing that (i.e., cpos is within
* contig_clusters of the front), we'll CoW the
* entire extent.
*/
*cow_start = ocfs2_cow_align_start(inode->i_sb,
*cow_start, cpos);
*cow_len = rec_end - *cow_start;
} else {
/*
* Ok, the entire write lives in the middle of
* this extent. Let's try to slice the extent up
* nicely. Optimally, our CoW region starts at
* m*contig_clusters from the beginning of the
* extent and goes for n*contig_clusters,
* covering the entire write.
*/
*cow_start = ocfs2_cow_align_start(inode->i_sb,
*cow_start, cpos);
want_clusters = (cpos + write_len) - *cow_start;
want_clusters = ocfs2_cow_align_length(inode->i_sb,
want_clusters);
if (*cow_start + want_clusters <= rec_end)
*cow_len = want_clusters;
else
*cow_len = rec_end - *cow_start;
}
/* Have we covered our entire write yet? */
if ((*cow_start + *cow_len) >= (cpos + write_len))
break;
/*
* If we reach the end of the extent block and don't get enough
* clusters, continue with the next extent block if possible.
*/
if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
eb && eb->h_next_leaf_blk) {
brelse(eb_bh);
eb_bh = NULL;
ret = ocfs2_read_extent_block(INODE_CACHE(inode),
le64_to_cpu(eb->h_next_leaf_blk),
&eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
i = -1;
}
}
out:
brelse(eb_bh);
return ret;
}
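/*
 * CoW range example (illustrative, 256-cluster contig size assumed):
 * a 4-cluster write at cpos 1000 into a single refcounted extent
 * [0, 4096) starts with *cow_start = 0; the write lives in the middle
 * of the extent, so the start is aligned up to 768 (the largest
 * multiple of 256 <= 1000) and the length is padded to 256, giving a
 * CoW region of [768, 1024) that covers the write on 1MB boundaries.
 */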
/*
 * Prepare meta_ac and data_ac, and calculate the credits needed when we
 * want to add num_clusters clusters to the data tree "et" and change the
 * refcount for the old clusters (starting from p_cluster) in the
 * refcount tree.
 *
 * Note:
 * 1. Since we may split the old tree, we will need at most
 * num_clusters + 2 more new leaf records.
 * 2. In some cases we may not need to reserve new clusters (e.g. reflink),
 * so just pass data_ac = NULL.
*/
static int ocfs2_lock_refcount_allocators(struct super_block *sb,
u32 p_cluster, u32 num_clusters,
struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_alloc_context **meta_ac,
struct ocfs2_alloc_context **data_ac,
int *credits)
{
int ret = 0, meta_add = 0;
int num_free_extents = ocfs2_num_free_extents(et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
if (num_free_extents < num_clusters + 2)
meta_add =
ocfs2_extend_meta_needed(et->et_root_el);
*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
p_cluster, num_clusters,
&meta_add, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
if (data_ac) {
ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
data_ac);
if (ret)
mlog_errno(ret);
}
out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
*meta_ac = NULL;
}
}
return ret;
}
static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
{
BUG_ON(buffer_dirty(bh));
clear_buffer_mapped(bh);
return 0;
}
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0, partial;
struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct page *page;
pgoff_t page_index;
unsigned int from, to;
loff_t offset, end, map_end;
struct address_space *mapping = inode->i_mapping;
trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
new_cluster, new_len);
offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
/*
 * We only duplicate pages until we reach the page that contains
 * i_size - 1. So trim 'end' to i_size.
*/
if (end > i_size_read(inode))
end = i_size_read(inode);
while (offset < end) {
page_index = offset >> PAGE_SHIFT;
map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
if (map_end > end)
map_end = end;
/* from and to are offsets within the page. */
from = offset & (PAGE_SIZE - 1);
to = PAGE_SIZE;
if (map_end & (PAGE_SIZE - 1))
to = map_end & (PAGE_SIZE - 1);
retry:
page = find_or_create_page(mapping, page_index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
mlog_errno(ret);
break;
}
/*
 * When PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
 * page here; if we find one, write it back first.
*/
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
if (PageDirty(page)) {
unlock_page(page);
put_page(page);
ret = filemap_write_and_wait_range(mapping,
offset, map_end - 1);
goto retry;
}
}
if (!PageUptodate(page)) {
struct folio *folio = page_folio(page);
ret = block_read_full_folio(folio, ocfs2_get_block);
if (ret) {
mlog_errno(ret);
goto unlock;
}
folio_lock(folio);
}
if (page_has_buffers(page)) {
ret = walk_page_buffers(handle, page_buffers(page),
from, to, &partial,
ocfs2_clear_cow_buffer);
if (ret) {
mlog_errno(ret);
goto unlock;
}
}
ocfs2_map_and_dirty_page(inode,
handle, from, to,
page, 0, &new_block);
mark_page_accessed(page);
unlock:
unlock_page(page);
put_page(page);
page = NULL;
offset = map_end;
if (ret)
break;
}
return ret;
}
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0;
struct super_block *sb = inode->i_sb;
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct ocfs2_super *osb = OCFS2_SB(sb);
struct buffer_head *old_bh = NULL;
struct buffer_head *new_bh = NULL;
trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
new_cluster, new_len);
for (i = 0; i < blocks; i++, old_block++, new_block++) {
new_bh = sb_getblk(osb->sb, new_block);
if (new_bh == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
break;
}
ocfs2_set_new_buffer_uptodate(ci, new_bh);
ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
if (ret) {
mlog_errno(ret);
break;
}
ret = ocfs2_journal_access(handle, ci, new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
break;
}
memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
ocfs2_journal_dirty(handle, new_bh);
brelse(new_bh);
brelse(old_bh);
new_bh = NULL;
old_bh = NULL;
}
brelse(new_bh);
brelse(old_bh);
return ret;
}
static int ocfs2_clear_ext_refcount(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos, u32 p_cluster, u32 len,
unsigned int ext_flags,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, index;
struct ocfs2_extent_rec replace_rec;
struct ocfs2_path *path = NULL;
struct ocfs2_extent_list *el;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
cpos, len, p_cluster, ext_flags);
memset(&replace_rec, 0, sizeof(replace_rec));
replace_rec.e_cpos = cpu_to_le32(cpos);
replace_rec.e_leaf_clusters = cpu_to_le16(len);
replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
p_cluster));
replace_rec.e_flags = ext_flags;
replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
path = ocfs2_new_path_from_et(et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1) {
ret = ocfs2_error(sb,
"Inode %llu has an extent at cpos %u which can no longer be found\n",
(unsigned long long)ino, cpos);
goto out;
}
ret = ocfs2_split_extent(handle, et, path, index,
&replace_rec, meta_ac, dealloc);
if (ret)
mlog_errno(ret);
out:
ocfs2_free_path(path);
return ret;
}
static int ocfs2_replace_clusters(handle_t *handle,
struct ocfs2_cow_context *context,
u32 cpos, u32 old,
u32 new, u32 len,
unsigned int ext_flags)
{
int ret;
struct ocfs2_caching_info *ci = context->data_et.et_ci;
u64 ino = ocfs2_metadata_cache_owner(ci);
trace_ocfs2_replace_clusters((unsigned long long)ino,
cpos, old, new, len, ext_flags);
/* If the old clusters are unwritten, there is no need to duplicate. */
if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
ret = context->cow_duplicate_clusters(handle, context->inode,
cpos, old, new, len);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
cpos, new, len, ext_flags,
context->meta_ac, &context->dealloc);
if (ret)
mlog_errno(ret);
out:
return ret;
}
int ocfs2_cow_sync_writeback(struct super_block *sb,
struct inode *inode,
u32 cpos, u32 num_clusters)
{
int ret;
loff_t start, end;
if (ocfs2_should_order_data(inode))
return 0;
start = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
end = start + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits) - 1;
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret < 0)
mlog_errno(ret);
return ret;
}
static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
u32 v_cluster, u32 *p_cluster,
u32 *num_clusters,
unsigned int *extent_flags)
{
return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
num_clusters, extent_flags);
}
static int ocfs2_make_clusters_writable(struct super_block *sb,
struct ocfs2_cow_context *context,
u32 cpos, u32 p_cluster,
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
u32 new_bit, new_len, orig_num_clusters;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
struct buffer_head *ref_leaf_bh = NULL;
struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
struct ocfs2_refcount_rec rec;
trace_ocfs2_make_clusters_writable(cpos, p_cluster,
num_clusters, e_flags);
ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
&context->data_et,
ref_ci,
context->ref_root_bh,
&context->meta_ac,
&context->data_ac, &credits);
if (ret) {
mlog_errno(ret);
return ret;
}
if (context->post_refcount)
credits += context->post_refcount->credits;
credits += context->extra_credits;
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
orig_num_clusters = num_clusters;
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
&rec, &index, &ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
BUG_ON(!rec.r_refcount);
set_len = min((u64)p_cluster + num_clusters,
le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - p_cluster;
/*
 * There are several different situations here:
 * 1. If refcount == 1, remove the flag and don't CoW.
 * 2. If refcount > 1, allocate clusters.
 * We may not be able to allocate the whole r_len at once, so
 * continue until we reach num_clusters.
*/
if (le32_to_cpu(rec.r_refcount) == 1) {
delete = 0;
ret = ocfs2_clear_ext_refcount(handle,
&context->data_et,
cpos, p_cluster,
set_len, e_flags,
context->meta_ac,
&context->dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
} else {
delete = 1;
ret = __ocfs2_claim_clusters(handle,
context->data_ac,
1, set_len,
&new_bit, &new_len);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_replace_clusters(handle, context,
cpos, p_cluster, new_bit,
new_len, e_flags);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
set_len = new_len;
}
ret = __ocfs2_decrease_refcount(handle, ref_ci,
context->ref_root_bh,
p_cluster, set_len,
context->meta_ac,
&context->dealloc, delete);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
cpos += set_len;
p_cluster += set_len;
num_clusters -= set_len;
brelse(ref_leaf_bh);
ref_leaf_bh = NULL;
}
/* Handle any post-CoW action. */
if (context->post_refcount && context->post_refcount->func) {
ret = context->post_refcount->func(context->inode, handle,
context->post_refcount->para);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
}
/*
* Here we should write the new page out first if we are
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
orig_num_clusters);
if (ret)
mlog_errno(ret);
}
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (context->data_ac) {
ocfs2_free_alloc_context(context->data_ac);
context->data_ac = NULL;
}
if (context->meta_ac) {
ocfs2_free_alloc_context(context->meta_ac);
context->meta_ac = NULL;
}
brelse(ref_leaf_bh);
return ret;
}
static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
{
int ret = 0;
struct inode *inode = context->inode;
u32 cow_start = context->cow_start, cow_len = context->cow_len;
u32 p_cluster, num_clusters;
unsigned int ext_flags;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (!ocfs2_refcount_tree(osb)) {
return ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
inode->i_ino);
}
ocfs2_init_dealloc_ctxt(&context->dealloc);
while (cow_len) {
ret = context->get_clusters(context, cow_start, &p_cluster,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
break;
}
BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
if (cow_len < num_clusters)
num_clusters = cow_len;
ret = ocfs2_make_clusters_writable(inode->i_sb, context,
cow_start, p_cluster,
num_clusters, ext_flags);
if (ret) {
mlog_errno(ret);
break;
}
cow_len -= num_clusters;
cow_start += num_clusters;
}
if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &context->dealloc);
}
return ret;
}
/*
* Starting at cpos, try to CoW write_len clusters. Don't CoW
* past max_cpos. This will stop when it runs into a hole or an
* unrefcounted extent.
*/
static int ocfs2_refcount_cow_hunk(struct inode *inode,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
int ret;
u32 cow_start = 0, cow_len = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *ref_tree;
struct ocfs2_cow_context *context = NULL;
BUG_ON(!ocfs2_is_refcount_inode(inode));
ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
cpos, write_len, max_cpos,
&cow_start, &cow_len);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
cpos, write_len, max_cpos,
cow_start, cow_len);
BUG_ON(cow_len == 0);
context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
if (!context) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
1, &ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
context->inode = inode;
context->cow_start = cow_start;
context->cow_len = cow_len;
context->ref_tree = ref_tree;
context->ref_root_bh = ref_root_bh;
context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
context->get_clusters = ocfs2_di_get_clusters;
ocfs2_init_dinode_extent_tree(&context->data_et,
INODE_CACHE(inode), di_bh);
ret = ocfs2_replace_cow(context);
if (ret)
mlog_errno(ret);
/*
* Truncate the extent map here since, no matter whether we hit an
* error during the action, we shouldn't trust the cached extent map
* any more.
*/
ocfs2_extent_map_trunc(inode, cow_start);
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
out:
kfree(context);
return ret;
}
/*
* CoW any and all clusters between cpos and cpos+write_len.
* Don't CoW past max_cpos. If this returns successfully, all
* clusters between cpos and cpos+write_len are safe to modify.
*/
int ocfs2_refcount_cow(struct inode *inode,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
int ret = 0;
u32 p_cluster, num_clusters;
unsigned int ext_flags;
while (write_len) {
ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
break;
}
if (write_len < num_clusters)
num_clusters = write_len;
if (ext_flags & OCFS2_EXT_REFCOUNTED) {
ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
num_clusters, max_cpos);
if (ret) {
mlog_errno(ret);
break;
}
}
write_len -= num_clusters;
cpos += num_clusters;
}
return ret;
}
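/*
* get_clusters callback used when the CoW target is an xattr value:
* the extent list lives in the ocfs2_xattr_value_root stashed in
* context->cow_object rather than in the dinode.
*/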
static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
u32 v_cluster, u32 *p_cluster,
u32 *num_clusters,
unsigned int *extent_flags)
{
struct inode *inode = context->inode;
struct ocfs2_xattr_value_root *xv = context->cow_object;
return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
num_clusters, &xv->xr_list,
extent_flags);
}
/*
* Given a xattr value root, calculate the most meta/credits we need for
* refcount tree change if we truncate it to 0.
*/
int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_xattr_value_root *xv,
int *meta_add, int *credits)
{
int ret = 0, index, ref_blocks = 0;
u32 p_cluster, num_clusters;
u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
struct ocfs2_refcount_block *rb;
struct ocfs2_refcount_rec rec;
struct buffer_head *ref_leaf_bh = NULL;
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
&num_clusters, &xv->xr_list,
NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
cpos += num_clusters;
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
p_cluster, num_clusters,
&rec, &index,
&ref_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(!rec.r_refcount);
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
/*
* We really don't know whether the other clusters are in
* this refcount block or not, so take the worst case:
* assume all the clusters are in this block and each one
* will split a refcount rec, so in total we need
* clusters * 2 new refcount recs.
*/
if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
*credits += 1;
brelse(ref_leaf_bh);
ref_leaf_bh = NULL;
if (num_clusters <= le32_to_cpu(rec.r_clusters))
break;
else
num_clusters -= le32_to_cpu(rec.r_clusters);
p_cluster += num_clusters;
}
}
*meta_add += ref_blocks;
if (!ref_blocks)
goto out;
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
else {
struct ocfs2_extent_tree et;
ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
*credits += ocfs2_calc_extend_credits(inode->i_sb,
et.et_root_el);
}
out:
brelse(ref_leaf_bh);
return ret;
}
/*
* Do CoW for xattr.
*/
int ocfs2_refcount_cow_xattr(struct inode *inode,
struct ocfs2_dinode *di,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_refcount_tree *ref_tree,
struct buffer_head *ref_root_bh,
u32 cpos, u32 write_len,
struct ocfs2_post_refcount *post)
{
int ret;
struct ocfs2_xattr_value_root *xv = vb->vb_xv;
struct ocfs2_cow_context *context = NULL;
u32 cow_start, cow_len;
BUG_ON(!ocfs2_is_refcount_inode(inode));
ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
cpos, write_len, UINT_MAX,
&cow_start, &cow_len);
if (ret) {
mlog_errno(ret);
goto out;
}
BUG_ON(cow_len == 0);
context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
if (!context) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
context->inode = inode;
context->cow_start = cow_start;
context->cow_len = cow_len;
context->ref_tree = ref_tree;
context->ref_root_bh = ref_root_bh;
context->cow_object = xv;
context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
/* We need the extra credits for duplicating clusters by jbd. */
context->extra_credits =
ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
context->get_clusters = ocfs2_xattr_value_get_clusters;
context->post_refcount = post;
ocfs2_init_xattr_value_extent_tree(&context->data_et,
INODE_CACHE(inode), vb);
ret = ocfs2_replace_cow(context);
if (ret)
mlog_errno(ret);
out:
kfree(context);
return ret;
}
/*
* Insert a new extent into the refcount tree and mark an extent rec
* as refcounted in the dinode tree.
*/
int ocfs2_add_refcount_flag(struct inode *inode,
struct ocfs2_extent_tree *data_et,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
u32 cpos, u32 p_cluster, u32 num_clusters,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_post_refcount *post)
{
int ret;
handle_t *handle;
int credits = 1, ref_blocks = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_alloc_context *meta_ac = NULL;
/* We need to be able to handle at least an extent tree split. */
ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el);
ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
ref_ci, ref_root_bh,
p_cluster, num_clusters,
&ref_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_add_refcount_flag(ref_blocks, credits);
if (ref_blocks) {
ret = ocfs2_reserve_new_metadata_blocks(osb,
ref_blocks, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (post)
credits += post->credits;
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
cpos, num_clusters, p_cluster,
meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
p_cluster, num_clusters, 0,
meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
if (post && post->func) {
ret = post->func(inode, handle, post->para);
if (ret)
mlog_errno(ret);
}
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
return ret;
}
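/*
* Update the inode's ctime, in both the VFS inode and the dinode,
* inside a small standalone transaction.
*/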
static int ocfs2_change_ctime(struct inode *inode,
struct buffer_head *di_bh)
{
int ret;
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
inode_set_ctime_current(inode);
di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
return ret;
}
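/*
* Make sure the inode has a refcount tree attached, then walk all of
* its allocated data extents (and xattr extents, if any) and mark
* them refcounted. The ctime is updated if any data extent changed.
*/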
static int ocfs2_attach_refcount_tree(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, data_changed = 0;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_refcount_tree *ref_tree;
unsigned int ext_flags;
loff_t size;
u32 cpos, num_clusters, clusters, p_cluster;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_extent_tree di_et;
ocfs2_init_dealloc_ctxt(&dealloc);
if (!ocfs2_is_refcount_inode(inode)) {
ret = ocfs2_create_refcount_tree(inode, di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
BUG_ON(!di->i_refcount_loc);
ret = ocfs2_lock_refcount_tree(osb,
le64_to_cpu(di->i_refcount_loc), 1,
&ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
goto attach_xattr;
ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
size = i_size_read(inode);
clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto unlock;
}
if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
ret = ocfs2_add_refcount_flag(inode, &di_et,
&ref_tree->rf_ci,
ref_root_bh, cpos,
p_cluster, num_clusters,
&dealloc, NULL);
if (ret) {
mlog_errno(ret);
goto unlock;
}
data_changed = 1;
}
cpos += num_clusters;
}
attach_xattr:
if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
&ref_tree->rf_ci,
ref_root_bh,
&dealloc);
if (ret) {
mlog_errno(ret);
goto unlock;
}
}
if (data_changed) {
ret = ocfs2_change_ctime(inode, di_bh);
if (ret)
mlog_errno(ret);
}
unlock:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
}
out:
/*
* Empty the extent map so that we may get the right extent
* record from the disk.
*/
ocfs2_extent_map_trunc(inode, 0);
return ret;
}
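/*
* Insert an already-refcounted extent into the target inode's extent
* tree, bump the refcount of its physical clusters and charge the new
* clusters against the target's quota.
*/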
static int ocfs2_add_refcounted_extent(struct inode *inode,
struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
u32 cpos, u32 p_cluster, u32 num_clusters,
unsigned int ext_flags,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret;
handle_t *handle;
int credits = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_alloc_context *meta_ac = NULL;
ret = ocfs2_lock_refcount_allocators(inode->i_sb,
p_cluster, num_clusters,
et, ref_ci,
ref_root_bh, &meta_ac,
NULL, &credits);
if (ret) {
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_insert_extent(handle, et, cpos,
ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
num_clusters, ext_flags, meta_ac);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
p_cluster, num_clusters,
meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, num_clusters));
if (ret)
mlog_errno(ret);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
return ret;
}
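/*
* Copy the inline data payload from the source dinode into the target
* dinode and set OCFS2_INLINE_DATA_FL on the target, all under a
* single inode-update transaction.
*/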
static int ocfs2_duplicate_inline_data(struct inode *s_inode,
struct buffer_head *s_bh,
struct inode *t_inode,
struct buffer_head *t_bh)
{
int ret;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
le16_to_cpu(s_di->id2.i_data.id_count));
spin_lock(&OCFS2_I(t_inode)->ip_lock);
OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
spin_unlock(&OCFS2_I(t_inode)->ip_lock);
ocfs2_journal_dirty(handle, t_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
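/*
* Walk the source inode's allocated extents and add each one to the
* target inode as a refcounted extent sharing the same physical
* clusters.
*/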
static int ocfs2_duplicate_extent_list(struct inode *s_inode,
struct inode *t_inode,
struct buffer_head *t_bh,
struct ocfs2_caching_info *ref_ci,
struct buffer_head *ref_root_bh,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0;
u32 p_cluster, num_clusters, clusters, cpos;
loff_t size;
unsigned int ext_flags;
struct ocfs2_extent_tree et;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
size = i_size_read(s_inode);
clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
if (p_cluster) {
ret = ocfs2_add_refcounted_extent(t_inode, &et,
ref_ci, ref_root_bh,
cpos, p_cluster,
num_clusters,
ext_flags,
dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
}
cpos += num_clusters;
}
out:
return ret;
}
/*
* Change the new file's attributes to match the source.
*
* reflink creates a snapshot of a file, which means the attributes
* must be identical except for three things: nlink, ino, and ctime.
*/
static int ocfs2_complete_reflink(struct inode *s_inode,
struct buffer_head *s_bh,
struct inode *t_inode,
struct buffer_head *t_bh,
bool preserve)
{
int ret;
handle_t *handle;
struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
loff_t size = i_size_read(s_inode);
handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
return ret;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
spin_lock(&OCFS2_I(t_inode)->ip_lock);
OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
spin_unlock(&OCFS2_I(t_inode)->ip_lock);
i_size_write(t_inode, size);
t_inode->i_blocks = s_inode->i_blocks;
di->i_xattr_inline_size = s_di->i_xattr_inline_size;
di->i_clusters = s_di->i_clusters;
di->i_size = s_di->i_size;
di->i_dyn_features = s_di->i_dyn_features;
di->i_attr = s_di->i_attr;
if (preserve) {
t_inode->i_uid = s_inode->i_uid;
t_inode->i_gid = s_inode->i_gid;
t_inode->i_mode = s_inode->i_mode;
di->i_uid = s_di->i_uid;
di->i_gid = s_di->i_gid;
di->i_mode = s_di->i_mode;
/*
* update time.
* we want mtime to appear identical to the source and
* update ctime.
*/
inode_set_ctime_current(t_inode);
di->i_ctime = cpu_to_le64(inode_get_ctime(t_inode).tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(t_inode).tv_nsec);
t_inode->i_mtime = s_inode->i_mtime;
di->i_mtime = s_di->i_mtime;
di->i_mtime_nsec = s_di->i_mtime_nsec;
}
ocfs2_journal_dirty(handle, t_bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
return ret;
}
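/*
* Point the target inode at the source's refcount tree and duplicate
* the source's data into it: inline data is copied directly, while
* extent-based data is shared via refcounted extents.
*/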
static int ocfs2_create_reflink_node(struct inode *s_inode,
struct buffer_head *s_bh,
struct inode *t_inode,
struct buffer_head *t_bh,
bool preserve)
{
int ret;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
struct ocfs2_refcount_tree *ref_tree;
ocfs2_init_dealloc_ctxt(&dealloc);
ret = ocfs2_set_refcount_tree(t_inode, t_bh,
le64_to_cpu(di->i_refcount_loc));
if (ret) {
mlog_errno(ret);
goto out;
}
if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
t_inode, t_bh);
if (ret)
mlog_errno(ret);
goto out;
}
ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
1, &ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
&ref_tree->rf_ci, ref_root_bh,
&dealloc);
if (ret) {
mlog_errno(ret);
goto out_unlock_refcount;
}
out_unlock_refcount:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
out:
if (ocfs2_dealloc_has_cluster(&dealloc)) {
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
}
return ret;
}
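/*
* Core of the reflink operation: flush the source's dirty pages,
* attach a refcount tree to it, then build the reflink node and, if
* needed, the xattrs on the target before completing the attribute
* copy. The caller holds the source's cluster locks; the target's
* inode and cluster locks are taken here.
*/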
static int __ocfs2_reflink(struct dentry *old_dentry,
struct buffer_head *old_bh,
struct inode *new_inode,
bool preserve)
{
int ret;
struct inode *inode = d_inode(old_dentry);
struct buffer_head *new_bh = NULL;
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
}
ret = filemap_fdatawrite(inode->i_mapping);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_attach_refcount_tree(inode, old_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
inode_lock_nested(new_inode, I_MUTEX_CHILD);
ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
OI_LS_REFLINK_TARGET);
if (ret) {
mlog_errno(ret);
goto out_unlock;
}
ret = ocfs2_create_reflink_node(inode, old_bh,
new_inode, new_bh, preserve);
if (ret) {
mlog_errno(ret);
goto inode_unlock;
}
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
ret = ocfs2_reflink_xattrs(inode, old_bh,
new_inode, new_bh,
preserve);
if (ret) {
mlog_errno(ret);
goto inode_unlock;
}
}
ret = ocfs2_complete_reflink(inode, old_bh,
new_inode, new_bh, preserve);
if (ret)
mlog_errno(ret);
inode_unlock:
ocfs2_inode_unlock(new_inode, 1);
brelse(new_bh);
out_unlock:
inode_unlock(new_inode);
out:
if (!ret) {
ret = filemap_fdatawait(inode->i_mapping);
if (ret)
mlog_errno(ret);
}
return ret;
}
static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry, bool preserve)
{
int error, had_lock;
struct inode *inode = d_inode(old_dentry);
struct buffer_head *old_bh = NULL;
struct inode *new_orphan_inode = NULL;
struct ocfs2_lock_holder oh;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
&new_orphan_inode);
if (error) {
mlog_errno(error);
goto out;
}
error = ocfs2_rw_lock(inode, 1);
if (error) {
mlog_errno(error);
goto out;
}
error = ocfs2_inode_lock(inode, &old_bh, 1);
if (error) {
mlog_errno(error);
ocfs2_rw_unlock(inode, 1);
goto out;
}
down_write(&OCFS2_I(inode)->ip_xattr_sem);
down_write(&OCFS2_I(inode)->ip_alloc_sem);
error = __ocfs2_reflink(old_dentry, old_bh,
new_orphan_inode, preserve);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
up_write(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock(inode, 1);
ocfs2_rw_unlock(inode, 1);
brelse(old_bh);
if (error) {
mlog_errno(error);
goto out;
}
had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
&oh);
if (had_lock < 0) {
error = had_lock;
mlog_errno(error);
goto out;
}
/* If the security attributes aren't preserved, we need to re-initialize them. */
if (!preserve) {
error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
&new_dentry->d_name);
if (error)
mlog_errno(error);
}
if (!error) {
error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
new_dentry);
if (error)
mlog_errno(error);
}
ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
out:
if (new_orphan_inode) {
/*
* We need to open_unlock the inode no matter whether we
* succeed or not, so that other nodes can delete it later.
*/
ocfs2_open_unlock(new_orphan_inode);
if (error)
iput(new_orphan_inode);
}
return error;
}
/*
* Below here are the bits used by OCFS2_IOC_REFLINK() to fake
* sys_reflink(). This will go away when vfs_reflink() exists in
* fs/namei.c.
*/
/* copied from may_create in VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
if (d_really_is_positive(child))
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
return inode_permission(&nop_mnt_idmap, dir, MAY_WRITE | MAY_EXEC);
}
/**
* ocfs2_vfs_reflink - Create a reference-counted link
*
* @old_dentry: source dentry + inode
* @dir: directory to create the target
* @new_dentry: target dentry
* @preserve: if true, preserve all file attributes
*/
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry, bool preserve)
{
struct inode *inode = d_inode(old_dentry);
int error;
if (!inode)
return -ENOENT;
error = ocfs2_may_create(dir, new_dentry);
if (error)
return error;
if (dir->i_sb != inode->i_sb)
return -EXDEV;
/*
* A reflink to an append-only or immutable file cannot be created.
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
/* Only regular files can be reflinked. */
if (!S_ISREG(inode->i_mode))
return -EPERM;
/*
* If the caller wants to preserve ownership, they must have the
* rights to do so.
*/
if (preserve) {
if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
return -EPERM;
if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
return -EPERM;
}
/*
* If the caller is modifying any aspect of the attributes, they
* are not creating a snapshot. They need read permission on the
* file.
*/
if (!preserve) {
error = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
if (error)
return error;
}
inode_lock(inode);
error = dquot_initialize(dir);
if (!error)
error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
inode_unlock(inode);
if (!error)
fsnotify_create(dir, new_dentry);
return error;
}
/*
* Most of this code is copied from sys_linkat.
*/
int ocfs2_reflink_ioctl(struct inode *inode,
const char __user *oldname,
const char __user *newname,
bool preserve)
{
struct dentry *new_dentry;
struct path old_path, new_path;
int error;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
return -EOPNOTSUPP;
error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
if (error) {
mlog_errno(error);
return error;
}
new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry)) {
mlog_errno(error);
goto out;
}
error = -EXDEV;
if (old_path.mnt != new_path.mnt) {
mlog_errno(error);
goto out_dput;
}
error = ocfs2_vfs_reflink(old_path.dentry,
d_inode(new_path.dentry),
new_dentry, preserve);
out_dput:
done_path_create(&new_path, new_dentry);
out:
path_put(&old_path);
return error;
}
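/*
* Example userspace usage (a sketch, not part of the kernel sources;
* it assumes the struct reflink_arguments and OCFS2_IOC_REFLINK
* definitions from ocfs2_ioctl.h, with old_path/new_path holding
* user pointers to NUL-terminated path strings):
*
*	struct reflink_arguments args = {
*		.old_path = (__u64)(unsigned long)"/mnt/ocfs2/src",
*		.new_path = (__u64)(unsigned long)"/mnt/ocfs2/snap",
*		.preserve = 1,	// keep uid/gid/mode and mtime
*	};
*	int fd = open("/mnt/ocfs2/src", O_RDONLY);
*
*	if (fd >= 0 && ioctl(fd, OCFS2_IOC_REFLINK, &args) < 0)
*		perror("OCFS2_IOC_REFLINK");
*/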
/* Update destination inode size, if necessary. */
int ocfs2_reflink_update_dest(struct inode *dest,
struct buffer_head *d_bh,
loff_t newlen)
{
handle_t *handle;
int ret;
dest->i_blocks = ocfs2_inode_sector_count(dest);
if (newlen <= i_size_read(dest))
return 0;
handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
return ret;
}
/* Extend i_size if needed. */
spin_lock(&OCFS2_I(dest)->ip_lock);
if (newlen > i_size_read(dest))
i_size_write(dest, newlen);
spin_unlock(&OCFS2_I(dest)->ip_lock);
dest->i_mtime = inode_set_ctime_current(dest);
ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
out_commit:
ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
return ret;
}
/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
struct buffer_head *s_bh,
loff_t pos_in,
struct inode *t_inode,
struct buffer_head *t_bh,
loff_t pos_out,
loff_t len,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
struct ocfs2_extent_tree s_et;
struct ocfs2_extent_tree t_et;
struct ocfs2_dinode *dis;
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *ref_tree;
struct ocfs2_super *osb;
loff_t remapped_bytes = 0;
loff_t pstart, plen;
u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
unsigned int ext_flags;
int ret = 0;
osb = OCFS2_SB(s_inode->i_sb);
dis = (struct ocfs2_dinode *)s_bh->b_data;
ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);
spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);
while (spos < slast) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
goto out;
}
/* Look up the extent. */
ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
num_clusters = min_t(u32, num_clusters, slast - spos);
/* Punch out the dest range. */
pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
if (ret) {
mlog_errno(ret);
goto out;
}
if (p_cluster == 0)
goto next_loop;
/* Lock the refcount btree... */
ret = ocfs2_lock_refcount_tree(osb,
le64_to_cpu(dis->i_refcount_loc),
1, &ref_tree, &ref_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
/* Mark s_inode's extent as refcounted. */
if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
ret = ocfs2_add_refcount_flag(s_inode, &s_et,
&ref_tree->rf_ci,
ref_root_bh, spos,
p_cluster, num_clusters,
dealloc, NULL);
if (ret) {
mlog_errno(ret);
goto out_unlock_refcount;
}
}
/* Map in the new extent. */
ext_flags |= OCFS2_EXT_REFCOUNTED;
ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
&ref_tree->rf_ci,
ref_root_bh,
tpos, p_cluster,
num_clusters,
ext_flags,
dealloc);
if (ret) {
mlog_errno(ret);
goto out_unlock_refcount;
}
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
next_loop:
spos += num_clusters;
tpos += num_clusters;
remapped_clus += num_clusters;
}
goto out;
out_unlock_refcount:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
out:
remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
remapped_bytes = min_t(loff_t, len, remapped_bytes);
return remapped_bytes > 0 ? remapped_bytes : ret;
}
/* Set up refcount tree and remap s_inode to t_inode. */
loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
struct buffer_head *s_bh,
loff_t pos_in,
struct inode *t_inode,
struct buffer_head *t_bh,
loff_t pos_out,
loff_t len)
{
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_super *osb;
struct ocfs2_dinode *dis;
struct ocfs2_dinode *dit;
loff_t ret;
osb = OCFS2_SB(s_inode->i_sb);
dis = (struct ocfs2_dinode *)s_bh->b_data;
dit = (struct ocfs2_dinode *)t_bh->b_data;
ocfs2_init_dealloc_ctxt(&dealloc);
/*
* If we're reflinking the entire file and the source is inline
* data, just copy the contents.
*/
if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
i_size_read(t_inode) <= len &&
(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
if (ret)
mlog_errno(ret);
goto out;
}
/*
* If both inodes belong to two different refcount groups then
* forget it because we don't know how (or want) to go merging
* refcount trees.
*/
ret = -EOPNOTSUPP;
if (ocfs2_is_refcount_inode(s_inode) &&
ocfs2_is_refcount_inode(t_inode) &&
le64_to_cpu(dis->i_refcount_loc) !=
le64_to_cpu(dit->i_refcount_loc))
goto out;
/* Neither inode has a refcount tree. Add one to s_inode. */
if (!ocfs2_is_refcount_inode(s_inode) &&
!ocfs2_is_refcount_inode(t_inode)) {
ret = ocfs2_create_refcount_tree(s_inode, s_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/* Ensure that both inodes end up with the same refcount tree. */
if (!ocfs2_is_refcount_inode(s_inode)) {
ret = ocfs2_set_refcount_tree(s_inode, s_bh,
le64_to_cpu(dit->i_refcount_loc));
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (!ocfs2_is_refcount_inode(t_inode)) {
ret = ocfs2_set_refcount_tree(t_inode, t_bh,
le64_to_cpu(dis->i_refcount_loc));
if (ret) {
mlog_errno(ret);
goto out;
}
}
/* Turn off inline data in the dest file. */
if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/* Actually remap extents now. */
ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
pos_out, len, &dealloc);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
out:
if (ocfs2_dealloc_has_cluster(&dealloc)) {
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
}
return ret;
}
/* Lock an inode and grab a bh pointing to the inode. */
int ocfs2_reflink_inodes_lock(struct inode *s_inode,
struct buffer_head **bh_s,
struct inode *t_inode,
struct buffer_head **bh_t)
{
struct inode *inode1 = s_inode;
struct inode *inode2 = t_inode;
struct ocfs2_inode_info *oi1;
struct ocfs2_inode_info *oi2;
struct buffer_head *bh1 = NULL;
struct buffer_head *bh2 = NULL;
bool same_inode = (s_inode == t_inode);
bool need_swap = (inode1->i_ino > inode2->i_ino);
int status;
/* First grab the VFS and rw locks. */
lock_two_nondirectories(s_inode, t_inode);
if (need_swap)
swap(inode1, inode2);
status = ocfs2_rw_lock(inode1, 1);
if (status) {
mlog_errno(status);
goto out_i1;
}
if (!same_inode) {
status = ocfs2_rw_lock(inode2, 1);
if (status) {
mlog_errno(status);
goto out_i2;
}
}
/* Now go for the cluster locks */
oi1 = OCFS2_I(inode1);
oi2 = OCFS2_I(inode2);
trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
/* We always want to lock the one with the lower lockid first. */
if (oi1->ip_blkno > oi2->ip_blkno)
mlog_errno(-ENOLCK);
/* lock id1 */
status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
OI_LS_REFLINK_TARGET);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto out_rw2;
}
/* lock id2 */
if (!same_inode) {
status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
OI_LS_REFLINK_TARGET);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto out_cl1;
}
} else {
bh2 = bh1;
}
/*
* If we swapped inode order above, we have to swap the buffer heads
* before passing them back to the caller.
*/
if (need_swap)
swap(bh1, bh2);
*bh_s = bh1;
*bh_t = bh2;
trace_ocfs2_double_lock_end(
(unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
return 0;
out_cl1:
ocfs2_inode_unlock(inode1, 1);
brelse(bh1);
out_rw2:
ocfs2_rw_unlock(inode2, 1);
out_i2:
ocfs2_rw_unlock(inode1, 1);
out_i1:
unlock_two_nondirectories(s_inode, t_inode);
return status;
}
/* Unlock both inodes and release buffers. */
void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
struct buffer_head *s_bh,
struct inode *t_inode,
struct buffer_head *t_bh)
{
ocfs2_inode_unlock(s_inode, 1);
ocfs2_rw_unlock(s_inode, 1);
brelse(s_bh);
if (s_inode != t_inode) {
ocfs2_inode_unlock(t_inode, 1);
ocfs2_rw_unlock(t_inode, 1);
brelse(t_bh);
}
unlock_two_nondirectories(s_inode, t_inode);
}
| linux-master | fs/ocfs2/refcounttree.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* journal.c
*
* Defines functions of the journalling API
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/writeback.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "quota.h"
#include "file.h"
#include "namei.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"
DEFINE_SPINLOCK(trans_inc_lock);
#define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000
static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
int slot,
enum ocfs2_orphan_reco_type orphan_reco_type);
static int ocfs2_commit_thread(void *arg);
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
int slot_num,
struct ocfs2_dinode *la_dinode,
struct ocfs2_dinode *tl_dinode,
struct ocfs2_quota_recovery *qrec,
enum ocfs2_orphan_reco_type orphan_reco_type);
static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
return __ocfs2_wait_on_mount(osb, 0);
}
static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
return __ocfs2_wait_on_mount(osb, 1);
}
/*
* This replay_map is used to track online/offline slots, so that we can
* recover offline slots during recovery and mount.
*/
enum ocfs2_replay_state {
REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */
REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */
REPLAY_DONE /* Replay was already queued */
};
struct ocfs2_replay_map {
unsigned int rm_slots;
enum ocfs2_replay_state rm_state;
unsigned char rm_replay_slots[];
};
static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
{
if (!osb->replay_map)
return;
/* If we've already queued the replay, we don't have any more to do */
if (osb->replay_map->rm_state == REPLAY_DONE)
return;
osb->replay_map->rm_state = state;
}
int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
{
struct ocfs2_replay_map *replay_map;
int i, node_num;
/* If replay map is already set, we don't do it again */
if (osb->replay_map)
return 0;
replay_map = kzalloc(struct_size(replay_map, rm_replay_slots,
osb->max_slots),
GFP_KERNEL);
if (!replay_map) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
spin_lock(&osb->osb_lock);
replay_map->rm_slots = osb->max_slots;
replay_map->rm_state = REPLAY_UNNEEDED;
/* set rm_replay_slots for offline slot(s) */
for (i = 0; i < replay_map->rm_slots; i++) {
if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
replay_map->rm_replay_slots[i] = 1;
}
osb->replay_map = replay_map;
spin_unlock(&osb->osb_lock);
return 0;
}
static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
enum ocfs2_orphan_reco_type orphan_reco_type)
{
struct ocfs2_replay_map *replay_map = osb->replay_map;
int i;
if (!replay_map)
return;
if (replay_map->rm_state != REPLAY_NEEDED)
return;
for (i = 0; i < replay_map->rm_slots; i++)
if (replay_map->rm_replay_slots[i])
ocfs2_queue_recovery_completion(osb->journal, i, NULL,
NULL, NULL,
orphan_reco_type);
replay_map->rm_state = REPLAY_DONE;
}
void ocfs2_free_replay_slots(struct ocfs2_super *osb)
{
struct ocfs2_replay_map *replay_map = osb->replay_map;
if (!osb->replay_map)
return;
kfree(replay_map);
osb->replay_map = NULL;
}
int ocfs2_recovery_init(struct ocfs2_super *osb)
{
struct ocfs2_recovery_map *rm;
mutex_init(&osb->recovery_lock);
osb->disable_recovery = 0;
osb->recovery_thread_task = NULL;
init_waitqueue_head(&osb->recovery_event);
rm = kzalloc(struct_size(rm, rm_entries, osb->max_slots),
GFP_KERNEL);
if (!rm) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
osb->recovery_map = rm;
return 0;
}
/* we can't grab the goofy sem lock from inside wait_event, so we use
* memory barriers to make sure that we'll see the null task before
* being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
mb();
return osb->recovery_thread_task != NULL;
}
void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
struct ocfs2_recovery_map *rm;
/* disable any new recovery threads and wait for any currently
* running ones to exit. Do this before setting the vol_state. */
mutex_lock(&osb->recovery_lock);
osb->disable_recovery = 1;
mutex_unlock(&osb->recovery_lock);
wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
/* At this point, we know that no more recovery threads can be
* launched, so wait for any recovery completion work to
* complete. */
if (osb->ocfs2_wq)
flush_workqueue(osb->ocfs2_wq);
/*
* Now that recovery is shut down, and the osb is about to be
* freed, the osb_lock is not taken here.
*/
rm = osb->recovery_map;
/* XXX: Should we bug if there are dirty entries? */
kfree(rm);
}
static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
unsigned int node_num)
{
int i;
struct ocfs2_recovery_map *rm = osb->recovery_map;
assert_spin_locked(&osb->osb_lock);
for (i = 0; i < rm->rm_used; i++) {
if (rm->rm_entries[i] == node_num)
return 1;
}
return 0;
}
/* Behaves like test-and-set. Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
unsigned int node_num)
{
struct ocfs2_recovery_map *rm = osb->recovery_map;
spin_lock(&osb->osb_lock);
if (__ocfs2_recovery_map_test(osb, node_num)) {
spin_unlock(&osb->osb_lock);
return 1;
}
/* XXX: Can this be exploited? Not from o2dlm... */
BUG_ON(rm->rm_used >= osb->max_slots);
rm->rm_entries[rm->rm_used] = node_num;
rm->rm_used++;
spin_unlock(&osb->osb_lock);
return 0;
}
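/* Remove node_num from the recovery map, if it is present. */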
static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
unsigned int node_num)
{
int i;
struct ocfs2_recovery_map *rm = osb->recovery_map;
spin_lock(&osb->osb_lock);
for (i = 0; i < rm->rm_used; i++) {
if (rm->rm_entries[i] == node_num)
break;
}
if (i < rm->rm_used) {
/* XXX: be careful with the pointer math */
memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
(rm->rm_used - i - 1) * sizeof(unsigned int));
rm->rm_used--;
}
spin_unlock(&osb->osb_lock);
}
static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
int status = 0;
unsigned int flushed;
struct ocfs2_journal *journal = NULL;
journal = osb->journal;
/* Flush all pending commits and checkpoint the journal. */
down_write(&journal->j_trans_barrier);
flushed = atomic_read(&journal->j_num_trans);
trace_ocfs2_commit_cache_begin(flushed);
if (flushed == 0) {
up_write(&journal->j_trans_barrier);
goto finally;
}
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
if (status < 0) {
up_write(&journal->j_trans_barrier);
mlog_errno(status);
goto finally;
}
ocfs2_inc_trans_id(journal);
flushed = atomic_read(&journal->j_num_trans);
atomic_set(&journal->j_num_trans, 0);
up_write(&journal->j_trans_barrier);
trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
ocfs2_wake_downconvert_thread(osb);
wake_up(&journal->j_checkpointed);
finally:
return status;
}
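/*
* Start a journal transaction with room for at most max_buffs buffer
* credits. A new (non-nested) handle takes j_trans_barrier; a nested
* start simply piggybacks on the current handle.
*/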
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
journal_t *journal = osb->journal->j_journal;
handle_t *handle;
BUG_ON(!osb || !osb->journal->j_journal);
if (ocfs2_is_hard_readonly(osb))
return ERR_PTR(-EROFS);
BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
BUG_ON(max_buffs <= 0);
/* Nested transaction? Just return the handle... */
if (journal_current_handle())
return jbd2_journal_start(journal, max_buffs);
sb_start_intwrite(osb->sb);
down_read(&osb->journal->j_trans_barrier);
handle = jbd2_journal_start(journal, max_buffs);
if (IS_ERR(handle)) {
up_read(&osb->journal->j_trans_barrier);
sb_end_intwrite(osb->sb);
mlog_errno(PTR_ERR(handle));
if (is_journal_aborted(journal)) {
ocfs2_abort(osb->sb, "Detected aborted journal\n");
handle = ERR_PTR(-EROFS);
}
} else {
if (!ocfs2_mount_local(osb))
atomic_inc(&(osb->journal->j_num_trans));
}
return handle;
}
int ocfs2_commit_trans(struct ocfs2_super *osb,
handle_t *handle)
{
int ret, nested;
struct ocfs2_journal *journal = osb->journal;
BUG_ON(!handle);
nested = handle->h_ref > 1;
ret = jbd2_journal_stop(handle);
if (ret < 0)
mlog_errno(ret);
if (!nested) {
up_read(&journal->j_trans_barrier);
sb_end_intwrite(osb->sb);
}
return ret;
}
/*
* 'nblocks' is what you want to add to the current transaction.
*
* This might call jbd2_journal_restart() which will commit dirty buffers
* and then restart the transaction. Before calling
* ocfs2_extend_trans(), any changed blocks should have been
* dirtied. After calling it, all blocks which need to be changed must
* go through another set of journal_access/journal_dirty calls.
*
* WARNING: This will not release any semaphores or disk locks taken
* during the transaction, so make sure they were taken *before*
* start_trans or we'll have ordering deadlocks.
*
* WARNING2: Note that we do *not* drop j_trans_barrier here. This is
* good because transaction ids haven't yet been recorded on the
* cluster locks associated with this handle.
*/
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
int status, old_nblocks;
BUG_ON(!handle);
BUG_ON(nblocks < 0);
if (!nblocks)
return 0;
old_nblocks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_extend_trans(old_nblocks, nblocks);
#ifdef CONFIG_OCFS2_DEBUG_FS
status = 1;
#else
status = jbd2_journal_extend(handle, nblocks, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
#endif
if (status > 0) {
trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
status = jbd2_journal_restart(handle,
old_nblocks + nblocks);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
status = 0;
bail:
return status;
}
/*
* If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
* If that fails, restart the transaction & regain write access for the
* buffer head which is used for metadata modifications.
* Taken from Ext4: extend_or_restart_transaction()
*/
int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
{
int status, old_nblks;
BUG_ON(!handle);
old_nblks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
if (old_nblks < thresh)
return 0;
status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (status > 0) {
status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
if (status < 0)
mlog_errno(status);
}
bail:
return status;
}
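/*
* Per-block-type journal trigger. ot_offset is the byte offset of the
* struct ocfs2_block_check inside the block; the generic frozen
* trigger uses it to locate where the ecc data is computed.
*/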
struct ocfs2_triggers {
struct jbd2_buffer_trigger_type ot_triggers;
int ot_offset;
};
static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}
static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
/*
* We aren't guaranteed to have the superblock here, so we
* must unconditionally compute the ecc data.
* __ocfs2_journal_access() will only set the triggers if
* metaecc is enabled.
*/
ocfs2_block_check_compute(data, size, data + ot->ot_offset);
}
/*
* Quota blocks have their own trigger because the struct ocfs2_block_check
* offset depends on the blocksize.
*/
static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
struct ocfs2_disk_dqtrailer *dqt =
ocfs2_block_dqtrailer(size, data);
/*
* We aren't guaranteed to have the superblock here, so we
* must unconditionally compute the ecc data.
* __ocfs2_journal_access() will only set the triggers if
* metaecc is enabled.
*/
ocfs2_block_check_compute(data, size, &dqt->dq_check);
}
/*
* Directory blocks also have their own trigger because the
* struct ocfs2_block_check offset depends on the blocksize.
*/
static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
struct ocfs2_dir_block_trailer *trailer =
ocfs2_dir_trailer_from_size(size, data);
/*
* We aren't guaranteed to have the superblock here, so we
* must unconditionally compute the ecc data.
* __ocfs2_journal_access() will only set the triggers if
* metaecc is enabled.
*/
ocfs2_block_check_compute(data, size, &trailer->db_check);
}
static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh)
{
mlog(ML_ERROR,
"ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
"bh->b_blocknr = %llu\n",
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
ocfs2_error(bh->b_assoc_map->host->i_sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
static struct ocfs2_triggers di_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dinode, i_check),
};
static struct ocfs2_triggers eb_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_extent_block, h_check),
};
static struct ocfs2_triggers rb_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
};
static struct ocfs2_triggers gd_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
};
static struct ocfs2_triggers db_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_db_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
};
static struct ocfs2_triggers xb_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
};
static struct ocfs2_triggers dq_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_dq_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
};
static struct ocfs2_triggers dr_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
};
static struct ocfs2_triggers dl_triggers = {
.ot_triggers = {
.t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
};
static int __ocfs2_journal_access(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *bh,
struct ocfs2_triggers *triggers,
int type)
{
int status;
struct ocfs2_super *osb =
OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
BUG_ON(!ci || !ci->ci_ops);
BUG_ON(!handle);
BUG_ON(!bh);
trace_ocfs2_journal_access(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)bh->b_blocknr, type, bh->b_size);
/* we can safely remove this assertion after testing. */
if (!buffer_uptodate(bh)) {
mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
(unsigned long long)bh->b_blocknr, bh->b_state);
lock_buffer(bh);
/*
* A previous transaction with a couple of buffer heads failed
* to checkpoint, so all of those bhs are marked BH_Write_EIO.
* For the current transaction, this bh is just one of the error
* bhs that the previous transaction handled. We can't simply
* clear its BH_Write_EIO and reuse it directly, since the other
* bhs have not been written to disk yet and that would cause
* metadata inconsistency. So we should set the fs read-only to
* avoid further damage.
*/
if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
unlock_buffer(bh);
return ocfs2_error(osb->sb, "A previous attempt to "
"write this buffer head failed\n");
}
unlock_buffer(bh);
}
/* Set the current transaction information on the ci so
* that the locking code knows whether it can drop its locks
* on this ci or not. We're protected from the commit
* thread updating the current transaction id until
* ocfs2_commit_trans() because ocfs2_start_trans() took
* j_trans_barrier for us. */
ocfs2_set_ci_lock_trans(osb->journal, ci);
ocfs2_metadata_cache_io_lock(ci);
switch (type) {
case OCFS2_JOURNAL_ACCESS_CREATE:
case OCFS2_JOURNAL_ACCESS_WRITE:
status = jbd2_journal_get_write_access(handle, bh);
break;
case OCFS2_JOURNAL_ACCESS_UNDO:
status = jbd2_journal_get_undo_access(handle, bh);
break;
default:
status = -EINVAL;
mlog(ML_ERROR, "Unknown access type!\n");
}
if (!status && ocfs2_meta_ecc(osb) && triggers)
jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
ocfs2_metadata_cache_io_unlock(ci);
if (status < 0)
mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
status, type);
return status;
}
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
}
int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
}
int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
type);
}
int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
}
int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
}
int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
}
int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
}
int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
}
int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
}
int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}
void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
int status;
trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
status = jbd2_journal_dirty_metadata(handle, bh);
if (status) {
mlog_errno(status);
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
"Aborting transaction and journal.\n");
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
ocfs2_abort(bh->b_assoc_map->host->i_sb,
"Journal already aborted.\n");
}
}
}
#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
journal_t *journal = osb->journal->j_journal;
unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;
if (osb->osb_commit_interval)
commit_interval = osb->osb_commit_interval;
write_lock(&journal->j_state_lock);
journal->j_commit_interval = commit_interval;
if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
write_unlock(&journal->j_state_lock);
}
/*
* Allocate and initialize the skeleton of the journal structure.
* ocfs2_journal_init() will give the fs its journalling ability.
*/
int ocfs2_journal_alloc(struct ocfs2_super *osb)
{
int status = 0;
struct ocfs2_journal *journal;
journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL);
if (!journal) {
mlog(ML_ERROR, "unable to alloc journal\n");
status = -ENOMEM;
goto bail;
}
osb->journal = journal;
journal->j_osb = osb;
atomic_set(&journal->j_num_trans, 0);
init_rwsem(&journal->j_trans_barrier);
init_waitqueue_head(&journal->j_checkpointed);
spin_lock_init(&journal->j_lock);
journal->j_trans_id = 1UL;
INIT_LIST_HEAD(&journal->j_la_cleanups);
INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
bail:
return status;
}
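/*
* Data writeback callback handed to jbd2: used during commit to write
* back the dirty range tracked in the jbd2_inode.
*/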
static int ocfs2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = mapping->nrpages * 2,
.range_start = jinode->i_dirty_start,
.range_end = jinode->i_dirty_end,
};
return filemap_fdatawrite_wbc(mapping, &wbc);
}
int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
{
int status = -1;
struct inode *inode = NULL; /* the journal inode */
journal_t *j_journal = NULL;
struct ocfs2_journal *journal = osb->journal;
struct ocfs2_dinode *di = NULL;
struct buffer_head *bh = NULL;
int inode_lock = 0;
BUG_ON(!journal);
/* already have the inode for our journal */
inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
osb->slot_num);
if (inode == NULL) {
status = -EACCES;
mlog_errno(status);
goto done;
}
if (is_bad_inode(inode)) {
mlog(ML_ERROR, "access error (bad inode)\n");
iput(inode);
inode = NULL;
status = -EACCES;
goto done;
}
SET_INODE_JOURNAL(inode);
OCFS2_I(inode)->ip_open_count++;
/* Skip recovery waits here - journal inode metadata never
* changes in a live cluster so it can be considered an
* exception to the rule. */
status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
if (status < 0) {
if (status != -ERESTARTSYS)
mlog(ML_ERROR, "Could not get lock on journal!\n");
goto done;
}
inode_lock = 1;
di = (struct ocfs2_dinode *)bh->b_data;
if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) {
mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
i_size_read(inode));
status = -EINVAL;
goto done;
}
trace_ocfs2_journal_init(i_size_read(inode),
(unsigned long long)inode->i_blocks,
OCFS2_I(inode)->ip_clusters);
/* call the kernel's journal init function now */
j_journal = jbd2_journal_init_inode(inode);
if (IS_ERR(j_journal)) {
mlog(ML_ERROR, "Linux journal layer error\n");
status = PTR_ERR(j_journal);
goto done;
}
trace_ocfs2_journal_init_maxlen(j_journal->j_total_len);
*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
OCFS2_JOURNAL_DIRTY_FL);
journal->j_journal = j_journal;
journal->j_journal->j_submit_inode_data_buffers =
ocfs2_journal_submit_inode_data_buffers;
journal->j_journal->j_finish_inode_data_buffers =
jbd2_journal_finish_inode_data_buffers;
journal->j_inode = inode;
journal->j_bh = bh;
ocfs2_set_journal_params(osb);
journal->j_state = OCFS2_JOURNAL_LOADED;
status = 0;
done:
if (status < 0) {
if (inode_lock)
ocfs2_inode_unlock(inode, 1);
brelse(bh);
if (inode) {
OCFS2_I(inode)->ip_open_count--;
iput(inode);
}
}
return status;
}
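/*
* The recovery generation stored in the journal dinode is bumped each
* time the journal is replayed, letting nodes detect whether a
* journal has been recovered since they last sampled it.
*/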
static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}
static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
int dirty, int replayed)
{
int status;
unsigned int flags;
struct ocfs2_journal *journal = osb->journal;
struct buffer_head *bh = journal->j_bh;
struct ocfs2_dinode *fe;
fe = (struct ocfs2_dinode *)bh->b_data;
/* The journal bh on the osb always comes from ocfs2_journal_init()
* and was validated there inside ocfs2_inode_lock_full(). It's a
* code bug if we mess it up. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
flags = le32_to_cpu(fe->id1.journal1.ij_flags);
if (dirty)
flags |= OCFS2_JOURNAL_DIRTY_FL;
else
flags &= ~OCFS2_JOURNAL_DIRTY_FL;
fe->id1.journal1.ij_flags = cpu_to_le32(flags);
if (replayed)
ocfs2_bump_recovery_generation(fe);
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
if (status < 0)
mlog_errno(status);
return status;
}
/*
* If the journal has been kmalloc'd it needs to be freed after this
* call.
*/
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
struct ocfs2_journal *journal = NULL;
int status = 0;
struct inode *inode = NULL;
int num_running_trans = 0;
BUG_ON(!osb);
journal = osb->journal;
if (!journal)
goto done;
inode = journal->j_inode;
if (journal->j_state != OCFS2_JOURNAL_LOADED)
goto done;
/* need to inc inode use count - jbd2_journal_destroy will iput. */
if (!igrab(inode))
BUG();
num_running_trans = atomic_read(&(osb->journal->j_num_trans));
trace_ocfs2_journal_shutdown(num_running_trans);
/* Do a commit_cache here. It will flush our journal, *and*
* release any locks that are still held.
* Set the SHUTDOWN flag and release the trans lock.
* The commit thread will take the trans lock for us below. */
journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;
/* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache not to
* drop the trans_lock (which we want to hold until we
* completely destroy the journal). */
if (osb->commit_task) {
/* Wait for the commit thread */
trace_ocfs2_journal_shutdown_wait(osb->commit_task);
kthread_stop(osb->commit_task);
osb->commit_task = NULL;
}
BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
if (ocfs2_mount_local(osb)) {
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
if (status < 0)
mlog_errno(status);
}
/* Shutdown the kernel journal system */
if (!jbd2_journal_destroy(journal->j_journal) && !status) {
/*
* Do not toggle if the flush was unsuccessful; otherwise we
* will leave dirty metadata in a "clean" journal
*/
status = ocfs2_journal_toggle_dirty(osb, 0, 0);
if (status < 0)
mlog_errno(status);
}
journal->j_journal = NULL;
OCFS2_I(inode)->ip_open_count--;
/* unlock our journal */
ocfs2_inode_unlock(inode, 1);
brelse(journal->j_bh);
journal->j_bh = NULL;
journal->j_state = OCFS2_JOURNAL_FREE;
done:
iput(inode);
kfree(journal);
osb->journal = NULL;
}
static void ocfs2_clear_journal_error(struct super_block *sb,
journal_t *journal,
int slot)
{
int olderr;
olderr = jbd2_journal_errno(journal);
if (olderr) {
mlog(ML_ERROR, "File system error %d recorded in "
"journal %u.\n", olderr, slot);
mlog(ML_ERROR, "File system on device %s needs checking.\n",
sb->s_id);
jbd2_journal_ack_err(journal);
jbd2_journal_clear_err(journal);
}
}
int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
int status = 0;
struct ocfs2_super *osb;
BUG_ON(!journal);
osb = journal->j_osb;
status = jbd2_journal_load(journal->j_journal);
if (status < 0) {
mlog(ML_ERROR, "Failed to load journal!\n");
goto done;
}
ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
if (replayed) {
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
if (status < 0)
mlog_errno(status);
}
status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
if (status < 0) {
mlog_errno(status);
goto done;
}
/* Launch the commit thread */
if (!local) {
osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
"ocfs2cmt-%s", osb->uuid_str);
if (IS_ERR(osb->commit_task)) {
status = PTR_ERR(osb->commit_task);
osb->commit_task = NULL;
mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
"error=%d", status);
goto done;
}
} else
osb->commit_task = NULL;
done:
return status;
}
/* 'full' flag tells us whether we clear out all blocks or if we just
* mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
int status;
BUG_ON(!journal);
status = jbd2_journal_wipe(journal->j_journal, full);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
if (status < 0)
mlog_errno(status);
bail:
return status;
}
static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
int empty;
struct ocfs2_recovery_map *rm = osb->recovery_map;
spin_lock(&osb->osb_lock);
empty = (rm->rm_used == 0);
spin_unlock(&osb->osb_lock);
return empty;
}
void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}
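/*
 * Editor's note: an illustrative userspace sketch (not part of this
 * file, not compiled) of the pattern ocfs2_wait_for_recovery() relies
 * on: sleep until a lock-protected predicate ("recovery map is empty")
 * becomes true. The kernel's wait_event()/wake_up() pair is modeled
 * with a pthread mutex and condition variable; all names below are
 * hypothetical.
 */
#if 0
#include <pthread.h>

struct recovery_map_demo {
	pthread_mutex_t lock;
	pthread_cond_t event;
	int used;		/* nodes still being recovered */
};

static void wait_for_recovery_demo(struct recovery_map_demo *rm)
{
	pthread_mutex_lock(&rm->lock);
	while (rm->used != 0)	/* re-check the predicate on every wakeup */
		pthread_cond_wait(&rm->event, &rm->lock);
	pthread_mutex_unlock(&rm->lock);
}

static void recovery_done_demo(struct recovery_map_demo *rm)
{
	pthread_mutex_lock(&rm->lock);
	rm->used--;				/* one node finished */
	pthread_cond_broadcast(&rm->event);	/* cf. wake_up() */
	pthread_mutex_unlock(&rm->lock);
}
#endif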
/*
* JBD might read a cached version of another node's journal file. We
* don't want this, as this file changes often and we get no
* notification of those changes. The only way to be sure that we've
* got the most up-to-date version of those blocks is to force-read
* them off disk. Just searching through the buffer cache won't
* work, as there may be pages backing this file which are still marked
* up to date. We know things can't change on this file underneath us
* as we have the lock by now :)
*/
static int ocfs2_force_read_journal(struct inode *inode)
{
int status = 0;
int i;
u64 v_blkno, p_blkno, p_blocks, num_blocks;
struct buffer_head *bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
v_blkno = 0;
while (v_blkno < num_blocks) {
status = ocfs2_extent_map_get_blocks(inode, v_blkno,
&p_blkno, &p_blocks, NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
}
for (i = 0; i < p_blocks; i++, p_blkno++) {
bh = __find_get_block(osb->sb->s_bdev, p_blkno,
osb->sb->s_blocksize);
/* block not cached. */
if (!bh)
continue;
brelse(bh);
bh = NULL;
/* We are reading journal data which should not
* be put in the uptodate cache.
*/
status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
brelse(bh);
bh = NULL;
}
v_blkno += p_blocks;
}
bail:
return status;
}
struct ocfs2_la_recovery_item {
struct list_head lri_list;
int lri_slot;
struct ocfs2_dinode *lri_la_dinode;
struct ocfs2_dinode *lri_tl_dinode;
struct ocfs2_quota_recovery *lri_qrec;
enum ocfs2_orphan_reco_type lri_orphan_reco_type;
};
/* Does the second half of the recovery process. By this point, the
* node is marked clean and can actually be considered recovered,
* hence it's no longer in the recovery map, but there's still some
* cleanup we can do which shouldn't happen within the recovery thread
* as locking in that context becomes very difficult if we are to take
* recovering nodes into account.
*
* NOTE: This function can and will sleep on recovery of other nodes
* during cluster locking, just like any other ocfs2 process.
*/
void ocfs2_complete_recovery(struct work_struct *work)
{
int ret = 0;
struct ocfs2_journal *journal =
container_of(work, struct ocfs2_journal, j_recovery_work);
struct ocfs2_super *osb = journal->j_osb;
struct ocfs2_dinode *la_dinode, *tl_dinode;
struct ocfs2_la_recovery_item *item, *n;
struct ocfs2_quota_recovery *qrec;
enum ocfs2_orphan_reco_type orphan_reco_type;
LIST_HEAD(tmp_la_list);
trace_ocfs2_complete_recovery(
(unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
spin_lock(&journal->j_lock);
list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
spin_unlock(&journal->j_lock);
list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
list_del_init(&item->lri_list);
ocfs2_wait_on_quotas(osb);
la_dinode = item->lri_la_dinode;
tl_dinode = item->lri_tl_dinode;
qrec = item->lri_qrec;
orphan_reco_type = item->lri_orphan_reco_type;
trace_ocfs2_complete_recovery_slot(item->lri_slot,
la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
qrec);
if (la_dinode) {
ret = ocfs2_complete_local_alloc_recovery(osb,
la_dinode);
if (ret < 0)
mlog_errno(ret);
kfree(la_dinode);
}
if (tl_dinode) {
ret = ocfs2_complete_truncate_log_recovery(osb,
tl_dinode);
if (ret < 0)
mlog_errno(ret);
kfree(tl_dinode);
}
ret = ocfs2_recover_orphans(osb, item->lri_slot,
orphan_reco_type);
if (ret < 0)
mlog_errno(ret);
if (qrec) {
ret = ocfs2_finish_quota_recovery(osb, qrec,
item->lri_slot);
if (ret < 0)
mlog_errno(ret);
/* Recovery info is already freed now */
}
kfree(item);
}
trace_ocfs2_complete_recovery_end(ret);
}
/* NOTE: This function always eats your references to la_dinode and
* tl_dinode, either manually on error, or by passing them to
* ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
int slot_num,
struct ocfs2_dinode *la_dinode,
struct ocfs2_dinode *tl_dinode,
struct ocfs2_quota_recovery *qrec,
enum ocfs2_orphan_reco_type orphan_reco_type)
{
struct ocfs2_la_recovery_item *item;
item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
if (!item) {
/* Though we wish to avoid it, we are in fact safe in
* skipping local alloc cleanup as fsck.ocfs2 is more
* than capable of reclaiming unused space. */
kfree(la_dinode);
kfree(tl_dinode);
if (qrec)
ocfs2_free_quota_recovery(qrec);
mlog_errno(-ENOMEM);
return;
}
INIT_LIST_HEAD(&item->lri_list);
item->lri_la_dinode = la_dinode;
item->lri_slot = slot_num;
item->lri_tl_dinode = tl_dinode;
item->lri_qrec = qrec;
item->lri_orphan_reco_type = orphan_reco_type;
spin_lock(&journal->j_lock);
list_add_tail(&item->lri_list, &journal->j_la_cleanups);
queue_work(journal->j_osb->ocfs2_wq, &journal->j_recovery_work);
spin_unlock(&journal->j_lock);
}
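/*
 * Editor's note: an illustrative sketch (not compiled) of the
 * queue/drain handshake used by ocfs2_queue_recovery_completion() and
 * ocfs2_complete_recovery(): producers append items to a shared list
 * under a lock and kick a worker; the worker splices the whole list to
 * a private head under the same lock and processes the items with the
 * lock dropped, as list_splice_init() does above. Hypothetical names.
 */
#if 0
#include <pthread.h>
#include <stddef.h>

struct demo_item {
	struct demo_item *next;
	int slot;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_item *demo_pending;	/* cf. journal->j_la_cleanups */

static void demo_queue(struct demo_item *item)
{
	pthread_mutex_lock(&demo_lock);
	item->next = demo_pending;	/* LIFO here; the kernel list is FIFO */
	demo_pending = item;
	pthread_mutex_unlock(&demo_lock);
	/* a real implementation would kick the worker here, cf. queue_work() */
}

static void demo_drain(void (*process)(struct demo_item *))
{
	struct demo_item *head, *next;

	pthread_mutex_lock(&demo_lock);
	head = demo_pending;		/* cf. list_splice_init() */
	demo_pending = NULL;
	pthread_mutex_unlock(&demo_lock);

	for (; head; head = next) {	/* process without holding the lock */
		next = head->next;
		process(head);
	}
}
#endif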
/* Called by the mount code to queue the last part of recovery
 * for its own and offline slot(s). */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
struct ocfs2_journal *journal = osb->journal;
if (ocfs2_is_hard_readonly(osb))
return;
/* No need to queue up our truncate_log as regular cleanup will catch
* that */
ocfs2_queue_recovery_completion(journal, osb->slot_num,
osb->local_alloc_copy, NULL, NULL,
ORPHAN_NEED_TRUNCATE);
ocfs2_schedule_truncate_log_flush(osb, 0);
osb->local_alloc_copy = NULL;
/* queue to recover orphan slots for all offline slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
ocfs2_free_replay_slots(osb);
}
void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
if (osb->quota_rec) {
ocfs2_queue_recovery_completion(osb->journal,
osb->slot_num,
NULL,
NULL,
osb->quota_rec,
ORPHAN_NEED_TRUNCATE);
osb->quota_rec = NULL;
}
}
static int __ocfs2_recovery_thread(void *arg)
{
int status, node_num, slot_num;
struct ocfs2_super *osb = arg;
struct ocfs2_recovery_map *rm = osb->recovery_map;
int *rm_quota = NULL;
int rm_quota_used = 0, i;
struct ocfs2_quota_recovery *qrec;
/* Whether the quota supported. */
int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
|| OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);
status = ocfs2_wait_on_mount(osb);
if (status < 0) {
goto bail;
}
if (quota_enabled) {
rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
if (!rm_quota) {
status = -ENOMEM;
goto bail;
}
}
restart:
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_compute_replay_slots(osb);
if (status < 0)
mlog_errno(status);
/* queue recovery for our own slot */
ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
NULL, NULL, ORPHAN_NO_NEED_TRUNCATE);
spin_lock(&osb->osb_lock);
while (rm->rm_used) {
/* It's always safe to remove entry zero, as we won't
* clear it until ocfs2_recover_node() has succeeded. */
node_num = rm->rm_entries[0];
spin_unlock(&osb->osb_lock);
slot_num = ocfs2_node_num_to_slot(osb, node_num);
trace_ocfs2_recovery_thread_node(node_num, slot_num);
if (slot_num == -ENOENT) {
status = 0;
goto skip_recovery;
}
/* It is a bit subtle with quota recovery. We cannot do it
* immediately because we have to obtain cluster locks from
* quota files and we also don't want to just skip it because
* then quota usage would be out of sync until some node takes
* the slot. So we remember which nodes need quota recovery
* and when everything else is done, we recover quotas. */
if (quota_enabled) {
for (i = 0; i < rm_quota_used
&& rm_quota[i] != slot_num; i++)
;
if (i == rm_quota_used)
rm_quota[rm_quota_used++] = slot_num;
}
status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
if (!status) {
ocfs2_recovery_map_clear(osb, node_num);
} else {
mlog(ML_ERROR,
"Error %d recovering node %d on device (%u,%u)!\n",
status, node_num,
MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
mlog(ML_ERROR, "Volume requires unmount.\n");
}
spin_lock(&osb->osb_lock);
}
spin_unlock(&osb->osb_lock);
trace_ocfs2_recovery_thread_end(status);
/* Refresh all journal recovery generations from disk */
status = ocfs2_check_journals_nolocks(osb);
status = (status == -EROFS) ? 0 : status;
if (status < 0)
mlog_errno(status);
/* Now it is right time to recover quotas... We have to do this under
* superblock lock so that no one can start using the slot (and crash)
* before we recover it */
if (quota_enabled) {
for (i = 0; i < rm_quota_used; i++) {
qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
if (IS_ERR(qrec)) {
status = PTR_ERR(qrec);
mlog_errno(status);
continue;
}
ocfs2_queue_recovery_completion(osb->journal,
rm_quota[i],
NULL, NULL, qrec,
ORPHAN_NEED_TRUNCATE);
}
}
ocfs2_super_unlock(osb, 1);
/* queue recovery for offline slots */
ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
bail:
mutex_lock(&osb->recovery_lock);
if (!status && !ocfs2_recovery_completed(osb)) {
mutex_unlock(&osb->recovery_lock);
goto restart;
}
ocfs2_free_replay_slots(osb);
osb->recovery_thread_task = NULL;
mb(); /* sync with ocfs2_recovery_thread_running */
wake_up(&osb->recovery_event);
mutex_unlock(&osb->recovery_lock);
if (quota_enabled)
kfree(rm_quota);
return status;
}
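/*
 * Editor's note: illustrative standalone sketch of the small
 * add-if-absent scan __ocfs2_recovery_thread() uses to remember which
 * slots still need quota recovery (the rm_quota[] loop above). Not
 * part of this file; names are hypothetical.
 */
#if 0
/* Append slot_num if absent; returns the new number of used entries. */
static int remember_slot_demo(int *slots, int used, int slot_num)
{
	int i;

	for (i = 0; i < used && slots[i] != slot_num; i++)
		;
	if (i == used)		/* not seen before: remember it */
		slots[used++] = slot_num;
	return used;
}
#endif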
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
mutex_lock(&osb->recovery_lock);
trace_ocfs2_recovery_thread(node_num, osb->node_num,
osb->disable_recovery, osb->recovery_thread_task,
osb->disable_recovery ?
-1 : ocfs2_recovery_map_set(osb, node_num));
if (osb->disable_recovery)
goto out;
if (osb->recovery_thread_task)
goto out;
osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
"ocfs2rec-%s", osb->uuid_str);
if (IS_ERR(osb->recovery_thread_task)) {
mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
osb->recovery_thread_task = NULL;
}
out:
mutex_unlock(&osb->recovery_lock);
wake_up(&osb->recovery_event);
}
static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
int slot_num,
struct buffer_head **bh,
struct inode **ret_inode)
{
int status = -EACCES;
struct inode *inode = NULL;
BUG_ON(slot_num >= osb->max_slots);
inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
slot_num);
if (!inode || is_bad_inode(inode)) {
mlog_errno(status);
goto bail;
}
SET_INODE_JOURNAL(inode);
status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if (inode) {
if (status || !ret_inode)
iput(inode);
else
*ret_inode = inode;
}
return status;
}
/* Does the actual journal replay and marks the journal inode as
* clean. Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
int node_num,
int slot_num)
{
int status;
int got_lock = 0;
unsigned int flags;
struct inode *inode = NULL;
struct ocfs2_dinode *fe;
journal_t *journal = NULL;
struct buffer_head *bh = NULL;
u32 slot_reco_gen;
status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
if (status) {
mlog_errno(status);
goto done;
}
fe = (struct ocfs2_dinode *)bh->b_data;
slot_reco_gen = ocfs2_get_recovery_generation(fe);
brelse(bh);
bh = NULL;
/*
* As the fs recovery is asynchronous, there is a small chance that
* another node mounted (and recovered) the slot before the recovery
* thread could get the lock. To handle that, we dirty read the journal
* inode for that slot to get the recovery generation. If it is
* different from what we expected, the slot has been recovered.
* If not, it needs recovery.
*/
if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
trace_ocfs2_replay_journal_recovered(slot_num,
osb->slot_recovery_generations[slot_num], slot_reco_gen);
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
status = -EBUSY;
goto done;
}
/* Continue with recovery as the journal has not yet been recovered */
status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
if (status < 0) {
trace_ocfs2_replay_journal_lock_err(status);
if (status != -ERESTARTSYS)
mlog(ML_ERROR, "Could not lock journal!\n");
goto done;
}
got_lock = 1;
fe = (struct ocfs2_dinode *) bh->b_data;
flags = le32_to_cpu(fe->id1.journal1.ij_flags);
slot_reco_gen = ocfs2_get_recovery_generation(fe);
if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
trace_ocfs2_replay_journal_skip(node_num);
/* Refresh recovery generation for the slot */
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
goto done;
}
/* we need to run complete recovery for offline orphan slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
"device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
MINOR(osb->sb->s_dev));
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
status = ocfs2_force_read_journal(inode);
if (status < 0) {
mlog_errno(status);
goto done;
}
journal = jbd2_journal_init_inode(inode);
if (IS_ERR(journal)) {
mlog(ML_ERROR, "Linux journal layer error\n");
status = PTR_ERR(journal);
goto done;
}
status = jbd2_journal_load(journal);
if (status < 0) {
mlog_errno(status);
BUG_ON(!igrab(inode));
jbd2_journal_destroy(journal);
goto done;
}
ocfs2_clear_journal_error(osb->sb, journal, slot_num);
/* wipe the journal */
jbd2_journal_lock_updates(journal);
status = jbd2_journal_flush(journal, 0);
jbd2_journal_unlock_updates(journal);
if (status < 0)
mlog_errno(status);
/* This will mark the node clean */
flags = le32_to_cpu(fe->id1.journal1.ij_flags);
flags &= ~OCFS2_JOURNAL_DIRTY_FL;
fe->id1.journal1.ij_flags = cpu_to_le32(flags);
/* Increment recovery generation to indicate successful recovery */
ocfs2_bump_recovery_generation(fe);
osb->slot_recovery_generations[slot_num] =
ocfs2_get_recovery_generation(fe);
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
if (status < 0)
mlog_errno(status);
BUG_ON(!igrab(inode));
jbd2_journal_destroy(journal);
printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
"device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
MINOR(osb->sb->s_dev));
done:
/* drop the lock on this node's journal */
if (got_lock)
ocfs2_inode_unlock(inode, 1);
iput(inode);
brelse(bh);
return status;
}
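/*
 * Editor's note: illustrative sketch of the generation test performed
 * at the top of ocfs2_replay_journal(): if the on-disk recovery
 * generation no longer matches the cached one, another node already
 * recovered the slot and we back off with -EBUSY. Standalone;
 * hypothetical names.
 */
#if 0
#include <errno.h>
#include <stdint.h>

static int slot_needs_replay_demo(uint32_t *cached_gen, uint32_t disk_gen)
{
	if (*cached_gen != disk_gen) {
		*cached_gen = disk_gen;	/* refresh our cached view */
		return -EBUSY;		/* someone else recovered the slot */
	}
	return 0;			/* still ours to replay */
}
#endif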
/*
* Do the most important parts of node recovery:
* - Replay its journal
* - Stamp a clean local allocator file
* - Stamp a clean truncate log
* - Mark the node clean
*
* If this function completes without error, a node in OCFS2 can be
* said to have been safely recovered. As a result, failure during the
* second part of a node's recovery process (local alloc recovery) is
* far less concerning.
*/
static int ocfs2_recover_node(struct ocfs2_super *osb,
int node_num, int slot_num)
{
int status = 0;
struct ocfs2_dinode *la_copy = NULL;
struct ocfs2_dinode *tl_copy = NULL;
trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
/* Should not ever be called to recover ourselves -- in that
* case we should've called ocfs2_journal_load instead. */
BUG_ON(osb->node_num == node_num);
status = ocfs2_replay_journal(osb, node_num, slot_num);
if (status < 0) {
if (status == -EBUSY) {
trace_ocfs2_recover_node_skip(slot_num, node_num);
status = 0;
goto done;
}
mlog_errno(status);
goto done;
}
/* Stamp a clean local alloc file AFTER recovering the journal... */
status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
if (status < 0) {
mlog_errno(status);
goto done;
}
/* An error from begin_truncate_log_recovery is not
* serious enough to warrant halting the rest of
* recovery. */
status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
if (status < 0)
mlog_errno(status);
/* Likewise, this would be a strange but ultimately not so
* harmful place to get an error... */
status = ocfs2_clear_slot(osb, slot_num);
if (status < 0)
mlog_errno(status);
/* This will kfree the memory pointed to by la_copy and tl_copy */
ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
tl_copy, NULL, ORPHAN_NEED_TRUNCATE);
status = 0;
done:
return status;
}
/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
* still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
int slot_num)
{
int status, flags;
struct inode *inode = NULL;
inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
slot_num);
if (inode == NULL) {
mlog(ML_ERROR, "access error\n");
status = -EACCES;
goto bail;
}
if (is_bad_inode(inode)) {
mlog(ML_ERROR, "access error (bad inode)\n");
iput(inode);
inode = NULL;
status = -EACCES;
goto bail;
}
SET_INODE_JOURNAL(inode);
flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
if (status < 0) {
if (status != -EAGAIN)
mlog_errno(status);
goto bail;
}
ocfs2_inode_unlock(inode, 1);
bail:
iput(inode);
return status;
}
/* Call this underneath ocfs2_super_lock. It also assumes that the
* slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
unsigned int node_num;
int status, i;
u32 gen;
struct buffer_head *bh = NULL;
struct ocfs2_dinode *di;
/* This is called with the super block cluster lock, so we
* know that the slot map can't change underneath us. */
for (i = 0; i < osb->max_slots; i++) {
/* Read journal inode to get the recovery generation */
status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
if (status) {
mlog_errno(status);
goto bail;
}
di = (struct ocfs2_dinode *)bh->b_data;
gen = ocfs2_get_recovery_generation(di);
brelse(bh);
bh = NULL;
spin_lock(&osb->osb_lock);
osb->slot_recovery_generations[i] = gen;
trace_ocfs2_mark_dead_nodes(i,
osb->slot_recovery_generations[i]);
if (i == osb->slot_num) {
spin_unlock(&osb->osb_lock);
continue;
}
status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
if (status == -ENOENT) {
spin_unlock(&osb->osb_lock);
continue;
}
if (__ocfs2_recovery_map_test(osb, node_num)) {
spin_unlock(&osb->osb_lock);
continue;
}
spin_unlock(&osb->osb_lock);
/* Ok, we have a slot occupied by another node which
* is not in the recovery map. We trylock its journal
* file here to test if it's alive. */
status = ocfs2_trylock_journal(osb, i);
if (!status) {
/* Since we're called from mount, we know that
* the recovery thread can't race us on
* setting / checking the recovery bits. */
ocfs2_recovery_thread(osb, node_num);
} else if ((status < 0) && (status != -EAGAIN)) {
mlog_errno(status);
goto bail;
}
}
status = 0;
bail:
return status;
}
/*
* The scan timer should fire every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
* randomness to the timeout to minimize multiple nodes firing the timer at the
* same time.
*/
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
unsigned long time;
get_random_bytes(&time, sizeof(time));
time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
return msecs_to_jiffies(time);
}
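/*
 * Editor's note: illustrative userspace sketch of the jittered timeout
 * computed above: a fixed base period plus up to 5 seconds of random
 * slack so that multiple nodes do not fire their scan timers in
 * lockstep. The base value is an assumption for the demo, and rand()
 * stands in for get_random_bytes().
 */
#if 0
#include <stdlib.h>

#define DEMO_BASE_TIMEOUT_MS	300000UL	/* assumed base period */

static unsigned long orphan_scan_timeout_demo(void)
{
	return DEMO_BASE_TIMEOUT_MS + ((unsigned long)rand() % 5000);
}
#endif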
/*
* ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
* every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
* is done to catch any orphans that are left over in orphan directories.
*
* It scans all slots, even ones that are in use. It does so to handle the
* case described below:
*
* Node 1 has an inode it was using. The dentry went away due to memory
* pressure. Node 1 closes the inode, but it's on the free list. The node
* has the open lock.
* Node 2 unlinks the inode. It grabs the dentry lock to notify others,
* but node 1 has no dentry and doesn't get the message. It trylocks the
* open lock, sees that another node has a PR, and does nothing.
* Later node 2 runs its orphan dir. It igets the inode, trylocks the
* open lock, sees the PR still, and does nothing.
* Basically, we have to trigger an orphan iput on node 1. The only way
* for this to happen is if node 1 runs node 2's orphan dir.
*
* ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
* seconds. It gets an EX lock on os_lockres and checks sequence number
* stored in LVB. If the sequence number has changed, it means some other
* node has done the scan. This node skips the scan and tracks the
* sequence number. If the sequence number didn't change, it means a scan
* hasn't happened. The node queues a scan and increments the
* sequence number in the LVB.
*/
static void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
struct ocfs2_orphan_scan *os;
int status, i;
u32 seqno = 0;
os = &osb->osb_orphan_scan;
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
goto out;
trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
atomic_read(&os->os_state));
status = ocfs2_orphan_scan_lock(osb, &seqno);
if (status < 0) {
if (status != -EAGAIN)
mlog_errno(status);
goto out;
}
/* Do not queue the tasks if the volume is being unmounted */
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
goto unlock;
if (os->os_seqno != seqno) {
os->os_seqno = seqno;
goto unlock;
}
for (i = 0; i < osb->max_slots; i++)
ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
NULL, ORPHAN_NO_NEED_TRUNCATE);
/*
* We queued a recovery on the orphan slots; increment the sequence
* number and update the LVB so other nodes will skip the scan for a while
*/
seqno++;
os->os_count++;
os->os_scantime = ktime_get_seconds();
unlock:
ocfs2_orphan_scan_unlock(osb, seqno);
out:
trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
atomic_read(&os->os_state));
return;
}
/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
static void ocfs2_orphan_scan_work(struct work_struct *work)
{
struct ocfs2_orphan_scan *os;
struct ocfs2_super *osb;
os = container_of(work, struct ocfs2_orphan_scan,
os_orphan_scan_work.work);
osb = os->os_osb;
mutex_lock(&os->os_lock);
ocfs2_queue_orphan_scan(osb);
if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
ocfs2_orphan_scan_timeout());
mutex_unlock(&os->os_lock);
}
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
struct ocfs2_orphan_scan *os;
os = &osb->osb_orphan_scan;
if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
mutex_lock(&os->os_lock);
cancel_delayed_work(&os->os_orphan_scan_work);
mutex_unlock(&os->os_lock);
}
}
void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
struct ocfs2_orphan_scan *os;
os = &osb->osb_orphan_scan;
os->os_osb = osb;
os->os_count = 0;
os->os_seqno = 0;
mutex_init(&os->os_lock);
INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
}
void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
{
struct ocfs2_orphan_scan *os;
os = &osb->osb_orphan_scan;
os->os_scantime = ktime_get_seconds();
if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
else {
atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
ocfs2_orphan_scan_timeout());
}
}
struct ocfs2_orphan_filldir_priv {
struct dir_context ctx;
struct inode *head;
struct ocfs2_super *osb;
enum ocfs2_orphan_reco_type orphan_reco_type;
};
static bool ocfs2_orphan_filldir(struct dir_context *ctx, const char *name,
int name_len, loff_t pos, u64 ino,
unsigned type)
{
struct ocfs2_orphan_filldir_priv *p =
container_of(ctx, struct ocfs2_orphan_filldir_priv, ctx);
struct inode *iter;
if (name_len == 1 && !strncmp(".", name, 1))
return true;
if (name_len == 2 && !strncmp("..", name, 2))
return true;
/* do not include dio entry in case of orphan scan */
if ((p->orphan_reco_type == ORPHAN_NO_NEED_TRUNCATE) &&
(!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
OCFS2_DIO_ORPHAN_PREFIX_LEN)))
return true;
/* Skip bad inodes so that recovery can continue */
iter = ocfs2_iget(p->osb, ino,
OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
if (IS_ERR(iter))
return true;
if (!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
OCFS2_DIO_ORPHAN_PREFIX_LEN))
OCFS2_I(iter)->ip_flags |= OCFS2_INODE_DIO_ORPHAN_ENTRY;
/* Skip inodes which have already been added to the recovery list,
 * since dio may happen concurrently with unlink/rename */
if (OCFS2_I(iter)->ip_next_orphan) {
iput(iter);
return true;
}
trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
/* No locking is required for the next_orphan queue as there
* is only ever a single process doing orphan recovery. */
OCFS2_I(iter)->ip_next_orphan = p->head;
p->head = iter;
return true;
}
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
int slot,
struct inode **head,
enum ocfs2_orphan_reco_type orphan_reco_type)
{
int status;
struct inode *orphan_dir_inode = NULL;
struct ocfs2_orphan_filldir_priv priv = {
.ctx.actor = ocfs2_orphan_filldir,
.osb = osb,
.head = *head,
.orphan_reco_type = orphan_reco_type
};
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
slot);
if (!orphan_dir_inode) {
status = -ENOENT;
mlog_errno(status);
return status;
}
inode_lock(orphan_dir_inode);
status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
if (status) {
mlog_errno(status);
goto out_cluster;
}
*head = priv.head;
out_cluster:
ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
return status;
}
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
int slot)
{
int ret;
spin_lock(&osb->osb_lock);
ret = !osb->osb_orphan_wipes[slot];
spin_unlock(&osb->osb_lock);
return ret;
}
static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
int slot)
{
spin_lock(&osb->osb_lock);
/* Mark ourselves such that new processes in delete_inode()
* know to quit early. */
ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
while (osb->osb_orphan_wipes[slot]) {
/* If any processes are already in the middle of an
* orphan wipe on this dir, then we need to wait for
* them. */
spin_unlock(&osb->osb_lock);
wait_event_interruptible(osb->osb_wipe_event,
ocfs2_orphan_recovery_can_continue(osb, slot));
spin_lock(&osb->osb_lock);
}
spin_unlock(&osb->osb_lock);
}
static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
int slot)
{
ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}
/*
* Orphan recovery. Each mounted node has its own orphan dir which we
* must run during recovery. Our strategy here is to build a list of
* the inodes in the orphan dir and iget/iput them. The VFS does
* most of the rest of the work.
*
* Orphan recovery can happen at any time, not just mount so we have a
* couple of extra considerations.
*
* - We grab as many inodes as we can under the orphan dir lock -
* doing iget() outside the orphan dir risks getting a reference on
* an invalid inode.
* - We must be sure not to deadlock with other processes on the
* system wanting to run delete_inode(). This can happen when they go
* to lock the orphan dir and the orphan recovery process attempts to
* iget() inside the orphan dir lock. This can be avoided by
* advertising our state to ocfs2_delete_inode().
*/
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
int slot,
enum ocfs2_orphan_reco_type orphan_reco_type)
{
int ret = 0;
struct inode *inode = NULL;
struct inode *iter;
struct ocfs2_inode_info *oi;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di = NULL;
trace_ocfs2_recover_orphans(slot);
ocfs2_mark_recovering_orphan_dir(osb, slot);
ret = ocfs2_queue_orphans(osb, slot, &inode, orphan_reco_type);
ocfs2_clear_recovering_orphan_dir(osb, slot);
/* Error here should be noted, but we want to continue with as
* many queued inodes as we've got. */
if (ret)
mlog_errno(ret);
while (inode) {
oi = OCFS2_I(inode);
trace_ocfs2_recover_orphans_iput(
(unsigned long long)oi->ip_blkno);
iter = oi->ip_next_orphan;
oi->ip_next_orphan = NULL;
if (oi->ip_flags & OCFS2_INODE_DIO_ORPHAN_ENTRY) {
inode_lock(inode);
ret = ocfs2_rw_lock(inode, 1);
if (ret < 0) {
mlog_errno(ret);
goto unlock_mutex;
}
/*
* We need to take and drop the inode lock to
* force a read of the inode from disk.
*/
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
goto unlock_rw;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
if (di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)) {
ret = ocfs2_truncate_file(inode, di_bh,
i_size_read(inode));
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto unlock_inode;
}
ret = ocfs2_del_inode_from_orphan(osb, inode,
di_bh, 0, 0);
if (ret)
mlog_errno(ret);
}
unlock_inode:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
di_bh = NULL;
unlock_rw:
ocfs2_rw_unlock(inode, 1);
unlock_mutex:
inode_unlock(inode);
/* clear dio flag in ocfs2_inode_info */
oi->ip_flags &= ~OCFS2_INODE_DIO_ORPHAN_ENTRY;
} else {
spin_lock(&oi->ip_lock);
/* Set the proper information to get us going into
* ocfs2_delete_inode. */
oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
spin_unlock(&oi->ip_lock);
}
iput(inode);
inode = iter;
}
return ret;
}
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
/* This check is good because ocfs2 will wait on our recovery
* thread before changing it to something other than MOUNTED
* or DISABLED. */
wait_event(osb->osb_mount_event,
(!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
atomic_read(&osb->vol_state) == VOLUME_DISABLED);
/* If there's an error on mount, then we may never get to the
* MOUNTED flag, but this is set right before
* dismount_volume() so we can trust it. */
if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
mlog(0, "mount error, exiting!\n");
return -EBUSY;
}
return 0;
}
static int ocfs2_commit_thread(void *arg)
{
int status;
struct ocfs2_super *osb = arg;
struct ocfs2_journal *journal = osb->journal;
/* we can trust j_num_trans here because _should_stop() is only set in
* shutdown and nobody other than ourselves should be able to start
* transactions. committing on shutdown might take a few iterations
* as final transactions put deleted inodes on the list */
while (!(kthread_should_stop() &&
atomic_read(&journal->j_num_trans) == 0)) {
wait_event_interruptible(osb->checkpoint_event,
atomic_read(&journal->j_num_trans)
|| kthread_should_stop());
status = ocfs2_commit_cache(osb);
if (status < 0) {
static unsigned long abort_warn_time;
/* Warn about this once per minute */
if (printk_timed_ratelimit(&abort_warn_time, 60*HZ))
mlog(ML_ERROR, "status = %d, journal is "
"already aborted.\n", status);
/*
* After ocfs2_commit_cache() fails, j_num_trans has a
* non-zero value. Sleep here to avoid a busy-wait
* loop.
*/
msleep_interruptible(1000);
}
if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){
mlog(ML_KTHREAD,
"commit_thread: %u transactions pending on "
"shutdown\n",
atomic_read(&journal->j_num_trans));
}
}
return 0;
}
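/*
 * Editor's note: illustrative sketch of the once-per-minute warning
 * throttle the commit thread builds with printk_timed_ratelimit()
 * above, rebuilt here with a plain clock. Standalone; hypothetical
 * names.
 */
#if 0
#include <stdbool.h>
#include <time.h>

/* Returns true at most once per 'interval' seconds. */
static bool ratelimit_demo(time_t *last, time_t interval)
{
	time_t now = time(NULL);

	if (now - *last >= interval) {
		*last = now;
		return true;	/* caller may emit the warning */
	}
	return false;		/* warning suppressed */
}
#endif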
/* Reads all the journal inodes without taking any cluster locks. Used
* for hard readonly access to determine whether any journal requires
* recovery. Also used to refresh the recovery generation numbers after
* a journal has been recovered by another node.
*/
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
int ret = 0;
unsigned int slot;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
int journal_dirty = 0;
for(slot = 0; slot < osb->max_slots; slot++) {
ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
di = (struct ocfs2_dinode *) di_bh->b_data;
osb->slot_recovery_generations[slot] =
ocfs2_get_recovery_generation(di);
if (le32_to_cpu(di->id1.journal1.ij_flags) &
OCFS2_JOURNAL_DIRTY_FL)
journal_dirty = 1;
brelse(di_bh);
di_bh = NULL;
}
out:
if (journal_dirty)
ret = -EROFS;
return ret;
}
| linux-master | fs/ocfs2/journal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* move_extents.c
*
* Copyright (C) 2011 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_ioctl.h"
#include "alloc.h"
#include "localalloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"
#include "move_extents.h"
struct ocfs2_move_extents_context {
struct inode *inode;
struct file *file;
int auto_defrag;
int partial;
int credits;
u32 new_phys_cpos;
u32 clusters_moved;
u64 refcount_loc;
struct ocfs2_move_extents *range;
struct ocfs2_extent_tree et;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_alloc_context *data_ac;
struct ocfs2_cached_dealloc_ctxt dealloc;
};
static int __ocfs2_move_extent(handle_t *handle,
struct ocfs2_move_extents_context *context,
u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
int ext_flags)
{
int ret = 0, index;
struct inode *inode = context->inode;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_rec *rec, replace_rec;
struct ocfs2_path *path = NULL;
struct ocfs2_extent_list *el;
u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
p_cpos, new_p_cpos, len);
if (ret) {
mlog_errno(ret);
goto out;
}
memset(&replace_rec, 0, sizeof(replace_rec));
replace_rec.e_cpos = cpu_to_le32(cpos);
replace_rec.e_leaf_clusters = cpu_to_le16(len);
replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
new_p_cpos));
path = ocfs2_new_path_from_et(&context->et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1) {
ret = ocfs2_error(inode->i_sb,
"Inode %llu has an extent at cpos %u which can no longer be found\n",
(unsigned long long)ino, cpos);
goto out;
}
rec = &el->l_recs[index];
BUG_ON(ext_flags != rec->e_flags);
/*
* after moving/defragging to the new location, the extent is not going
* to be refcounted anymore.
*/
replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
ret = ocfs2_split_extent(handle, &context->et, path, index,
&replace_rec, context->meta_ac,
&context->dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
context->new_phys_cpos = new_p_cpos;
/*
* do we need to append the truncate log for the old clusters?
*/
if (old_blkno) {
if (ext_flags & OCFS2_EXT_REFCOUNTED)
ret = ocfs2_decrease_refcount(inode, handle,
ocfs2_blocks_to_clusters(osb->sb,
old_blkno),
len, context->meta_ac,
&context->dealloc, 1);
else
ret = ocfs2_truncate_log_append(osb, handle,
old_blkno, len);
}
ocfs2_update_inode_fsync_trans(handle, inode, 0);
out:
ocfs2_free_path(path);
return ret;
}
/*
* lock allocator, and reserve appropriate number of bits for
* meta blocks.
*/
static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 clusters_to_move,
u32 extents_to_split,
struct ocfs2_alloc_context **meta_ac,
int extra_blocks,
int *credits)
{
int ret, num_free_extents;
unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
num_free_extents = ocfs2_num_free_extents(et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
if (!num_free_extents ||
(ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
extra_blocks, clusters_to_move, *credits);
out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
*meta_ac = NULL;
}
}
return ret;
}
/*
* Use one journal handle to guarantee data consistency in case a
* crash happens anywhere.
*
* XXX: defrag can end up finishing only part of the requested extent,
* when not enough contiguous clusters can be found in the allocator.
*/
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
int ret, credits = 0, extra_blocks = 0, partial = context->partial;
handle_t *handle;
struct inode *inode = context->inode;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_refcount_tree *ref_tree = NULL;
u32 new_phys_cpos, new_len;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
int need_free = 0;
if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
BUG_ON(!ocfs2_is_refcount_inode(inode));
BUG_ON(!context->refcount_loc);
ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
&ref_tree, NULL);
if (ret) {
mlog_errno(ret);
return ret;
}
ret = ocfs2_prepare_refcount_change_for_del(inode,
context->refcount_loc,
phys_blkno,
*len,
&credits,
&extra_blocks);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
*len, 1,
&context->meta_ac,
extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* should we be using the allocation reservation strategy here?
*
* if (context->data_ac)
* context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
*/
inode_lock(tl_inode);
if (ocfs2_truncate_log_needs_flush(osb)) {
ret = __ocfs2_flush_truncate_log(osb);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock_mutex;
}
}
/*
* Make sure ocfs2_reserve_clusters is called after
* __ocfs2_flush_truncate_log; otherwise a deadlock on the
* global bitmap may happen.
*/
ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
if (ret) {
mlog_errno(ret);
goto out_unlock_mutex;
}
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock_mutex;
}
ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
&new_phys_cpos, &new_len);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* Allowing partial extent moves is a trade-off: it makes the whole
* defragmentation less likely to fail, but on the other hand it may
* leave the fs even more fragmented after moving. Let userspace
* make the decision here.
*/
if (new_len != *len) {
mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
if (!partial) {
context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
ret = -ENOSPC;
need_free = 1;
goto out_commit;
}
}
mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
phys_cpos, new_phys_cpos);
ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
new_phys_cpos, ext_flags);
if (ret)
mlog_errno(ret);
if (partial && (new_len != *len))
*len = new_len;
/*
* Here we should write the new page out first if we are
* in write-back mode.
*/
ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
if (ret)
mlog_errno(ret);
out_commit:
if (need_free && context->data_ac) {
struct ocfs2_alloc_context *data_ac = context->data_ac;
if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
ocfs2_free_local_alloc_bits(osb, handle, data_ac,
new_phys_cpos, new_len);
else
ocfs2_free_clusters(handle,
data_ac->ac_inode,
data_ac->ac_bh,
ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
new_len);
}
ocfs2_commit_trans(osb, handle);
out_unlock_mutex:
inode_unlock(tl_inode);
if (context->data_ac) {
ocfs2_free_alloc_context(context->data_ac);
context->data_ac = NULL;
}
if (context->meta_ac) {
ocfs2_free_alloc_context(context->meta_ac);
context->meta_ac = NULL;
}
out:
if (ref_tree)
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
return ret;
}
/*
* find the victim alloc group where 'vict_blkno' fits.
*/
static int ocfs2_find_victim_alloc_group(struct inode *inode,
u64 vict_blkno,
int type, int slot,
int *vict_bit,
struct buffer_head **ret_bh)
{
int ret, i, bits_per_unit = 0;
u64 blkno;
char namebuf[40];
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
struct ocfs2_chain_list *cl;
struct ocfs2_chain_rec *rec;
struct ocfs2_dinode *ac_dinode;
struct ocfs2_group_desc *bg;
ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
strlen(namebuf), &blkno);
if (ret) {
ret = -ENOENT;
goto out;
}
ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
cl = &(ac_dinode->id2.i_chain);
rec = &(cl->cl_recs[0]);
if (type == GLOBAL_BITMAP_SYSTEM_INODE)
bits_per_unit = osb->s_clustersize_bits -
inode->i_sb->s_blocksize_bits;
/*
* 'vict_blkno' is out of the valid range.
*/
if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
(vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
bits_per_unit))) {
ret = -EINVAL;
goto out;
}
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
rec = &(cl->cl_recs[i]);
if (!rec)
continue;
bg = NULL;
do {
if (!bg)
blkno = le64_to_cpu(rec->c_blkno);
else
blkno = le64_to_cpu(bg->bg_next_group);
if (gd_bh) {
brelse(gd_bh);
gd_bh = NULL;
}
ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
*ret_bh = gd_bh;
*vict_bit = (vict_blkno - blkno) >>
bits_per_unit;
mlog(0, "find the victim group: #%llu, "
"total_bits: %u, vict_bit: %u\n",
blkno, le16_to_cpu(bg->bg_bits),
*vict_bit);
goto out;
}
} while (le64_to_cpu(bg->bg_next_group));
}
ret = -EINVAL;
out:
brelse(ac_bh);
/*
* caller has to release the gd_bh properly.
*/
return ret;
}
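/*
 * Editor's note: illustrative sketch of the shift arithmetic
 * ocfs2_find_victim_alloc_group() uses for the global bitmap: with
 * clusters of 2^c bytes and blocks of 2^b bytes, each bitmap bit
 * covers 2^(c-b) blocks, so a block offset maps to a bit via a right
 * shift. The sizes below are assumptions for the demo.
 */
#if 0
#include <stdint.h>

static uint64_t block_to_group_bit_demo(uint64_t vict_blkno,
					uint64_t group_blkno)
{
	const int clustersize_bits = 20;	/* assumed 1M clusters */
	const int blocksize_bits = 12;		/* assumed 4K blocks */
	const int bits_per_unit = clustersize_bits - blocksize_bits;

	return (vict_blkno - group_blkno) >> bits_per_unit;
}
#endif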
/*
* XXX: helper to validate and adjust moving goal.
*/
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
struct ocfs2_move_extents *range)
{
int ret, goal_bit = 0;
struct buffer_head *gd_bh = NULL;
struct ocfs2_group_desc *bg;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int c_to_b = 1 << (osb->s_clustersize_bits -
inode->i_sb->s_blocksize_bits);
/*
* make the goal cluster-aligned.
*/
range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
range->me_goal);
/*
* validate goal sits within global_bitmap, and return the victim
* group desc
*/
ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT,
&goal_bit, &gd_bh);
if (ret)
goto out;
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
/*
* the moving goal is not allowed to start at a group descriptor
* block (block #0); let's compromise to the next cluster.
*/
if (range->me_goal == le64_to_cpu(bg->bg_blkno))
range->me_goal += c_to_b;
/*
* the movement is not going to cross two groups.
*/
if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
range->me_len) {
ret = -EINVAL;
goto out;
}
/*
* more exact validations/adjustments will be performed later during
* moving operation for each extent range.
*/
mlog(0, "extents get ready to be moved to #%llu block\n",
range->me_goal);
out:
brelse(gd_bh);
return ret;
}
static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
int *goal_bit, u32 move_len, u32 max_hop,
u32 *phys_cpos)
{
int i, used, last_free_bits = 0, base_bit = *goal_bit;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
le64_to_cpu(gd->bg_blkno));
for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
if (used) {
/*
* we even tried searching for the free chunk by jumping
* up to a 'max_hop' distance, but still failed.
*/
if ((i - base_bit) > max_hop) {
*phys_cpos = 0;
break;
}
if (last_free_bits)
last_free_bits = 0;
continue;
} else
last_free_bits++;
if (last_free_bits == move_len) {
i -= move_len;
*goal_bit = i;
*phys_cpos = base_cpos + i;
break;
}
}
mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
}
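/*
 * Editor's note: illustrative sketch of the scan performed by
 * ocfs2_probe_alloc_group() above: walk a bitmap from a starting bit
 * looking for 'want' consecutive free bits, and give up once the
 * search has wandered more than 'max_hop' bits past the start.
 * Standalone demo using one byte per bit; hypothetical names.
 */
#if 0
/* Returns the first bit of the free run, or -1 if none within reach. */
static int find_free_run_demo(const unsigned char *used, int nbits,
			      int start, int want, int max_hop)
{
	int i, run = 0;

	for (i = start; i < nbits; i++) {
		if (used[i]) {
			if (i - start > max_hop)
				return -1;	/* hopped too far away */
			run = 0;
			continue;
		}
		if (++run == want)
			return i - want + 1;	/* start of the run */
	}
	return -1;
}
#endif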
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
u32 len, int ext_flags)
{
int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
handle_t *handle;
struct inode *inode = context->inode;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
struct inode *gb_inode = NULL;
struct buffer_head *gb_bh = NULL;
struct buffer_head *gd_bh = NULL;
struct ocfs2_group_desc *gd;
struct ocfs2_refcount_tree *ref_tree = NULL;
u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
context->range->me_threshold);
u64 phys_blkno, new_phys_blkno;
phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
BUG_ON(!ocfs2_is_refcount_inode(inode));
BUG_ON(!context->refcount_loc);
ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
&ref_tree, NULL);
if (ret) {
mlog_errno(ret);
return ret;
}
ret = ocfs2_prepare_refcount_change_for_del(inode,
context->refcount_loc,
phys_blkno,
len,
&credits,
&extra_blocks);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
len, 1,
&context->meta_ac,
extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* need to count 2 extra credits for global_bitmap inode and
* group descriptor.
*/
credits += OCFS2_INODE_UPDATE_CREDITS + 1;
/*
* ocfs2_move_extent() didn't reserve any clusters in lock_allocators()
* logic, but we still need to lock the global_bitmap.
*/
gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!gb_inode) {
mlog(ML_ERROR, "unable to get global_bitmap inode\n");
ret = -EIO;
goto out;
}
inode_lock(gb_inode);
ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
if (ret) {
mlog_errno(ret);
goto out_unlock_gb_mutex;
}
inode_lock(tl_inode);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock_tl_inode;
}
new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT,
&goal_bit, &gd_bh);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* probe the victim cluster group to find a proper
* region to fit the wanted movement; it will even perform
* a best-effort attempt by compromising to a threshold
* around the goal.
*/
ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
new_phys_cpos);
if (!*new_phys_cpos) {
ret = -ENOSPC;
goto out_commit;
}
ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
*new_phys_cpos, ext_flags);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
gd = (struct ocfs2_group_desc *)gd_bh->b_data;
ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
le16_to_cpu(gd->bg_chain));
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
goal_bit, len);
if (ret) {
ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
le16_to_cpu(gd->bg_chain));
mlog_errno(ret);
}
/*
* Here we should write the new page out first if we are
* in write-back mode.
*/
ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
if (ret)
mlog_errno(ret);
out_commit:
ocfs2_commit_trans(osb, handle);
brelse(gd_bh);
out_unlock_tl_inode:
inode_unlock(tl_inode);
ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
inode_unlock(gb_inode);
brelse(gb_bh);
iput(gb_inode);
out:
if (context->meta_ac) {
ocfs2_free_alloc_context(context->meta_ac);
context->meta_ac = NULL;
}
if (ref_tree)
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
return ret;
}
/*
* Helper to calculate the defragging length in one run according to the threshold.
*/
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
u32 threshold, int *skip)
{
if ((*alloc_size + *len_defraged) < threshold) {
/*
* proceed with defragmentation until we meet the threshold
*/
*len_defraged += *alloc_size;
} else if (*len_defraged == 0) {
/*
* XXX: skip a large extent.
*/
*skip = 1;
} else {
/*
* split this extent to coalesce with the former pieces so
* as to reach the threshold.
*
* we're done here with one cycle of defragmentation
* in a size of 'thresh'; resetting 'len_defraged'
* forces a new defragmentation.
*/
*alloc_size = threshold - *len_defraged;
*len_defraged = 0;
}
}
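/*
 * Editor's note: illustrative driver (not compiled) for the helper
 * above, tracing how 'len_defraged' accumulates toward 'threshold'.
 * With extents of 3, 4, 10 and 9 clusters and a threshold of 8: 3 and
 * 4 accumulate (7 so far), 10 is clipped to 1 to complete one
 * 8-cluster cycle, and 9 is then skipped because it alone exceeds the
 * threshold with nothing accumulated. Values are made up.
 */
#if 0
static void defrag_len_demo(void)
{
	unsigned int extents[] = { 3, 4, 10, 9 };	/* cluster counts */
	unsigned int threshold = 8, len_defraged = 0;
	int i, skip;

	for (i = 0; i < 4; i++) {
		unsigned int alloc_size = extents[i];

		skip = 0;
		ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
					     threshold, &skip);
		if (skip)
			continue;	/* the lone 9-cluster extent */
		/* a real caller would now defrag 'alloc_size' clusters */
	}
}
#endif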
static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
struct ocfs2_move_extents_context *context)
{
int ret = 0, flags, do_defrag, skip = 0;
u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
struct inode *inode = context->inode;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_move_extents *range = context->range;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if ((i_size_read(inode) == 0) || (range->me_len == 0))
return 0;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&context->dealloc);
/*
* TO-DO XXX:
*
* - xattr extents.
*/
do_defrag = context->auto_defrag;
/*
* extent moving happens in units of clusters; for the sake
* of simplicity, we may ignore the two clusters within which
* 'byte_start' and 'byte_start + len' fall.
*/
move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
len_to_move = (range->me_start + range->me_len) >>
osb->s_clustersize_bits;
if (len_to_move >= move_start)
len_to_move -= move_start;
else
len_to_move = 0;
if (do_defrag) {
defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
if (defrag_thresh <= 1)
goto done;
} else
new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
range->me_goal);
mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
"thresh: %u\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)range->me_start,
(unsigned long long)range->me_len,
move_start, len_to_move, defrag_thresh);
cpos = move_start;
while (len_to_move) {
ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
&flags);
if (ret) {
mlog_errno(ret);
goto out;
}
if (alloc_size > len_to_move)
alloc_size = len_to_move;
/*
* XXX: how to deal with a hole:
*
* - skip the hole of course
* - force a new defragmentation
*/
if (!phys_cpos) {
if (do_defrag)
len_defraged = 0;
goto next;
}
if (do_defrag) {
ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
defrag_thresh, &skip);
/*
* skip large extents
*/
if (skip) {
skip = 0;
goto next;
}
mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
"alloc_size: %u, len_defraged: %u\n",
cpos, phys_cpos, alloc_size, len_defraged);
ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
&alloc_size, flags);
} else {
ret = ocfs2_move_extent(context, cpos, phys_cpos,
&new_phys_cpos, alloc_size,
flags);
new_phys_cpos += alloc_size;
}
if (ret < 0) {
mlog_errno(ret);
goto out;
}
context->clusters_moved += alloc_size;
next:
cpos += alloc_size;
len_to_move -= alloc_size;
}
done:
range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
out:
range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
context->clusters_moved);
range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
context->new_phys_cpos);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &context->dealloc);
return ret;
}
static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
int status;
handle_t *handle;
struct inode *inode = context->inode;
struct ocfs2_dinode *di;
struct buffer_head *di_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
inode_lock(inode);
/*
* This prevents concurrent writes from other nodes
*/
status = ocfs2_rw_lock(inode, 1);
if (status) {
mlog_errno(status);
goto out;
}
status = ocfs2_inode_lock(inode, &di_bh, 1);
if (status) {
mlog_errno(status);
goto out_rw_unlock;
}
/*
* remember that ip_xattr_sem also needs to be held if necessary
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
status = __ocfs2_move_extents_range(di_bh, context);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
if (status) {
mlog_errno(status);
goto out_inode_unlock;
}
/*
* We update ctime for these changes
*/
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_inode_unlock;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status) {
mlog_errno(status);
goto out_commit;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
inode_set_ctime_current(inode);
di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out_inode_unlock:
brelse(di_bh);
ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
ocfs2_rw_unlock(inode, 1);
out:
inode_unlock(inode);
return status;
}
int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
int status;
struct inode *inode = file_inode(filp);
struct ocfs2_move_extents range;
struct ocfs2_move_extents_context *context;
if (!argp)
return -EINVAL;
status = mnt_want_write_file(filp);
if (status)
return status;
if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
status = -EPERM;
goto out_drop;
}
if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
status = -EPERM;
goto out_drop;
}
context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
if (!context) {
status = -ENOMEM;
mlog_errno(status);
goto out_drop;
}
context->inode = inode;
context->file = filp;
if (copy_from_user(&range, argp, sizeof(range))) {
status = -EFAULT;
goto out_free;
}
if (range.me_start > i_size_read(inode)) {
status = -EINVAL;
goto out_free;
}
if (range.me_start + range.me_len > i_size_read(inode))
range.me_len = i_size_read(inode) - range.me_start;
context->range = ⦥
/*
* the default threshold for defragmentation is 1M, since
* our maximum cluster size is also 1M.
*/
if (!range.me_threshold)
range.me_threshold = 1024 * 1024;
if (range.me_threshold > i_size_read(inode))
range.me_threshold = i_size_read(inode);
if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
context->auto_defrag = 1;
if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
context->partial = 1;
} else {
/*
* First make a best-effort attempt to validate and adjust the
* goal (a physical address in blocks). This cannot guarantee
* that the later move will always succeed, since global_bitmap
* may change over time.
*/
status = ocfs2_validate_and_adjust_move_goal(inode, &range);
if (status)
goto out_copy;
}
status = ocfs2_move_extents(context);
if (status)
mlog_errno(status);
out_copy:
/*
* Movement/defragmentation may end up only partially completed,
* which is why we must return the finished length and new_offset
* to userspace even if a failure happens somewhere along the way.
*/
if (copy_to_user(argp, &range, sizeof(range)))
status = -EFAULT;
out_free:
kfree(context);
out_drop:
mnt_drop_write_file(filp);
return status;
}
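/*
 * Illustrative userspace sketch (not part of this file; it assumes the
 * OCFS2_IOC_MOVE_EXT ioctl number and the userspace-visible struct
 * ocfs2_move_extents from ocfs2_ioctl.h):
 *
 *	struct ocfs2_move_extents range = {
 *		.me_start = 0,
 *		.me_len = 16ULL * 1024 * 1024,
 *		.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &range) < 0)
 *		perror("OCFS2_IOC_MOVE_EXT");
 *
 * On return, range.me_moved_len holds the number of bytes actually
 * moved, even when the operation only partially completed.
 */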
| linux-master | fs/ocfs2/move_extents.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sysfile.c
*
* Initialize, read, write, etc. system files.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "inode.h"
#include "journal.h"
#include "sysfile.h"
#include "buffer_head_io.h"
static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
int type,
u32 slot);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key ocfs2_sysfile_cluster_lock_key[NUM_SYSTEM_INODES];
#endif
static inline int is_global_system_inode(int type)
{
return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE &&
type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE;
}
static struct inode **get_local_system_inode(struct ocfs2_super *osb,
int type,
u32 slot)
{
int index;
struct inode **local_system_inodes, **free = NULL;
BUG_ON(slot == OCFS2_INVALID_SLOT);
BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE ||
type > OCFS2_LAST_LOCAL_SYSTEM_INODE);
spin_lock(&osb->osb_lock);
local_system_inodes = osb->local_system_inodes;
spin_unlock(&osb->osb_lock);
if (unlikely(!local_system_inodes)) {
local_system_inodes =
kzalloc(array3_size(sizeof(struct inode *),
NUM_LOCAL_SYSTEM_INODES,
osb->max_slots),
GFP_NOFS);
if (!local_system_inodes) {
mlog_errno(-ENOMEM);
/*
* return NULL here so that ocfs2_get_system_file_inode
* will try to create an inode and use it. We will try
* to initialize local_system_inodes next time.
*/
return NULL;
}
spin_lock(&osb->osb_lock);
if (osb->local_system_inodes) {
/* Someone has initialized it for us. */
free = local_system_inodes;
local_system_inodes = osb->local_system_inodes;
} else
osb->local_system_inodes = local_system_inodes;
spin_unlock(&osb->osb_lock);
kfree(free);
}
index = (slot * NUM_LOCAL_SYSTEM_INODES) +
(type - OCFS2_FIRST_LOCAL_SYSTEM_INODE);
return &local_system_inodes[index];
}
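/*
 * Worked example of the index math above (illustrative; the value of
 * NUM_LOCAL_SYSTEM_INODES here is assumed): if NUM_LOCAL_SYSTEM_INODES
 * were 4, then slot 2 looking up type OCFS2_FIRST_LOCAL_SYSTEM_INODE + 1
 * would land at index 2 * 4 + 1 == 9 in the per-slot cache array.
 */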
struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
int type,
u32 slot)
{
struct inode *inode = NULL;
struct inode **arr = NULL;
/* avoid the lookup if cached in local system file array */
if (is_global_system_inode(type)) {
arr = &(osb->global_system_inodes[type]);
} else
arr = get_local_system_inode(osb, type, slot);
mutex_lock(&osb->system_file_mutex);
if (arr && ((inode = *arr) != NULL)) {
/* get a ref in addition to the array ref */
inode = igrab(inode);
mutex_unlock(&osb->system_file_mutex);
BUG_ON(!inode);
return inode;
}
/* this gets one ref thru iget */
inode = _ocfs2_get_system_file_inode(osb, type, slot);
/* add one more if putting into array for first time */
if (arr && inode) {
*arr = igrab(inode);
BUG_ON(!*arr);
}
mutex_unlock(&osb->system_file_mutex);
return inode;
}
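/*
 * Usage sketch (illustrative): callers receive their own inode
 * reference and must drop it with iput() when done, e.g.
 *
 *	struct inode *gb_inode;
 *
 *	gb_inode = ocfs2_get_system_file_inode(osb,
 *					GLOBAL_BITMAP_SYSTEM_INODE,
 *					OCFS2_INVALID_SLOT);
 *	if (!gb_inode)
 *		return -EINVAL;
 *	... use the global bitmap inode ...
 *	iput(gb_inode);
 */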
static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
int type,
u32 slot)
{
char namebuf[40];
struct inode *inode = NULL;
u64 blkno;
int status = 0;
ocfs2_sprintf_system_inode_name(namebuf,
sizeof(namebuf),
type, slot);
status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
strlen(namebuf), &blkno);
if (status < 0) {
goto bail;
}
inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type);
if (IS_ERR(inode)) {
mlog_errno(PTR_ERR(inode));
inode = NULL;
goto bail;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
type == JOURNAL_SYSTEM_INODE) {
/* Ignore inode lock on these inodes as the lock does not
* really belong to any process and lockdep cannot handle
* that */
OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
} else {
lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
l_lockdep_map,
ocfs2_system_inodes[type].si_name,
&ocfs2_sysfile_cluster_lock_key[type], 0);
}
#endif
bail:
return inode;
}
| linux-master | fs/ocfs2/sysfile.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* alloc.c
*
* Extent allocs and frees
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
enum ocfs2_contig_type {
CONTIG_NONE = 0,
CONTIG_LEFT,
CONTIG_RIGHT,
CONTIG_LEFTRIGHT,
};
static enum ocfs2_contig_type
ocfs2_extent_rec_contig(struct super_block *sb,
struct ocfs2_extent_rec *ext,
struct ocfs2_extent_rec *insert_rec);
/*
* Operations for a specific extent tree type.
*
* To implement an on-disk btree (extent tree) type in ocfs2, add
* an ocfs2_extent_tree_operations structure and the matching
* ocfs2_init_<thingy>_extent_tree() function. That's pretty much it
* for the allocation portion of the extent tree.
*/
struct ocfs2_extent_tree_operations {
/*
* last_eb_blk is the block number of the right most leaf extent
* block. Most on-disk structures containing an extent tree store
* this value for fast access. The ->eo_set_last_eb_blk() and
* ->eo_get_last_eb_blk() operations access this value. They are
* both required.
*/
void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
u64 blkno);
u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);
/*
* The on-disk structure usually keeps track of how many total
* clusters are stored in this extent tree. This function updates
* that value. new_clusters is the delta, and must be
* added to the total. Required.
*/
void (*eo_update_clusters)(struct ocfs2_extent_tree *et,
u32 new_clusters);
/*
* If this extent tree is supported by an extent map, insert
* a record into the map.
*/
void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
/*
* If this extent tree is supported by an extent map, truncate the
* map to clusters.
*/
void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et,
u32 clusters);
/*
* If ->eo_insert_check() exists, it is called before rec is
* inserted into the extent tree. It is optional.
*/
int (*eo_insert_check)(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
int (*eo_sanity_check)(struct ocfs2_extent_tree *et);
/*
* --------------------------------------------------------------
* The remaining are internal to ocfs2_extent_tree and don't have
* accessor functions
*/
/*
* ->eo_fill_root_el() takes et->et_object and sets et->et_root_el.
* It is required.
*/
void (*eo_fill_root_el)(struct ocfs2_extent_tree *et);
/*
* ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if
* it exists. If it does not, et->et_max_leaf_clusters is set
* to 0 (unlimited). Optional.
*/
void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et);
/*
* ->eo_extent_contig() tests whether two ocfs2_extent_recs
* are contiguous. Optional. There is no need to set it if
* ocfs2_extent_rec is used as the tree leaf.
*/
enum ocfs2_contig_type
(*eo_extent_contig)(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *ext,
struct ocfs2_extent_rec *insert_rec);
};
/*
* Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check
* in the methods.
*/
static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno);
static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters);
static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
u32 clusters);
static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);
static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
struct ocfs2_extent_tree *et,
struct buffer_head **new_eb_bh,
int blk_wanted, int *blk_given);
static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et);
static const struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
.eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk,
.eo_update_clusters = ocfs2_dinode_update_clusters,
.eo_extent_map_insert = ocfs2_dinode_extent_map_insert,
.eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate,
.eo_insert_check = ocfs2_dinode_insert_check,
.eo_sanity_check = ocfs2_dinode_sanity_check,
.eo_fill_root_el = ocfs2_dinode_fill_root_el,
};
static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno)
{
struct ocfs2_dinode *di = et->et_object;
BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
di->i_last_eb_blk = cpu_to_le64(blkno);
}
static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
struct ocfs2_dinode *di = et->et_object;
BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
return le64_to_cpu(di->i_last_eb_blk);
}
static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
struct ocfs2_dinode *di = et->et_object;
le32_add_cpu(&di->i_clusters, clusters);
spin_lock(&oi->ip_lock);
oi->ip_clusters = le32_to_cpu(di->i_clusters);
spin_unlock(&oi->ip_lock);
}
static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
ocfs2_extent_map_insert_rec(inode, rec);
}
static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
ocfs2_extent_map_trunc(inode, clusters);
}
static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb);
BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL);
mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
(oi->ip_clusters != le32_to_cpu(rec->e_cpos)),
"Device %s, asking for sparse allocation: inode %llu, "
"cpos %u, clusters %u\n",
osb->dev_str,
(unsigned long long)oi->ip_blkno,
rec->e_cpos, oi->ip_clusters);
return 0;
}
static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et)
{
struct ocfs2_dinode *di = et->et_object;
BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
BUG_ON(!OCFS2_IS_VALID_DINODE(di));
return 0;
}
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et)
{
struct ocfs2_dinode *di = et->et_object;
et->et_root_el = &di->id2.i_list;
}
static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et)
{
struct ocfs2_xattr_value_buf *vb = et->et_object;
et->et_root_el = &vb->vb_xv->xr_list;
}
static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno)
{
struct ocfs2_xattr_value_buf *vb = et->et_object;
vb->vb_xv->xr_last_eb_blk = cpu_to_le64(blkno);
}
static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
struct ocfs2_xattr_value_buf *vb = et->et_object;
return le64_to_cpu(vb->vb_xv->xr_last_eb_blk);
}
static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_xattr_value_buf *vb = et->et_object;
le32_add_cpu(&vb->vb_xv->xr_clusters, clusters);
}
static const struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = {
.eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk,
.eo_update_clusters = ocfs2_xattr_value_update_clusters,
.eo_fill_root_el = ocfs2_xattr_value_fill_root_el,
};
static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
{
struct ocfs2_xattr_block *xb = et->et_object;
et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
}
static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et)
{
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
et->et_max_leaf_clusters =
ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
}
static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno)
{
struct ocfs2_xattr_block *xb = et->et_object;
struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
xt->xt_last_eb_blk = cpu_to_le64(blkno);
}
static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
struct ocfs2_xattr_block *xb = et->et_object;
struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
return le64_to_cpu(xt->xt_last_eb_blk);
}
static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_xattr_block *xb = et->et_object;
le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
}
static const struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
.eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk,
.eo_update_clusters = ocfs2_xattr_tree_update_clusters,
.eo_fill_root_el = ocfs2_xattr_tree_fill_root_el,
.eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters,
};
static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
dx_root->dr_last_eb_blk = cpu_to_le64(blkno);
}
static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
return le64_to_cpu(dx_root->dr_last_eb_blk);
}
static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
le32_add_cpu(&dx_root->dr_clusters, clusters);
}
static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root));
return 0;
}
static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
et->et_root_el = &dx_root->dr_list;
}
static const struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = {
.eo_set_last_eb_blk = ocfs2_dx_root_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_dx_root_get_last_eb_blk,
.eo_update_clusters = ocfs2_dx_root_update_clusters,
.eo_sanity_check = ocfs2_dx_root_sanity_check,
.eo_fill_root_el = ocfs2_dx_root_fill_root_el,
};
static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et)
{
struct ocfs2_refcount_block *rb = et->et_object;
et->et_root_el = &rb->rf_list;
}
static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno)
{
struct ocfs2_refcount_block *rb = et->et_object;
rb->rf_last_eb_blk = cpu_to_le64(blkno);
}
static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
struct ocfs2_refcount_block *rb = et->et_object;
return le64_to_cpu(rb->rf_last_eb_blk);
}
static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_refcount_block *rb = et->et_object;
le32_add_cpu(&rb->rf_clusters, clusters);
}
static enum ocfs2_contig_type
ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *ext,
struct ocfs2_extent_rec *insert_rec)
{
return CONTIG_NONE;
}
static const struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = {
.eo_set_last_eb_blk = ocfs2_refcount_tree_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_refcount_tree_get_last_eb_blk,
.eo_update_clusters = ocfs2_refcount_tree_update_clusters,
.eo_fill_root_el = ocfs2_refcount_tree_fill_root_el,
.eo_extent_contig = ocfs2_refcount_tree_extent_contig,
};
static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct buffer_head *bh,
ocfs2_journal_access_func access,
void *obj,
const struct ocfs2_extent_tree_operations *ops)
{
et->et_ops = ops;
et->et_root_bh = bh;
et->et_ci = ci;
et->et_root_journal_access = access;
if (!obj)
obj = (void *)bh->b_data;
et->et_object = obj;
et->et_dealloc = NULL;
et->et_ops->eo_fill_root_el(et);
if (!et->et_ops->eo_fill_max_leaf_clusters)
et->et_max_leaf_clusters = 0;
else
et->et_ops->eo_fill_max_leaf_clusters(et);
}
void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di,
NULL, &ocfs2_dinode_et_ops);
}
void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb,
NULL, &ocfs2_xattr_tree_et_ops);
}
void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct ocfs2_xattr_value_buf *vb)
{
__ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb,
&ocfs2_xattr_value_et_ops);
}
void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr,
NULL, &ocfs2_dx_root_et_ops);
}
void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb,
NULL, &ocfs2_refcount_tree_et_ops);
}
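/*
 * Usage sketch (illustrative): an extent tree handle is stack
 * allocated, bound to its backing object, and then handed to the
 * generic b-tree code, e.g.
 *
 *	struct ocfs2_extent_tree et;
 *
 *	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
 *	ret = ocfs2_num_free_extents(&et);
 */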
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 new_last_eb_blk)
{
et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}
static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
return et->et_ops->eo_get_last_eb_blk(et);
}
static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
et->et_ops->eo_update_clusters(et, clusters);
}
static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
if (et->et_ops->eo_extent_map_insert)
et->et_ops->eo_extent_map_insert(et, rec);
}
static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et,
u32 clusters)
{
if (et->et_ops->eo_extent_map_truncate)
et->et_ops->eo_extent_map_truncate(et, clusters);
}
static inline int ocfs2_et_root_journal_access(handle_t *handle,
struct ocfs2_extent_tree *et,
int type)
{
return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh,
type);
}
static inline enum ocfs2_contig_type
ocfs2_et_extent_contig(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec,
struct ocfs2_extent_rec *insert_rec)
{
if (et->et_ops->eo_extent_contig)
return et->et_ops->eo_extent_contig(et, rec, insert_rec);
return ocfs2_extent_rec_contig(
ocfs2_metadata_cache_get_super(et->et_ci),
rec, insert_rec);
}
static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
int ret = 0;
if (et->et_ops->eo_insert_check)
ret = et->et_ops->eo_insert_check(et, rec);
return ret;
}
static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
{
int ret = 0;
if (et->et_ops->eo_sanity_check)
ret = et->et_ops->eo_sanity_check(et);
return ret;
}
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
struct ocfs2_extent_block *eb);
static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec);
/*
* Reset the actual path elements so that we can re-use the structure
* to build another path. Generally, this involves freeing the buffer
* heads.
*/
void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
int i, start = 0, depth = 0;
struct ocfs2_path_item *node;
if (keep_root)
start = 1;
for(i = start; i < path_num_items(path); i++) {
node = &path->p_node[i];
brelse(node->bh);
node->bh = NULL;
node->el = NULL;
}
/*
* Tree depth may change during truncate, or insert. If we're
* keeping the root extent list, then make sure that our path
* structure reflects the proper depth.
*/
if (keep_root)
depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
else
path_root_access(path) = NULL;
path->p_tree_depth = depth;
}
void ocfs2_free_path(struct ocfs2_path *path)
{
if (path) {
ocfs2_reinit_path(path, 0);
kfree(path);
}
}
/*
* Copy all the elements of src into dest. After this call, src can be freed
* without affecting dest.
*
* Both paths should have the same root. Any non-root elements of dest
* will be freed.
*/
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
int i;
BUG_ON(path_root_bh(dest) != path_root_bh(src));
BUG_ON(path_root_el(dest) != path_root_el(src));
BUG_ON(path_root_access(dest) != path_root_access(src));
ocfs2_reinit_path(dest, 1);
for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
dest->p_node[i].bh = src->p_node[i].bh;
dest->p_node[i].el = src->p_node[i].el;
if (dest->p_node[i].bh)
get_bh(dest->p_node[i].bh);
}
}
/*
* Make the *dest path the same as src and re-initialize src path to
* have a root only.
*/
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
int i;
BUG_ON(path_root_bh(dest) != path_root_bh(src));
BUG_ON(path_root_access(dest) != path_root_access(src));
for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
brelse(dest->p_node[i].bh);
dest->p_node[i].bh = src->p_node[i].bh;
dest->p_node[i].el = src->p_node[i].el;
src->p_node[i].bh = NULL;
src->p_node[i].el = NULL;
}
}
/*
* Insert an extent block at given index.
*
* This will not take an additional reference on eb_bh.
*/
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
struct buffer_head *eb_bh)
{
struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;
/*
* Right now, no root bh is an extent block, so this helps
* catch code errors with dinode trees. The assertion can be
* safely removed if we ever need to insert extent block
* structures at the root.
*/
BUG_ON(index == 0);
path->p_node[index].bh = eb_bh;
path->p_node[index].el = &eb->h_list;
}
static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
struct ocfs2_extent_list *root_el,
ocfs2_journal_access_func access)
{
struct ocfs2_path *path;
BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);
path = kzalloc(sizeof(*path), GFP_NOFS);
if (path) {
path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
get_bh(root_bh);
path_root_bh(path) = root_bh;
path_root_el(path) = root_el;
path_root_access(path) = access;
}
return path;
}
struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
return ocfs2_new_path(path_root_bh(path), path_root_el(path),
path_root_access(path));
}
struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
return ocfs2_new_path(et->et_root_bh, et->et_root_el,
et->et_root_journal_access);
}
/*
* Journal the buffer at depth idx. All buffers at idx > 0 are
* extent blocks and use ocfs2_journal_access_eb(); depth 0 uses
* the path's root access function.
*
* I don't like the way this function's name looks next to
* ocfs2_journal_access_path(), but I don't have a better one.
*/
int ocfs2_path_bh_journal_access(handle_t *handle,
struct ocfs2_caching_info *ci,
struct ocfs2_path *path,
int idx)
{
ocfs2_journal_access_func access = path_root_access(path);
if (!access)
access = ocfs2_journal_access;
if (idx)
access = ocfs2_journal_access_eb;
return access(handle, ci, path->p_node[idx].bh,
OCFS2_JOURNAL_ACCESS_WRITE);
}
/*
* Convenience function to journal all components in a path.
*/
int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
handle_t *handle,
struct ocfs2_path *path)
{
int i, ret = 0;
if (!path)
goto out;
for(i = 0; i < path_num_items(path); i++) {
ret = ocfs2_path_bh_journal_access(handle, ci, path, i);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
out:
return ret;
}
/*
* Return the index of the extent record which contains cluster #v_cluster.
* -1 is returned if it was not found.
*
* Should work fine on interior and exterior nodes.
*/
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
{
int ret = -1;
int i;
struct ocfs2_extent_rec *rec;
u32 rec_end, rec_start, clusters;
for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
rec_start = le32_to_cpu(rec->e_cpos);
clusters = ocfs2_rec_clusters(el, rec);
rec_end = rec_start + clusters;
if (v_cluster >= rec_start && v_cluster < rec_end) {
ret = i;
break;
}
}
return ret;
}
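/*
 * Worked example (illustrative): with records covering clusters
 * [0, 8) and [8, 24), a search for v_cluster == 10 falls inside the
 * second record and returns index 1; v_cluster == 24 is past the end
 * and returns -1.
 */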
/*
* NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
* ocfs2_extent_rec_contig only work properly against leaf nodes!
*/
static int ocfs2_block_extent_contig(struct super_block *sb,
struct ocfs2_extent_rec *ext,
u64 blkno)
{
u64 blk_end = le64_to_cpu(ext->e_blkno);
blk_end += ocfs2_clusters_to_blocks(sb,
le16_to_cpu(ext->e_leaf_clusters));
return blkno == blk_end;
}
static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
struct ocfs2_extent_rec *right)
{
u32 left_range;
left_range = le32_to_cpu(left->e_cpos) +
le16_to_cpu(left->e_leaf_clusters);
return (left_range == le32_to_cpu(right->e_cpos));
}
static enum ocfs2_contig_type
ocfs2_extent_rec_contig(struct super_block *sb,
struct ocfs2_extent_rec *ext,
struct ocfs2_extent_rec *insert_rec)
{
u64 blkno = le64_to_cpu(insert_rec->e_blkno);
/*
* Refuse to coalesce extent records with different flag
* fields - we don't want to mix unwritten extents with user
* data.
*/
if (ext->e_flags != insert_rec->e_flags)
return CONTIG_NONE;
if (ocfs2_extents_adjacent(ext, insert_rec) &&
ocfs2_block_extent_contig(sb, ext, blkno))
return CONTIG_RIGHT;
blkno = le64_to_cpu(ext->e_blkno);
if (ocfs2_extents_adjacent(insert_rec, ext) &&
ocfs2_block_extent_contig(sb, insert_rec, blkno))
return CONTIG_LEFT;
return CONTIG_NONE;
}
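/*
 * Worked example (illustrative): if ext covers clusters [0, 8) and
 * ends at block B (one past its last block), then an insert_rec that
 * starts at cluster 8 with e_blkno == B is both logically and
 * physically adjacent, so the result is CONTIG_RIGHT. The mirrored
 * shape yields CONTIG_LEFT.
 */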
/*
* NOTE: We can have pretty much any combination of contiguousness and
* appending.
*
* The usefulness of APPEND_TAIL is more in that it lets us know that
* we'll have to update the path to that leaf.
*/
enum ocfs2_append_type {
APPEND_NONE = 0,
APPEND_TAIL,
};
enum ocfs2_split_type {
SPLIT_NONE = 0,
SPLIT_LEFT,
SPLIT_RIGHT,
};
struct ocfs2_insert_type {
enum ocfs2_split_type ins_split;
enum ocfs2_append_type ins_appending;
enum ocfs2_contig_type ins_contig;
int ins_contig_index;
int ins_tree_depth;
};
struct ocfs2_merge_ctxt {
enum ocfs2_contig_type c_contig_type;
int c_has_empty_extent;
int c_split_covers_rec;
};
static int ocfs2_validate_extent_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_extent_block *eb =
(struct ocfs2_extent_block *)bh->b_data;
trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check);
if (rc) {
mlog(ML_ERROR, "Checksum failed for extent block %llu\n",
(unsigned long long)bh->b_blocknr);
return rc;
}
/*
* Errors after here are fatal.
*/
if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
rc = ocfs2_error(sb,
"Extent block #%llu has bad signature %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
eb->h_signature);
goto bail;
}
if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
rc = ocfs2_error(sb,
"Extent block #%llu has an invalid h_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(eb->h_blkno));
goto bail;
}
if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation)
rc = ocfs2_error(sb,
"Extent block #%llu has an invalid h_fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(eb->h_fs_generation));
bail:
return rc;
}
int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_block(ci, eb_blkno, &tmp,
ocfs2_validate_extent_block);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
/*
* How many free extents do we have left before we need more metadata?
*/
int ocfs2_num_free_extents(struct ocfs2_extent_tree *et)
{
int retval;
struct ocfs2_extent_list *el = NULL;
struct ocfs2_extent_block *eb;
struct buffer_head *eb_bh = NULL;
u64 last_eb_blk = 0;
el = et->et_root_el;
last_eb_blk = ocfs2_et_get_last_eb_blk(et);
if (last_eb_blk) {
retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk,
&eb_bh);
if (retval < 0) {
mlog_errno(retval);
goto bail;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
}
BUG_ON(el->l_tree_depth != 0);
retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
brelse(eb_bh);
trace_ocfs2_num_free_extents(retval);
return retval;
}
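/*
 * Usage sketch (illustrative): callers typically use the count to
 * decide whether metadata must be reserved before an insert, e.g.
 *
 *	if (ocfs2_num_free_extents(et) == 0) {
 *		... reserve extent blocks (meta_ac) before
 *		    calling into the insert path ...
 *	}
 */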
/* Expects the bhs array to already be allocated.
*
* Sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
* l_count for you.
*/
static int ocfs2_create_new_meta_bhs(handle_t *handle,
struct ocfs2_extent_tree *et,
int wanted,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head *bhs[])
{
int count, status, i;
u16 suballoc_bit_start;
u32 num_got;
u64 suballoc_loc, first_blkno;
struct ocfs2_super *osb =
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
struct ocfs2_extent_block *eb;
count = 0;
while (count < wanted) {
status = ocfs2_claim_metadata(handle,
meta_ac,
wanted - count,
&suballoc_loc,
&suballoc_bit_start,
&num_got,
&first_blkno);
if (status < 0) {
mlog_errno(status);
goto bail;
}
for(i = count; i < (num_got + count); i++) {
bhs[i] = sb_getblk(osb->sb, first_blkno);
if (bhs[i] == NULL) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]);
status = ocfs2_journal_access_eb(handle, et->et_ci,
bhs[i],
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
/* Ok, setup the minimal stuff here. */
strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot =
cpu_to_le16(meta_ac->ac_alloc_slot);
eb->h_suballoc_loc = cpu_to_le64(suballoc_loc);
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
suballoc_bit_start++;
first_blkno++;
/* We'll also be dirtied by the caller, so
* this isn't absolutely necessary. */
ocfs2_journal_dirty(handle, bhs[i]);
}
count += num_got;
}
status = 0;
bail:
if (status < 0) {
for(i = 0; i < wanted; i++) {
brelse(bhs[i]);
bhs[i] = NULL;
}
}
return status;
}
/*
* Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
*
* Returns the sum of the rightmost extent rec logical offset and
* cluster count.
*
* ocfs2_add_branch() uses this to determine what logical cluster
* value should be populated into the leftmost new branch records.
*
* ocfs2_shift_tree_depth() uses this to determine the # clusters
* value for the new topmost tree record.
*/
static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
{
int i;
i = le16_to_cpu(el->l_next_free_rec) - 1;
return le32_to_cpu(el->l_recs[i].e_cpos) +
ocfs2_rec_clusters(el, &el->l_recs[i]);
}
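/*
 * Worked example (illustrative): if the rightmost record has
 * e_cpos == 100 and covers 20 clusters, this returns 120, the first
 * logical cluster just past the current end of the tree.
 */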
/*
* Change range of the branches in the right most path according to the leaf
* extent block's rightmost record.
*/
static int ocfs2_adjust_rightmost_branch(handle_t *handle,
struct ocfs2_extent_tree *et)
{
int status;
struct ocfs2_path *path = NULL;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
path = ocfs2_new_path_from_et(et);
if (!path) {
status = -ENOMEM;
return status;
}
status = ocfs2_find_path(et->et_ci, path, UINT_MAX);
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_extend_trans(handle, path_num_items(path));
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_journal_access_path(et->et_ci, handle, path);
if (status < 0) {
mlog_errno(status);
goto out;
}
el = path_leaf_el(path);
rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
ocfs2_adjust_rightmost_records(handle, et, path, rec);
out:
ocfs2_free_path(path);
return status;
}
/*
* Add an entire tree branch to our inode. eb_bh is the extent block
* to start at, if we don't want to start the branch at the root
* structure.
*
* last_eb_bh is required as we have to update its next_leaf pointer
* for the new last extent block.
*
* The new branch will be 'empty' in the sense that every block will
* contain a single record with cluster count == 0.
*/
static int ocfs2_add_branch(handle_t *handle,
struct ocfs2_extent_tree *et,
struct buffer_head *eb_bh,
struct buffer_head **last_eb_bh,
struct ocfs2_alloc_context *meta_ac)
{
int status, new_blocks, i, block_given = 0;
u64 next_blkno, new_last_eb_blk;
struct buffer_head *bh;
struct buffer_head **new_eb_bhs = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *eb_el;
struct ocfs2_extent_list *el;
u32 new_cpos, root_end;
BUG_ON(!last_eb_bh || !*last_eb_bh);
if (eb_bh) {
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
} else
el = et->et_root_el;
/* we never add a branch to a leaf. */
BUG_ON(!el->l_tree_depth);
new_blocks = le16_to_cpu(el->l_tree_depth);
eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
root_end = ocfs2_sum_rightmost_rec(et->et_root_el);
/*
* If there is a gap between the root end and the real end
* of the rightmost leaf block, we need to remove the gap
* between new_cpos and root_end first so that the tree
* is consistent after we add a new branch (it will start
* from new_cpos).
*/
if (root_end > new_cpos) {
trace_ocfs2_adjust_rightmost_branch(
(unsigned long long)
ocfs2_metadata_cache_owner(et->et_ci),
root_end, new_cpos);
status = ocfs2_adjust_rightmost_branch(handle, et);
if (status) {
mlog_errno(status);
goto bail;
}
}
/* allocate the number of new eb blocks we need */
new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!new_eb_bhs) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
/* First, try to reuse blocks from the dealloc list, since we have
* already estimated how many extent blocks we may use.
*/
if (!ocfs2_is_dealloc_empty(et)) {
status = ocfs2_reuse_blk_from_dealloc(handle, et,
new_eb_bhs, new_blocks,
&block_given);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
BUG_ON(block_given > new_blocks);
if (block_given < new_blocks) {
BUG_ON(!meta_ac);
status = ocfs2_create_new_meta_bhs(handle, et,
new_blocks - block_given,
meta_ac,
&new_eb_bhs[block_given]);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
/* Note: new_eb_bhs[new_blocks - 1] is the block which will be
* linked with the rest of the tree.
* Conversely, new_eb_bhs[0] is the new bottommost leaf.
*
* when we leave the loop, new_last_eb_blk will point to the
* newest leaf, and next_blkno will point to the topmost extent
* block. */
next_blkno = new_last_eb_blk = 0;
for(i = 0; i < new_blocks; i++) {
bh = new_eb_bhs[i];
eb = (struct ocfs2_extent_block *) bh->b_data;
/* ocfs2_create_new_meta_bhs() should create it right! */
BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
eb_el = &eb->h_list;
status = ocfs2_journal_access_eb(handle, et->et_ci, bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
eb->h_next_leaf_blk = 0;
eb_el->l_tree_depth = cpu_to_le16(i);
eb_el->l_next_free_rec = cpu_to_le16(1);
/*
* This actually counts as an empty extent as
* c_clusters == 0
*/
eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
/*
* eb_el isn't always an interior node, but even leaf
* nodes want a zero'd flags and reserved field so
* this gets the whole 32 bits regardless of use.
*/
eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
if (!eb_el->l_tree_depth)
new_last_eb_blk = le64_to_cpu(eb->h_blkno);
ocfs2_journal_dirty(handle, bh);
next_blkno = le64_to_cpu(eb->h_blkno);
}
/* This is a bit hairy. We want to update up to three blocks
* here without leaving any of them in an inconsistent state
* in case of error. We don't have to worry about
* journal_dirty erroring as it won't unless we've aborted the
* handle (in which case we would never be here) so reserving
* the write with journal_access is all we need to do. */
status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (eb_bh) {
status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
/* Link the new branch into the rest of the tree (el will
* either be on the root_bh, or the extent block passed in). */
i = le16_to_cpu(el->l_next_free_rec);
el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
el->l_recs[i].e_int_clusters = 0;
le16_add_cpu(&el->l_next_free_rec, 1);
/* fe needs a new last extent block pointer, as does the
* next_leaf on the previously last-extent-block. */
ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);
eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
ocfs2_journal_dirty(handle, *last_eb_bh);
ocfs2_journal_dirty(handle, et->et_root_bh);
if (eb_bh)
ocfs2_journal_dirty(handle, eb_bh);
/*
* Some callers want to track the rightmost leaf so pass it
* back here.
*/
brelse(*last_eb_bh);
get_bh(new_eb_bhs[0]);
*last_eb_bh = new_eb_bhs[0];
status = 0;
bail:
if (new_eb_bhs) {
for (i = 0; i < new_blocks; i++)
brelse(new_eb_bhs[i]);
kfree(new_eb_bhs);
}
return status;
}
/*
* adds another level to the allocation tree.
* returns back the new extent block so you can add a branch to it
* after this call.
*/
static int ocfs2_shift_tree_depth(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **ret_new_eb_bh)
{
int status, i, block_given = 0;
u32 new_clusters;
struct buffer_head *new_eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *root_el;
struct ocfs2_extent_list *eb_el;
if (!ocfs2_is_dealloc_empty(et)) {
status = ocfs2_reuse_blk_from_dealloc(handle, et,
&new_eb_bh, 1,
&block_given);
} else if (meta_ac) {
status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
&new_eb_bh);
} else {
BUG();
}
if (status < 0) {
mlog_errno(status);
goto bail;
}
eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
/* ocfs2_create_new_meta_bhs() should create it right! */
BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
eb_el = &eb->h_list;
root_el = et->et_root_el;
status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* copy the root extent list data into the new extent block */
eb_el->l_tree_depth = root_el->l_tree_depth;
eb_el->l_next_free_rec = root_el->l_next_free_rec;
for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
eb_el->l_recs[i] = root_el->l_recs[i];
ocfs2_journal_dirty(handle, new_eb_bh);
status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
new_clusters = ocfs2_sum_rightmost_rec(eb_el);
/* update root_bh now */
le16_add_cpu(&root_el->l_tree_depth, 1);
root_el->l_recs[0].e_cpos = 0;
root_el->l_recs[0].e_blkno = eb->h_blkno;
root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
root_el->l_next_free_rec = cpu_to_le16(1);
/* If this is our 1st tree depth shift, then last_eb_blk
* becomes the allocated extent block */
if (root_el->l_tree_depth == cpu_to_le16(1))
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
ocfs2_journal_dirty(handle, et->et_root_bh);
*ret_new_eb_bh = new_eb_bh;
new_eb_bh = NULL;
status = 0;
bail:
brelse(new_eb_bh);
return status;
}
/*
* Should only be called when there is no space left in any of the
* leaf nodes. What we want to do is find the lowest tree depth
* non-leaf extent block with room for new records. There are three
* valid results of this search:
*
* 1) a lowest extent block is found, then we pass it back in
* *target_bh and return '0'
*
* 2) the search fails to find anything, but the root_el has room. We
* pass NULL back in *target_bh, but still return '0'
*
* 3) the search fails to find anything AND the root_el is full, in
* which case we return > 0
*
* return status < 0 indicates an error.
*/
static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
struct buffer_head **target_bh)
{
int status = 0, i;
u64 blkno;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct buffer_head *bh = NULL;
struct buffer_head *lowest_bh = NULL;
*target_bh = NULL;
el = et->et_root_el;
while(le16_to_cpu(el->l_tree_depth) > 1) {
if (le16_to_cpu(el->l_next_free_rec) == 0) {
status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has empty extent list (next_free_rec == 0)\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
goto bail;
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
blkno = le64_to_cpu(el->l_recs[i].e_blkno);
if (!blkno) {
status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has extent list where extent # %d has no physical block start\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
goto bail;
}
brelse(bh);
bh = NULL;
status = ocfs2_read_extent_block(et->et_ci, blkno, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
eb = (struct ocfs2_extent_block *) bh->b_data;
el = &eb->h_list;
if (le16_to_cpu(el->l_next_free_rec) <
le16_to_cpu(el->l_count)) {
brelse(lowest_bh);
lowest_bh = bh;
get_bh(lowest_bh);
}
}
/* If we didn't find one and the fe doesn't have any room,
* then return '1' */
el = et->et_root_el;
if (!lowest_bh && (el->l_next_free_rec == el->l_count))
status = 1;
*target_bh = lowest_bh;
bail:
brelse(bh);
return status;
}
/*
* Grow a b-tree so that it has more records.
*
* We might shift the tree depth in which case existing paths should
* be considered invalid.
*
* Tree depth after the grow is returned via *final_depth.
*
* *last_eb_bh will be updated by ocfs2_add_branch().
*/
static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
int *final_depth, struct buffer_head **last_eb_bh,
struct ocfs2_alloc_context *meta_ac)
{
int ret, shift;
struct ocfs2_extent_list *el = et->et_root_el;
int depth = le16_to_cpu(el->l_tree_depth);
struct buffer_head *bh = NULL;
BUG_ON(meta_ac == NULL && ocfs2_is_dealloc_empty(et));
shift = ocfs2_find_branch_target(et, &bh);
if (shift < 0) {
ret = shift;
mlog_errno(ret);
goto out;
}
/* We traveled all the way to the bottom of the allocation tree
* and didn't find room for any more extents - we need to add
* another tree level */
if (shift) {
BUG_ON(bh);
trace_ocfs2_grow_tree(
(unsigned long long)
ocfs2_metadata_cache_owner(et->et_ci),
depth);
/* ocfs2_shift_tree_depth will return us a buffer with
* the new extent block (so we can pass that to
* ocfs2_add_branch). */
ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
depth++;
if (depth == 1) {
/*
* Special case: we have room now if we shifted from
* tree_depth 0, so no more work needs to be done.
*
* We won't be calling add_branch, so pass
* back *last_eb_bh as the new leaf. At depth
* zero, it should always be null so there's
* no reason to brelse.
*/
BUG_ON(*last_eb_bh);
get_bh(bh);
*last_eb_bh = bh;
goto out;
}
}
/* call ocfs2_add_branch to add the final part of the tree with
* the new data. */
ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
meta_ac);
if (ret < 0)
mlog_errno(ret);
out:
if (final_depth)
*final_depth = depth;
brelse(bh);
return ret;
}
/*
* Shift every record one slot to the right, opening up slot 0. The
* slot just past next_free is overwritten, which is how callers
* discard a defunct rightmost extent record.
*/
static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
{
int next_free = le16_to_cpu(el->l_next_free_rec);
int count = le16_to_cpu(el->l_count);
unsigned int num_bytes;
BUG_ON(!next_free);
/* This will cause us to go off the end of our extent list. */
BUG_ON(next_free >= count);
num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
}
static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
struct ocfs2_extent_rec *insert_rec)
{
int i, insert_index, next_free, has_empty, num_bytes;
u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
struct ocfs2_extent_rec *rec;
next_free = le16_to_cpu(el->l_next_free_rec);
has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);
BUG_ON(!next_free);
/* The tree code before us didn't allow enough room in the leaf. */
BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);
/*
* The easiest way to approach this is to just remove the
* empty extent and temporarily decrement next_free.
*/
if (has_empty) {
/*
* If next_free was 1 (only an empty extent), this
* loop won't execute, which is fine. We still want
* the decrement above to happen.
*/
for(i = 0; i < (next_free - 1); i++)
el->l_recs[i] = el->l_recs[i+1];
next_free--;
}
/*
* Figure out what the new record index should be.
*/
for(i = 0; i < next_free; i++) {
rec = &el->l_recs[i];
if (insert_cpos < le32_to_cpu(rec->e_cpos))
break;
}
insert_index = i;
trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
has_empty, next_free,
le16_to_cpu(el->l_count));
BUG_ON(insert_index < 0);
BUG_ON(insert_index >= le16_to_cpu(el->l_count));
BUG_ON(insert_index > next_free);
/*
* No need to memmove if we're just adding to the tail.
*/
if (insert_index != next_free) {
BUG_ON(next_free >= le16_to_cpu(el->l_count));
num_bytes = next_free - insert_index;
num_bytes *= sizeof(struct ocfs2_extent_rec);
memmove(&el->l_recs[insert_index + 1],
&el->l_recs[insert_index],
num_bytes);
}
/*
* Either we had an empty extent, and need to re-increment or
* there was no empty extent on a non full rightmost leaf node,
* in which case we still need to increment.
*/
next_free++;
el->l_next_free_rec = cpu_to_le16(next_free);
/*
* Make sure none of the math above just messed up our tree.
*/
BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));
el->l_recs[insert_index] = *insert_rec;
}
static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
{
int size, num_recs = le16_to_cpu(el->l_next_free_rec);
BUG_ON(num_recs == 0);
if (ocfs2_is_empty_extent(&el->l_recs[0])) {
num_recs--;
size = num_recs * sizeof(struct ocfs2_extent_rec);
memmove(&el->l_recs[0], &el->l_recs[1], size);
memset(&el->l_recs[num_recs], 0,
sizeof(struct ocfs2_extent_rec));
el->l_next_free_rec = cpu_to_le16(num_recs);
}
}
/*
* Create an empty extent record.
*
* l_next_free_rec may be updated.
*
* If an empty extent already exists do nothing.
*/
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
int next_free = le16_to_cpu(el->l_next_free_rec);
BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
if (next_free == 0)
goto set_and_inc;
if (ocfs2_is_empty_extent(&el->l_recs[0]))
return;
mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
"Asked to create an empty extent in a full list:\n"
"count = %u, tree depth = %u",
le16_to_cpu(el->l_count),
le16_to_cpu(el->l_tree_depth));
ocfs2_shift_records_right(el);
set_and_inc:
le16_add_cpu(&el->l_next_free_rec, 1);
memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}
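/*
 * Before/after example (illustrative): a leaf holding [A, B, C] with
 * next_free == 3 becomes [empty, A, B, C] with next_free == 4. If
 * l_recs[0] is already empty, the list is left untouched.
 */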
/*
* For a rotation which involves two leaf nodes, the "root node" is
* the lowest level tree node which contains a path to both leaves. This
* resulting set of information can be used to form a complete "subtree".
*
* This function is passed two full paths from the dinode down to a
* pair of adjacent leaves. Its task is to figure out which path
* index contains the subtree root - this can be the root index itself
* in a worst-case rotation.
*
* The array index of the subtree root is passed back.
*/
int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
struct ocfs2_path *left,
struct ocfs2_path *right)
{
int i = 0;
/*
* Check that the caller passed in two paths from the same tree.
*/
BUG_ON(path_root_bh(left) != path_root_bh(right));
do {
i++;
/*
* The caller didn't pass two adjacent paths.
*/
mlog_bug_on_msg(i > left->p_tree_depth,
"Owner %llu, left depth %u, right depth %u\n"
"left leaf blk %llu, right leaf blk %llu\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
left->p_tree_depth, right->p_tree_depth,
(unsigned long long)path_leaf_bh(left)->b_blocknr,
(unsigned long long)path_leaf_bh(right)->b_blocknr);
} while (left->p_node[i].bh->b_blocknr ==
right->p_node[i].bh->b_blocknr);
return i - 1;
}
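/*
 * Worked example (illustrative): in a depth-2 tree where the left and
 * right paths share the root node (index 0) but point at different
 * extent blocks at index 1, the loop exits with i == 1 and the
 * function returns 0: the root itself is the subtree root, which is
 * the worst case mentioned above.
 */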
typedef void (path_insert_t)(void *, struct buffer_head *);
/*
* Traverse a btree path in search of cpos, starting at root_el.
*
* This code can be called with a cpos larger than the tree, in which
* case it will return the rightmost path.
*/
static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
struct ocfs2_extent_list *root_el, u32 cpos,
path_insert_t *func, void *data)
{
int i, ret = 0;
u32 range;
u64 blkno;
struct buffer_head *bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
el = root_el;
while (el->l_tree_depth) {
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has empty extent list at depth %u\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
le16_to_cpu(el->l_tree_depth));
ret = -EROFS;
goto out;
}
for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
rec = &el->l_recs[i];
/*
* In the case that cpos is off the allocation
* tree, this should just wind up returning the
* rightmost record.
*/
range = le32_to_cpu(rec->e_cpos) +
ocfs2_rec_clusters(el, rec);
if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
break;
}
blkno = le64_to_cpu(el->l_recs[i].e_blkno);
if (blkno == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has bad blkno in extent list at depth %u (index %d)\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
le16_to_cpu(el->l_tree_depth), i);
ret = -EROFS;
goto out;
}
brelse(bh);
bh = NULL;
ret = ocfs2_read_extent_block(ci, blkno, &bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) bh->b_data;
el = &eb->h_list;
if (le16_to_cpu(el->l_next_free_rec) >
le16_to_cpu(el->l_count)) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has bad count in extent list at block %llu (next free=%u, count=%u)\n",
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)bh->b_blocknr,
le16_to_cpu(el->l_next_free_rec),
le16_to_cpu(el->l_count));
ret = -EROFS;
goto out;
}
if (func)
func(data, bh);
}
out:
/*
* Catch any trailing bh that the loop didn't handle.
*/
brelse(bh);
return ret;
}
/*
* Given an initialized path (that is, it has a valid root extent
* list), this function will traverse the btree in search of the path
* which would contain cpos.
*
* The path traveled is recorded in the path structure.
*
* Note that this will not do any comparisons on leaf node extent
* records, so it will work fine in the case that we just added a tree
* branch.
*/
struct find_path_data {
int index;
struct ocfs2_path *path;
};
static void find_path_ins(void *data, struct buffer_head *bh)
{
struct find_path_data *fp = data;
get_bh(bh);
ocfs2_path_insert_eb(fp->path, fp->index, bh);
fp->index++;
}
int ocfs2_find_path(struct ocfs2_caching_info *ci,
struct ocfs2_path *path, u32 cpos)
{
struct find_path_data data;
data.index = 1;
data.path = path;
return __ocfs2_find_path(ci, path_root_el(path), cpos,
find_path_ins, &data);
}
static void find_leaf_ins(void *data, struct buffer_head *bh)
{
struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data;
struct ocfs2_extent_list *el = &eb->h_list;
struct buffer_head **ret = data;
/* We want to retain only the leaf block. */
if (le16_to_cpu(el->l_tree_depth) == 0) {
get_bh(bh);
*ret = bh;
}
}
/*
* Find the leaf block in the tree which would contain cpos. No
* checking of the actual leaf is done.
*
* Some paths want to call this instead of allocating a path structure
* and calling ocfs2_find_path().
*
* This function doesn't handle non btree extent lists.
*/
int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
struct ocfs2_extent_list *root_el, u32 cpos,
struct buffer_head **leaf_bh)
{
int ret;
struct buffer_head *bh = NULL;
ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
if (ret) {
mlog_errno(ret);
goto out;
}
*leaf_bh = bh;
out:
return ret;
}
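/*
 * Usage sketch (illustrative):
 *
 *	struct buffer_head *leaf_bh = NULL;
 *	struct ocfs2_extent_list *el;
 *
 *	ret = ocfs2_find_leaf(ci, root_el, cpos, &leaf_bh);
 *	if (!ret) {
 *		el = &((struct ocfs2_extent_block *)leaf_bh->b_data)->h_list;
 *		... search el, then brelse(leaf_bh) ...
 *	}
 */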
/*
* Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
*
* Basically, we've moved stuff around at the bottom of the tree and
* we need to fix up the extent records above the changes to reflect
* the new changes.
*
* left_rec: the record on the left.
* right_rec: the record to the right of left_rec
* right_child_el: is the child list pointed to by right_rec
*
* By definition, this only works on interior nodes.
*/
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
struct ocfs2_extent_rec *right_rec,
struct ocfs2_extent_list *right_child_el)
{
u32 left_clusters, right_end;
/*
* Interior nodes never have holes. Their cpos is the cpos of
* the leftmost record in their child list. Their cluster
* count covers the full theoretical range of their child list
* - the range between their cpos and the cpos of the record
* immediately to their right.
*/
left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
BUG_ON(right_child_el->l_tree_depth);
BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
}
left_clusters -= le32_to_cpu(left_rec->e_cpos);
left_rec->e_int_clusters = cpu_to_le32(left_clusters);
/*
* Calculate the rightmost cluster count boundary before
* moving cpos - we will need to adjust clusters after
* updating e_cpos to keep the same highest cluster count.
*/
right_end = le32_to_cpu(right_rec->e_cpos);
right_end += le32_to_cpu(right_rec->e_int_clusters);
right_rec->e_cpos = left_rec->e_cpos;
le32_add_cpu(&right_rec->e_cpos, left_clusters);
right_end -= le32_to_cpu(right_rec->e_cpos);
right_rec->e_int_clusters = cpu_to_le32(right_end);
}
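/*
 * Worked example (illustrative): suppose left_rec starts at cpos 0,
 * right_rec covers [40, 100) (e_cpos 40, e_int_clusters 60), and a
 * rotation moved the right child's leftmost record so that it now
 * starts at cpos 50. Then left_clusters = 50 - 0 = 50, right_rec's
 * e_cpos becomes 50, and its e_int_clusters becomes 100 - 50 = 50,
 * preserving the overall boundary at cluster 100.
 */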
/*
* Adjust the adjacent root node records involved in a
* rotation. left_el_blkno is passed in as a key so that we can easily
* find its index in the root list.
*/
static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
struct ocfs2_extent_list *left_el,
struct ocfs2_extent_list *right_el,
u64 left_el_blkno)
{
int i;
BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
le16_to_cpu(left_el->l_tree_depth));
for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
break;
}
/*
* The path walking code should have never returned a root and
* two paths which are not adjacent.
*/
BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
ocfs2_adjust_adjacent_records(&root_el->l_recs[i],
&root_el->l_recs[i + 1], right_el);
}
/*
* We've changed a leaf block (in right_path) and need to reflect that
* change back up the subtree.
*
* This happens in multiple places:
* - When we've moved an extent record from the left path leaf to the right
* path leaf to make room for an empty extent in the left path leaf.
* - When our insert into the right path leaf is at the leftmost edge
* and requires an update of the path immediately to its left. This
* can occur at the end of some types of rotation and appending inserts.
* - When we've adjusted the last extent record in the left path leaf and the
* 1st extent record in the right path leaf during cross extent block merge.
*/
static void ocfs2_complete_edge_insert(handle_t *handle,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index)
{
int i, idx;
struct ocfs2_extent_list *el, *left_el, *right_el;
struct ocfs2_extent_rec *left_rec, *right_rec;
struct buffer_head *root_bh;
/*
* Update the counts and position values within all the
* interior nodes to reflect the leaf rotation we just did.
*
* The root node is handled below the loop.
*
* We begin the loop with right_el and left_el pointing to the
* leaf lists and work our way up.
*
* NOTE: within this loop, left_el and right_el always refer
* to the *child* lists.
*/
left_el = path_leaf_el(left_path);
right_el = path_leaf_el(right_path);
for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
trace_ocfs2_complete_edge_insert(i);
/*
* One nice property of knowing that all of these
* nodes are below the root is that we only deal with
* the leftmost right node record and the rightmost
* left node record.
*/
el = left_path->p_node[i].el;
idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
left_rec = &el->l_recs[idx];
el = right_path->p_node[i].el;
right_rec = &el->l_recs[0];
ocfs2_adjust_adjacent_records(left_rec, right_rec, right_el);
ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
/*
* Setup our list pointers now so that the current
* parents become children in the next iteration.
*/
left_el = left_path->p_node[i].el;
right_el = right_path->p_node[i].el;
}
/*
* At the root node, adjust the two adjacent records which
* begin our path to the leaves.
*/
el = left_path->p_node[subtree_index].el;
left_el = left_path->p_node[subtree_index + 1].el;
right_el = right_path->p_node[subtree_index + 1].el;
ocfs2_adjust_root_records(el, left_el, right_el,
left_path->p_node[subtree_index + 1].bh->b_blocknr);
root_bh = left_path->p_node[subtree_index].bh;
ocfs2_journal_dirty(handle, root_bh);
}
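/*
 * Rotate the records in the subtree rooted at subtree_index right by
 * one: create an empty extent in slot 0 of the right leaf, move the
 * rightmost record of the left leaf into it, and leave an empty
 * extent in slot 0 of the left leaf. The interior nodes are then
 * fixed up via ocfs2_complete_edge_insert().
 *
 * The left leaf must be completely full - anything else is treated
 * as on-disk corruption. If it already starts with an empty extent,
 * there is nothing to do.
 */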
static int ocfs2_rotate_subtree_right(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index)
{
int ret, i;
struct buffer_head *right_leaf_bh;
struct buffer_head *left_leaf_bh = NULL;
struct buffer_head *root_bh;
struct ocfs2_extent_list *right_el, *left_el;
struct ocfs2_extent_rec move_rec;
left_leaf_bh = path_leaf_bh(left_path);
left_el = path_leaf_el(left_path);
if (left_el->l_next_free_rec != left_el->l_count) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Inode %llu has non-full interior leaf node %llu (next free = %u)\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)left_leaf_bh->b_blocknr,
le16_to_cpu(left_el->l_next_free_rec));
return -EROFS;
}
/*
* This extent block may already have an empty record, so we
* return early if so.
*/
if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
return 0;
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
}
for (i = subtree_index + 1; i < path_num_items(right_path); i++) {
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
}
right_leaf_bh = path_leaf_bh(right_path);
right_el = path_leaf_el(right_path);
/* This is a code error, not a disk corruption. */
mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
"because rightmost leaf block %llu is empty\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)right_leaf_bh->b_blocknr);
ocfs2_create_empty_extent(right_el);
ocfs2_journal_dirty(handle, right_leaf_bh);
/* Do the copy now. */
i = le16_to_cpu(left_el->l_next_free_rec) - 1;
move_rec = left_el->l_recs[i];
right_el->l_recs[0] = move_rec;
/*
* Clear out the record we just copied and shift everything
* over, leaving an empty extent in the left leaf.
*
* We temporarily subtract from next_free_rec so that the
* shift will lose the tail record (which is now defunct).
*/
le16_add_cpu(&left_el->l_next_free_rec, -1);
ocfs2_shift_records_right(left_el);
memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
le16_add_cpu(&left_el->l_next_free_rec, 1);
ocfs2_journal_dirty(handle, left_leaf_bh);
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
out:
return ret;
}
/*
* Given a full path, determine what cpos value would return us a path
* containing the leaf immediately to the left of the current one.
*
* Will set *cpos to zero if the path passed in is already the leftmost path.
*/
int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
struct ocfs2_path *path, u32 *cpos)
{
int i, j, ret = 0;
u64 blkno;
struct ocfs2_extent_list *el;
BUG_ON(path->p_tree_depth == 0);
*cpos = 0;
blkno = path_leaf_bh(path)->b_blocknr;
/* Start at the tree node just above the leaf and work our way up. */
i = path->p_tree_depth - 1;
while (i >= 0) {
el = path->p_node[i].el;
/*
* Find the extent record just before the one in our
* path.
*/
for (j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
if (j == 0) {
if (i == 0) {
/*
* We've determined that the
* path specified is already
* the leftmost one - return a
* cpos of zero.
*/
goto out;
}
/*
* The leftmost record points to our
* leaf - we need to travel up the
* tree one level.
*/
goto next_node;
}
*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
*cpos = *cpos + ocfs2_rec_clusters(el,
&el->l_recs[j - 1]);
*cpos = *cpos - 1;
goto out;
}
}
/*
* If we got here, we never found a valid node where
* the tree indicated one should be.
*/
ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
(unsigned long long)blkno);
ret = -EROFS;
goto out;
next_node:
blkno = path->p_node[i].bh->b_blocknr;
i--;
}
out:
return ret;
}
/*
* Extend the transaction by enough credits to complete the rotation,
* and still leave at least the original number of credits allocated
* to this transaction.
*/
static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int op_credits,
struct ocfs2_path *path)
{
int ret = 0;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
if (jbd2_handle_buffer_credits(handle) < credits)
ret = ocfs2_extend_trans(handle,
credits - jbd2_handle_buffer_credits(handle));
return ret;
}
/*
* Trap the case where we're inserting into the theoretical range past
* the _actual_ left leaf range. Otherwise, we'll rotate a record
* whose cpos is less than ours into the right leaf.
*
* It's only necessary to look at the rightmost record of the left
* leaf because the logic that calls us should ensure that the
* theoretical ranges in the path components above the leaves are
* correct.
*/
static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
u32 insert_cpos)
{
struct ocfs2_extent_list *left_el;
struct ocfs2_extent_rec *rec;
int next_free;
left_el = path_leaf_el(left_path);
next_free = le16_to_cpu(left_el->l_next_free_rec);
rec = &left_el->l_recs[next_free - 1];
if (insert_cpos > le32_to_cpu(rec->e_cpos))
return 1;
return 0;
}
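/*
 * Return 1 if cpos falls within the range covered by the first
 * non-empty record of the list, 0 otherwise.
 */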
static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
{
int next_free = le16_to_cpu(el->l_next_free_rec);
unsigned int range;
struct ocfs2_extent_rec *rec;
if (next_free == 0)
return 0;
rec = &el->l_recs[0];
if (ocfs2_is_empty_extent(rec)) {
/* Empty list. */
if (next_free == 1)
return 0;
rec = &el->l_recs[1];
}
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
return 1;
return 0;
}
/*
* Rotate all the records in a btree right one record, starting at insert_cpos.
*
* The path to the rightmost leaf should be passed in.
*
* The array is assumed to be large enough to hold an entire path (tree depth).
*
* Upon successful return from this function:
*
* - The 'right_path' array will contain a path to the leaf block
* whose range contains insert_cpos.
* - That leaf block will have a single empty extent in list index 0.
* - In the case that the rotation requires a post-insert update,
* *ret_left_path will contain a valid path which can be passed to
* ocfs2_insert_path().
*/
static int ocfs2_rotate_tree_right(handle_t *handle,
struct ocfs2_extent_tree *et,
enum ocfs2_split_type split,
u32 insert_cpos,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
int ret, start, orig_credits = jbd2_handle_buffer_credits(handle);
u32 cpos;
struct ocfs2_path *left_path = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
*ret_left_path = NULL;
left_path = ocfs2_new_path_from_path(right_path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_rotate_tree_right(
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
insert_cpos, cpos);
/*
* What we want to do here is:
*
* 1) Start with the rightmost path.
*
* 2) Determine a path to the leaf block directly to the left
* of that leaf.
*
* 3) Determine the 'subtree root' - the lowest level tree node
* which contains a path to both leaves.
*
* 4) Rotate the subtree.
*
* 5) Find the next subtree by considering the left path to be
* the new right path.
*
* The check at the top of this while loop also accepts
* insert_cpos == cpos because cpos is only a _theoretical_
* value to get us the left path - insert_cpos might very well
* be filling that hole.
*
* Stop at a cpos of '0' because we either started at the
* leftmost branch (i.e., a tree with one branch and a
* rotation inside of it), or we've gone as far as we can in
* rotating subtrees.
*/
while (cpos && insert_cpos <= cpos) {
trace_ocfs2_rotate_tree_right(
(unsigned long long)
ocfs2_metadata_cache_owner(et->et_ci),
insert_cpos, cpos);
ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
mlog_bug_on_msg(path_leaf_bh(left_path) ==
path_leaf_bh(right_path),
"Owner %llu: error during insert of %u "
"(left path cpos %u) results in two identical "
"paths ending at %llu\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
insert_cpos, cpos,
(unsigned long long)
path_leaf_bh(left_path)->b_blocknr);
if (split == SPLIT_NONE &&
ocfs2_rotate_requires_path_adjustment(left_path,
insert_cpos)) {
/*
* We've rotated the tree as much as we
* should. The rest is up to
* ocfs2_insert_path() to complete, after the
* record insertion. We indicate this
* situation by returning the left path.
*
* The reason we don't adjust the records here
* before the record insert is that an error
* later might break the rule where a parent
* record e_cpos will reflect the actual
* e_cpos of the 1st nonempty record of the
* child list.
*/
*ret_left_path = left_path;
goto out_ret_path;
}
start = ocfs2_find_subtree_root(et, left_path, right_path);
trace_ocfs2_rotate_subtree(start,
(unsigned long long)
right_path->p_node[start].bh->b_blocknr,
right_path->p_tree_depth);
ret = ocfs2_extend_rotate_transaction(handle, start,
orig_credits, right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_subtree_right(handle, et, left_path,
right_path, start);
if (ret) {
mlog_errno(ret);
goto out;
}
if (split != SPLIT_NONE &&
ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
insert_cpos)) {
/*
* A rotate moves the rightmost left leaf
* record over to the leftmost right leaf
* slot. If we're doing an extent split
* instead of a real insert, then we have to
* check that the extent to be split wasn't
* just moved over. If it was, then we can
* exit here, passing left_path back -
* ocfs2_split_extent() is smart enough to
* search both leaves.
*/
*ret_left_path = left_path;
goto out_ret_path;
}
/*
* There is no need to re-read the next right path
* as we know that it'll be our current left
* path. Optimize by copying values instead.
*/
ocfs2_mv_path(right_path, left_path);
ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
}
out:
ocfs2_free_path(left_path);
out_ret_path:
return ret;
}
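/*
 * The rightmost record of every node on the rightmost path must
 * cover the full range of the tree below it. Recompute
 * e_int_clusters on each of those edge records from the end of the
 * last extent in the rightmost leaf.
 */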
static int ocfs2_update_edge_lengths(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path)
{
int i, idx, ret;
struct ocfs2_extent_rec *rec;
struct ocfs2_extent_list *el;
struct ocfs2_extent_block *eb;
u32 range;
ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
}
/* Path should always be rightmost. */
eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
BUG_ON(eb->h_next_leaf_blk != 0ULL);
el = &eb->h_list;
BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
idx = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[idx];
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
for (i = 0; i < path->p_tree_depth; i++) {
el = path->p_node[i].el;
idx = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[idx];
rec->e_int_clusters = cpu_to_le32(range);
le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));
ocfs2_journal_dirty(handle, path->p_node[i].bh);
}
out:
return ret;
}
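/*
 * Unlink the extent blocks on 'path' from level unlink_start down to
 * the leaf: zero their record lists, queue them in 'dealloc' for
 * later freeing and drop them from the metadata cache. A block which
 * unexpectedly still holds records is reported and only dropped from
 * the cache.
 */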
static void ocfs2_unlink_path(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_path *path, int unlink_start)
{
int ret, i;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct buffer_head *bh;
for (i = unlink_start; i < path_num_items(path); i++) {
bh = path->p_node[i].bh;
eb = (struct ocfs2_extent_block *)bh->b_data;
/*
* Not all nodes might have had their final count
* decremented by the caller - handle this here.
*/
el = &eb->h_list;
if (le16_to_cpu(el->l_next_free_rec) > 1) {
mlog(ML_ERROR,
"Inode %llu, attempted to remove extent block "
"%llu with %u records\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)le64_to_cpu(eb->h_blkno),
le16_to_cpu(el->l_next_free_rec));
ocfs2_journal_dirty(handle, bh);
ocfs2_remove_from_cache(et->et_ci, bh);
continue;
}
el->l_next_free_rec = 0;
memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
ocfs2_journal_dirty(handle, bh);
ret = ocfs2_cache_extent_block_free(dealloc, eb);
if (ret)
mlog_errno(ret);
ocfs2_remove_from_cache(et->et_ci, bh);
}
}
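/*
 * Detach the right branch below subtree_index: clear its record in
 * the subtree root, terminate the leaf chain at left_path's leaf and
 * unlink the extent blocks on the right path below the root.
 */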
static void ocfs2_unlink_subtree(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int i;
struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
struct ocfs2_extent_block *eb;
eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;
for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
if (root_el->l_recs[i].e_blkno == eb->h_blkno)
break;
BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));
memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
le16_add_cpu(&root_el->l_next_free_rec, -1);
eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
eb->h_next_leaf_blk = 0;
ocfs2_journal_dirty(handle, root_bh);
ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
ocfs2_unlink_path(handle, et, dealloc, right_path,
subtree_index + 1);
}
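/*
 * Rotate the subtree left by one record: fill the empty extent at
 * the head of the left leaf with the 1st record of the right leaf.
 * If this leaves the rightmost leaf completely empty, the whole
 * right branch is unlinked and *deleted is set to 1.
 *
 * Returns -EAGAIN if the right leaf is not the rightmost one yet
 * starts with an empty extent - the caller has to fix that up before
 * we can proceed.
 */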
static int ocfs2_rotate_subtree_left(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int *deleted)
{
int ret, i, del_right_subtree = 0, right_has_empty = 0;
struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
struct ocfs2_extent_block *eb;
*deleted = 0;
right_leaf_el = path_leaf_el(right_path);
left_leaf_el = path_leaf_el(left_path);
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
return 0;
eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
/*
* It's legal for us to proceed if the right leaf is
* the rightmost one and it has an empty extent. There
* are two cases to handle - whether the leaf will be
* empty after removal or not. If the leaf isn't empty
* then just remove the empty extent up front. The
* next block will handle empty leaves by flagging
* them for unlink.
*
* Non-rightmost leaves will return -EAGAIN and the
* caller can manually move the subtree and retry.
*/
if (eb->h_next_leaf_blk != 0ULL)
return -EAGAIN;
if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
ret = ocfs2_journal_access_eb(handle, et->et_ci,
path_leaf_bh(right_path),
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_remove_empty_extent(right_leaf_el);
} else
right_has_empty = 1;
}
if (eb->h_next_leaf_blk == 0ULL &&
le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
/*
* We have to update i_last_eb_blk during the metadata
* delete.
*/
ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
del_right_subtree = 1;
}
/*
* Getting here with an empty extent in the right path implies
* that it's the rightmost path and will be deleted.
*/
BUG_ON(right_has_empty && !del_right_subtree);
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
}
for (i = subtree_index + 1; i < path_num_items(right_path); i++) {
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (!right_has_empty) {
/*
* Only do this if we're moving a real
* record. Otherwise, the action is delayed until
* after removal of the right path in which case we
* can do a simple shift to remove the empty extent.
*/
ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
memset(&right_leaf_el->l_recs[0], 0,
sizeof(struct ocfs2_extent_rec));
}
if (eb->h_next_leaf_blk == 0ULL) {
/*
* Move recs over to get rid of empty extent, decrease
* next_free. This is allowed to remove the last
* extent in our leaf (setting l_next_free_rec to
* zero) - the delete code below won't care.
*/
ocfs2_remove_empty_extent(right_leaf_el);
}
ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
if (del_right_subtree) {
ocfs2_unlink_subtree(handle, et, left_path, right_path,
subtree_index, dealloc);
ret = ocfs2_update_edge_lengths(handle, et, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
/*
* Removal of the extent in the left leaf was skipped
* above so we could delete the right path
* 1st.
*/
if (right_has_empty)
ocfs2_remove_empty_extent(left_leaf_el);
ocfs2_journal_dirty(handle, et_root_bh);
*deleted = 1;
} else
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
out:
return ret;
}
/*
* Given a full path, determine what cpos value would return us a path
* containing the leaf immediately to the right of the current one.
*
* Will set *cpos to zero if the path passed in is already the rightmost path.
*
* This looks similar, but is subtly different to
* ocfs2_find_cpos_for_left_leaf().
*/
int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
struct ocfs2_path *path, u32 *cpos)
{
int i, j, ret = 0;
u64 blkno;
struct ocfs2_extent_list *el;
*cpos = 0;
if (path->p_tree_depth == 0)
return 0;
blkno = path_leaf_bh(path)->b_blocknr;
/* Start at the tree node just above the leaf and work our way up. */
i = path->p_tree_depth - 1;
while (i >= 0) {
int next_free;
el = path->p_node[i].el;
/*
* Find the extent record just after the one in our
* path.
*/
next_free = le16_to_cpu(el->l_next_free_rec);
for (j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
if (j == (next_free - 1)) {
if (i == 0) {
/*
* We've determined that the
* path specified is already
* the rightmost one - return a
* cpos of zero.
*/
goto out;
}
/*
* The rightmost record points to our
* leaf - we need to travel up the
* tree one level.
*/
goto next_node;
}
*cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
goto out;
}
}
/*
* If we got here, we never found a valid node where
* the tree indicated one should be.
*/
ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
(unsigned long long)blkno);
ret = -EROFS;
goto out;
next_node:
blkno = path->p_node[i].bh->b_blocknr;
i--;
}
out:
return ret;
}
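/*
 * The rightmost leaf has no right neighbor to rotate records in
 * from, so an empty extent at its head can simply be shifted out of
 * the list.
 */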
static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path)
{
int ret;
struct buffer_head *bh = path_leaf_bh(path);
struct ocfs2_extent_list *el = path_leaf_el(path);
if (!ocfs2_is_empty_extent(&el->l_recs[0]))
return 0;
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
path_num_items(path) - 1);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_remove_empty_extent(el);
ocfs2_journal_dirty(handle, bh);
out:
return ret;
}
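/*
 * Worker for ocfs2_rotate_tree_left(). Starting at 'path', rotate
 * each subtree to the right of it left by one record until the empty
 * extent has been pushed off the rightmost edge of the tree or the
 * rightmost branch has been deleted.
 *
 * On -EAGAIN, *empty_extent_path holds the right path whose empty
 * extent blocked the rotation; the caller must clean it up and
 * restart from there.
 */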
static int __ocfs2_rotate_tree_left(handle_t *handle,
struct ocfs2_extent_tree *et,
int orig_credits,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_path **empty_extent_path)
{
int ret, subtree_root, deleted;
u32 right_cpos;
struct ocfs2_path *left_path = NULL;
struct ocfs2_path *right_path = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
if (!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])))
return 0;
*empty_extent_path = NULL;
ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
left_path = ocfs2_new_path_from_path(path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ocfs2_cp_path(left_path, path);
right_path = ocfs2_new_path_from_path(path);
if (!right_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
while (right_cpos) {
ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
subtree_root = ocfs2_find_subtree_root(et, left_path,
right_path);
trace_ocfs2_rotate_subtree(subtree_root,
(unsigned long long)
right_path->p_node[subtree_root].bh->b_blocknr,
right_path->p_tree_depth);
ret = ocfs2_extend_rotate_transaction(handle, 0,
orig_credits, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Caller might still want to make changes to the
* tree root, so re-add it to the journal here.
*/
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_subtree_left(handle, et, left_path,
right_path, subtree_root,
dealloc, &deleted);
if (ret == -EAGAIN) {
/*
* The rotation has to temporarily stop due to
* the right subtree having an empty
* extent. Pass it back to the caller for a
* fixup.
*/
*empty_extent_path = right_path;
right_path = NULL;
goto out;
}
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The subtree rotate might have removed records on
* the rightmost edge. If so, then rotation is
* complete.
*/
if (deleted)
break;
ocfs2_mv_path(left_path, right_path);
ret = ocfs2_find_cpos_for_right_leaf(sb, left_path,
&right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
}
out:
ocfs2_free_path(right_path);
ocfs2_free_path(left_path);
return ret;
}
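/*
 * Remove the entire rightmost branch of the tree. If a path to the
 * left exists, it becomes the new rightmost path and the tree's
 * last_eb_blk is updated accordingly; otherwise this was the only
 * branch and the root is reverted to holding in-line extents.
 */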
static int ocfs2_remove_rightmost_path(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, subtree_index;
u32 cpos;
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
ret = ocfs2_et_sanity_check(et);
if (ret)
goto out;
ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
if (cpos) {
/*
* We have a path to the left of this one - it needs
* an update too.
*/
left_path = ocfs2_new_path_from_path(path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
subtree_index = ocfs2_find_subtree_root(et, left_path, path);
ocfs2_unlink_subtree(handle, et, left_path, path,
subtree_index, dealloc);
ret = ocfs2_update_edge_lengths(handle, et, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
} else {
/*
* 'path' is also the leftmost path which
* means it must be the only one. This gets
* handled differently because we want to
* revert the root back to having extents
* in-line.
*/
ocfs2_unlink_path(handle, et, dealloc, path, 1);
el = et->et_root_el;
el->l_tree_depth = 0;
el->l_next_free_rec = 0;
memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
ocfs2_et_set_last_eb_blk(et, 0);
}
ocfs2_journal_dirty(handle, path_root_bh(path));
out:
ocfs2_free_path(left_path);
return ret;
}
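/*
 * Start a transaction of the appropriate size and remove the
 * rightmost path under it, for callers which do not already hold an
 * open handle.
 */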
static int ocfs2_remove_rightmost_empty_extent(struct ocfs2_super *osb,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
handle_t *handle;
int ret;
int credits = path->p_tree_depth * 2 + 1;
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
return ret;
}
ret = ocfs2_remove_rightmost_path(handle, et, path, dealloc);
if (ret)
mlog_errno(ret);
ocfs2_commit_trans(osb, handle);
return ret;
}
/*
* Left rotation of btree records.
*
* In many ways, this is (unsurprisingly) the opposite of right
* rotation. We start at some non-rightmost path containing an empty
* extent in the leaf block. The code works its way to the rightmost
* path by rotating records to the left in every subtree.
*
* This is used by any code which reduces the number of extent records
* in a leaf. After removal, an empty record should be placed in the
* leftmost list position.
*
* This won't handle a length update of the rightmost path records if
* the rightmost tree leaf record is removed, so the caller is
* responsible for detecting and correcting that.
*/
static int ocfs2_rotate_tree_left(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, orig_credits = jbd2_handle_buffer_credits(handle);
struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
el = path_leaf_el(path);
if (!ocfs2_is_empty_extent(&el->l_recs[0]))
return 0;
if (path->p_tree_depth == 0) {
rightmost_no_delete:
/*
* Inline extents. This is trivially handled, so do
* it up front.
*/
ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path);
if (ret)
mlog_errno(ret);
goto out;
}
/*
* Handle the rightmost branch now. There are several cases:
* 1) simple rotation leaving records in there. That's trivial.
* 2) rotation requiring a branch delete - there are no more
* records left. Two cases of this:
* a) There are branches to the left.
* b) This is also the leftmost (the only) branch.
*
* 1) is handled via ocfs2_rotate_rightmost_leaf_left()
* 2a) we need the left branch so that we can update it with the unlink
* 2b) we need to bring the root back to inline extents.
*/
eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
el = &eb->h_list;
if (eb->h_next_leaf_blk == 0) {
/*
* This gets a bit tricky if we're going to delete the
* rightmost path. Get the other cases out of the way
* 1st.
*/
if (le16_to_cpu(el->l_next_free_rec) > 1)
goto rightmost_no_delete;
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ret = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has empty extent block at %llu\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)le64_to_cpu(eb->h_blkno));
goto out;
}
/*
* XXX: The caller cannot trust "path" any more after
* this as it will have been deleted. What do we do?
*
* In theory the rotate-for-merge code will never get
* here because it'll always ask for a rotate in a
* nonempty list.
*/
ret = ocfs2_remove_rightmost_path(handle, et, path,
dealloc);
if (ret)
mlog_errno(ret);
goto out;
}
/*
* Now we can loop, remembering the path we get from -EAGAIN
* and restarting from there.
*/
try_rotate:
ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
dealloc, &restart_path);
if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out;
}
while (ret == -EAGAIN) {
tmp_path = restart_path;
restart_path = NULL;
ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
tmp_path, dealloc,
&restart_path);
if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out;
}
ocfs2_free_path(tmp_path);
tmp_path = NULL;
if (ret == 0)
goto try_rotate;
}
out:
ocfs2_free_path(tmp_path);
ocfs2_free_path(restart_path);
return ret;
}
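/*
 * A merge may consume all of the clusters in the record at 'index'.
 * An empty extent may only live in the 1st array position, so if
 * that happened, shift the preceding records over and zero out
 * slot 0.
 */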
static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
int index)
{
struct ocfs2_extent_rec *rec = &el->l_recs[index];
unsigned int size;
if (rec->e_leaf_clusters == 0) {
/*
* We consumed all of the merged-from record. An empty
* extent cannot exist anywhere but the 1st array
* position, so move things over if the merged-from
* record doesn't occupy that position.
*
* This creates a new empty extent so the caller
* should be smart enough to have removed any existing
* ones.
*/
if (index > 0) {
BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
size = index * sizeof(struct ocfs2_extent_rec);
memmove(&el->l_recs[1], &el->l_recs[0], size);
}
/*
* Always memset - the caller doesn't check whether it
* created an empty extent, so there could be junk in
* the other fields.
*/
memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}
}
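/*
 * Look up the path to the leaf immediately to the right of
 * left_path. left_path must be a completely full, non-rightmost
 * leaf.
 */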
static int ocfs2_get_right_path(struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path **ret_right_path)
{
int ret;
u32 right_cpos;
struct ocfs2_path *right_path = NULL;
struct ocfs2_extent_list *left_el;
*ret_right_path = NULL;
/* This function shouldn't be called for non-trees. */
BUG_ON(left_path->p_tree_depth == 0);
left_el = path_leaf_el(left_path);
BUG_ON(left_el->l_next_free_rec != left_el->l_count);
ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
left_path, &right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
/* This function shouldn't be called for the rightmost leaf. */
BUG_ON(right_cpos == 0);
right_path = ocfs2_new_path_from_path(left_path);
if (!right_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
*ret_right_path = right_path;
out:
if (ret)
ocfs2_free_path(right_path);
return ret;
}
/*
* Remove split_rec clusters from the record at index and merge them
* onto the beginning of the record "next" to it.
* For index < l_count - 1, the "next" means the extent rec at index + 1.
* For index == l_count - 1, the "next" means the 1st extent rec of the
* next extent block.
*/
static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *split_rec,
int index)
{
int ret, next_free, i;
unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
struct ocfs2_extent_rec *left_rec;
struct ocfs2_extent_rec *right_rec;
struct ocfs2_extent_list *right_el;
struct ocfs2_path *right_path = NULL;
int subtree_index = 0;
struct ocfs2_extent_list *el = path_leaf_el(left_path);
struct buffer_head *bh = path_leaf_bh(left_path);
struct buffer_head *root_bh = NULL;
BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
left_rec = &el->l_recs[index];
if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
/* we meet with a cross extent block merge. */
ret = ocfs2_get_right_path(et, left_path, &right_path);
if (ret) {
mlog_errno(ret);
return ret;
}
right_el = path_leaf_el(right_path);
next_free = le16_to_cpu(right_el->l_next_free_rec);
BUG_ON(next_free <= 0);
right_rec = &right_el->l_recs[0];
if (ocfs2_is_empty_extent(right_rec)) {
BUG_ON(next_free <= 1);
right_rec = &right_el->l_recs[1];
}
BUG_ON(le32_to_cpu(left_rec->e_cpos) +
le16_to_cpu(left_rec->e_leaf_clusters) !=
le32_to_cpu(right_rec->e_cpos));
subtree_index = ocfs2_find_subtree_root(et, left_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
jbd2_handle_buffer_credits(handle),
right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
}
for (i = subtree_index + 1;
i < path_num_items(right_path); i++) {
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
}
} else {
BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
right_rec = &el->l_recs[index + 1];
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path,
path_num_items(left_path) - 1);
if (ret) {
mlog_errno(ret);
goto out;
}
le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);
le32_add_cpu(&right_rec->e_cpos, -split_clusters);
le64_add_cpu(&right_rec->e_blkno,
-ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
split_clusters));
le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);
ocfs2_cleanup_merge(el, index);
ocfs2_journal_dirty(handle, bh);
if (right_path) {
ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
}
out:
ocfs2_free_path(right_path);
return ret;
}
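/*
 * Look up the path to the leaf immediately to the left of
 * right_path. right_path must not be the leftmost path.
 */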
static int ocfs2_get_left_path(struct ocfs2_extent_tree *et,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
int ret;
u32 left_cpos;
struct ocfs2_path *left_path = NULL;
*ret_left_path = NULL;
/* This function shouldn't be called for non-trees. */
BUG_ON(right_path->p_tree_depth == 0);
ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
right_path, &left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
/* This function shouldn't be called for the leftmost leaf. */
BUG_ON(left_cpos == 0);
left_path = ocfs2_new_path_from_path(right_path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, left_path, left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
*ret_left_path = left_path;
out:
if (ret)
ocfs2_free_path(left_path);
return ret;
}
/*
* Remove split_rec clusters from the record at index and merge them
* onto the tail of the record "before" it.
* For index > 0, the "before" means the extent rec at index - 1.
*
* For index == 0, the "before" means the last record of the previous
* extent block. In that case we may also need to remove the rightmost
* leaf extent block in the right_path and switch right_path to the
* new rightmost path.
*/
static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int index)
{
int ret, i, subtree_index = 0, has_empty_extent = 0;
unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
struct ocfs2_extent_rec *left_rec;
struct ocfs2_extent_rec *right_rec;
struct ocfs2_extent_list *el = path_leaf_el(right_path);
struct buffer_head *bh = path_leaf_bh(right_path);
struct buffer_head *root_bh = NULL;
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *left_el;
BUG_ON(index < 0);
right_rec = &el->l_recs[index];
if (index == 0) {
/* we meet with a cross extent block merge. */
ret = ocfs2_get_left_path(et, right_path, &left_path);
if (ret) {
mlog_errno(ret);
return ret;
}
left_el = path_leaf_el(left_path);
BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
le16_to_cpu(left_el->l_count));
left_rec = &left_el->l_recs[
le16_to_cpu(left_el->l_next_free_rec) - 1];
BUG_ON(le32_to_cpu(left_rec->e_cpos) +
le16_to_cpu(left_rec->e_leaf_clusters) !=
le32_to_cpu(split_rec->e_cpos));
subtree_index = ocfs2_find_subtree_root(et, left_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
jbd2_handle_buffer_credits(handle),
left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
}
for (i = subtree_index + 1;
i < path_num_items(right_path); i++) {
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
}
} else {
left_rec = &el->l_recs[index - 1];
if (ocfs2_is_empty_extent(&el->l_recs[0]))
has_empty_extent = 1;
}
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
path_num_items(right_path) - 1);
if (ret) {
mlog_errno(ret);
goto out;
}
if (has_empty_extent && index == 1) {
/*
* The easy case - we can just plop the record right in.
*/
*left_rec = *split_rec;
} else
le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
le32_add_cpu(&right_rec->e_cpos, split_clusters);
le64_add_cpu(&right_rec->e_blkno,
ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
split_clusters));
le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
ocfs2_cleanup_merge(el, index);
ocfs2_journal_dirty(handle, bh);
if (left_path) {
ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
/*
* If right_rec ends up empty and it is the only record in its
* extent block, ocfs2_complete_edge_insert() can't handle it, so
* we need to delete the right extent block instead.
*/
if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
le16_to_cpu(el->l_next_free_rec) == 1) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_remove_rightmost_path(handle, et,
right_path,
dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
/* Now the rightmost extent block has been deleted.
* So we use the new rightmost path.
*/
ocfs2_mv_path(right_path, left_path);
left_path = NULL;
} else
ocfs2_complete_edge_insert(handle, left_path,
right_path, subtree_index);
}
out:
ocfs2_free_path(left_path);
return ret;
}
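/*
 * Merge split_rec into its neighbor(s) as dictated by
 * ctxt->c_contig_type. A CONTIG_LEFTRIGHT merge is done as a right
 * merge followed by a left merge, with a left rotation after each
 * step to get rid of the empty extents left behind.
 */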
static int ocfs2_try_to_merge_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
int split_index,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_merge_ctxt *ctxt)
{
int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(path);
struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The merge code will need to create an empty
* extent to take the place of the newly
* emptied slot. Remove any pre-existing empty
* extents - having more than one in a leaf is
* illegal.
*/
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
split_index--;
rec = &el->l_recs[split_index];
}
if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
/*
* Left-right contig implies this.
*/
BUG_ON(!ctxt->c_split_covers_rec);
/*
* Since the leftright insert always covers the entire
* extent, this call will delete the insert record
* entirely, resulting in an empty extent record added to
* the extent block.
*
* Since the adding of an empty extent shifts
* everything back to the right, there's no need to
* update split_index here.
*
* When the split_index is zero, we need to merge it to the
* previous extent block. It is more efficient and easier
* if we do merge_right first and merge_left later.
*/
ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We can only get this from logic error above.
*/
BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
goto out;
}
/* The merge left us with an empty extent, remove it. */
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
rec = &el->l_recs[split_index];
/*
* Note that we don't pass split_rec here on purpose -
* we've merged it into the rec already.
*/
ret = ocfs2_merge_rec_left(path, handle, et, rec,
dealloc, split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
/*
* Error from this last rotate is not critical, so
* print but don't bubble it up.
*/
if (ret)
mlog_errno(ret);
ret = 0;
} else {
/*
* Merge a record to the left or right.
*
* 'contig_type' is relative to the existing record,
* so for example, if we're "right contig", it's to
* the record on the left (hence the left merge).
*/
if (ctxt->c_contig_type == CONTIG_RIGHT) {
ret = ocfs2_merge_rec_left(path, handle, et,
split_rec, dealloc,
split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
ret = ocfs2_merge_rec_right(path, handle,
et, split_rec,
split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (ctxt->c_split_covers_rec) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
ret = 0;
goto out;
}
/*
* The merge may have left an empty extent in
* our leaf. Try to rotate it away.
*/
ret = ocfs2_rotate_tree_left(handle, et, path,
dealloc);
if (ret)
mlog_errno(ret);
ret = 0;
}
}
out:
return ret;
}
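/*
 * Trim split_rec's clusters off of 'rec', from whichever edge
 * 'split' names. A left split also advances e_cpos and e_blkno past
 * the removed region.
 */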
static void ocfs2_subtract_from_rec(struct super_block *sb,
enum ocfs2_split_type split,
struct ocfs2_extent_rec *rec,
struct ocfs2_extent_rec *split_rec)
{
u64 len_blocks;
len_blocks = ocfs2_clusters_to_blocks(sb,
le16_to_cpu(split_rec->e_leaf_clusters));
if (split == SPLIT_LEFT) {
/*
* Region is on the left edge of the existing
* record.
*/
le32_add_cpu(&rec->e_cpos,
le16_to_cpu(split_rec->e_leaf_clusters));
le64_add_cpu(&rec->e_blkno, len_blocks);
le16_add_cpu(&rec->e_leaf_clusters,
-le16_to_cpu(split_rec->e_leaf_clusters));
} else {
/*
* Region is on the right edge of the existing
* record.
*/
le16_add_cpu(&rec->e_leaf_clusters,
-le16_to_cpu(split_rec->e_leaf_clusters));
}
}
/*
* Do the final bits of extent record insertion at the target leaf
* list. If this leaf is part of an allocation tree, it is assumed
* that the tree above has been prepared.
*/
static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_extent_list *el,
struct ocfs2_insert_type *insert)
{
int i = insert->ins_contig_index;
unsigned int range;
struct ocfs2_extent_rec *rec;
BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
if (insert->ins_split != SPLIT_NONE) {
i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
BUG_ON(i == -1);
rec = &el->l_recs[i];
ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
insert->ins_split, rec,
insert_rec);
goto rotate;
}
/*
* Contiguous insert - either left or right.
*/
if (insert->ins_contig != CONTIG_NONE) {
rec = &el->l_recs[i];
if (insert->ins_contig == CONTIG_LEFT) {
rec->e_blkno = insert_rec->e_blkno;
rec->e_cpos = insert_rec->e_cpos;
}
le16_add_cpu(&rec->e_leaf_clusters,
le16_to_cpu(insert_rec->e_leaf_clusters));
return;
}
/*
* Handle insert into an empty leaf.
*/
if (le16_to_cpu(el->l_next_free_rec) == 0 ||
((le16_to_cpu(el->l_next_free_rec) == 1) &&
ocfs2_is_empty_extent(&el->l_recs[0]))) {
el->l_recs[0] = *insert_rec;
el->l_next_free_rec = cpu_to_le16(1);
return;
}
/*
* Appending insert.
*/
if (insert->ins_appending == APPEND_TAIL) {
i = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[i];
range = le32_to_cpu(rec->e_cpos)
+ le16_to_cpu(rec->e_leaf_clusters);
BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
le16_to_cpu(el->l_count),
"owner %llu, depth %u, count %u, next free %u, "
"rec.cpos %u, rec.clusters %u, "
"insert.cpos %u, insert.clusters %u\n",
ocfs2_metadata_cache_owner(et->et_ci),
le16_to_cpu(el->l_tree_depth),
le16_to_cpu(el->l_count),
le16_to_cpu(el->l_next_free_rec),
le32_to_cpu(el->l_recs[i].e_cpos),
le16_to_cpu(el->l_recs[i].e_leaf_clusters),
le32_to_cpu(insert_rec->e_cpos),
le16_to_cpu(insert_rec->e_leaf_clusters));
i++;
el->l_recs[i] = *insert_rec;
le16_add_cpu(&el->l_next_free_rec, 1);
return;
}
rotate:
/*
* Ok, we have to rotate.
*
* At this point, it is safe to assume that inserting into an
* empty leaf and appending to a leaf have both been handled
* above.
*
* This leaf needs to have space, either by the empty 1st
* extent record, or by virtue of an l_next_free_rec < l_count.
*/
ocfs2_rotate_leaf(el, insert_rec);
}
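/*
 * An append to the rightmost leaf extends the range of every node
 * above it. Recompute e_int_clusters on each interior edge record so
 * that it reaches the end of insert_rec.
 */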
static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec)
{
int i, next_free;
struct buffer_head *bh;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
/*
* Update everything except the leaf block.
*/
for (i = 0; i < path->p_tree_depth; i++) {
bh = path->p_node[i].bh;
el = path->p_node[i].el;
next_free = le16_to_cpu(el->l_next_free_rec);
if (next_free == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has a bad extent list\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
return;
}
rec = &el->l_recs[next_free - 1];
rec->e_int_clusters = insert_rec->e_cpos;
le32_add_cpu(&rec->e_int_clusters,
le16_to_cpu(insert_rec->e_leaf_clusters));
le32_add_cpu(&rec->e_int_clusters,
-le32_to_cpu(rec->e_cpos));
ocfs2_journal_dirty(handle, bh);
}
}
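/*
 * Prepare the tree for an appending insert: journal the right path
 * and update the interior edge records to cover insert_rec. If the
 * append lands in an empty, non-leftmost leaf, the path immediately
 * to the left will need a post-insert update, so it is looked up and
 * handed back in *ret_left_path.
 */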
static int ocfs2_append_rec_to_path(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
int ret, next_free;
struct ocfs2_extent_list *el;
struct ocfs2_path *left_path = NULL;
*ret_left_path = NULL;
/*
* This shouldn't happen for non-trees. The extent rec cluster
* count manipulation below only works for interior nodes.
*/
BUG_ON(right_path->p_tree_depth == 0);
/*
* If our appending insert is at the leftmost edge of a leaf,
* then we might need to update the rightmost records of the
* neighboring path.
*/
el = path_leaf_el(right_path);
next_free = le16_to_cpu(el->l_next_free_rec);
if (next_free == 0 ||
(next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
u32 left_cpos;
ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
right_path, &left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_append_rec_to_path(
(unsigned long long)
ocfs2_metadata_cache_owner(et->et_ci),
le32_to_cpu(insert_rec->e_cpos),
left_cpos);
/*
* No need to worry if the append is already in the
* leftmost leaf.
*/
if (left_cpos) {
left_path = ocfs2_new_path_from_path(right_path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, left_path,
left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* ocfs2_insert_path() will pass the left_path to the
* journal for us.
*/
}
}
ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec);
*ret_left_path = left_path;
ret = 0;
out:
if (ret != 0)
ocfs2_free_path(left_path);
return ret;
}
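/*
 * Leaf-level work for a split insert: find the record containing
 * split_rec's cpos (rotation may have moved it into either leaf),
 * subtract the split range from it and rotate split_rec into the
 * appropriate leaf list.
 */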
static void ocfs2_split_record(struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
struct ocfs2_extent_rec *split_rec,
enum ocfs2_split_type split)
{
int index;
u32 cpos = le32_to_cpu(split_rec->e_cpos);
struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
struct ocfs2_extent_rec *rec, *tmprec;
right_el = path_leaf_el(right_path);
if (left_path)
left_el = path_leaf_el(left_path);
el = right_el;
insert_el = right_el;
index = ocfs2_search_extent_list(el, cpos);
if (index != -1) {
if (index == 0 && left_path) {
BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
/*
* This typically means that the record
* started in the left path but moved to the
* right as a result of rotation. We either
* move the existing record to the left, or we
* do the later insert there.
*
* In this case, the left path should always
* exist as the rotate code will have passed
* it back for a post-insert update.
*/
if (split == SPLIT_LEFT) {
/*
* It's a left split. Since we know
* that the rotate code gave us an
* empty extent in the left path, we
* can just do the insert there.
*/
insert_el = left_el;
} else {
/*
* Right split - we have to move the
* existing record over to the left
* leaf. The insert will be into the
* newly created empty extent in the
* right leaf.
*/
tmprec = &right_el->l_recs[index];
ocfs2_rotate_leaf(left_el, tmprec);
el = left_el;
memset(tmprec, 0, sizeof(*tmprec));
index = ocfs2_search_extent_list(left_el, cpos);
BUG_ON(index == -1);
}
}
} else {
BUG_ON(!left_path);
BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
/*
* Left path is easy - we can just allow the insert to
* happen.
*/
el = left_el;
insert_el = left_el;
index = ocfs2_search_extent_list(el, cpos);
BUG_ON(index == -1);
}
rec = &el->l_recs[index];
ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
split, rec, split_rec);
ocfs2_rotate_leaf(insert_el, split_rec);
}
/*
* This function only does inserts on an allocation b-tree. For tree
* depth = 0, ocfs2_insert_at_leaf() is called directly.
*
* right_path is the path we want to do the actual insert
* in. left_path should only be passed in if we need to update that
* portion of the tree after an edge insert.
*/
static int ocfs2_insert_path(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_insert_type *insert)
{
int ret, subtree_index;
struct buffer_head *leaf_bh = path_leaf_bh(right_path);
if (left_path) {
/*
* There's a chance that left_path got passed back to
* us without being accounted for in the
* journal. Extend our transaction here to be sure we
* can change those blocks.
*/
ret = ocfs2_extend_trans(handle, left_path->p_tree_depth);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
/*
* Pass both paths to the journal. The majority of inserts
* will be touching all components anyway.
*/
ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
if (insert->ins_split != SPLIT_NONE) {
/*
* We could call ocfs2_insert_at_leaf() for some types
* of splits, but it's easier to just let one separate
* function sort it all out.
*/
ocfs2_split_record(et, left_path, right_path,
insert_rec, insert->ins_split);
/*
* Split might have modified either leaf and we don't
* have a guarantee that the later edge insert will
* dirty this for us.
*/
if (left_path)
ocfs2_journal_dirty(handle,
path_leaf_bh(left_path));
} else
ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
insert);
ocfs2_journal_dirty(handle, leaf_bh);
if (left_path) {
/*
* The rotate code has indicated that we need to fix
* up portions of the tree after the insert.
*
* XXX: Should we extend the transaction here?
*/
subtree_index = ocfs2_find_subtree_root(et, left_path,
right_path);
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
}
ret = 0;
out:
return ret;
}
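/*
 * Top-level insert of a single extent record. Depth-0 trees are
 * handled directly at the root; otherwise we pick the starting path
 * (the rightmost one for rotations, the target leaf for everything
 * else), do any rotate or append preparation and then call
 * ocfs2_insert_path(). The root cluster count is updated for
 * non-split inserts.
 */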
static int ocfs2_do_insert_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_insert_type *type)
{
int ret, rotate = 0;
u32 cpos;
struct ocfs2_path *right_path = NULL;
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *el;
el = et->et_root_el;
ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
if (le16_to_cpu(el->l_tree_depth) == 0) {
ocfs2_insert_at_leaf(et, insert_rec, el, type);
goto out_update_clusters;
}
right_path = ocfs2_new_path_from_et(et);
if (!right_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
/*
* Determine the path to start with. Rotations need the
* rightmost path, everything else can go directly to the
* target leaf.
*/
cpos = le32_to_cpu(insert_rec->e_cpos);
if (type->ins_appending == APPEND_NONE &&
type->ins_contig == CONTIG_NONE) {
rotate = 1;
cpos = UINT_MAX;
}
ret = ocfs2_find_path(et->et_ci, right_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Rotations and appends need special treatment - they modify
* parts of the tree above them.
*
* Both might pass back a path immediately to the left of the
* one being inserted to. This will cause
* ocfs2_insert_path() to modify the rightmost records of
* left_path to account for an edge insert.
*
* XXX: When modifying this code, keep in mind that an insert
* can wind up skipping both of these two special cases...
*/
if (rotate) {
ret = ocfs2_rotate_tree_right(handle, et, type->ins_split,
le32_to_cpu(insert_rec->e_cpos),
right_path, &left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* ocfs2_rotate_tree_right() might have extended the
* transaction without re-journaling our tree root.
*/
ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
} else if (type->ins_appending == APPEND_TAIL
&& type->ins_contig != CONTIG_LEFT) {
ret = ocfs2_append_rec_to_path(handle, et, insert_rec,
right_path, &left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_insert_path(handle, et, left_path, right_path,
insert_rec, type);
if (ret) {
mlog_errno(ret);
goto out;
}
out_update_clusters:
if (type->ins_split == SPLIT_NONE)
ocfs2_et_update_clusters(et,
le16_to_cpu(insert_rec->e_leaf_clusters));
ocfs2_journal_dirty(handle, et->et_root_bh);
out:
ocfs2_free_path(left_path);
ocfs2_free_path(right_path);
return ret;
}
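/*
 * Figure out how split_rec is contiguous with the records around
 * 'index', reading in the neighboring leaf when the record in
 * question sits at either end of its list. The result is stored in
 * ctxt->c_contig_type.
 */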
static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_list *el, int index,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_merge_ctxt *ctxt)
{
int status = 0;
enum ocfs2_contig_type ret = CONTIG_NONE;
u32 left_cpos, right_cpos;
struct ocfs2_extent_rec *rec = NULL;
struct ocfs2_extent_list *new_el;
struct ocfs2_path *left_path = NULL, *right_path = NULL;
struct buffer_head *bh;
struct ocfs2_extent_block *eb;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
if (index > 0) {
rec = &el->l_recs[index - 1];
} else if (path->p_tree_depth > 0) {
status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
if (status)
goto exit;
if (left_cpos != 0) {
left_path = ocfs2_new_path_from_path(path);
if (!left_path) {
status = -ENOMEM;
mlog_errno(status);
goto exit;
}
status = ocfs2_find_path(et->et_ci, left_path,
left_cpos);
if (status)
goto free_left_path;
new_el = path_leaf_el(left_path);
if (le16_to_cpu(new_el->l_next_free_rec) !=
le16_to_cpu(new_el->l_count)) {
bh = path_leaf_bh(left_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
status = ocfs2_error(sb,
"Extent block #%llu has an invalid l_next_free_rec of %d. It should have matched the l_count of %d\n",
(unsigned long long)le64_to_cpu(eb->h_blkno),
le16_to_cpu(new_el->l_next_free_rec),
le16_to_cpu(new_el->l_count));
goto free_left_path;
}
rec = &new_el->l_recs[
le16_to_cpu(new_el->l_next_free_rec) - 1];
}
}
/*
* We're careful to check for an empty extent record here -
* the merge code will know what to do if it sees one.
*/
if (rec) {
if (index == 1 && ocfs2_is_empty_extent(rec)) {
if (split_rec->e_cpos == el->l_recs[index].e_cpos)
ret = CONTIG_RIGHT;
} else {
ret = ocfs2_et_extent_contig(et, rec, split_rec);
}
}
rec = NULL;
if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
rec = &el->l_recs[index + 1];
else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
path->p_tree_depth > 0) {
status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
if (status)
goto free_left_path;
if (right_cpos == 0)
goto free_left_path;
right_path = ocfs2_new_path_from_path(path);
if (!right_path) {
status = -ENOMEM;
mlog_errno(status);
goto free_left_path;
}
status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (status)
goto free_right_path;
new_el = path_leaf_el(right_path);
rec = &new_el->l_recs[0];
if (ocfs2_is_empty_extent(rec)) {
if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
bh = path_leaf_bh(right_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
status = ocfs2_error(sb,
"Extent block #%llu has an invalid l_next_free_rec of %d\n",
(unsigned long long)le64_to_cpu(eb->h_blkno),
le16_to_cpu(new_el->l_next_free_rec));
goto free_right_path;
}
rec = &new_el->l_recs[1];
}
}
if (rec) {
enum ocfs2_contig_type contig_type;
contig_type = ocfs2_et_extent_contig(et, rec, split_rec);
if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
ret = CONTIG_LEFTRIGHT;
else if (ret == CONTIG_NONE)
ret = contig_type;
}
free_right_path:
ocfs2_free_path(right_path);
free_left_path:
ocfs2_free_path(left_path);
exit:
if (status == 0)
ctxt->c_contig_type = ret;
return status;
}
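/*
 * Scan a depth-zero extent list for a record contiguous with
 * insert_rec, recording any match in @insert. A match is discarded if
 * the merged record would exceed et_max_leaf_clusters.
 */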
static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
struct ocfs2_insert_type *insert,
struct ocfs2_extent_list *el,
struct ocfs2_extent_rec *insert_rec)
{
int i;
enum ocfs2_contig_type contig_type = CONTIG_NONE;
BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i],
insert_rec);
if (contig_type != CONTIG_NONE) {
insert->ins_contig_index = i;
break;
}
}
insert->ins_contig = contig_type;
if (insert->ins_contig != CONTIG_NONE) {
struct ocfs2_extent_rec *rec =
&el->l_recs[insert->ins_contig_index];
unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
le16_to_cpu(insert_rec->e_leaf_clusters);
/*
* Caller might want us to limit the size of extents, don't
* calculate contiguousness if we might exceed that limit.
*/
if (et->et_max_leaf_clusters &&
(len > et->et_max_leaf_clusters))
insert->ins_contig = CONTIG_NONE;
}
}
/*
 * This should only be called against the rightmost leaf extent list.
 *
 * ocfs2_figure_appending_type() will figure out whether we'll have to
 * insert at the tail of the rightmost leaf.
 *
 * This should also work against the root extent list for trees with 0
 * depth. If we consider the root extent list to be the rightmost leaf node
 * then the logic here makes sense.
 */
static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
struct ocfs2_extent_list *el,
struct ocfs2_extent_rec *insert_rec)
{
int i;
u32 cpos = le32_to_cpu(insert_rec->e_cpos);
struct ocfs2_extent_rec *rec;
insert->ins_appending = APPEND_NONE;
BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
if (!el->l_next_free_rec)
goto set_tail_append;
if (ocfs2_is_empty_extent(&el->l_recs[0])) {
/* Were all records empty? */
if (le16_to_cpu(el->l_next_free_rec) == 1)
goto set_tail_append;
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[i];
if (cpos >=
(le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
goto set_tail_append;
return;
set_tail_append:
insert->ins_appending = APPEND_TAIL;
}
/*
* Helper function called at the beginning of an insert.
*
* This computes a few things that are commonly used in the process of
* inserting into the btree:
* - Whether the new extent is contiguous with an existing one.
* - The current tree depth.
* - Whether the insert is an appending one.
* - The total # of free records in the tree.
*
* All of the information is stored on the ocfs2_insert_type
* structure.
*/
static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
struct buffer_head **last_eb_bh,
struct ocfs2_extent_rec *insert_rec,
int *free_records,
struct ocfs2_insert_type *insert)
{
int ret;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct ocfs2_path *path = NULL;
struct buffer_head *bh = NULL;
insert->ins_split = SPLIT_NONE;
el = et->et_root_el;
insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
if (el->l_tree_depth) {
/*
* If we have tree depth, we read in the
* rightmost extent block ahead of time as
* ocfs2_figure_insert_type() and ocfs2_add_branch()
* may want it later.
*/
ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) bh->b_data;
el = &eb->h_list;
}
/*
* Unless we have a contiguous insert, we'll need to know if
* there is room left in our allocation tree for another
* extent record.
*
* XXX: This test is simplistic, we can search for empty
* extent records too.
*/
*free_records = le16_to_cpu(el->l_count) -
le16_to_cpu(el->l_next_free_rec);
if (!insert->ins_tree_depth) {
ocfs2_figure_contig_type(et, insert, el, insert_rec);
ocfs2_figure_appending_type(insert, el, insert_rec);
return 0;
}
path = ocfs2_new_path_from_et(et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
/*
* In the case that we're inserting past what the tree
* currently accounts for, ocfs2_find_path() will return for
* us the rightmost tree path. This is accounted for below in
* the appending code.
*/
ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos));
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
/*
 * Now that we have the path, there are two things we want to determine:
* 1) Contiguousness (also set contig_index if this is so)
*
* 2) Are we doing an append? We can trivially break this up
* into two types of appends: simple record append, or a
* rotate inside the tail leaf.
*/
ocfs2_figure_contig_type(et, insert, el, insert_rec);
/*
* The insert code isn't quite ready to deal with all cases of
* left contiguousness. Specifically, if it's an insert into
* the 1st record in a leaf, it will require the adjustment of
 * cluster count on the last record of the path directly to its
* left. For now, just catch that case and fool the layers
* above us. This works just fine for tree_depth == 0, which
* is why we allow that above.
*/
if (insert->ins_contig == CONTIG_LEFT &&
insert->ins_contig_index == 0)
insert->ins_contig = CONTIG_NONE;
/*
 * Ok, so we can simply compare against last_eb to figure out
 * whether our path ends at the rightmost leaf. This will only happen
 * in the case that we're doing a tail append, so maybe we can
 * take advantage of that information somehow.
 */
if (ocfs2_et_get_last_eb_blk(et) ==
path_leaf_bh(path)->b_blocknr) {
/*
* Ok, ocfs2_find_path() returned us the rightmost
* tree path. This might be an appending insert. There are
* two cases:
* 1) We're doing a true append at the tail:
* -This might even be off the end of the leaf
* 2) We're "appending" by rotating in the tail
*/
ocfs2_figure_appending_type(insert, el, insert_rec);
}
out:
ocfs2_free_path(path);
if (ret == 0)
*last_eb_bh = bh;
else
brelse(bh);
return ret;
}
/*
* Insert an extent into a btree.
*
* The caller needs to update the owning btree's cluster count.
*/
int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos,
u64 start_blk,
u32 new_clusters,
u8 flags,
struct ocfs2_alloc_context *meta_ac)
{
int status;
int free_records;
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
trace_ocfs2_insert_extent_start(
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos, new_clusters);
memset(&rec, 0, sizeof(rec));
rec.e_cpos = cpu_to_le32(cpos);
rec.e_blkno = cpu_to_le64(start_blk);
rec.e_leaf_clusters = cpu_to_le16(new_clusters);
rec.e_flags = flags;
status = ocfs2_et_insert_check(et, &rec);
if (status) {
mlog_errno(status);
goto bail;
}
status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec,
&free_records, &insert);
if (status < 0) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
insert.ins_contig_index, free_records,
insert.ins_tree_depth);
if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
status = ocfs2_grow_tree(handle, et,
&insert.ins_tree_depth, &last_eb_bh,
meta_ac);
if (status) {
mlog_errno(status);
goto bail;
}
}
/* Finally, we can add clusters. This might rotate the tree for us. */
status = ocfs2_do_insert_extent(handle, et, &rec, &insert);
if (status < 0)
mlog_errno(status);
else
ocfs2_et_extent_map_insert(et, &rec);
bail:
brelse(last_eb_bh);
return status;
}
/*
 * Allocate and add clusters to the extent b-tree.
 * The new clusters (clusters_to_add) will be inserted at logical_offset.
 * The extent b-tree's root is specified by et, and
 * it is not limited to file storage. Any extent tree can use this
 * function if it implements the proper ocfs2_extent_tree.
 */
int ocfs2_add_clusters_in_btree(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 *logical_offset,
u32 clusters_to_add,
int mark_unwritten,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret)
{
int status = 0, err = 0;
int need_free = 0;
int free_extents;
enum ocfs2_alloc_restarted reason = RESTART_NONE;
u32 bit_off, num_bits;
u64 block;
u8 flags = 0;
struct ocfs2_super *osb =
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
BUG_ON(!clusters_to_add);
if (mark_unwritten)
flags = OCFS2_EXT_UNWRITTEN;
free_extents = ocfs2_num_free_extents(et);
if (free_extents < 0) {
status = free_extents;
mlog_errno(status);
goto leave;
}
/* there are two cases which could cause us to EAGAIN in the
* we-need-more-metadata case:
* 1) we haven't reserved *any*
* 2) we are so fragmented, we've needed to add metadata too
* many times. */
if (!free_extents && !meta_ac) {
err = -1;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
} else if ((!free_extents)
&& (ocfs2_alloc_context_bits_left(meta_ac)
< ocfs2_extend_meta_needed(et->et_root_el))) {
err = -2;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
}
status = __ocfs2_claim_clusters(handle, data_ac, 1,
clusters_to_add, &bit_off, &num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
BUG_ON(num_bits > clusters_to_add);
/* reserve our write early -- insert_extent may update the tree root */
status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
need_free = 1;
goto bail;
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_add_clusters_in_btree(
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
bit_off, num_bits);
status = ocfs2_insert_extent(handle, et, *logical_offset, block,
num_bits, flags, meta_ac);
if (status < 0) {
mlog_errno(status);
need_free = 1;
goto bail;
}
ocfs2_journal_dirty(handle, et->et_root_bh);
clusters_to_add -= num_bits;
*logical_offset += num_bits;
if (clusters_to_add) {
err = clusters_to_add;
status = -EAGAIN;
reason = RESTART_TRANS;
}
bail:
if (need_free) {
if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
ocfs2_free_local_alloc_bits(osb, handle, data_ac,
bit_off, num_bits);
else
ocfs2_free_clusters(handle,
data_ac->ac_inode,
data_ac->ac_bh,
ocfs2_clusters_to_blocks(osb->sb, bit_off),
num_bits);
}
leave:
if (reason_ret)
*reason_ret = reason;
trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
return status;
}
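/*
 * Build the record describing the right-hand part of a split of @rec
 * at @cpos: it covers cpos up to the end of @rec, with e_blkno
 * advanced by the corresponding number of clusters.
 */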
static void ocfs2_make_right_split_rec(struct super_block *sb,
struct ocfs2_extent_rec *split_rec,
u32 cpos,
struct ocfs2_extent_rec *rec)
{
u32 rec_cpos = le32_to_cpu(rec->e_cpos);
u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);
memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));
split_rec->e_cpos = cpu_to_le32(cpos);
split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);
split_rec->e_blkno = rec->e_blkno;
le64_add_cpu(&split_rec->e_blkno,
ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));
split_rec->e_flags = rec->e_flags;
}
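/*
 * Split the record at split_index according to orig_split_rec. A split
 * in the middle of a record is done in two passes - first as a right
 * split, then as a left split on the remainder - growing the tree
 * beforehand whenever the rightmost leaf is full.
 */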
static int ocfs2_split_and_insert(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct buffer_head **last_eb_bh,
int split_index,
struct ocfs2_extent_rec *orig_split_rec,
struct ocfs2_alloc_context *meta_ac)
{
int ret = 0, depth;
unsigned int insert_range, rec_range, do_leftright = 0;
struct ocfs2_extent_rec tmprec;
struct ocfs2_extent_list *rightmost_el;
struct ocfs2_extent_rec rec;
struct ocfs2_extent_rec split_rec = *orig_split_rec;
struct ocfs2_insert_type insert;
struct ocfs2_extent_block *eb;
leftright:
/*
* Store a copy of the record on the stack - it might move
* around as the tree is manipulated below.
*/
rec = path_leaf_el(path)->l_recs[split_index];
rightmost_el = et->et_root_el;
depth = le16_to_cpu(rightmost_el->l_tree_depth);
if (depth) {
BUG_ON(!(*last_eb_bh));
eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
rightmost_el = &eb->h_list;
}
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
ret = ocfs2_grow_tree(handle, et,
&depth, last_eb_bh, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
insert.ins_tree_depth = depth;
insert_range = le32_to_cpu(split_rec.e_cpos) +
le16_to_cpu(split_rec.e_leaf_clusters);
rec_range = le32_to_cpu(rec.e_cpos) +
le16_to_cpu(rec.e_leaf_clusters);
if (split_rec.e_cpos == rec.e_cpos) {
insert.ins_split = SPLIT_LEFT;
} else if (insert_range == rec_range) {
insert.ins_split = SPLIT_RIGHT;
} else {
/*
* Left/right split. We fake this as a right split
* first and then make a second pass as a left split.
*/
insert.ins_split = SPLIT_RIGHT;
ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
&tmprec, insert_range, &rec);
split_rec = tmprec;
BUG_ON(do_leftright);
do_leftright = 1;
}
ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
if (ret) {
mlog_errno(ret);
goto out;
}
if (do_leftright == 1) {
u32 cpos;
struct ocfs2_extent_list *el;
do_leftright++;
split_rec = *orig_split_rec;
ocfs2_reinit_path(path, 1);
cpos = le32_to_cpu(split_rec.e_cpos);
ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
split_index = ocfs2_search_extent_list(el, cpos);
if (split_index == -1) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has an extent at cpos %u which can no longer be found\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos);
ret = -EROFS;
goto out;
}
goto leftright;
}
out:
return ret;
}
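/*
 * Overwrite the record at split_index in place under journal access.
 * Used when the split record covers the existing record entirely, so
 * no insert is needed.
 */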
static int ocfs2_replace_extent_rec(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_list *el,
int split_index,
struct ocfs2_extent_rec *split_rec)
{
int ret;
ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
path_num_items(path) - 1);
if (ret) {
mlog_errno(ret);
goto out;
}
el->l_recs[split_index] = *split_rec;
ocfs2_journal_dirty(handle, path_leaf_bh(path));
out:
return ret;
}
/*
* Split part or all of the extent record at split_index in the leaf
* pointed to by path. Merge with the contiguous extent record if needed.
*
* Care is taken to handle contiguousness so as to not grow the tree.
*
* meta_ac is not strictly necessary - we only truly need it if growth
* of the tree is required. All other cases will degrade into a less
* optimal tree layout.
*
* last_eb_bh should be the rightmost leaf block for any extent
* btree. Since a split may grow the tree or a merge might shrink it,
* the caller cannot trust the contents of that buffer after this call.
*
* This code is optimized for readability - several passes might be
* made over certain portions of the tree. All of those blocks will
* have been brought into cache (and pinned via the journal), so the
* extra overhead is not expressed in terms of disk reads.
*/
int ocfs2_split_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
int split_index,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(path);
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
struct ocfs2_merge_ctxt ctxt;
if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
(le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
ret = -EIO;
mlog_errno(ret);
goto out;
}
ret = ocfs2_figure_merge_contig_type(et, path, el,
split_index,
split_rec,
&ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The core merge / split code wants to know how much room is
* left in this allocation tree, so we pass the
* rightmost extent list.
*/
if (path->p_tree_depth) {
ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
if (rec->e_cpos == split_rec->e_cpos &&
rec->e_leaf_clusters == split_rec->e_leaf_clusters)
ctxt.c_split_covers_rec = 1;
else
ctxt.c_split_covers_rec = 0;
ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
ctxt.c_has_empty_extent,
ctxt.c_split_covers_rec);
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
ret = ocfs2_replace_extent_rec(handle, et, path, el,
split_index, split_rec);
else
ret = ocfs2_split_and_insert(handle, et, path,
&last_eb_bh, split_index,
split_rec, meta_ac);
if (ret)
mlog_errno(ret);
} else {
ret = ocfs2_try_to_merge_extent(handle, et, path,
split_index, split_rec,
dealloc, &ctxt);
if (ret)
mlog_errno(ret);
}
out:
brelse(last_eb_bh);
return ret;
}
/*
* Change the flags of the already-existing extent at cpos for len clusters.
*
* new_flags: the flags we want to set.
* clear_flags: the flags we want to clear.
 * phys: the new physical offset we want this new extent to start from.
*
* If the existing extent is larger than the request, initiate a
* split. An attempt will be made at merging with adjacent extents.
*
* The caller is responsible for passing down meta_ac if we'll need it.
*/
int ocfs2_change_extent_flag(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos, u32 len, u32 phys,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc,
int new_flags, int clear_flags)
{
int ret, index;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys);
struct ocfs2_extent_rec split_rec;
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
left_path = ocfs2_new_path_from_et(et);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(left_path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1) {
ocfs2_error(sb,
"Owner %llu has an extent at cpos %u which can no longer be found\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos);
ret = -EROFS;
goto out;
}
ret = -EIO;
rec = &el->l_recs[index];
if (new_flags && (rec->e_flags & new_flags)) {
mlog(ML_ERROR, "Owner %llu tried to set %d flags on an "
"extent that already had them\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
new_flags);
goto out;
}
if (clear_flags && !(rec->e_flags & clear_flags)) {
mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an "
"extent that didn't have them\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
clear_flags);
goto out;
}
memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec));
split_rec.e_cpos = cpu_to_le32(cpos);
split_rec.e_leaf_clusters = cpu_to_le16(len);
split_rec.e_blkno = cpu_to_le64(start_blkno);
split_rec.e_flags = rec->e_flags;
if (new_flags)
split_rec.e_flags |= new_flags;
if (clear_flags)
split_rec.e_flags &= ~clear_flags;
ret = ocfs2_split_extent(handle, et, left_path,
index, &split_rec, meta_ac,
dealloc);
if (ret)
mlog_errno(ret);
out:
ocfs2_free_path(left_path);
return ret;
}
/*
* Mark the already-existing extent at cpos as written for len clusters.
* This removes the unwritten extent flag.
*
* If the existing extent is larger than the request, initiate a
* split. An attempt will be made at merging with adjacent extents.
*
* The caller is responsible for passing down meta_ac if we'll need it.
*/
int ocfs2_mark_extent_written(struct inode *inode,
struct ocfs2_extent_tree *et,
handle_t *handle, u32 cpos, u32 len, u32 phys,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret;
trace_ocfs2_mark_extent_written(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
cpos, len, phys);
if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents that are being written to, but the feature bit is not set in the super block\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
ret = -EROFS;
goto out;
}
/*
* XXX: This should be fixed up so that we just re-insert the
* next extent records.
*/
ocfs2_et_extent_map_truncate(et, 0);
ret = ocfs2_change_extent_flag(handle, et, cpos,
len, phys, meta_ac, dealloc,
0, OCFS2_EXT_UNWRITTEN);
if (ret)
mlog_errno(ret);
out:
return ret;
}
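/*
 * Do a SPLIT_RIGHT insert so that the record at @index ends at
 * @new_range, growing the tree first if the rightmost leaf is full.
 * This turns a truncate in the middle of a record into one that shares
 * an edge with the record, which ocfs2_truncate_rec() can handle.
 */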
static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
int index, u32 new_range,
struct ocfs2_alloc_context *meta_ac)
{
int ret, depth, credits;
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *rightmost_el, *el;
struct ocfs2_extent_rec split_rec;
struct ocfs2_extent_rec *rec;
struct ocfs2_insert_type insert;
/*
* Setup the record to split before we grow the tree.
*/
el = path_leaf_el(path);
rec = &el->l_recs[index];
ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
&split_rec, new_range, rec);
depth = path->p_tree_depth;
if (depth > 0) {
ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
rightmost_el = &eb->h_list;
} else
rightmost_el = path_leaf_el(path);
credits = path->p_tree_depth +
ocfs2_extend_meta_needed(et->et_root_el);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
}
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
insert.ins_split = SPLIT_RIGHT;
insert.ins_tree_depth = depth;
ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
if (ret)
mlog_errno(ret);
out:
brelse(last_eb_bh);
return ret;
}
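/*
 * Remove the range [cpos, cpos + len) from the record at @index. The
 * range must cover the whole record or share an edge with it:
 * whole-record removal empties the record and rotates the tree left to
 * fill the hole, while edge removal just shrinks the record, updating
 * the rightmost records above it when the right edge of the tree moves.
 */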
static int ocfs2_truncate_rec(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_path *path, int index,
struct ocfs2_cached_dealloc_ctxt *dealloc,
u32 cpos, u32 len)
{
int ret;
u32 left_cpos, rec_range, trunc_range;
int is_rightmost_tree_rec = 0;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *el = path_leaf_el(path);
struct ocfs2_extent_rec *rec;
struct ocfs2_extent_block *eb;
if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
index--;
}
if (index == (le16_to_cpu(el->l_next_free_rec) - 1) &&
path->p_tree_depth) {
/*
* Check whether this is the rightmost tree record. If
* we remove all of this record or part of its right
* edge then an update of the record lengths above it
* will be required.
*/
eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
if (eb->h_next_leaf_blk == 0)
is_rightmost_tree_rec = 1;
}
rec = &el->l_recs[index];
if (index == 0 && path->p_tree_depth &&
le32_to_cpu(rec->e_cpos) == cpos) {
/*
* Changing the leftmost offset (via partial or whole
* record truncate) of an interior (or rightmost) path
* means we have to update the subtree that is formed
 * by this leaf and the one to its left.
*
* There are two cases we can skip:
* 1) Path is the leftmost one in our btree.
* 2) The leaf is rightmost and will be empty after
* we remove the extent record - the rotate code
* knows how to update the newly formed edge.
*/
ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) {
left_path = ocfs2_new_path_from_path(path);
if (!left_path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, left_path,
left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
}
}
ret = ocfs2_extend_rotate_transaction(handle, 0,
jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
trunc_range = cpos + len;
if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) {
int next_free;
memset(rec, 0, sizeof(*rec));
ocfs2_cleanup_merge(el, index);
next_free = le16_to_cpu(el->l_next_free_rec);
if (is_rightmost_tree_rec && next_free > 1) {
/*
* We skip the edge update if this path will
* be deleted by the rotate code.
*/
rec = &el->l_recs[next_free - 1];
ocfs2_adjust_rightmost_records(handle, et, path,
rec);
}
} else if (le32_to_cpu(rec->e_cpos) == cpos) {
/* Remove leftmost portion of the record. */
le32_add_cpu(&rec->e_cpos, len);
le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len));
le16_add_cpu(&rec->e_leaf_clusters, -len);
} else if (rec_range == trunc_range) {
/* Remove rightmost portion of the record */
le16_add_cpu(&rec->e_leaf_clusters, -len);
if (is_rightmost_tree_rec)
ocfs2_adjust_rightmost_records(handle, et, path, rec);
} else {
/* Caller should have trapped this. */
mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) "
"(%u, %u)\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
le32_to_cpu(rec->e_cpos),
le16_to_cpu(rec->e_leaf_clusters), cpos, len);
BUG();
}
if (left_path) {
int subtree_index;
subtree_index = ocfs2_find_subtree_root(et, left_path, path);
ocfs2_complete_edge_insert(handle, left_path, path,
subtree_index);
}
ocfs2_journal_dirty(handle, path_leaf_bh(path));
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret)
mlog_errno(ret);
out:
ocfs2_free_path(left_path);
return ret;
}
int ocfs2_remove_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, index;
u32 rec_range, trunc_range;
struct ocfs2_extent_rec *rec;
struct ocfs2_extent_list *el;
struct ocfs2_path *path = NULL;
/*
* XXX: Why are we truncating to 0 instead of wherever this
* affects us?
*/
ocfs2_et_extent_map_truncate(et, 0);
path = ocfs2_new_path_from_et(et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has an extent at cpos %u which can no longer be found\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos);
ret = -EROFS;
goto out;
}
/*
* We have 3 cases of extent removal:
* 1) Range covers the entire extent rec
* 2) Range begins or ends on one edge of the extent rec
* 3) Range is in the middle of the extent rec (no shared edges)
*
* For case 1 we remove the extent rec and left rotate to
* fill the hole.
*
* For case 2 we just shrink the existing extent rec, with a
* tree update if the shrinking edge is also the edge of an
* extent block.
*
* For case 3 we do a right split to turn the extent rec into
* something case 2 can handle.
*/
rec = &el->l_recs[index];
rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
trunc_range = cpos + len;
BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
trace_ocfs2_remove_extent(
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos, len, index, le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
cpos, len);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
ret = ocfs2_split_tree(handle, et, path, index,
trunc_range, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The split could have manipulated the tree enough to
* move the record location, so we have to look for it again.
*/
ocfs2_reinit_path(path, 1);
ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu: split at cpos %u lost record\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos);
ret = -EROFS;
goto out;
}
/*
* Double check our values here. If anything is fishy,
* it's easier to catch it at the top level.
*/
rec = &el->l_recs[index];
rec_range = le32_to_cpu(rec->e_cpos) +
ocfs2_rec_clusters(el, rec);
if (rec_range != trunc_range) {
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu: error after split at cpos %u trunc len %u, existing record is (%u,%u)\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos, len, le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
ret = -EROFS;
goto out;
}
ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
cpos, len);
if (ret)
mlog_errno(ret);
}
out:
ocfs2_free_path(path);
return ret;
}
/*
 * ocfs2_reserve_blocks_for_rec_trunc() looks basically the
 * same as ocfs2_lock_allocators(), except that it accepts a number
 * of extra blocks to reserve, and it only handles metadata
 * allocations.
*
* Currently, only ocfs2_remove_btree_range() uses it for truncating
* and punching holes.
*/
static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 extents_to_split,
struct ocfs2_alloc_context **ac,
int extra_blocks)
{
int ret = 0, num_free_extents;
unsigned int max_recs_needed = 2 * extents_to_split;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
*ac = NULL;
num_free_extents = ocfs2_num_free_extents(et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
if (!num_free_extents ||
(ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
if (extra_blocks) {
ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
}
}
out:
if (ret) {
if (*ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
}
return ret;
}
int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 cpos, u32 phys_cpos, u32 len, int flags,
struct ocfs2_cached_dealloc_ctxt *dealloc,
u64 refcount_loc, bool refcount_tree_locked)
{
int ret, credits = 0, extra_blocks = 0;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
handle_t *handle;
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_refcount_tree *ref_tree = NULL;
if ((flags & OCFS2_EXT_REFCOUNTED) && len) {
BUG_ON(!ocfs2_is_refcount_inode(inode));
if (!refcount_tree_locked) {
ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
&ref_tree, NULL);
if (ret) {
mlog_errno(ret);
goto bail;
}
}
ret = ocfs2_prepare_refcount_change_for_del(inode,
refcount_loc,
phys_blkno,
len,
&credits,
&extra_blocks);
if (ret < 0) {
mlog_errno(ret);
goto bail;
}
}
ret = ocfs2_reserve_blocks_for_rec_trunc(inode, et, 1, &meta_ac,
extra_blocks);
if (ret) {
mlog_errno(ret);
goto bail;
}
inode_lock(tl_inode);
if (ocfs2_truncate_log_needs_flush(osb)) {
ret = __ocfs2_flush_truncate_log(osb);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
handle = ocfs2_start_trans(osb,
ocfs2_remove_extent_credits(osb->sb) + credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(inode->i_sb, len));
ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ocfs2_et_update_clusters(et, -len);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, et->et_root_bh);
if (phys_blkno) {
if (flags & OCFS2_EXT_REFCOUNTED)
ret = ocfs2_decrease_refcount(inode, handle,
ocfs2_blocks_to_clusters(osb->sb,
phys_blkno),
len, meta_ac,
dealloc, 1);
else
ret = ocfs2_truncate_log_append(osb, handle,
phys_blkno, len);
if (ret)
mlog_errno(ret);
}
out_commit:
ocfs2_commit_trans(osb, handle);
out:
inode_unlock(tl_inode);
bail:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
if (ref_tree)
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
return ret;
}
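/*
 * Return nonzero if the truncate log is full and must be flushed
 * before another record can be appended.
 */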
int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
{
struct buffer_head *tl_bh = osb->osb_tl_bh;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
"slot %d, invalid truncate log parameters: used = "
"%u, count = %u\n", osb->slot_num,
le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
}
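/*
 * Return nonzero if @new_start directly follows the tail record of the
 * log, in which case the new range can be merged into that record.
 */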
static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
unsigned int new_start)
{
unsigned int tail_index;
unsigned int current_tail;
/* No records, nothing to coalesce */
if (!le16_to_cpu(tl->tl_used))
return 0;
tail_index = le16_to_cpu(tl->tl_used) - 1;
current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
return current_tail == new_start;
}
int ocfs2_truncate_log_append(struct ocfs2_super *osb,
handle_t *handle,
u64 start_blk,
unsigned int num_clusters)
{
int status, index;
unsigned int start_cluster, tl_count;
struct inode *tl_inode = osb->osb_tl_inode;
struct buffer_head *tl_bh = osb->osb_tl_bh;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
BUG_ON(inode_trylock(tl_inode));
start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
di = (struct ocfs2_dinode *) tl_bh->b_data;
/* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated
* by the underlying call to ocfs2_read_inode_block(), so any
* corruption is a code bug */
BUG_ON(!OCFS2_IS_VALID_DINODE(di));
tl = &di->id2.i_dealloc;
tl_count = le16_to_cpu(tl->tl_count);
mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
tl_count == 0,
"Truncate record count on #%llu invalid "
"wanted %u, actual %u\n",
(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
ocfs2_truncate_recs_per_inode(osb->sb),
le16_to_cpu(tl->tl_count));
/* Caller should have known to flush before calling us. */
index = le16_to_cpu(tl->tl_used);
if (index >= tl_count) {
status = -ENOSPC;
mlog_errno(status);
goto bail;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_truncate_log_append(
(unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
start_cluster, num_clusters);
if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
/*
* Move index back to the record we are coalescing with.
 * ocfs2_truncate_log_can_coalesce() guarantees that tl_used is
 * nonzero, so index - 1 is valid.
*/
index--;
num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
trace_ocfs2_truncate_log_append(
(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
index, le32_to_cpu(tl->tl_recs[index].t_start),
num_clusters);
} else {
tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
tl->tl_used = cpu_to_le16(index + 1);
}
tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
ocfs2_journal_dirty(handle, tl_bh);
osb->truncated_clusters += num_clusters;
bail:
return status;
}
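/*
 * Walk the truncate log from tail to head, freeing each record's
 * clusters back to the global bitmap. Each record is dropped from the
 * log and its clusters are freed within a single transaction, so the
 * log always stays consistent with the bitmap.
 */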
static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
struct inode *data_alloc_inode,
struct buffer_head *data_alloc_bh)
{
int status = 0;
int i;
unsigned int num_clusters;
u64 start_blk;
struct ocfs2_truncate_rec rec;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
struct inode *tl_inode = osb->osb_tl_inode;
struct buffer_head *tl_bh = osb->osb_tl_bh;
handle_t *handle;
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
i = le16_to_cpu(tl->tl_used) - 1;
while (i >= 0) {
handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail;
}
/* Caller has given us at least enough credits to
* update the truncate log dinode */
status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
ocfs2_commit_trans(osb, handle);
mlog_errno(status);
goto bail;
}
tl->tl_used = cpu_to_le16(i);
ocfs2_journal_dirty(handle, tl_bh);
rec = tl->tl_recs[i];
start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
le32_to_cpu(rec.t_start));
num_clusters = le32_to_cpu(rec.t_clusters);
/* if start_blk is not set, we ignore the record as
* invalid. */
if (start_blk) {
trace_ocfs2_replay_truncate_records(
(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
i, le32_to_cpu(rec.t_start), num_clusters);
status = ocfs2_free_clusters(handle, data_alloc_inode,
data_alloc_bh, start_blk,
num_clusters);
if (status < 0) {
ocfs2_commit_trans(osb, handle);
mlog_errno(status);
goto bail;
}
}
ocfs2_commit_trans(osb, handle);
i--;
}
osb->truncated_clusters = 0;
bail:
return status;
}
/* Expects you to already be holding tl_inode->i_rwsem */
int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
int status;
unsigned int num_to_flush;
struct inode *tl_inode = osb->osb_tl_inode;
struct inode *data_alloc_inode = NULL;
struct buffer_head *tl_bh = osb->osb_tl_bh;
struct buffer_head *data_alloc_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
struct ocfs2_journal *journal = osb->journal;
BUG_ON(inode_trylock(tl_inode));
di = (struct ocfs2_dinode *) tl_bh->b_data;
/* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated
* by the underlying call to ocfs2_read_inode_block(), so any
* corruption is a code bug */
BUG_ON(!OCFS2_IS_VALID_DINODE(di));
tl = &di->id2.i_dealloc;
num_to_flush = le16_to_cpu(tl->tl_used);
trace_ocfs2_flush_truncate_log(
(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
num_to_flush);
if (!num_to_flush) {
status = 0;
goto out;
}
/* Appending to the truncate log (TA) and flushing the truncate log
 * (TF) are two separate transactions. Both can be committed but not
 * yet checkpointed. If a crash occurs then, both transactions will be
 * replayed even though several clusters were already released to the
 * global bitmap; replaying the truncate log then frees those clusters
 * a second time.
 */
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
if (status < 0) {
mlog_errno(status);
goto out;
}
data_alloc_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!data_alloc_inode) {
status = -EINVAL;
mlog(ML_ERROR, "Could not get bitmap inode!\n");
goto out;
}
inode_lock(data_alloc_inode);
status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1);
if (status < 0) {
mlog_errno(status);
goto out_mutex;
}
status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
data_alloc_bh);
if (status < 0)
mlog_errno(status);
brelse(data_alloc_bh);
ocfs2_inode_unlock(data_alloc_inode, 1);
out_mutex:
inode_unlock(data_alloc_inode);
iput(data_alloc_inode);
out:
return status;
}
int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
int status;
struct inode *tl_inode = osb->osb_tl_inode;
inode_lock(tl_inode);
status = __ocfs2_flush_truncate_log(osb);
inode_unlock(tl_inode);
return status;
}
static void ocfs2_truncate_log_worker(struct work_struct *work)
{
int status;
struct ocfs2_super *osb =
container_of(work, struct ocfs2_super,
osb_truncate_log_wq.work);
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
mlog_errno(status);
else
ocfs2_init_steal_slots(osb);
}
#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
int cancel)
{
if (osb->osb_tl_inode &&
atomic_read(&osb->osb_tl_disable) == 0) {
/* We want to push off log flushes while truncates are
* still running. */
if (cancel)
cancel_delayed_work(&osb->osb_truncate_log_wq);
queue_delayed_work(osb->ocfs2_wq, &osb->osb_truncate_log_wq,
OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
}
}
/*
 * Try to flush the truncate log if we can free enough clusters from it.
 * As for the return value, "< 0" means error, "0" means no space was
 * freed, and "1" means we have freed enough space for the caller to try
 * the allocation again.
*/
int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
unsigned int needed)
{
tid_t target;
int ret = 0;
unsigned int truncated_clusters;
inode_lock(osb->osb_tl_inode);
truncated_clusters = osb->truncated_clusters;
inode_unlock(osb->osb_tl_inode);
/*
* Check whether we can succeed in allocating if we free
* the truncate log.
*/
if (truncated_clusters < needed)
goto out;
ret = ocfs2_flush_truncate_log(osb);
if (ret) {
mlog_errno(ret);
goto out;
}
if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
jbd2_log_wait_commit(osb->journal->j_journal, target);
ret = 1;
}
out:
return ret;
}
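/*
 * Look up the truncate log system inode for @slot_num and read its
 * dinode block. On success, the caller owns a reference on the inode
 * and the buffer head.
 */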
static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
int slot_num,
struct inode **tl_inode,
struct buffer_head **tl_bh)
{
int status;
struct inode *inode = NULL;
struct buffer_head *bh = NULL;
inode = ocfs2_get_system_file_inode(osb,
TRUNCATE_LOG_SYSTEM_INODE,
slot_num);
if (!inode) {
status = -EINVAL;
mlog(ML_ERROR, "Could not get load truncate log inode!\n");
goto bail;
}
status = ocfs2_read_inode_block(inode, &bh);
if (status < 0) {
iput(inode);
mlog_errno(status);
goto bail;
}
*tl_inode = inode;
*tl_bh = bh;
bail:
return status;
}
/* Called during the 1st stage of node recovery. We stamp a clean
 * truncate log and pass back a copy for processing later. If the
 * truncate log does not require processing, *tl_copy is set to
 * NULL. */
int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
int slot_num,
struct ocfs2_dinode **tl_copy)
{
int status;
struct inode *tl_inode = NULL;
struct buffer_head *tl_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
*tl_copy = NULL;
trace_ocfs2_begin_truncate_log_recovery(slot_num);
status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
di = (struct ocfs2_dinode *) tl_bh->b_data;
/* tl_bh is loaded from ocfs2_get_truncate_log_info(). It's
* validated by the underlying call to ocfs2_read_inode_block(),
* so any corruption is a code bug */
BUG_ON(!OCFS2_IS_VALID_DINODE(di));
tl = &di->id2.i_dealloc;
if (le16_to_cpu(tl->tl_used)) {
trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
/*
* Assuming the write-out below goes well, this copy will be
* passed back to recovery for processing.
*/
*tl_copy = kmemdup(tl_bh->b_data, tl_bh->b_size, GFP_KERNEL);
if (!(*tl_copy)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
/* All we need to do to clear the truncate log is set
* tl_used. */
tl->tl_used = 0;
ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check);
status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode));
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
bail:
iput(tl_inode);
brelse(tl_bh);
if (status < 0) {
kfree(*tl_copy);
*tl_copy = NULL;
mlog_errno(status);
}
return status;
}
int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
struct ocfs2_dinode *tl_copy)
{
int status = 0;
int i;
unsigned int clusters, num_recs, start_cluster;
u64 start_blk;
handle_t *handle;
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_truncate_log *tl;
if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
return -EINVAL;
}
tl = &tl_copy->id2.i_dealloc;
num_recs = le16_to_cpu(tl->tl_used);
trace_ocfs2_complete_truncate_log_recovery(
(unsigned long long)le64_to_cpu(tl_copy->i_blkno),
num_recs);
inode_lock(tl_inode);
for (i = 0; i < num_recs; i++) {
if (ocfs2_truncate_log_needs_flush(osb)) {
status = __ocfs2_flush_truncate_log(osb);
if (status < 0) {
mlog_errno(status);
goto bail_up;
}
}
handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail_up;
}
clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);
status = ocfs2_truncate_log_append(osb, handle,
start_blk, clusters);
ocfs2_commit_trans(osb, handle);
if (status < 0) {
mlog_errno(status);
goto bail_up;
}
}
bail_up:
inode_unlock(tl_inode);
return status;
}
void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
{
int status;
struct inode *tl_inode = osb->osb_tl_inode;
atomic_set(&osb->osb_tl_disable, 1);
if (tl_inode) {
cancel_delayed_work(&osb->osb_truncate_log_wq);
flush_workqueue(osb->ocfs2_wq);
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
mlog_errno(status);
brelse(osb->osb_tl_bh);
iput(osb->osb_tl_inode);
}
}
int ocfs2_truncate_log_init(struct ocfs2_super *osb)
{
int status;
struct inode *tl_inode = NULL;
struct buffer_head *tl_bh = NULL;
status = ocfs2_get_truncate_log_info(osb,
osb->slot_num,
&tl_inode,
&tl_bh);
if (status < 0)
mlog_errno(status);
/* ocfs2_truncate_log_shutdown keys on the existence of
* osb->osb_tl_inode so we don't set any of the osb variables
* until we're sure all is well. */
INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
ocfs2_truncate_log_worker);
atomic_set(&osb->osb_tl_disable, 0);
osb->osb_tl_bh = tl_bh;
osb->osb_tl_inode = tl_inode;
return status;
}
/*
* Delayed de-allocation of suballocator blocks.
*
* Some sets of block de-allocations might involve multiple suballocator inodes.
*
* The locking for this can get extremely complicated, especially when
* the suballocator inodes to delete from aren't known until deep
* within an unrelated codepath.
*
* ocfs2_extent_block structures are a good example of this - an inode
* btree could have been grown by any number of nodes each allocating
* out of their own suballoc inode.
*
* These structures allow the delay of block de-allocation until a
* later time, when locking of multiple cluster inodes won't cause
* deadlock.
*/
/*
* Describe a single bit freed from a suballocator. For the block
* suballocators, it represents one block. For the global cluster
 * allocator, it represents a range of clusters, and free_bit indicates
 * the number of clusters.
*/
struct ocfs2_cached_block_free {
struct ocfs2_cached_block_free *free_next;
u64 free_bg;
u64 free_blk;
unsigned int free_bit;
};
struct ocfs2_per_slot_free_list {
struct ocfs2_per_slot_free_list *f_next_suballocator;
int f_inode_type;
int f_slot;
struct ocfs2_cached_block_free *f_first;
};
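/*
 * Free a list of cached suballocator bits against the given system
 * file inode. The inode is locked once, then each bit is freed in its
 * own transaction. List items are freed as we go; anything left on the
 * list after an early exit is freed without being processed.
 */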
static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
int sysfile_type,
int slot,
struct ocfs2_cached_block_free *head)
{
int ret;
u64 bg_blkno;
handle_t *handle;
struct inode *inode;
struct buffer_head *di_bh = NULL;
struct ocfs2_cached_block_free *tmp;
inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot);
if (!inode) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
}
inode_lock(inode);
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
goto out_mutex;
}
while (head) {
if (head->free_bg)
bg_blkno = head->free_bg;
else
bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
head->free_bit);
handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock;
}
trace_ocfs2_free_cached_blocks(
(unsigned long long)head->free_blk, head->free_bit);
ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
head->free_bit, bg_blkno, 1);
if (ret)
mlog_errno(ret);
ocfs2_commit_trans(osb, handle);
tmp = head;
head = head->free_next;
kfree(tmp);
}
out_unlock:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out_mutex:
inode_unlock(inode);
iput(inode);
out:
while (head) {
/* Premature exit may have left some dangling items. */
tmp = head;
head = head->free_next;
kfree(tmp);
}
return ret;
}
int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
u64 blkno, unsigned int bit)
{
int ret = 0;
struct ocfs2_cached_block_free *item;
item = kzalloc(sizeof(*item), GFP_NOFS);
if (item == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit);
item->free_blk = blkno;
item->free_bit = bit;
item->free_next = ctxt->c_global_allocator;
ctxt->c_global_allocator = item;
return ret;
}
static int ocfs2_free_cached_clusters(struct ocfs2_super *osb,
struct ocfs2_cached_block_free *head)
{
struct ocfs2_cached_block_free *tmp;
struct inode *tl_inode = osb->osb_tl_inode;
handle_t *handle;
int ret = 0;
inode_lock(tl_inode);
while (head) {
if (ocfs2_truncate_log_needs_flush(osb)) {
ret = __ocfs2_flush_truncate_log(osb);
if (ret < 0) {
mlog_errno(ret);
break;
}
}
handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
break;
}
ret = ocfs2_truncate_log_append(osb, handle, head->free_blk,
head->free_bit);
ocfs2_commit_trans(osb, handle);
tmp = head;
head = head->free_next;
kfree(tmp);
if (ret < 0) {
mlog_errno(ret);
break;
}
}
inode_unlock(tl_inode);
while (head) {
/* Premature exit may have left some dangling items. */
tmp = head;
head = head->free_next;
kfree(tmp);
}
return ret;
}
int ocfs2_run_deallocs(struct ocfs2_super *osb,
struct ocfs2_cached_dealloc_ctxt *ctxt)
{
int ret = 0, ret2;
struct ocfs2_per_slot_free_list *fl;
if (!ctxt)
return 0;
while (ctxt->c_first_suballocator) {
fl = ctxt->c_first_suballocator;
if (fl->f_first) {
trace_ocfs2_run_deallocs(fl->f_inode_type,
fl->f_slot);
ret2 = ocfs2_free_cached_blocks(osb,
fl->f_inode_type,
fl->f_slot,
fl->f_first);
if (ret2)
mlog_errno(ret2);
if (!ret)
ret = ret2;
}
ctxt->c_first_suballocator = fl->f_next_suballocator;
kfree(fl);
}
if (ctxt->c_global_allocator) {
ret2 = ocfs2_free_cached_clusters(osb,
ctxt->c_global_allocator);
if (ret2)
mlog_errno(ret2);
if (!ret)
ret = ret2;
ctxt->c_global_allocator = NULL;
}
return ret;
}
static struct ocfs2_per_slot_free_list *
ocfs2_find_per_slot_free_list(int type,
int slot,
struct ocfs2_cached_dealloc_ctxt *ctxt)
{
struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
while (fl) {
if (fl->f_inode_type == type && fl->f_slot == slot)
return fl;
fl = fl->f_next_suballocator;
}
fl = kmalloc(sizeof(*fl), GFP_NOFS);
if (fl) {
fl->f_inode_type = type;
fl->f_slot = slot;
fl->f_first = NULL;
fl->f_next_suballocator = ctxt->c_first_suballocator;
ctxt->c_first_suballocator = fl;
}
return fl;
}
static struct ocfs2_per_slot_free_list *
ocfs2_find_preferred_free_list(int type,
int preferred_slot,
int *real_slot,
struct ocfs2_cached_dealloc_ctxt *ctxt)
{
struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
while (fl) {
if (fl->f_inode_type == type && fl->f_slot == preferred_slot) {
*real_slot = fl->f_slot;
return fl;
}
fl = fl->f_next_suballocator;
}
/* If we can't find a free list matching the preferred slot, just use
 * the first one.
*/
fl = ctxt->c_first_suballocator;
*real_slot = fl->f_slot;
return fl;
}
/* Return value 1 indicates the dealloc context is empty. */
static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et)
{
struct ocfs2_per_slot_free_list *fl = NULL;
if (!et->et_dealloc)
return 1;
fl = et->et_dealloc->c_first_suballocator;
if (!fl)
return 1;
if (!fl->f_first)
return 1;
return 0;
}
/* If extents were deleted from the tree due to extent rotation and merging,
 * and no metadata was reserved ahead of time, try to reuse some of the
 * extents just deleted. This is only used to reuse extent blocks.
 * It is supposed to find enough extent blocks in dealloc if our estimate
 * of the metadata needed is accurate.
*/
static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
struct ocfs2_extent_tree *et,
struct buffer_head **new_eb_bh,
int blk_wanted, int *blk_given)
{
int i, status = 0, real_slot;
struct ocfs2_cached_dealloc_ctxt *dealloc;
struct ocfs2_per_slot_free_list *fl;
struct ocfs2_cached_block_free *bf;
struct ocfs2_extent_block *eb;
struct ocfs2_super *osb =
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
*blk_given = 0;
/* If the extent tree doesn't have a dealloc context, this is not a
 * fault. Just tell the caller that dealloc can't provide any blocks
 * and that it should ask the allocator to claim more space.
*/
dealloc = et->et_dealloc;
if (!dealloc)
goto bail;
for (i = 0; i < blk_wanted; i++) {
/* Prefer to use local slot */
fl = ocfs2_find_preferred_free_list(EXTENT_ALLOC_SYSTEM_INODE,
osb->slot_num, &real_slot,
dealloc);
/* If no more blocks can be reused, we should claim more
 * from the allocator. Just return here normally.
*/
if (!fl) {
status = 0;
break;
}
bf = fl->f_first;
fl->f_first = bf->free_next;
new_eb_bh[i] = sb_getblk(osb->sb, bf->free_blk);
if (new_eb_bh[i] == NULL) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
mlog(0, "Reusing block(%llu) from "
"dealloc(local slot:%d, real slot:%d)\n",
bf->free_blk, osb->slot_num, real_slot);
ocfs2_set_new_buffer_uptodate(et->et_ci, new_eb_bh[i]);
status = ocfs2_journal_access_eb(handle, et->et_ci,
new_eb_bh[i],
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(new_eb_bh[i]->b_data, 0, osb->sb->s_blocksize);
eb = (struct ocfs2_extent_block *) new_eb_bh[i]->b_data;
/* We can't guarantee that the buffer head is still cached, so
 * populate the extent block again.
*/
strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(bf->free_blk);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot = cpu_to_le16(real_slot);
eb->h_suballoc_loc = cpu_to_le64(bf->free_bg);
eb->h_suballoc_bit = cpu_to_le16(bf->free_bit);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
/* We'll also be dirtied by the caller, so
* this isn't absolutely necessary.
*/
ocfs2_journal_dirty(handle, new_eb_bh[i]);
if (!fl->f_first) {
dealloc->c_first_suballocator = fl->f_next_suballocator;
kfree(fl);
}
kfree(bf);
}
*blk_given = i;
bail:
if (unlikely(status < 0)) {
for (i = 0; i < blk_wanted; i++)
brelse(new_eb_bh[i]);
}
return status;
}
int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
int type, int slot, u64 suballoc,
u64 blkno, unsigned int bit)
{
int ret;
struct ocfs2_per_slot_free_list *fl;
struct ocfs2_cached_block_free *item;
fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
if (fl == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
item = kzalloc(sizeof(*item), GFP_NOFS);
if (item == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
trace_ocfs2_cache_block_dealloc(type, slot,
(unsigned long long)suballoc,
(unsigned long long)blkno, bit);
item->free_bg = suballoc;
item->free_blk = blkno;
item->free_bit = bit;
item->free_next = fl->f_first;
fl->f_first = item;
ret = 0;
out:
return ret;
}
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
struct ocfs2_extent_block *eb)
{
return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(eb->h_suballoc_slot),
le64_to_cpu(eb->h_suballoc_loc),
le64_to_cpu(eb->h_blkno),
le16_to_cpu(eb->h_suballoc_bit));
}
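/* walk_page_buffers() callback - mark a zeroed buffer uptodate and dirty. */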
static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
{
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
return 0;
}
void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
unsigned int from, unsigned int to,
struct page *page, int zero, u64 *phys)
{
int ret, partial = 0;
loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
loff_t length = to - from;
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
if (ret)
mlog_errno(ret);
if (zero)
zero_user_segment(page, from, to);
/*
 * Need to mark the buffers we zero'd uptodate
* here if they aren't - ocfs2_map_page_blocks()
* might've skipped some
*/
ret = walk_page_buffers(handle, page_buffers(page),
from, to, &partial,
ocfs2_zero_func);
if (ret < 0)
mlog_errno(ret);
else if (ocfs2_should_order_data(inode)) {
ret = ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
if (ret < 0)
mlog_errno(ret);
}
if (!partial)
SetPageUptodate(page);
flush_dcache_page(page);
}
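/*
 * Zero the byte range [start, end) across the given pages, mapping
 * their buffers to the physical cluster at @phys and dirtying them
 * through the journal. The pages are unlocked and released when done.
 */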
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
loff_t end, struct page **pages,
int numpages, u64 phys, handle_t *handle)
{
int i;
struct page *page;
unsigned int from, to = PAGE_SIZE;
struct super_block *sb = inode->i_sb;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
if (numpages == 0)
goto out;
for (i = 0; i < numpages; i++) {
page = pages[i];
from = start & (PAGE_SIZE - 1);
if ((end >> PAGE_SHIFT) == page->index)
to = end & (PAGE_SIZE - 1);
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
&phys);
start = (page->index + 1) << PAGE_SHIFT;
}
out:
if (pages)
ocfs2_unlock_and_free_pages(pages, numpages);
}
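/*
 * Grab and lock the pages covering the byte range [start, end),
 * creating them if necessary. Page pointers are returned in @pages and
 * the count in @num; on error, everything grabbed so far is unlocked
 * and released.
 */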
int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
struct page **pages, int *num)
{
int numpages, ret = 0;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_SHIFT;
do {
pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
if (!pages[numpages]) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
numpages++;
index++;
} while (index < (last_page_bytes >> PAGE_SHIFT));
out:
if (ret != 0) {
if (pages)
ocfs2_unlock_and_free_pages(pages, numpages);
numpages = 0;
}
*num = numpages;
return ret;
}
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
struct page **pages, int *num)
{
struct super_block *sb = inode->i_sb;
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
return ocfs2_grab_pages(inode, start, end, pages, num);
}
/*
* Zero partial cluster for a hole punch or truncate. This avoids exposing
* nonzero data on subsequent file extends.
*
* We need to call this before i_size is updated on the inode because
* otherwise block_write_full_page() will skip writeout of pages past
* i_size.
*/
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
int ret = 0, numpages;
struct page **pages = NULL;
u64 phys;
unsigned int ext_flags;
struct super_block *sb = inode->i_sb;
/*
* File systems which don't support sparse files zero on every
* extend.
*/
if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
return 0;
/*
* Avoid zeroing pages fully beyond current i_size. It is pointless as
* underlying blocks of those pages should already be zeroed out and
* page writeback will skip them anyway.
*/
range_end = min_t(u64, range_end, i_size_read(inode));
if (range_start >= range_end)
return 0;
pages = kcalloc(ocfs2_pages_per_cluster(sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_extent_map_get_blocks(inode,
range_start >> sb->s_blocksize_bits,
&phys, NULL, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Tail is a hole, or is marked unwritten. In either case, we
* can count on read and write to return/push zeros.
*/
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
&numpages);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
numpages, phys, handle);
/*
* Initiate writeout of the pages we zeroed here. We don't
* wait on them - the truncate_inode_pages() call later will
* do that for us.
*/
ret = filemap_fdatawrite_range(inode->i_mapping, range_start,
range_end - 1);
if (ret)
mlog_errno(ret);
out:
kfree(pages);
return ret;
}
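/*
* Zero the id2 union of a dinode, being careful not to touch an
* inline extended attribute area stored at the end of the block.
*/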
static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode,
struct ocfs2_dinode *di)
{
unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size);
if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL)
memset(&di->id2, 0, blocksize -
offsetof(struct ocfs2_dinode, id2) -
xattrsize);
else
memset(&di->id2, 0, blocksize -
offsetof(struct ocfs2_dinode, id2));
}
void ocfs2_dinode_new_extent_list(struct inode *inode,
struct ocfs2_dinode *di)
{
ocfs2_zero_dinode_id2_with_xattr(inode, di);
di->id2.i_list.l_tree_depth = 0;
di->id2.i_list.l_next_free_rec = 0;
di->id2.i_list.l_count = cpu_to_le16(
ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di));
}
void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_inline_data *idata = &di->id2.i_data;
spin_lock(&oi->ip_lock);
oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
/*
* We clear the entire i_data structure here so that all
* fields can be properly initialized.
*/
ocfs2_zero_dinode_id2_with_xattr(inode, di);
idata->id_count = cpu_to_le16(
ocfs2_max_inline_data_with_xattr(inode->i_sb, di));
}
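/*
* Convert an inline-data inode to the extent list format: reserve a
* cluster if the inode has data, copy the inline bytes into the first
* page, clear OCFS2_INLINE_DATA_FL, install an empty extent list in
* the dinode and insert the newly claimed cluster as the first extent.
*/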
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, has_data, num_pages = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
u64 block;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
struct page *page = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
has_data = i_size_read(inode) ? 1 : 0;
if (has_data) {
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
handle = ocfs2_start_trans(osb,
ocfs2_inline_to_extents_credits(osb->sb));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
if (has_data) {
unsigned int page_end = min_t(unsigned, PAGE_SIZE,
osb->s_clustersize);
u64 phys;
ret = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (ret)
goto out_commit;
did_quota = 1;
data_ac->ac_resv = &oi->ip_la_data_resv;
ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
&num);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* Save two copies, one for insert, and one that can
* be changed by ocfs2_map_and_dirty_page() below.
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
&num_pages);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_commit;
}
/*
* This should populate the 1st page for us and mark
* it up to date.
*/
ret = ocfs2_read_inline_data(inode, page, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
&phys);
}
spin_lock(&oi->ip_lock);
oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_dinode_new_extent_list(inode, di);
ocfs2_journal_dirty(handle, di_bh);
if (has_data) {
/*
* An error at this point should be extremely rare. If
* this proves to be false, we could always re-build
* the in-inode data from our pages.
*/
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
}
out_unlock:
if (page)
ocfs2_unlock_and_free_pages(&page, num_pages);
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (need_free) {
if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
ocfs2_free_local_alloc_bits(osb, handle, data_ac,
bit_off, num);
else
ocfs2_free_clusters(handle,
data_ac->ac_inode,
data_ac->ac_bh,
ocfs2_clusters_to_blocks(osb->sb, bit_off),
num);
}
ocfs2_commit_trans(osb, handle);
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
return ret;
}
/*
* It is expected that, by the time you call this function,
* inode->i_size and fe->i_size have been adjusted.
*/
int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *di_bh)
{
int status = 0, i, flags = 0;
u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff;
u64 blkno = 0;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
struct ocfs2_path *path = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_extent_list *root_el = &(di->id2.i_list);
u64 refcount_loc = le64_to_cpu(di->i_refcount_loc);
struct ocfs2_extent_tree et;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_refcount_tree *ref_tree = NULL;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
i_size_read(inode));
path = ocfs2_new_path(di_bh, &di->id2.i_list,
ocfs2_journal_access_di);
if (!path) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_extent_map_trunc(inode, new_highest_cpos);
start:
/*
* Check that we still have allocation to delete.
*/
if (OCFS2_I(inode)->ip_clusters == 0) {
status = 0;
goto bail;
}
/*
* Truncate always works against the rightmost tree branch.
*/
status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX);
if (status) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_commit_truncate(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
new_highest_cpos,
OCFS2_I(inode)->ip_clusters,
path->p_tree_depth);
/*
* By now, el will point to the extent list on the bottom most
* portion of this tree. Only the tail record is considered in
* each pass.
*
* We handle the following cases, in order:
* - empty extent: delete the remaining branch
* - remove the entire record
* - remove a partial record
* - no record needs to be removed (truncate has completed)
*/
el = path_leaf_el(path);
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ocfs2_error(inode->i_sb,
"Inode %llu has empty extent block at %llu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)path_leaf_bh(path)->b_blocknr);
status = -EROFS;
goto bail;
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
rec = &el->l_recs[i];
flags = rec->e_flags;
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
if (i == 0 && ocfs2_is_empty_extent(rec)) {
/*
* Lower levels depend on this never happening, but it's best
* to check it up here before changing the tree.
*/
if (root_el->l_tree_depth && rec->e_int_clusters == 0) {
mlog(ML_ERROR, "Inode %lu has an empty "
"extent record, depth %u\n", inode->i_ino,
le16_to_cpu(root_el->l_tree_depth));
status = ocfs2_remove_rightmost_empty_extent(osb,
&et, path, &dealloc);
if (status) {
mlog_errno(status);
goto bail;
}
ocfs2_reinit_path(path, 1);
goto start;
} else {
trunc_cpos = le32_to_cpu(rec->e_cpos);
trunc_len = 0;
blkno = 0;
}
} else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) {
/*
* Truncate entire record.
*/
trunc_cpos = le32_to_cpu(rec->e_cpos);
trunc_len = ocfs2_rec_clusters(el, rec);
blkno = le64_to_cpu(rec->e_blkno);
} else if (range > new_highest_cpos) {
/*
* Partial truncate. It should also be
* the last truncate we do.
*/
trunc_cpos = new_highest_cpos;
trunc_len = range - new_highest_cpos;
coff = new_highest_cpos - le32_to_cpu(rec->e_cpos);
blkno = le64_to_cpu(rec->e_blkno) +
ocfs2_clusters_to_blocks(inode->i_sb, coff);
} else {
/*
* Truncate completed, leave happily.
*/
status = 0;
goto bail;
}
phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
if ((flags & OCFS2_EXT_REFCOUNTED) && trunc_len && !ref_tree) {
status = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
&ref_tree, NULL);
if (status) {
mlog_errno(status);
goto bail;
}
}
status = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags, &dealloc,
refcount_loc, true);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_reinit_path(path, 1);
/*
* The check above will catch the case where we've truncated
* away all allocation.
*/
goto start;
bail:
if (ref_tree)
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
ocfs2_free_path(path);
return status;
}
/*
* 'start' is inclusive, 'end' is not.
*/
int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
unsigned int start, unsigned int end, int trunc)
{
int ret;
unsigned int numbytes;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inline_data *idata = &di->id2.i_data;
/* No need to punch hole beyond i_size. */
if (start >= i_size_read(inode))
return 0;
if (end > i_size_read(inode))
end = i_size_read(inode);
BUG_ON(start > end);
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
!ocfs2_supports_inline_data(osb)) {
ocfs2_error(inode->i_sb,
"Inline data flags for inode %llu don't agree! Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
le16_to_cpu(di->i_dyn_features),
OCFS2_I(inode)->ip_dyn_features,
osb->s_feature_incompat);
ret = -EROFS;
goto out;
}
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
numbytes = end - start;
memset(idata->id_data + start, 0, numbytes);
/*
* No need to worry about the data page here - it's been
* truncated already and inline data doesn't need it for
* pushing zeros to disk, so we'll let read_folio pick it up
* later.
*/
if (trunc) {
i_size_write(inode, start);
di->i_size = cpu_to_le64(start);
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_mtime = inode_set_ctime_current(inode);
di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
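/*
* Issue a discard for 'count' clusters of group 'group', starting at
* cluster bit 'start' within the group.
*/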
static int ocfs2_trim_extent(struct super_block *sb,
struct ocfs2_group_desc *gd,
u64 group, u32 start, u32 count)
{
u64 discard, bcount;
struct ocfs2_super *osb = OCFS2_SB(sb);
bcount = ocfs2_clusters_to_blocks(sb, count);
discard = ocfs2_clusters_to_blocks(sb, start);
/*
* For the first cluster group, the gd->bg_blkno is not at the start
* of the group, but at an offset from the start. If we add it while
* calculating discard for first group, we will wrongly start fstrim a
* few blocks after the desired start block and the range can cross
* over into the next cluster group. So, add it only if this is not
* the first cluster group.
*/
if (group != osb->first_cluster_group_blkno)
discard += le64_to_cpu(gd->bg_blkno);
trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0);
}
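/*
* Walk a cluster group's bitmap between 'start' and 'max' and discard
* every run of free bits at least 'minbits' long. Returns the number
* of clusters trimmed, or a negative errno (-ERESTARTSYS if a fatal
* signal interrupted us).
*/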
static int ocfs2_trim_group(struct super_block *sb,
struct ocfs2_group_desc *gd, u64 group,
u32 start, u32 max, u32 minbits)
{
int ret = 0, count = 0, next;
void *bitmap = gd->bg_bitmap;
if (le16_to_cpu(gd->bg_free_bits_count) < minbits)
return 0;
trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno),
start, max, minbits);
while (start < max) {
start = ocfs2_find_next_zero_bit(bitmap, max, start);
if (start >= max)
break;
next = ocfs2_find_next_bit(bitmap, max, start);
if ((next - start) >= minbits) {
ret = ocfs2_trim_extent(sb, gd, group,
start, next - start);
if (ret < 0) {
mlog_errno(ret);
break;
}
count += next - start;
}
start = next + 1;
if (fatal_signal_pending(current)) {
count = -ERESTARTSYS;
break;
}
if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits)
break;
}
if (ret < 0)
count = ret;
return count;
}
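/*
* Trim the free space tracked by the global bitmap. Only one cluster
* group is trimmed per lock cycle: the main_bm locks are dropped and
* re-taken between groups so that other I/O is not starved for the
* whole duration of the trim.
*/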
static
int ocfs2_trim_mainbm(struct super_block *sb, struct fstrim_range *range)
{
struct ocfs2_super *osb = OCFS2_SB(sb);
u64 start, len, trimmed = 0, first_group, last_group = 0, group = 0;
int ret, cnt;
u32 first_bit, last_bit, minlen;
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode = NULL;
struct buffer_head *gd_bh = NULL;
struct ocfs2_dinode *main_bm;
struct ocfs2_group_desc *gd = NULL;
start = range->start >> osb->s_clustersize_bits;
len = range->len >> osb->s_clustersize_bits;
minlen = range->minlen >> osb->s_clustersize_bits;
if (minlen >= osb->bitmap_cpg || range->len < sb->s_blocksize)
return -EINVAL;
trace_ocfs2_trim_mainbm(start, len, minlen);
next_group:
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
ret = -EIO;
mlog_errno(ret);
goto out;
}
inode_lock(main_bm_inode);
ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0);
if (ret < 0) {
mlog_errno(ret);
goto out_mutex;
}
main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data;
/*
* Do some checks before trimming the first group.
*/
if (!group) {
if (start >= le32_to_cpu(main_bm->i_clusters)) {
ret = -EINVAL;
goto out_unlock;
}
if (start + len > le32_to_cpu(main_bm->i_clusters))
len = le32_to_cpu(main_bm->i_clusters) - start;
/*
* Determine first and last group to examine based on
* start and len
*/
first_group = ocfs2_which_cluster_group(main_bm_inode, start);
if (first_group == osb->first_cluster_group_blkno)
first_bit = start;
else
first_bit = start - ocfs2_blocks_to_clusters(sb,
first_group);
last_group = ocfs2_which_cluster_group(main_bm_inode,
start + len - 1);
group = first_group;
}
do {
if (first_bit + len >= osb->bitmap_cpg)
last_bit = osb->bitmap_cpg;
else
last_bit = first_bit + len;
ret = ocfs2_read_group_descriptor(main_bm_inode,
main_bm, group,
&gd_bh);
if (ret < 0) {
mlog_errno(ret);
break;
}
gd = (struct ocfs2_group_desc *)gd_bh->b_data;
cnt = ocfs2_trim_group(sb, gd, group,
first_bit, last_bit, minlen);
brelse(gd_bh);
gd_bh = NULL;
if (cnt < 0) {
ret = cnt;
mlog_errno(ret);
break;
}
trimmed += cnt;
len -= osb->bitmap_cpg - first_bit;
first_bit = 0;
if (group == osb->first_cluster_group_blkno)
group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
else
group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
} while (0);
out_unlock:
ocfs2_inode_unlock(main_bm_inode, 0);
brelse(main_bm_bh);
main_bm_bh = NULL;
out_mutex:
inode_unlock(main_bm_inode);
iput(main_bm_inode);
/*
* If we are not done trimming all the groups and no error has
* occurred, go on to trim the next group. The main_bm locks were
* released above so that concurrent I/O is not starved while we trim.
*/
if (ret >= 0 && group <= last_group) {
cond_resched();
goto next_group;
}
out:
range->len = trimmed * sb->s_blocksize;
return ret;
}
int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(sb);
struct ocfs2_trim_fs_info info, *pinfo = NULL;
ocfs2_trim_fs_lock_res_init(osb);
trace_ocfs2_trim_fs(range->start, range->len, range->minlen);
ret = ocfs2_trim_fs_lock(osb, NULL, 1);
if (ret < 0) {
if (ret != -EAGAIN) {
mlog_errno(ret);
ocfs2_trim_fs_lock_res_uninit(osb);
return ret;
}
mlog(ML_NOTICE, "Wait for trim on device (%s) to "
"finish, which is running from another node.\n",
osb->dev_str);
ret = ocfs2_trim_fs_lock(osb, &info, 0);
if (ret < 0) {
mlog_errno(ret);
ocfs2_trim_fs_lock_res_uninit(osb);
return ret;
}
if (info.tf_valid && info.tf_success &&
info.tf_start == range->start &&
info.tf_len == range->len &&
info.tf_minlen == range->minlen) {
/* Avoid sending duplicated trim to a shared device */
mlog(ML_NOTICE, "The same trim on device (%s) was "
"just done from node (%u), return.\n",
osb->dev_str, info.tf_nodenum);
range->len = info.tf_trimlen;
goto out;
}
}
info.tf_nodenum = osb->node_num;
info.tf_start = range->start;
info.tf_len = range->len;
info.tf_minlen = range->minlen;
ret = ocfs2_trim_mainbm(sb, range);
info.tf_trimlen = range->len;
info.tf_success = (ret < 0 ? 0 : 1);
pinfo = &info;
out:
ocfs2_trim_fs_unlock(osb, pinfo);
ocfs2_trim_fs_lock_res_uninit(osb);
return ret;
}
| linux-master | fs/ocfs2/alloc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dir.c
*
* Creates, reads, walks and deletes directory-nodes
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* Portions of this code from linux/fs/ext3/dir.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card ([email protected])
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/dir.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/sort.h>
#include <linux/iversion.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
static int ocfs2_do_extend_dir(struct super_block *sb,
handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **new_bh);
static int ocfs2_dir_indexed(struct inode *inode);
/*
* These are distinct checks because future versions of the file system will
* want to have a trailing dirent structure independent of indexing.
*/
static int ocfs2_supports_dir_trailer(struct inode *dir)
{
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
}
/*
* "new' here refers to the point at which we're creating a new
* directory via "mkdir()", but also when we're expanding an inline
* directory. In either case, we don't yet have the indexing bit set
* on the directory, so the standard checks will fail in when metaecc
* is turned off. Only directory-initialization type functions should
* use this then. Everything else wants ocfs2_supports_dir_trailer()
*/
static int ocfs2_new_dir_wants_trailer(struct inode *dir)
{
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
return ocfs2_meta_ecc(osb) ||
ocfs2_supports_indexed_dirs(osb);
}
static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
{
return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
}
#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
* them more consistent? */
struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
void *data)
{
char *p = data;
p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
return (struct ocfs2_dir_block_trailer *)p;
}
/*
* XXX: This is executed once on every dirent. We should consider optimizing
* it.
*/
static int ocfs2_skip_dir_trailer(struct inode *dir,
struct ocfs2_dir_entry *de,
unsigned long offset,
unsigned long blklen)
{
unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
if (!ocfs2_supports_dir_trailer(dir))
return 0;
if (offset != toff)
return 0;
return 1;
}
static void ocfs2_init_dir_trailer(struct inode *inode,
struct buffer_head *bh, u16 rec_len)
{
struct ocfs2_dir_block_trailer *trailer;
trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
trailer->db_compat_rec_len =
cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
trailer->db_free_rec_len = cpu_to_le16(rec_len);
}
/*
* Link an unindexed block with a dir trailer structure into the index free
* list. This function will modify dirdata_bh, but assumes you've already
* passed it to the journal.
*/
static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
struct buffer_head *dx_root_bh,
struct buffer_head *dirdata_bh)
{
int ret;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dir_block_trailer *trailer;
ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
trailer->db_free_next = dx_root->dr_free_blk;
dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
ocfs2_journal_dirty(handle, dx_root_bh);
out:
return ret;
}
static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
{
return res->dl_prev_leaf_bh == NULL;
}
void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
{
brelse(res->dl_dx_root_bh);
brelse(res->dl_leaf_bh);
brelse(res->dl_dx_leaf_bh);
brelse(res->dl_prev_leaf_bh);
}
static int ocfs2_dir_indexed(struct inode *inode)
{
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
return 1;
return 0;
}
static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
{
return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
}
/*
* Hashing code adapted from ext3
*/
#define DELTA 0x9E3779B9
static void TEA_transform(__u32 buf[4], __u32 const in[])
{
__u32 sum = 0;
__u32 b0 = buf[0], b1 = buf[1];
__u32 a = in[0], b = in[1], c = in[2], d = in[3];
int n = 16;
do {
sum += DELTA;
b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
} while (--n);
buf[0] += b0;
buf[1] += b1;
}
static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
{
__u32 pad, val;
int i;
pad = (__u32)len | ((__u32)len << 8);
pad |= pad << 16;
val = pad;
if (len > num*4)
len = num * 4;
for (i = 0; i < len; i++) {
if ((i % 4) == 0)
val = pad;
val = msg[i] + (val << 8);
if ((i % 4) == 3) {
*buf++ = val;
val = pad;
num--;
}
}
if (--num >= 0)
*buf++ = val;
while (--num >= 0)
*buf++ = pad;
}
static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
struct ocfs2_dx_hinfo *hinfo)
{
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
const char *p;
__u32 in[8], buf[4];
/*
* XXX: Is this really necessary, if the index is never looked
* at by readdir? Is a hash value of '0' a bad idea?
*/
if ((len == 1 && !strncmp(".", name, 1)) ||
(len == 2 && !strncmp("..", name, 2))) {
buf[0] = buf[1] = 0;
goto out;
}
#ifdef OCFS2_DEBUG_DX_DIRS
/*
* This makes it very easy to debug indexing problems. We
* should never allow this to be selected without hand editing
* this file though.
*/
buf[0] = buf[1] = len;
goto out;
#endif
memcpy(buf, osb->osb_dx_seed, sizeof(buf));
p = name;
while (len > 0) {
str2hashbuf(p, len, in, 4);
TEA_transform(buf, in);
len -= 16;
p += 16;
}
out:
hinfo->major_hash = buf[0];
hinfo->minor_hash = buf[1];
}
/*
* bh passed here can be an inode block or a dir data block, depending
* on the inode inline data flag.
*/
static int ocfs2_check_dir_entry(struct inode * dir,
struct ocfs2_dir_entry * de,
struct buffer_head * bh,
unsigned long offset)
{
const char *error_msg = NULL;
const int rlen = le16_to_cpu(de->rec_len);
if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(
((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
if (unlikely(error_msg != NULL))
mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
"offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
de->name_len);
return error_msg == NULL ? 1 : 0;
}
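/*
* Returns 1 if 'de' is a live entry (non-zero inode) whose name
* matches 'name' exactly, 0 otherwise.
*/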
static inline int ocfs2_match(int len,
const char * const name,
struct ocfs2_dir_entry *de)
{
if (len != de->name_len)
return 0;
if (!de->inode)
return 0;
return !memcmp(name, de->name, len);
}
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
static inline int ocfs2_search_dirblock(struct buffer_head *bh,
struct inode *dir,
const char *name, int namelen,
unsigned long offset,
char *first_de,
unsigned int bytes,
struct ocfs2_dir_entry **res_dir)
{
struct ocfs2_dir_entry *de;
char *dlimit, *de_buf;
int de_len;
int ret = 0;
de_buf = first_de;
dlimit = de_buf + bytes;
while (de_buf < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
de = (struct ocfs2_dir_entry *) de_buf;
if (de_buf + namelen <= dlimit &&
ocfs2_match(namelen, name, de)) {
/* found a match - just to be sure, do a full check */
if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
ret = -1;
goto bail;
}
*res_dir = de;
ret = 1;
goto bail;
}
/* prevent looping on a bad block */
de_len = le16_to_cpu(de->rec_len);
if (de_len <= 0) {
ret = -1;
goto bail;
}
de_buf += de_len;
offset += de_len;
}
bail:
trace_ocfs2_search_dirblock(ret);
return ret;
}
static struct buffer_head *ocfs2_find_entry_id(const char *name,
int namelen,
struct inode *dir,
struct ocfs2_dir_entry **res_dir)
{
int ret, found;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_inline_data *data;
ret = ocfs2_read_inode_block(dir, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
data = &di->id2.i_data;
found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
data->id_data, i_size_read(dir), res_dir);
if (found == 1)
return di_bh;
brelse(di_bh);
out:
return NULL;
}
static int ocfs2_validate_dir_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_dir_block_trailer *trailer =
ocfs2_trailer_from_bh(bh, sb);
/*
* We don't validate dirents here, that's handled
* in-place when the code walks them.
*/
trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*
* Note that we are safe to call this even if the directory
* doesn't have a trailer. Filesystems without metaecc will do
* nothing, and filesystems with it will have one.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
if (rc)
mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
(unsigned long long)bh->b_blocknr);
return rc;
}
/*
* Validate a directory trailer.
*
* We check the trailer here rather than in ocfs2_validate_dir_block()
* because that function doesn't have the inode to test.
*/
static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
{
int rc = 0;
struct ocfs2_dir_block_trailer *trailer;
trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
rc = ocfs2_error(dir->i_sb,
"Invalid dirblock #%llu: signature = %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
trailer->db_signature);
goto out;
}
if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
rc = ocfs2_error(dir->i_sb,
"Directory block #%llu has an invalid db_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(trailer->db_blkno));
goto out;
}
if (le64_to_cpu(trailer->db_parent_dinode) !=
OCFS2_I(dir)->ip_blkno) {
rc = ocfs2_error(dir->i_sb,
"Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)le64_to_cpu(trailer->db_parent_dinode));
goto out;
}
out:
return rc;
}
/*
* This function forces all errors to -EIO for consistency with its
* predecessor, ocfs2_bread(). We haven't audited what returning the
* real error codes would do to callers. We log the real codes with
* mlog_errno() before we squash them.
*/
static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
struct buffer_head **bh, int flags)
{
int rc = 0;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
ocfs2_validate_dir_block);
if (rc) {
mlog_errno(rc);
goto out;
}
if (!(flags & OCFS2_BH_READAHEAD) &&
ocfs2_supports_dir_trailer(inode)) {
rc = ocfs2_check_dir_trailer(inode, tmp);
if (rc) {
if (!*bh)
brelse(tmp);
mlog_errno(rc);
goto out;
}
}
/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
if (!*bh)
*bh = tmp;
out:
return rc ? -EIO : 0;
}
/*
* Read the block at 'phys' which belongs to this directory
* inode. This function does no virtual->physical block translation -
* what's passed in is assumed to be a valid directory block.
*/
static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
struct buffer_head **bh)
{
int ret;
struct buffer_head *tmp = *bh;
ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
ocfs2_validate_dir_block);
if (ret) {
mlog_errno(ret);
goto out;
}
if (ocfs2_supports_dir_trailer(dir)) {
ret = ocfs2_check_dir_trailer(dir, tmp);
if (ret) {
if (!*bh)
brelse(tmp);
mlog_errno(ret);
goto out;
}
}
if (!ret && !*bh)
*bh = tmp;
out:
return ret;
}
static int ocfs2_validate_dx_root(struct super_block *sb,
struct buffer_head *bh)
{
int ret;
struct ocfs2_dx_root_block *dx_root;
BUG_ON(!buffer_uptodate(bh));
dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
if (ret) {
mlog(ML_ERROR,
"Checksum failed for dir index root block %llu\n",
(unsigned long long)bh->b_blocknr);
return ret;
}
if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
ret = ocfs2_error(sb,
"Dir Index Root # %llu has bad signature %.*s\n",
(unsigned long long)le64_to_cpu(dx_root->dr_blkno),
7, dx_root->dr_signature);
}
return ret;
}
static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
struct buffer_head **dx_root_bh)
{
int ret;
u64 blkno = le64_to_cpu(di->i_dx_root);
struct buffer_head *tmp = *dx_root_bh;
ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
ocfs2_validate_dx_root);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!ret && !*dx_root_bh)
*dx_root_bh = tmp;
return ret;
}
static int ocfs2_validate_dx_leaf(struct super_block *sb,
struct buffer_head *bh)
{
int ret;
struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
BUG_ON(!buffer_uptodate(bh));
ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
if (ret) {
mlog(ML_ERROR,
"Checksum failed for dir index leaf block %llu\n",
(unsigned long long)bh->b_blocknr);
return ret;
}
if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
7, dx_leaf->dl_signature);
}
return ret;
}
static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
struct buffer_head **dx_leaf_bh)
{
int ret;
struct buffer_head *tmp = *dx_leaf_bh;
ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
ocfs2_validate_dx_leaf);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!ret && !*dx_leaf_bh)
*dx_leaf_bh = tmp;
return ret;
}
/*
* Read a series of dx_leaf blocks. This expects all buffer_head
* pointers to be NULL on function entry.
*/
static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
struct buffer_head **dx_leaf_bhs)
{
int ret;
ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
ocfs2_validate_dx_leaf);
if (ret)
mlog_errno(ret);
return ret;
}
static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
struct inode *dir,
struct ocfs2_dir_entry **res_dir)
{
struct super_block *sb;
struct buffer_head *bh_use[NAMEI_RA_SIZE];
struct buffer_head *bh, *ret = NULL;
unsigned long start, block, b;
int ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
int ra_ptr = 0; /* Current index into readahead
buffer */
int num = 0;
int nblocks, i;
sb = dir->i_sb;
nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
start = OCFS2_I(dir)->ip_dir_start_lookup;
if (start >= nblocks)
start = 0;
block = start;
restart:
do {
/*
* We deal with the read-ahead logic here.
*/
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
b = block;
for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
/*
* Terminate if we reach the end of the
* directory and must wrap, or if our
* search has finished at this block.
*/
if (b >= nblocks || (num && block == start)) {
bh_use[ra_max] = NULL;
break;
}
num++;
bh = NULL;
ocfs2_read_dir_block(dir, b++, &bh,
OCFS2_BH_READAHEAD);
bh_use[ra_max] = bh;
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
goto next;
if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
/* read error, skip block & hope for the best.
* ocfs2_read_dir_block() has released the bh. */
mlog(ML_ERROR, "reading directory %llu, "
"offset %lu\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno,
block);
goto next;
}
i = ocfs2_search_dirblock(bh, dir, name, namelen,
block << sb->s_blocksize_bits,
bh->b_data, sb->s_blocksize,
res_dir);
if (i == 1) {
OCFS2_I(dir)->ip_dir_start_lookup = block;
ret = bh;
goto cleanup_and_exit;
} else {
brelse(bh);
if (i < 0)
goto cleanup_and_exit;
}
next:
if (++block >= nblocks)
block = 0;
} while (block != start);
/*
* If the directory has grown while we were searching, then
* search the last part of the directory before giving up.
*/
block = nblocks;
nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
if (block < nblocks) {
start = 0;
goto restart;
}
cleanup_and_exit:
/* Clean up the read-ahead blocks */
for (; ra_ptr < ra_max; ra_ptr++)
brelse(bh_use[ra_ptr]);
trace_ocfs2_find_entry_el(ret);
return ret;
}
static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
struct ocfs2_extent_list *el,
u32 major_hash,
u32 *ret_cpos,
u64 *ret_phys_blkno,
unsigned int *ret_clen)
{
int ret = 0, i, found;
struct buffer_head *eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_rec *rec = NULL;
if (el->l_tree_depth) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
&eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) eb_bh->b_data;
el = &eb->h_list;
if (el->l_tree_depth) {
ret = ocfs2_error(inode->i_sb,
"Inode %lu has non zero tree depth in btree tree block %llu\n",
inode->i_ino,
(unsigned long long)eb_bh->b_blocknr);
goto out;
}
}
found = 0;
for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
rec = &el->l_recs[i];
if (le32_to_cpu(rec->e_cpos) <= major_hash) {
found = 1;
break;
}
}
if (!found) {
ret = ocfs2_error(inode->i_sb,
"Inode %lu has bad extent record (%u, %u, 0) in btree\n",
inode->i_ino,
le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
goto out;
}
if (ret_phys_blkno)
*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
if (ret_cpos)
*ret_cpos = le32_to_cpu(rec->e_cpos);
if (ret_clen)
*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
out:
brelse(eb_bh);
return ret;
}
/*
* Returns the block index, from the start of the cluster, which this
* hash belongs to.
*/
static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
u32 minor_hash)
{
return minor_hash & osb->osb_dx_mask;
}
static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
struct ocfs2_dx_hinfo *hinfo)
{
return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
}
static int ocfs2_dx_dir_lookup(struct inode *inode,
struct ocfs2_extent_list *el,
struct ocfs2_dx_hinfo *hinfo,
u32 *ret_cpos,
u64 *ret_phys_blkno)
{
int ret = 0;
unsigned int cend, clen;
u32 cpos;
u64 blkno;
u32 name_hash = hinfo->major_hash;
ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
&clen);
if (ret) {
mlog_errno(ret);
goto out;
}
cend = cpos + clen;
if (name_hash >= cend) {
/* We want the last cluster */
blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
cpos += clen - 1;
} else {
blkno += ocfs2_clusters_to_blocks(inode->i_sb,
name_hash - cpos);
cpos = name_hash;
}
/*
* We now have the cluster which should hold our entry. To
* find the exact block from the start of the cluster to
* search, we take the lower bits of the hash.
*/
blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
if (ret_phys_blkno)
*ret_phys_blkno = blkno;
if (ret_cpos)
*ret_cpos = cpos;
out:
return ret;
}
static int ocfs2_dx_dir_search(const char *name, int namelen,
struct inode *dir,
struct ocfs2_dx_root_block *dx_root,
struct ocfs2_dir_lookup_result *res)
{
int ret, i, found;
u64 phys;
struct buffer_head *dx_leaf_bh = NULL;
struct ocfs2_dx_leaf *dx_leaf;
struct ocfs2_dx_entry *dx_entry = NULL;
struct buffer_head *dir_ent_bh = NULL;
struct ocfs2_dir_entry *dir_ent = NULL;
struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
struct ocfs2_extent_list *dr_el;
struct ocfs2_dx_entry_list *entry_list;
ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
if (ocfs2_dx_root_inline(dx_root)) {
entry_list = &dx_root->dr_entries;
goto search;
}
dr_el = &dx_root->dr_list;
ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
namelen, name, hinfo->major_hash,
hinfo->minor_hash, (unsigned long long)phys);
ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
trace_ocfs2_dx_dir_search_leaf_info(
le16_to_cpu(dx_leaf->dl_list.de_num_used),
le16_to_cpu(dx_leaf->dl_list.de_count));
entry_list = &dx_leaf->dl_list;
search:
/*
* Empty leaf is legal, so no need to check for that.
*/
found = 0;
for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
dx_entry = &entry_list->de_entries[i];
if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
|| hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
continue;
/*
* Search unindexed leaf block now. We're not
* guaranteed to find anything.
*/
ret = ocfs2_read_dir_block_direct(dir,
le64_to_cpu(dx_entry->dx_dirent_blk),
&dir_ent_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* XXX: We should check the unindexed block here,
* before using it.
*/
found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
0, dir_ent_bh->b_data,
dir->i_sb->s_blocksize, &dir_ent);
if (found == 1)
break;
if (found == -1) {
/* This means we found a bad directory entry. */
ret = -EIO;
mlog_errno(ret);
goto out;
}
brelse(dir_ent_bh);
dir_ent_bh = NULL;
}
if (found <= 0) {
ret = -ENOENT;
goto out;
}
res->dl_leaf_bh = dir_ent_bh;
res->dl_entry = dir_ent;
res->dl_dx_leaf_bh = dx_leaf_bh;
res->dl_dx_entry = dx_entry;
ret = 0;
out:
if (ret) {
brelse(dx_leaf_bh);
brelse(dir_ent_bh);
}
return ret;
}
static int ocfs2_find_entry_dx(const char *name, int namelen,
struct inode *dir,
struct ocfs2_dir_lookup_result *lookup)
{
int ret;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_root_block *dx_root;
ret = ocfs2_read_inode_block(dir, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
if (ret) {
if (ret != -ENOENT)
mlog_errno(ret);
goto out;
}
lookup->dl_dx_root_bh = dx_root_bh;
dx_root_bh = NULL;
out:
brelse(di_bh);
brelse(dx_root_bh);
return ret;
}
/*
* Try to find an entry of the provided name within 'dir'.
*
* If nothing was found, -ENOENT is returned. Otherwise, zero is
* returned and the struct 'res' will contain information useful to
* other directory manipulation functions.
*
* Caller can NOT assume anything about the contents of the
* buffer_heads - they are passed back only so that it can be passed
* into any one of the manipulation functions (add entry, delete
* entry, etc). As an example, bh in the extent directory case is a
* data block, in the inline-data case it actually points to an inode,
* in the indexed directory case, multiple buffers are involved.
*/
int ocfs2_find_entry(const char *name, int namelen,
struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
{
struct buffer_head *bh;
struct ocfs2_dir_entry *res_dir = NULL;
if (ocfs2_dir_indexed(dir))
return ocfs2_find_entry_dx(name, namelen, dir, lookup);
/*
* The unindexed dir code only uses part of the lookup
* structure, so there's no reason to push it down further
* than this.
*/
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
else
bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
if (bh == NULL)
return -ENOENT;
lookup->dl_leaf_bh = bh;
lookup->dl_entry = res_dir;
return 0;
}
/*
* Update inode number and type of a previously found directory entry.
*/
int ocfs2_update_entry(struct inode *dir, handle_t *handle,
struct ocfs2_dir_lookup_result *res,
struct inode *new_entry_inode)
{
int ret;
ocfs2_journal_access_func access = ocfs2_journal_access_db;
struct ocfs2_dir_entry *de = res->dl_entry;
struct buffer_head *de_bh = res->dl_leaf_bh;
/*
* The same code works fine for both inline-data and extent
* based directories, so no need to split this up. The only
* difference is the journal_access function.
*/
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
access = ocfs2_journal_access_di;
ret = access(handle, INODE_CACHE(dir), de_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
ocfs2_set_de_type(de, new_entry_inode->i_mode);
ocfs2_journal_dirty(handle, de_bh);
out:
return ret;
}
/*
* __ocfs2_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
struct ocfs2_dir_entry *de_del,
struct buffer_head *bh, char *first_de,
unsigned int bytes)
{
struct ocfs2_dir_entry *de, *pde;
int i, status = -ENOENT;
ocfs2_journal_access_func access = ocfs2_journal_access_db;
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
access = ocfs2_journal_access_di;
i = 0;
pde = NULL;
de = (struct ocfs2_dir_entry *) first_de;
while (i < bytes) {
if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
status = -EIO;
mlog_errno(status);
goto bail;
}
if (de == de_del) {
status = access(handle, INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
status = -EIO;
mlog_errno(status);
goto bail;
}
if (pde)
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
de->inode = 0;
inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, bh);
goto bail;
}
i += le16_to_cpu(de->rec_len);
pde = de;
de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
}
bail:
return status;
}
static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
{
unsigned int hole;
if (le64_to_cpu(de->inode) == 0)
hole = le16_to_cpu(de->rec_len);
else
hole = le16_to_cpu(de->rec_len) -
OCFS2_DIR_REC_LEN(de->name_len);
return hole;
}
static int ocfs2_find_max_rec_len(struct super_block *sb,
struct buffer_head *dirblock_bh)
{
int size, this_hole, largest_hole = 0;
char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
struct ocfs2_dir_entry *de;
trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
size = ocfs2_dir_trailer_blk_off(sb);
limit = start + size;
de_buf = start;
de = (struct ocfs2_dir_entry *)de_buf;
do {
if (de_buf != trailer) {
this_hole = ocfs2_figure_dirent_hole(de);
if (this_hole > largest_hole)
largest_hole = this_hole;
}
de_buf += le16_to_cpu(de->rec_len);
de = (struct ocfs2_dir_entry *)de_buf;
} while (de_buf < limit);
if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
return largest_hole;
return 0;
}
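/*
* Remove the indexed entry at 'index' by shifting the tail of the
* array down over it and zeroing the now-unused last slot.
*/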
static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
int index)
{
int num_used = le16_to_cpu(entry_list->de_num_used);
if (num_used == 1 || index == (num_used - 1))
goto clear;
memmove(&entry_list->de_entries[index],
&entry_list->de_entries[index + 1],
(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
clear:
num_used--;
memset(&entry_list->de_entries[num_used], 0,
sizeof(struct ocfs2_dx_entry));
entry_list->de_num_used = cpu_to_le16(num_used);
}
static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
struct ocfs2_dir_lookup_result *lookup)
{
int ret, index, max_rec_len, add_to_free_list = 0;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
struct ocfs2_dx_leaf *dx_leaf;
struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
struct ocfs2_dir_block_trailer *trailer;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_entry_list *entry_list;
/*
* This function gets a bit messy because we might have to
* modify the root block, regardless of whether the indexed
* entries are stored inline.
*/
/*
* *Only* set 'entry_list' here, based on where we're looking
* for the indexed entries. Later, we might still want to
* journal both blocks, based on free list state.
*/
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
if (ocfs2_dx_root_inline(dx_root)) {
entry_list = &dx_root->dr_entries;
} else {
dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
entry_list = &dx_leaf->dl_list;
}
/* Neither of these is a disk corruption - that should have
* been caught by lookup, before we got here. */
BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
index = (char *)dx_entry - (char *)entry_list->de_entries;
index /= sizeof(*dx_entry);
if (index >= le16_to_cpu(entry_list->de_num_used)) {
mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, index,
entry_list, dx_entry);
return -EIO;
}
/*
* We know that removal of this dirent will leave enough room
* for a new one, so add this block to the free list if it
* isn't already there.
*/
trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
if (trailer->db_free_rec_len == 0)
add_to_free_list = 1;
/*
* Add the block holding our index into the journal before
* removing the unindexed entry. If we get an error return
* from __ocfs2_delete_entry(), then it hasn't removed the
* entry yet. Likewise, successful return means we *must*
* remove the indexed entry.
*
* We're also careful to journal the root tree block here as
* the entry count needs to be updated. Also, we might be
* adding to the start of the free list.
*/
ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
if (!ocfs2_dx_root_inline(dx_root)) {
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
lookup->dl_dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
}
trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
index);
ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
if (ret) {
mlog_errno(ret);
goto out;
}
max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
if (add_to_free_list) {
trailer->db_free_next = dx_root->dr_free_blk;
dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
ocfs2_journal_dirty(handle, dx_root_bh);
}
/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
ocfs2_journal_dirty(handle, leaf_bh);
le32_add_cpu(&dx_root->dr_num_entries, -1);
ocfs2_journal_dirty(handle, dx_root_bh);
ocfs2_dx_list_remove_entry(entry_list, index);
if (!ocfs2_dx_root_inline(dx_root))
ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
out:
return ret;
}
static inline int ocfs2_delete_entry_id(handle_t *handle,
struct inode *dir,
struct ocfs2_dir_entry *de_del,
struct buffer_head *bh)
{
int ret;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_inline_data *data;
ret = ocfs2_read_inode_block(dir, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
data = &di->id2.i_data;
ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
i_size_read(dir));
brelse(di_bh);
out:
return ret;
}
static inline int ocfs2_delete_entry_el(handle_t *handle,
struct inode *dir,
struct ocfs2_dir_entry *de_del,
struct buffer_head *bh)
{
return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
bh->b_size);
}
/*
* Delete a directory entry. Hide the details of directory
* implementation from the caller.
*/
int ocfs2_delete_entry(handle_t *handle,
struct inode *dir,
struct ocfs2_dir_lookup_result *res)
{
if (ocfs2_dir_indexed(dir))
return ocfs2_delete_entry_dx(handle, dir, res);
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
res->dl_leaf_bh);
return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
res->dl_leaf_bh);
}
/*
* Check whether 'de' has enough room to hold an entry of
* 'new_rec_len' bytes.
*/
static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
unsigned int new_rec_len)
{
unsigned int de_really_used;
/* Check whether this is an empty record with enough space */
if (le64_to_cpu(de->inode) == 0 &&
le16_to_cpu(de->rec_len) >= new_rec_len)
return 1;
/*
* Record might have free space at the end which we can
* use.
*/
de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
return 1;
return 0;
}
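/*
* Append a pre-built dx_entry at the tail of a leaf's entry list. The
* caller must guarantee that the list has a free slot.
*/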
static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
struct ocfs2_dx_entry *dx_new_entry)
{
int i;
i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
}
static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
struct ocfs2_dx_hinfo *hinfo,
u64 dirent_blk)
{
int i;
struct ocfs2_dx_entry *dx_entry;
i = le16_to_cpu(entry_list->de_num_used);
dx_entry = &entry_list->de_entries[i];
memset(dx_entry, 0, sizeof(*dx_entry));
dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
le16_add_cpu(&entry_list->de_num_used, 1);
}
static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
struct ocfs2_dx_hinfo *hinfo,
u64 dirent_blk,
struct buffer_head *dx_leaf_bh)
{
int ret;
struct ocfs2_dx_leaf *dx_leaf;
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
ocfs2_journal_dirty(handle, dx_leaf_bh);
out:
return ret;
}
static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
struct ocfs2_dx_hinfo *hinfo,
u64 dirent_blk,
struct ocfs2_dx_root_block *dx_root)
{
ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
}
static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
struct ocfs2_dir_lookup_result *lookup)
{
int ret = 0;
struct ocfs2_dx_root_block *dx_root;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
if (ocfs2_dx_root_inline(dx_root)) {
ocfs2_dx_inline_root_insert(dir, handle,
&lookup->dl_hinfo,
lookup->dl_leaf_bh->b_blocknr,
dx_root);
} else {
ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
lookup->dl_leaf_bh->b_blocknr,
lookup->dl_dx_leaf_bh);
if (ret)
goto out;
}
le32_add_cpu(&dx_root->dr_num_entries, 1);
ocfs2_journal_dirty(handle, dx_root_bh);
out:
return ret;
}
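/*
* Unlink the leaf block in 'lookup' from the directory's free-block
* list, updating either the dx root (when the block is at the head of
* the list) or the previous block's trailer.
*/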
static void ocfs2_remove_block_from_free_list(struct inode *dir,
handle_t *handle,
struct ocfs2_dir_lookup_result *lookup)
{
struct ocfs2_dir_block_trailer *trailer, *prev;
struct ocfs2_dx_root_block *dx_root;
struct buffer_head *bh;
trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
if (ocfs2_free_list_at_root(lookup)) {
bh = lookup->dl_dx_root_bh;
dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
dx_root->dr_free_blk = trailer->db_free_next;
} else {
bh = lookup->dl_prev_leaf_bh;
prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
prev->db_free_next = trailer->db_free_next;
}
trailer->db_free_rec_len = cpu_to_le16(0);
trailer->db_free_next = cpu_to_le64(0);
ocfs2_journal_dirty(handle, bh);
ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
}
/*
* This expects that a journal write has been reserved on
* lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
*/
static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
struct ocfs2_dir_lookup_result *lookup)
{
int max_rec_len;
struct ocfs2_dir_block_trailer *trailer;
/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
if (max_rec_len) {
/*
* There's still room in this block, so no need to remove it
* from the free list. In this case, we just want to update
* the rec len accounting.
*/
trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
} else {
ocfs2_remove_block_from_free_list(dir, handle, lookup);
}
}
/* We don't always have a dentry for what we want to add, so callers
* like the orphan dir code can call this instead.
*
* The lookup context must have been filled from
* ocfs2_prepare_dir_for_insert.
*/
int __ocfs2_add_entry(handle_t *handle,
struct inode *dir,
const char *name, int namelen,
struct inode *inode, u64 blkno,
struct buffer_head *parent_fe_bh,
struct ocfs2_dir_lookup_result *lookup)
{
unsigned long offset;
unsigned short rec_len;
struct ocfs2_dir_entry *de, *de1;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
struct super_block *sb = dir->i_sb;
int retval;
unsigned int size = sb->s_blocksize;
struct buffer_head *insert_bh = lookup->dl_leaf_bh;
char *data_start = insert_bh->b_data;
if (!namelen)
return -EINVAL;
if (ocfs2_dir_indexed(dir)) {
struct buffer_head *bh;
/*
* An indexed dir may require that we update the free space
* list. Reserve a write to the previous node in the list so
* that we don't fail later.
*
* XXX: This can be either a dx_root_block, or an unindexed
* directory tree leaf block.
*/
if (ocfs2_free_list_at_root(lookup)) {
bh = lookup->dl_dx_root_bh;
retval = ocfs2_journal_access_dr(handle,
INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
} else {
bh = lookup->dl_prev_leaf_bh;
retval = ocfs2_journal_access_db(handle,
INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
}
if (retval) {
mlog_errno(retval);
return retval;
}
} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
data_start = di->id2.i_data.id_data;
size = i_size_read(dir);
BUG_ON(insert_bh != parent_fe_bh);
}
rec_len = OCFS2_DIR_REC_LEN(namelen);
offset = 0;
de = (struct ocfs2_dir_entry *) data_start;
while (1) {
BUG_ON((char *)de >= (size + data_start));
/* These checks should have already been passed by the
 * prepare function, but we keep them here as a cheap
 * sanity check. */
if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
retval = -ENOENT;
goto bail;
}
if (ocfs2_match(namelen, name, de)) {
retval = -EEXIST;
goto bail;
}
/* We're guaranteed that we should have space, so we
* can't possibly have hit the trailer...right? */
mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
"Hit dir trailer trying to insert %.*s "
"(namelen %d) into directory %llu. "
"offset is %lu, trailer offset is %d\n",
namelen, name, namelen,
(unsigned long long)parent_fe_bh->b_blocknr,
offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
if (ocfs2_dirent_would_fit(de, rec_len)) {
dir->i_mtime = inode_set_ctime_current(dir);
retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
if (retval < 0) {
mlog_errno(retval);
goto bail;
}
if (insert_bh == parent_fe_bh)
retval = ocfs2_journal_access_di(handle,
INODE_CACHE(dir),
insert_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
else {
retval = ocfs2_journal_access_db(handle,
INODE_CACHE(dir),
insert_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (!retval && ocfs2_dir_indexed(dir))
retval = ocfs2_dx_dir_insert(dir,
handle,
lookup);
}
if (retval) {
mlog_errno(retval);
goto bail;
}
/* By now the buffer is marked for journaling */
offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
de1 = (struct ocfs2_dir_entry *)((char *) de +
OCFS2_DIR_REC_LEN(de->name_len));
de1->rec_len =
cpu_to_le16(le16_to_cpu(de->rec_len) -
OCFS2_DIR_REC_LEN(de->name_len));
de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
de = de1;
}
de->file_type = FT_UNKNOWN;
if (blkno) {
de->inode = cpu_to_le64(blkno);
ocfs2_set_de_type(de, inode->i_mode);
} else
de->inode = 0;
de->name_len = namelen;
memcpy(de->name, name, namelen);
if (ocfs2_dir_indexed(dir))
ocfs2_recalc_free_list(dir, handle, lookup);
inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, insert_bh);
retval = 0;
goto bail;
}
offset += le16_to_cpu(de->rec_len);
de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
}
/* The trailer check above should prevent us from ever
 * getting here. */
retval = -ENOSPC;
bail:
if (retval)
mlog_errno(retval);
return retval;
}
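/*
 * readdir helper for inline-data directories: the dirents live
 * directly in the inode block's inline data area, so a single read
 * of the inode block gives us everything we need to emit.
 */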
static int ocfs2_dir_foreach_blk_id(struct inode *inode,
u64 *f_version,
struct dir_context *ctx)
{
int ret, i;
unsigned long offset = ctx->pos;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_inline_data *data;
struct ocfs2_dir_entry *de;
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
goto out;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
data = &di->id2.i_data;
while (ctx->pos < i_size_read(inode)) {
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
if (!inode_eq_iversion(inode, *f_version)) {
for (i = 0; i < i_size_read(inode) && i < offset; ) {
de = (struct ocfs2_dir_entry *)
(data->id_data + i);
/* It's too expensive to do a full
* dirent test each time round this
* loop, but we do have to test at
* least that it is non-zero. A
* failure will be detected in the
* dirent test below. */
if (le16_to_cpu(de->rec_len) <
OCFS2_DIR_REC_LEN(1))
break;
i += le16_to_cpu(de->rec_len);
}
ctx->pos = offset = i;
*f_version = inode_query_iversion(inode);
}
de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
/* On error, skip the f_pos to the end. */
ctx->pos = i_size_read(inode);
break;
}
offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
if (!dir_emit(ctx, de->name, de->name_len,
le64_to_cpu(de->inode),
fs_ftype_to_dtype(de->file_type)))
goto out;
}
ctx->pos += le16_to_cpu(de->rec_len);
}
out:
brelse(di_bh);
return 0;
}
/*
* NOTE: This function can be called against unindexed directories,
* and indexed ones.
*/
static int ocfs2_dir_foreach_blk_el(struct inode *inode,
u64 *f_version,
struct dir_context *ctx,
bool persist)
{
unsigned long offset, blk, last_ra_blk = 0;
int i;
struct buffer_head * bh, * tmp;
struct ocfs2_dir_entry * de;
struct super_block * sb = inode->i_sb;
unsigned int ra_sectors = 16;
int stored = 0;
bh = NULL;
offset = ctx->pos & (sb->s_blocksize - 1);
while (ctx->pos < i_size_read(inode)) {
blk = ctx->pos >> sb->s_blocksize_bits;
if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
/* Skip the corrupt dirblock and keep trying */
ctx->pos += sb->s_blocksize - offset;
continue;
}
/* The idea here is to begin with 8k read-ahead and to stay
* 4k ahead of our current position.
*
* TODO: Use the pagecache for this. We just need to
* make sure it's cluster-safe... */
if (!last_ra_blk
|| (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
i > 0; i--) {
tmp = NULL;
if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
OCFS2_BH_READAHEAD))
brelse(tmp);
}
last_ra_blk = blk;
ra_sectors = 8;
}
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
if (!inode_eq_iversion(inode, *f_version)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ocfs2_dir_entry *) (bh->b_data + i);
/* It's too expensive to do a full
* dirent test each time round this
* loop, but we do have to test at
* least that it is non-zero. A
* failure will be detected in the
* dirent test below. */
if (le16_to_cpu(de->rec_len) <
OCFS2_DIR_REC_LEN(1))
break;
i += le16_to_cpu(de->rec_len);
}
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
*f_version = inode_query_iversion(inode);
}
while (ctx->pos < i_size_read(inode)
&& offset < sb->s_blocksize) {
de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
/* On error, skip the f_pos to the
next block. */
ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
break;
}
if (le64_to_cpu(de->inode)) {
if (!dir_emit(ctx, de->name,
de->name_len,
le64_to_cpu(de->inode),
fs_ftype_to_dtype(de->file_type))) {
brelse(bh);
return 0;
}
stored++;
}
offset += le16_to_cpu(de->rec_len);
ctx->pos += le16_to_cpu(de->rec_len);
}
offset = 0;
brelse(bh);
bh = NULL;
if (!persist && stored)
break;
}
return 0;
}
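/*
 * Dispatch readdir to the inline-data or extent-list implementation
 * depending on how the directory data is currently stored.
 */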
static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
struct dir_context *ctx,
bool persist)
{
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
}
/*
* This is intended to be called from inside other kernel functions,
* so we fake some arguments.
*/
int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
{
u64 version = inode_query_iversion(inode);
ocfs2_dir_foreach_blk(inode, &version, ctx, true);
return 0;
}
/*
* ocfs2_readdir()
*
*/
int ocfs2_readdir(struct file *file, struct dir_context *ctx)
{
int error = 0;
struct inode *inode = file_inode(file);
int lock_level = 0;
trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
if (lock_level && error >= 0) {
/* We release the EX lock which was used to update atime
 * and take a PR lock again to reduce contention on
 * commonly accessed directories. */
ocfs2_inode_unlock(inode, 1);
lock_level = 0;
error = ocfs2_inode_lock(inode, NULL, 0);
}
if (error < 0) {
if (error != -ENOENT)
mlog_errno(error);
/* we haven't got any yet, so propagate the error. */
goto bail_nolock;
}
error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
ocfs2_inode_unlock(inode, lock_level);
if (error)
mlog_errno(error);
bail_nolock:
return error;
}
/*
* NOTE: this should always be called with parent dir i_rwsem taken.
*/
int ocfs2_find_files_on_disk(const char *name,
int namelen,
u64 *blkno,
struct inode *inode,
struct ocfs2_dir_lookup_result *lookup)
{
int status = -ENOENT;
trace_ocfs2_find_files_on_disk(namelen, name, blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_find_entry(name, namelen, inode, lookup);
if (status)
goto leave;
*blkno = le64_to_cpu(lookup->dl_entry->inode);
status = 0;
leave:
return status;
}
/*
* Convenience function for callers which just want the block number
* mapped to a name and don't require the full dirent info, etc.
*/
int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
int namelen, u64 *blkno)
{
int ret;
struct ocfs2_dir_lookup_result lookup = { NULL, };
ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
ocfs2_free_dir_lookup_result(&lookup);
return ret;
}
/* Check for a name within a directory.
*
* Return 0 if the name does not exist
* Return -EEXIST if the directory contains the name
*
* Callers should have i_rwsem + a cluster lock on dir
*/
int ocfs2_check_dir_for_entry(struct inode *dir,
const char *name,
int namelen)
{
int ret = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
trace_ocfs2_check_dir_for_entry(
(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
ret = -EEXIST;
mlog_errno(ret);
}
ocfs2_free_dir_lookup_result(&lookup);
return ret;
}
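/*
 * Scan state for ocfs2_empty_dir(): tracks whether the filldir
 * callback has seen ".", ".." and anything else.
 */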
struct ocfs2_empty_dir_priv {
struct dir_context ctx;
unsigned seen_dot;
unsigned seen_dot_dot;
unsigned seen_other;
unsigned dx_dir;
};
static bool ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
int name_len, loff_t pos, u64 ino,
unsigned type)
{
struct ocfs2_empty_dir_priv *p =
container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
/*
* Check the positions of "." and ".." records to be sure
* they're in the correct place.
*
* Indexed directories don't need to proceed past the first
* two entries, so we end the scan after seeing '..'. Despite
* that, we allow the scan to proceed in the event that we
* have a corrupted indexed directory (no dot or dot dot
* entries). This allows us to double check for existing
* entries which might not have been found in the index.
*/
if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
p->seen_dot = 1;
return true;
}
if (name_len == 2 && !strncmp("..", name, 2) &&
pos == OCFS2_DIR_REC_LEN(1)) {
p->seen_dot_dot = 1;
if (p->dx_dir && p->seen_dot)
return false;
return true;
}
p->seen_other = 1;
return false;
}
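/*
 * Indexed directories keep a count of live dirents in the dx root,
 * so emptiness can be checked without walking the leaf blocks: a
 * directory containing only "." and ".." has exactly two entries.
 */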
static int ocfs2_empty_dir_dx(struct inode *inode,
struct ocfs2_empty_dir_priv *priv)
{
int ret;
struct buffer_head *di_bh = NULL;
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_dx_root_block *dx_root;
priv->dx_dir = 1;
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
if (le32_to_cpu(dx_root->dr_num_entries) != 2)
priv->seen_other = 1;
out:
brelse(di_bh);
brelse(dx_root_bh);
return ret;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*
* Returns 1 if dir is empty, zero otherwise.
*
* XXX: This is a performance problem for unindexed directories.
*/
int ocfs2_empty_dir(struct inode *inode)
{
int ret;
struct ocfs2_empty_dir_priv priv = {
.ctx.actor = ocfs2_empty_dir_filldir,
};
if (ocfs2_dir_indexed(inode)) {
ret = ocfs2_empty_dir_dx(inode, &priv);
if (ret)
mlog_errno(ret);
/*
* We still run ocfs2_dir_foreach to get the checks
* for "." and "..".
*/
}
ret = ocfs2_dir_foreach(inode, &priv.ctx);
if (ret)
mlog_errno(ret);
if (!priv.seen_dot || !priv.seen_dot_dot) {
mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
/*
* XXX: Is it really safe to allow an unlink to continue?
*/
return 1;
}
return !priv.seen_other;
}
/*
* Fills "." and ".." dirents in a new directory block. Returns dirent for
* "..", which might be used during creation of a directory with a trailing
* header. It is otherwise safe to ignore the return code.
*/
static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
struct inode *parent,
char *start,
unsigned int size)
{
struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
de->name_len = 1;
de->rec_len =
cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
strcpy(de->name, ".");
ocfs2_set_de_type(de, S_IFDIR);
de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
de->name_len = 2;
strcpy(de->name, "..");
ocfs2_set_de_type(de, S_IFDIR);
return de;
}
/*
* This works together with code in ocfs2_mknod_locked() which sets
* the inline-data flag and initializes the inline-data section.
*/
static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
handle_t *handle,
struct inode *parent,
struct inode *inode,
struct buffer_head *di_bh)
{
int ret;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inline_data *data = &di->id2.i_data;
unsigned int size = le16_to_cpu(data->id_count);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
ocfs2_journal_dirty(handle, di_bh);
i_size_write(inode, size);
set_nlink(inode, 2);
inode->i_blocks = ocfs2_inode_sector_count(inode);
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
mlog_errno(ret);
out:
return ret;
}
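/*
 * Format the first block of a new extent-based directory: extend the
 * directory by one block, fill in "." and "..", and add a dir block
 * trailer if this filesystem wants one.
 */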
static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
handle_t *handle,
struct inode *parent,
struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_alloc_context *data_ac,
struct buffer_head **ret_new_bh)
{
int status;
unsigned int size = osb->sb->s_blocksize;
struct buffer_head *new_bh = NULL;
struct ocfs2_dir_entry *de;
if (ocfs2_new_dir_wants_trailer(inode))
size = ocfs2_dir_trailer_blk_off(parent->i_sb);
status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
data_ac, NULL, &new_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(new_bh->b_data, 0, osb->sb->s_blocksize);
de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
if (ocfs2_new_dir_wants_trailer(inode)) {
int size = le16_to_cpu(de->rec_len);
/*
* Figure out the size of the hole left over after
* insertion of '.' and '..'. The trailer wants this
* information.
*/
size -= OCFS2_DIR_REC_LEN(2);
size -= sizeof(struct ocfs2_dir_block_trailer);
ocfs2_init_dir_trailer(inode, new_bh, size);
}
ocfs2_journal_dirty(handle, new_bh);
i_size_write(inode, inode->i_sb->s_blocksize);
set_nlink(inode, 2);
inode->i_blocks = ocfs2_inode_sector_count(inode);
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = 0;
if (ret_new_bh) {
*ret_new_bh = new_bh;
new_bh = NULL;
}
bail:
brelse(new_bh);
return status;
}
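/*
 * Allocate a dx root block from the metadata suballocator, initialize
 * it (inline entry list or extent list, depending on dx_inline) and
 * link it to the directory inode via i_dx_root. The unindexed block
 * in dirdata_bh heads the free-space list if it still has room.
 */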
static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
handle_t *handle, struct inode *dir,
struct buffer_head *di_bh,
struct buffer_head *dirdata_bh,
struct ocfs2_alloc_context *meta_ac,
int dx_inline, u32 num_entries,
struct buffer_head **ret_dx_root_bh)
{
int ret;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
u16 dr_suballoc_bit;
u64 suballoc_loc, dr_blkno;
unsigned int num_bits;
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dir_block_trailer *trailer =
ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&dr_suballoc_bit, &num_bits, &dr_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
trace_ocfs2_dx_dir_attach_index(
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)dr_blkno);
dx_root_bh = sb_getblk(osb->sb, dr_blkno);
if (dx_root_bh == NULL) {
ret = -ENOMEM;
goto out;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
dx_root->dr_blkno = cpu_to_le64(dr_blkno);
dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
dx_root->dr_num_entries = cpu_to_le32(num_entries);
if (le16_to_cpu(trailer->db_free_rec_len))
dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
else
dx_root->dr_free_blk = cpu_to_le64(0);
if (dx_inline) {
dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
dx_root->dr_entries.de_count =
cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
} else {
dx_root->dr_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
}
ocfs2_journal_dirty(handle, dx_root_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out;
}
di->i_dx_root = cpu_to_le64(dr_blkno);
spin_lock(&OCFS2_I(dir)->ip_lock);
OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
spin_unlock(&OCFS2_I(dir)->ip_lock);
ocfs2_journal_dirty(handle, di_bh);
*ret_dx_root_bh = dx_root_bh;
dx_root_bh = NULL;
out:
brelse(dx_root_bh);
return ret;
}
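/*
 * Initialize every block in a newly allocated index cluster as an
 * empty, correctly signed dx leaf.
 */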
static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
handle_t *handle, struct inode *dir,
struct buffer_head **dx_leaves,
int num_dx_leaves, u64 start_blk)
{
int ret, i;
struct ocfs2_dx_leaf *dx_leaf;
struct buffer_head *bh;
for (i = 0; i < num_dx_leaves; i++) {
bh = sb_getblk(osb->sb, start_blk + i);
if (bh == NULL) {
ret = -ENOMEM;
goto out;
}
dx_leaves[i] = bh;
ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
memset(dx_leaf, 0, osb->sb->s_blocksize);
strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
dx_leaf->dl_list.de_count =
cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
trace_ocfs2_dx_dir_format_cluster(
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)bh->b_blocknr,
le16_to_cpu(dx_leaf->dl_list.de_count));
ocfs2_journal_dirty(handle, bh);
}
ret = 0;
out:
return ret;
}
/*
* Allocates and formats a new cluster for use in an indexed dir
* leaf. This version will not do the extent insert, so that it can be
* used by operations which need careful ordering.
*/
static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
u32 cpos, handle_t *handle,
struct ocfs2_alloc_context *data_ac,
struct buffer_head **dx_leaves,
int num_dx_leaves, u64 *ret_phys_blkno)
{
int ret;
u32 phys, num;
u64 phys_blkno;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
/*
* XXX: For create, this should claim a cluster for the index
* *before* the unindexed insert so that we have a better
* chance of contiguousness as the directory grows in number
* of entries.
*/
ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Format the new cluster first. That way, we're inserting
* valid data.
*/
phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
num_dx_leaves, phys_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
*ret_phys_blkno = phys_blkno;
out:
return ret;
}
static int ocfs2_dx_dir_new_cluster(struct inode *dir,
struct ocfs2_extent_tree *et,
u32 cpos, handle_t *handle,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **dx_leaves,
int num_dx_leaves)
{
int ret;
u64 phys_blkno;
ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
num_dx_leaves, &phys_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
meta_ac);
if (ret)
mlog_errno(ret);
out:
return ret;
}
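/*
 * Allocate an array of buffer_head pointers large enough to cover
 * one cluster's worth of dx leaf blocks.
 */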
static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
int *ret_num_leaves)
{
int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
struct buffer_head **dx_leaves;
dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
GFP_NOFS);
if (dx_leaves && ret_num_leaves)
*ret_num_leaves = num_dx_leaves;
return dx_leaves;
}
static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
handle_t *handle,
struct inode *parent,
struct inode *inode,
struct buffer_head *di_bh,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac)
{
int ret;
struct buffer_head *leaf_bh = NULL;
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_hinfo hinfo;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_entry_list *entry_list;
/*
* Our strategy is to create the directory as though it were
* unindexed, then add the index block. This works with very
* little complication since the state of a new directory is a
* very well known quantity.
*
* Essentially, we have two dirents ("." and ".."), in the 1st
* block which need indexing. These are easily inserted into
* the index block.
*/
ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
data_ac, &leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
meta_ac, 1, 2, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
entry_list = &dx_root->dr_entries;
/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
out:
brelse(dx_root_bh);
brelse(leaf_bh);
return ret;
}
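/*
 * Top-level entry point for formatting a brand new directory: pick
 * the inline-data, indexed, or plain extent-based layout based on
 * what the inode and filesystem support.
 */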
int ocfs2_fill_new_dir(struct ocfs2_super *osb,
handle_t *handle,
struct inode *parent,
struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac)
{
BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
if (ocfs2_supports_indexed_dirs(osb))
return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
data_ac, meta_ac);
return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
data_ac, NULL);
}
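/*
 * Index one unindexed directory block: hash each live dirent and
 * insert a dx entry into whichever leaf of the (externally stored)
 * index cluster the hash maps to. *num_dx_entries is bumped so the
 * caller can set dr_num_entries once the dx root exists.
 */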
static int ocfs2_dx_dir_index_block(struct inode *dir,
handle_t *handle,
struct buffer_head **dx_leaves,
int num_dx_leaves,
u32 *num_dx_entries,
struct buffer_head *dirent_bh)
{
int ret = 0, namelen, i;
char *de_buf, *limit;
struct ocfs2_dir_entry *de;
struct buffer_head *dx_leaf_bh;
struct ocfs2_dx_hinfo hinfo;
u64 dirent_blk = dirent_bh->b_blocknr;
de_buf = dirent_bh->b_data;
limit = de_buf + dir->i_sb->s_blocksize;
while (de_buf < limit) {
de = (struct ocfs2_dir_entry *)de_buf;
namelen = de->name_len;
if (!namelen || !de->inode)
goto inc;
ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
dx_leaf_bh = dx_leaves[i];
ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
dirent_blk, dx_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
*num_dx_entries = *num_dx_entries + 1;
inc:
de_buf += le16_to_cpu(de->rec_len);
}
out:
return ret;
}
/*
* XXX: This expects dx_root_bh to already be part of the transaction.
*/
static void ocfs2_dx_dir_index_root_block(struct inode *dir,
struct buffer_head *dx_root_bh,
struct buffer_head *dirent_bh)
{
char *de_buf, *limit;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dir_entry *de;
struct ocfs2_dx_hinfo hinfo;
u64 dirent_blk = dirent_bh->b_blocknr;
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
de_buf = dirent_bh->b_data;
limit = de_buf + dir->i_sb->s_blocksize;
while (de_buf < limit) {
de = (struct ocfs2_dir_entry *)de_buf;
if (!de->name_len || !de->inode)
goto inc;
ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
trace_ocfs2_dx_dir_index_root_block(
(unsigned long long)dir->i_ino,
hinfo.major_hash, hinfo.minor_hash,
de->name_len, de->name,
le16_to_cpu(dx_root->dr_entries.de_num_used));
ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
dirent_blk);
le32_add_cpu(&dx_root->dr_num_entries, 1);
inc:
de_buf += le16_to_cpu(de->rec_len);
}
}
/*
* Count the number of inline directory entries in di_bh and compare
* them against the number of entries we can hold in an inline dx root
* block.
*/
static int ocfs2_new_dx_should_be_inline(struct inode *dir,
struct buffer_head *di_bh)
{
int dirent_count = 0;
char *de_buf, *limit;
struct ocfs2_dir_entry *de;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
de_buf = di->id2.i_data.id_data;
limit = de_buf + i_size_read(dir);
while (de_buf < limit) {
de = (struct ocfs2_dir_entry *)de_buf;
if (de->name_len && de->inode)
dirent_count++;
de_buf += le16_to_cpu(de->rec_len);
}
/* We are careful to leave room for one extra record. */
return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
}
/*
* Expand rec_len of the rightmost dirent in a directory block so that it
* contains the end of our valid space for dirents. We do this during
* expansion from an inline directory to one with extents. The first dir block
* in that case is taken from the inline data portion of the inode block.
*
* This will also return the largest amount of contiguous space for a dirent
* in the block. That value is *not* necessarily the last dirent, even after
* expansion. The directory indexing code wants this value for free space
* accounting. We do this here since we're already walking the entire dir
* block.
*
* We add the dir trailer if this filesystem wants it.
*/
static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
struct inode *dir)
{
struct super_block *sb = dir->i_sb;
struct ocfs2_dir_entry *de;
struct ocfs2_dir_entry *prev_de;
char *de_buf, *limit;
unsigned int new_size = sb->s_blocksize;
unsigned int bytes, this_hole;
unsigned int largest_hole = 0;
if (ocfs2_new_dir_wants_trailer(dir))
new_size = ocfs2_dir_trailer_blk_off(sb);
bytes = new_size - old_size;
limit = start + old_size;
de_buf = start;
de = (struct ocfs2_dir_entry *)de_buf;
do {
this_hole = ocfs2_figure_dirent_hole(de);
if (this_hole > largest_hole)
largest_hole = this_hole;
prev_de = de;
de_buf += le16_to_cpu(de->rec_len);
de = (struct ocfs2_dir_entry *)de_buf;
} while (de_buf < limit);
le16_add_cpu(&prev_de->rec_len, bytes);
/* We need to double check this after modification of the final
* dirent. */
this_hole = ocfs2_figure_dirent_hole(prev_de);
if (this_hole > largest_hole)
largest_hole = this_hole;
if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
return largest_hole;
return 0;
}
/*
* We allocate enough clusters to fulfill "blocks_wanted", but set
* i_size to exactly one block. ocfs2_extend_dir() will handle the
* rest automatically for us.
*
* *first_block_bh is a pointer to the 1st data block allocated to the
* directory.
*/
static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
unsigned int blocks_wanted,
struct ocfs2_dir_lookup_result *lookup,
struct buffer_head **first_block_bh)
{
u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
struct super_block *sb = dir->i_sb;
int ret, i, num_dx_leaves = 0, dx_inline = 0,
credits = ocfs2_inline_to_extents_credits(sb);
u64 dx_insert_blkno, blkno,
bytes = blocks_wanted << sb->s_blocksize_bits;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(dir);
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
struct buffer_head *dirdata_bh = NULL;
struct buffer_head *dx_root_bh = NULL;
struct buffer_head **dx_leaves = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
handle_t *handle;
struct ocfs2_extent_tree et;
struct ocfs2_extent_tree dx_et;
int did_quota = 0, bytes_allocated = 0;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
alloc = ocfs2_clusters_for_bytes(sb, bytes);
dx_alloc = 0;
down_write(&oi->ip_alloc_sem);
if (ocfs2_supports_indexed_dirs(osb)) {
credits += ocfs2_add_dir_index_credits(sb);
dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
if (!dx_inline) {
/* Add one more cluster for an index leaf */
dx_alloc++;
dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
&num_dx_leaves);
if (!dx_leaves) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
}
/* This gets us the dx_root */
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* We should never need more than 2 clusters for the unindexed
* tree - maximum dirent size is far less than one block. In
* fact, the only time we'd need more than one cluster is if
* blocksize == clustersize and the dirent won't fit in the
* extra space that the expansion to a single block gives. As
* of today, that only happens on 4k/4k file systems.
*/
BUG_ON(alloc > 2);
ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Prepare for worst case allocation scenario of two separate
* extents in the unindexed tree.
*/
if (alloc == 2)
credits += OCFS2_SUBALLOC_ALLOC;
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
if (ret)
goto out_commit;
did_quota = 1;
if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
/*
* Allocate our index cluster first, to maximize the
* possibility that unindexed leaves grow
* contiguously.
*/
ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
dx_leaves, num_dx_leaves,
&dx_insert_blkno);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
}
/*
* Try to claim as many clusters as the bitmap can give though
* if we only get one now, that's enough to continue. The rest
* will be claimed after the conversion to extents.
*/
if (ocfs2_dir_resv_allowed(osb))
data_ac->ac_resv = &oi->ip_la_data_resv;
ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
/*
* Operations are carefully ordered so that we set up the new
* data block first. The conversion from inline data to
* extents follows.
*/
blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
dirdata_bh = sb_getblk(sb, blkno);
if (!dirdata_bh) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
memset(dirdata_bh->b_data + i_size_read(dir), 0,
sb->s_blocksize - i_size_read(dir));
i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
if (ocfs2_new_dir_wants_trailer(dir)) {
/*
* Prepare the dir trailer up front. It will otherwise look
* like a valid dirent. Even if inserting the index fails
* (unlikely), then all we'll have done is given first dir
* block a small amount of fragmentation.
*/
ocfs2_init_dir_trailer(dir, dirdata_bh, i);
}
ocfs2_update_inode_fsync_trans(handle, dir, 1);
ocfs2_journal_dirty(handle, dirdata_bh);
if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
/*
* Dx dirs with an external cluster need to do this up
* front. Inline dx roots get handled later, after
* we've allocated our root block. We get passed back
* a total number of items so that dr_num_entries can
* be correctly set once the dx_root has been
* allocated.
*/
ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
num_dx_leaves, &num_dx_entries,
dirdata_bh);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
}
/*
* Set extent, i_size, etc on the directory. After this, the
* inode should contain the same exact dirents as before and
* be fully accessible from system calls.
*
* We let the later dirent insert modify c/mtime - to the user
* the data hasn't changed.
*/
ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
spin_lock(&oi->ip_lock);
oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
ocfs2_dinode_new_extent_list(dir, di);
i_size_write(dir, sb->s_blocksize);
dir->i_mtime = inode_set_ctime_current(dir);
di->i_size = cpu_to_le64(sb->s_blocksize);
di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(dir).tv_sec);
di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(dir).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, dir, 1);
/*
* This should never fail as our extent list is empty and all
* related blocks have been journaled already.
*/
ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
0, NULL);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* Set i_blocks after the extent insert for the most up to
* date ip_clusters value.
*/
dir->i_blocks = ocfs2_inode_sector_count(dir);
ocfs2_journal_dirty(handle, di_bh);
if (ocfs2_supports_indexed_dirs(osb)) {
ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
dirdata_bh, meta_ac, dx_inline,
num_dx_entries, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
if (dx_inline) {
ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
dirdata_bh);
} else {
ocfs2_init_dx_root_extent_tree(&dx_et,
INODE_CACHE(dir),
dx_root_bh);
ret = ocfs2_insert_extent(handle, &dx_et, 0,
dx_insert_blkno, 1, 0, NULL);
if (ret)
mlog_errno(ret);
}
}
/*
* We asked for two clusters, but only got one in the 1st
* pass. Claim the 2nd cluster as a separate extent.
*/
if (alloc > len) {
ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
&len);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
ret = ocfs2_insert_extent(handle, &et, 1,
blkno, len, 0, NULL);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
}
*first_block_bh = dirdata_bh;
dirdata_bh = NULL;
if (ocfs2_supports_indexed_dirs(osb)) {
unsigned int off;
if (!dx_inline) {
/*
* We need to return the correct block within the
* cluster which should hold our entry.
*/
off = ocfs2_dx_dir_hash_idx(osb,
&lookup->dl_hinfo);
get_bh(dx_leaves[off]);
lookup->dl_dx_leaf_bh = dx_leaves[off];
}
lookup->dl_dx_root_bh = dx_root_bh;
dx_root_bh = NULL;
}
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(dir, bytes_allocated);
ocfs2_commit_trans(osb, handle);
out:
up_write(&oi->ip_alloc_sem);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
if (dx_leaves) {
for (i = 0; i < num_dx_leaves; i++)
brelse(dx_leaves[i]);
kfree(dx_leaves);
}
brelse(dirdata_bh);
brelse(dx_root_bh);
return ret;
}
/* returns a bh of the 1st new block in the allocation. */
static int ocfs2_do_extend_dir(struct super_block *sb,
handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **new_bh)
{
int status;
int extend, did_quota = 0;
u64 p_blkno, v_blkno;
spin_lock(&OCFS2_I(dir)->ip_lock);
extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
spin_unlock(&OCFS2_I(dir)->ip_lock);
if (extend) {
u32 offset = OCFS2_I(dir)->ip_clusters;
status = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(sb, 1));
if (status)
goto bail;
did_quota = 1;
status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
1, 0, parent_fe_bh, handle,
data_ac, meta_ac, NULL);
BUG_ON(status == -EAGAIN);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
}
*new_bh = sb_getblk(sb, p_blkno);
if (!*new_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if (did_quota && status < 0)
dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
return status;
}
/*
* Assumes you already have a cluster lock on the directory.
*
* 'blocks_wanted' is only used if we have an inline directory which
* is to be turned into an extent based one. The size of the dirent to
* insert might be larger than the space gained by growing to just one
* block, so we may have to grow the inode by two blocks in that case.
*
* If the directory is already indexed, dx_root_bh must be provided.
*/
static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
unsigned int blocks_wanted,
struct ocfs2_dir_lookup_result *lookup,
struct buffer_head **new_de_bh)
{
int status = 0;
int credits, num_free_extents, drop_alloc_sem = 0;
loff_t dir_i_size;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
struct ocfs2_extent_list *el = &fe->id2.i_list;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle = NULL;
struct buffer_head *new_bh = NULL;
struct ocfs2_dir_entry * de;
struct super_block *sb = osb->sb;
struct ocfs2_extent_tree et;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
/*
* This would be a code error as an inline directory should
* never have an index root.
*/
BUG_ON(dx_root_bh);
status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
blocks_wanted, lookup,
&new_bh);
if (status) {
mlog_errno(status);
goto bail;
}
/* Expansion from inline to an indexed directory will
* have given us this. */
dx_root_bh = lookup->dl_dx_root_bh;
if (blocks_wanted == 1) {
/*
* If the new dirent will fit inside the space
* created by pushing out to one block, then
* we can complete the operation
* here. Otherwise we have to expand i_size
* and format the 2nd block below.
*/
BUG_ON(new_bh == NULL);
goto bail_bh;
}
/*
* Get rid of 'new_bh' - we want to format the 2nd
* data block and return that instead.
*/
brelse(new_bh);
new_bh = NULL;
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
dir_i_size = i_size_read(dir);
credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
goto do_extend;
}
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
dir_i_size = i_size_read(dir);
trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
dir_i_size);
/* dir->i_size is always block aligned. */
spin_lock(&OCFS2_I(dir)->ip_lock);
if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
spin_unlock(&OCFS2_I(dir)->ip_lock);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
parent_fe_bh);
num_free_extents = ocfs2_num_free_extents(&et);
if (num_free_extents < 0) {
status = num_free_extents;
mlog_errno(status);
goto bail;
}
if (!num_free_extents) {
status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
}
status = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
if (ocfs2_dir_resv_allowed(osb))
data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
credits = ocfs2_calc_extend_credits(sb, el);
} else {
spin_unlock(&OCFS2_I(dir)->ip_lock);
credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
}
do_extend:
if (ocfs2_dir_indexed(dir))
credits++; /* For attaching the new dirent block to the
* dx_root */
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
data_ac, meta_ac, &new_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(new_bh->b_data, 0, sb->s_blocksize);
de = (struct ocfs2_dir_entry *) new_bh->b_data;
de->inode = 0;
if (ocfs2_supports_dir_trailer(dir)) {
de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
if (ocfs2_dir_indexed(dir)) {
status = ocfs2_dx_dir_link_trailer(dir, handle,
dx_root_bh, new_bh);
if (status) {
mlog_errno(status);
goto bail;
}
}
} else {
de->rec_len = cpu_to_le16(sb->s_blocksize);
}
ocfs2_update_inode_fsync_trans(handle, dir, 1);
ocfs2_journal_dirty(handle, new_bh);
dir_i_size += dir->i_sb->s_blocksize;
i_size_write(dir, dir_i_size);
dir->i_blocks = ocfs2_inode_sector_count(dir);
status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bail_bh:
*new_de_bh = new_bh;
get_bh(*new_de_bh);
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
if (drop_alloc_sem)
up_write(&OCFS2_I(dir)->ip_alloc_sem);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
brelse(new_bh);
return status;
}
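/*
 * Search an inline-data directory for space to insert a new dirent.
 * Returns -ENOSPC (with *blocks_wanted set) when the directory must
 * first be expanded out to extents.
 */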
static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
const char *name, int namelen,
struct buffer_head **ret_de_bh,
unsigned int *blocks_wanted)
{
int ret;
struct super_block *sb = dir->i_sb;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_dir_entry *de, *last_de = NULL;
char *de_buf, *limit;
unsigned long offset = 0;
unsigned int rec_len, new_rec_len, free_space;
/*
* This calculates how many free bytes we'd have in block zero, should
* this function force expansion to an extent tree.
*/
if (ocfs2_new_dir_wants_trailer(dir))
free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
else
free_space = dir->i_sb->s_blocksize - i_size_read(dir);
de_buf = di->id2.i_data.id_data;
limit = de_buf + i_size_read(dir);
rec_len = OCFS2_DIR_REC_LEN(namelen);
while (de_buf < limit) {
de = (struct ocfs2_dir_entry *)de_buf;
if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
ret = -ENOENT;
goto out;
}
if (ocfs2_match(namelen, name, de)) {
ret = -EEXIST;
goto out;
}
/*
* No need to check for a trailing dirent record here as
* they're not used for inline dirs.
*/
if (ocfs2_dirent_would_fit(de, rec_len)) {
/* Ok, we found a spot. Return this bh and let
* the caller actually fill it in. */
*ret_de_bh = di_bh;
get_bh(*ret_de_bh);
ret = 0;
goto out;
}
last_de = de;
de_buf += le16_to_cpu(de->rec_len);
offset += le16_to_cpu(de->rec_len);
}
/*
* We're going to require expansion of the directory - figure
* out how many blocks we'll need so that a place for the
* dirent can be found.
*/
*blocks_wanted = 1;
new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
*blocks_wanted = 2;
ret = -ENOSPC;
out:
return ret;
}
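/*
 * Walk the blocks of an extent-based directory looking for a dirent
 * slot large enough to hold the new name.
 */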
static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
int namelen, struct buffer_head **ret_de_bh)
{
unsigned long offset;
struct buffer_head *bh = NULL;
unsigned short rec_len;
struct ocfs2_dir_entry *de;
struct super_block *sb = dir->i_sb;
int status;
int blocksize = dir->i_sb->s_blocksize;
status = ocfs2_read_dir_block(dir, 0, &bh, 0);
if (status)
goto bail;
rec_len = OCFS2_DIR_REC_LEN(namelen);
offset = 0;
de = (struct ocfs2_dir_entry *) bh->b_data;
while (1) {
if ((char *)de >= sb->s_blocksize + bh->b_data) {
brelse(bh);
bh = NULL;
if (i_size_read(dir) <= offset) {
/*
* Caller will have to expand this
* directory.
*/
status = -ENOSPC;
goto bail;
}
status = ocfs2_read_dir_block(dir,
offset >> sb->s_blocksize_bits,
&bh, 0);
if (status)
goto bail;
/* move to next block */
de = (struct ocfs2_dir_entry *) bh->b_data;
}
if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
status = -ENOENT;
goto bail;
}
if (ocfs2_match(namelen, name, de)) {
status = -EEXIST;
goto bail;
}
if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
blocksize))
goto next;
if (ocfs2_dirent_would_fit(de, rec_len)) {
/* Ok, we found a spot. Return this bh and let
* the caller actually fill it in. */
*ret_de_bh = bh;
get_bh(*ret_de_bh);
status = 0;
goto bail;
}
next:
offset += le16_to_cpu(de->rec_len);
de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
}
bail:
brelse(bh);
if (status)
mlog_errno(status);
return status;
}
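/*
 * sort() comparator for dx entries: order by major hash, then minor
 * hash, so that a full leaf can be split around a chosen major hash
 * value.
 */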
static int dx_leaf_sort_cmp(const void *a, const void *b)
{
const struct ocfs2_dx_entry *entry1 = a;
const struct ocfs2_dx_entry *entry2 = b;
u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
if (major_hash1 > major_hash2)
return 1;
if (major_hash1 < major_hash2)
return -1;
/*
* It is not strictly necessary to sort by minor hash.
*/
if (minor_hash1 > minor_hash2)
return 1;
if (minor_hash1 < minor_hash2)
return -1;
return 0;
}
static void dx_leaf_sort_swap(void *a, void *b, int size)
{
struct ocfs2_dx_entry *entry1 = a;
struct ocfs2_dx_entry *entry2 = b;
BUG_ON(size != sizeof(*entry1));
swap(*entry1, *entry2);
}
static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
{
struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
int i, num = le16_to_cpu(dl_list->de_num_used);
for (i = 0; i < (num - 1); i++) {
if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
return 0;
}
return 1;
}
/*
* Find the optimal value to split this leaf on. This expects the leaf
* entries to be in sorted order.
*
* leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
* the hash we want to insert.
*
* This function is only concerned with the major hash - that which
* determines which cluster an item belongs to.
*/
static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
u32 leaf_cpos, u32 insert_hash,
u32 *split_hash)
{
struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
int i, num_used = le16_to_cpu(dl_list->de_num_used);
int allsame;
/*
* There are a couple of rare but nasty corner cases we have to
* check for here. All of them involve a leaf where all values
* have the same major hash, which is what we look for first.
*
* Most of the time, all of the above is false, and we simply
* pick the median value for a split.
*/
allsame = ocfs2_dx_leaf_same_major(dx_leaf);
if (allsame) {
u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
if (val == insert_hash) {
/*
* No matter where we would choose to split,
* the new entry would want to occupy the same
* block as these. Since there's no space left
* in their existing block, we know there
* won't be space after the split.
*/
return -ENOSPC;
}
if (val == leaf_cpos) {
/*
* Because val is the same as leaf_cpos (which
* is the smallest value this leaf can have),
* yet is not equal to insert_hash, then we
* know that insert_hash *must* be larger than
* val (and leaf_cpos). At least cpos+1 in value.
*
* We also know then, that there cannot be an
* adjacent extent (otherwise we'd be looking
* at it). Choosing this value gives us a
* chance to get some contiguousness.
*/
*split_hash = leaf_cpos + 1;
return 0;
}
if (val > insert_hash) {
/*
* val cannot be the same as insert_hash, and
* also must be larger than leaf_cpos. Also,
* we know that there can't be a leaf between
* cpos and val, otherwise the entries with
* hash 'val' would be there.
*/
*split_hash = val;
return 0;
}
*split_hash = insert_hash;
return 0;
}
/*
* Since the records are sorted and the checks above
* guaranteed that not all records in this block are the same,
* we simply travel forward, from the median, and pick the 1st
* record whose value is larger than leaf_cpos.
*/
for (i = (num_used / 2); i < num_used; i++)
if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
leaf_cpos)
break;
BUG_ON(i == num_used); /* Should be impossible */
*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
return 0;
}
/*
* Transfer all entries in orig_dx_leaves whose major hash is equal to or
* larger than split_hash into new_dx_leaves. We use a temporary
* buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
*
* Since the block offset inside a leaf (cluster) is a constant mask
* of minor_hash, we can optimize - an item at block offset X within
* the original cluster, will be at offset X within the new cluster.
*/
static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
handle_t *handle,
struct ocfs2_dx_leaf *tmp_dx_leaf,
struct buffer_head **orig_dx_leaves,
struct buffer_head **new_dx_leaves,
int num_dx_leaves)
{
int i, j, num_used;
u32 major_hash;
struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
struct ocfs2_dx_entry_list *orig_list, *tmp_list;
struct ocfs2_dx_entry *dx_entry;
tmp_list = &tmp_dx_leaf->dl_list;
for (i = 0; i < num_dx_leaves; i++) {
orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
orig_list = &orig_dx_leaf->dl_list;
new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
num_used = le16_to_cpu(orig_list->de_num_used);
memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
tmp_list->de_num_used = cpu_to_le16(0);
memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
for (j = 0; j < num_used; j++) {
dx_entry = &orig_list->de_entries[j];
major_hash = le32_to_cpu(dx_entry->dx_major_hash);
if (major_hash >= split_hash)
ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
dx_entry);
else
ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
dx_entry);
}
memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
ocfs2_journal_dirty(handle, new_dx_leaves[i]);
}
}
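/*
 * Rough journal credit estimate for a dx dir rebalance: up to three
 * clusters' worth of blocks, plus extent tree growth on the dx root
 * and quota updates.
 */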
static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
struct ocfs2_dx_root_block *dx_root)
{
int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
credits += ocfs2_quota_trans_credits(osb->sb);
return credits;
}
/*
* Find the median value in dx_leaf_bh and allocate a new leaf to move
* half our entries into.
*/
static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
struct buffer_head *dx_root_bh,
struct buffer_head *dx_leaf_bh,
struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
u64 leaf_blkno)
{
struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
int credits, ret, i, num_used, did_quota = 0;
u32 cpos, split_hash, insert_hash = hinfo->major_hash;
u64 orig_leaves_start;
int num_dx_leaves;
struct buffer_head **orig_dx_leaves = NULL;
struct buffer_head **new_dx_leaves = NULL;
struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
struct ocfs2_extent_tree et;
handle_t *handle = NULL;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)leaf_blkno,
insert_hash);
ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
/*
* XXX: This is a rather large limit. We should use a more
* realistic value.
*/
if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
return -ENOSPC;
num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: "
"%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)leaf_blkno, num_used);
ret = -EIO;
goto out;
}
orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
if (!orig_dx_leaves) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
if (!new_dx_leaves) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
mlog_errno(ret);
goto out;
}
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
if (ret)
goto out_commit;
did_quota = 1;
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* This block is changing anyway, so we can sort it in place.
*/
sort(dx_leaf->dl_list.de_entries, num_used,
sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
dx_leaf_sort_swap);
ocfs2_journal_dirty(handle, dx_leaf_bh);
ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
&split_hash);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
/*
* We have to carefully order operations here. There are items
* which want to be in the new cluster before insert, but in
* order to put those items in the new cluster, we alter the
* old cluster. A failure to insert gets nasty.
*
* So, start by reserving writes to the old
* cluster. ocfs2_dx_dir_new_cluster will reserve writes on
* the new cluster for us, before inserting it. The insert
* won't happen if there's an error before that. Once the
* insert is done then, we can transfer from one leaf into the
* other without fear of hitting any error.
*/
/*
* The leaf transfer wants some scratch space so that we don't
* wind up doing a bunch of expensive memmove().
*/
tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
if (!tmp_dx_leaf) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit;
}
orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
orig_dx_leaves);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
cpos = split_hash;
ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
data_ac, meta_ac, new_dx_leaves,
num_dx_leaves);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
for (i = 0; i < num_dx_leaves; i++) {
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
orig_dx_leaves[i],
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
new_dx_leaves[i],
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
}
ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
orig_dx_leaves, new_dx_leaves, num_dx_leaves);
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
ocfs2_update_inode_fsync_trans(handle, dir, 1);
ocfs2_commit_trans(osb, handle);
out:
if (orig_dx_leaves || new_dx_leaves) {
for (i = 0; i < num_dx_leaves; i++) {
if (orig_dx_leaves)
brelse(orig_dx_leaves[i]);
if (new_dx_leaves)
brelse(new_dx_leaves[i]);
}
kfree(orig_dx_leaves);
kfree(new_dx_leaves);
}
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
kfree(tmp_dx_leaf);
return ret;
}
static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
struct buffer_head *di_bh,
struct buffer_head *dx_root_bh,
const char *name, int namelen,
struct ocfs2_dir_lookup_result *lookup)
{
int ret, rebalanced = 0;
struct ocfs2_dx_root_block *dx_root;
struct buffer_head *dx_leaf_bh = NULL;
struct ocfs2_dx_leaf *dx_leaf;
u64 blkno;
u32 leaf_cpos;
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
restart_search:
ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
&leaf_cpos, &blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
le16_to_cpu(dx_leaf->dl_list.de_count)) {
if (rebalanced) {
/*
* Rebalancing should have provided us with
* space in an appropriate leaf.
*
* XXX: Is this an abnormal condition then?
* Should we print a message here?
*/
ret = -ENOSPC;
goto out;
}
ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
&lookup->dl_hinfo, leaf_cpos,
blkno);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
/*
* Restart the lookup. The rebalance might have
* changed which block our item fits into. Mark our
* progress, so we only execute this once.
*/
brelse(dx_leaf_bh);
dx_leaf_bh = NULL;
rebalanced = 1;
goto restart_search;
}
lookup->dl_dx_leaf_bh = dx_leaf_bh;
dx_leaf_bh = NULL;
out:
brelse(dx_leaf_bh);
return ret;
}
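/*
 * Walk the directory's free-space list (headed by dr_free_blk in the
 * dx root) looking for a block whose trailer advertises enough free
 * space for the new dirent. On success, the block and its list
 * predecessor are recorded in the lookup result so that the insert
 * can later update the list.
 */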
static int ocfs2_search_dx_free_list(struct inode *dir,
struct buffer_head *dx_root_bh,
int namelen,
struct ocfs2_dir_lookup_result *lookup)
{
int ret = -ENOSPC;
struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
struct ocfs2_dir_block_trailer *db;
u64 next_block;
int rec_len = OCFS2_DIR_REC_LEN(namelen);
struct ocfs2_dx_root_block *dx_root;
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
next_block = le64_to_cpu(dx_root->dr_free_blk);
while (next_block) {
brelse(prev_leaf_bh);
prev_leaf_bh = leaf_bh;
leaf_bh = NULL;
ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
lookup->dl_leaf_bh = leaf_bh;
lookup->dl_prev_leaf_bh = prev_leaf_bh;
leaf_bh = NULL;
prev_leaf_bh = NULL;
break;
}
next_block = le64_to_cpu(db->db_free_next);
}
if (!next_block)
ret = -ENOSPC;
out:
brelse(leaf_bh);
brelse(prev_leaf_bh);
return ret;
}
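/*
* Convert an inline dx root into an extent based one: allocate a
* cluster of index leaves, copy the root's entries into the leaves
* keyed by minor hash, then clear the inline flag and insert the new
* cluster as the root's first extent.
*/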
static int ocfs2_expand_inline_dx_root(struct inode *dir,
struct buffer_head *dx_root_bh)
{
int ret, num_dx_leaves, i, j, did_quota = 0;
struct buffer_head **dx_leaves = NULL;
struct ocfs2_extent_tree et;
u64 insert_blkno;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
handle_t *handle = NULL;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_entry_list *entry_list;
struct ocfs2_dx_entry *dx_entry;
struct ocfs2_dx_leaf *target_leaf;
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
if (!dx_leaves) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (ret)
goto out_commit;
did_quota = 1;
/*
* We do this up front, before the allocation, so that a
* failure to add the dx_root_bh to the journal won't result
* us losing clusters.
*/
ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
num_dx_leaves, &insert_blkno);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* Transfer the entries from our dx_root into the appropriate
* block
*/
dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
entry_list = &dx_root->dr_entries;
for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
dx_entry = &entry_list->de_entries[i];
j = __ocfs2_dx_dir_hash_idx(osb,
le32_to_cpu(dx_entry->dx_minor_hash));
target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
/* Each leaf has been passed to the journal already
* via __ocfs2_dx_dir_new_cluster() */
}
dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
offsetof(struct ocfs2_dx_root_block, dr_list));
dx_root->dr_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
/* This should never fail considering we start with an empty
* dx_root. */
ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
if (ret)
mlog_errno(ret);
did_quota = 0;
ocfs2_update_inode_fsync_trans(handle, dir, 1);
ocfs2_journal_dirty(handle, dx_root_bh);
out_commit:
if (ret < 0 && did_quota)
dquot_free_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
ocfs2_commit_trans(osb, handle);
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (dx_leaves) {
for (i = 0; i < num_dx_leaves; i++)
brelse(dx_leaves[i]);
kfree(dx_leaves);
}
return ret;
}
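/* Return 0 if the inline dx root can hold another entry, else -ENOSPC. */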
static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
{
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_entry_list *entry_list;
dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
entry_list = &dx_root->dr_entries;
if (le16_to_cpu(entry_list->de_num_used) >=
le16_to_cpu(entry_list->de_count))
return -ENOSPC;
return 0;
}
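/*
* Reserve space for an insert into an indexed directory: make room in
* the index (expanding an inline root if necessary), then find or
* allocate an unindexed leaf block that can hold the new record.
*/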
static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
struct buffer_head *di_bh,
const char *name,
int namelen,
struct ocfs2_dir_lookup_result *lookup)
{
int ret, free_dx_root = 1;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct buffer_head *dx_root_bh = NULL;
struct buffer_head *leaf_bh = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_dx_root_block *dx_root;
ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
ret = -ENOSPC;
mlog_errno(ret);
goto out;
}
if (ocfs2_dx_root_inline(dx_root)) {
ret = ocfs2_inline_dx_has_space(dx_root_bh);
if (ret == 0)
goto search_el;
/*
* We ran out of room in the root block. Expand it to
* an extent, then allow ocfs2_find_dir_space_dx to do
* the rest.
*/
ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* Insert preparation for an indexed directory is split into two
* steps. The call to find_dir_space_dx reserves room in the index for
* an additional item. If we run out of space there, it's a real error
* and we cannot continue.
*/
ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
namelen, lookup);
if (ret) {
mlog_errno(ret);
goto out;
}
search_el:
/*
* Next, we need to find space in the unindexed tree. This call
* searches using the free space linked list. If the unindexed tree
* lacks sufficient space, we'll expand it below. The expansion code
* is smart enough to add any new blocks to the free space list.
*/
ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
if (ret && ret != -ENOSPC) {
mlog_errno(ret);
goto out;
}
/* Do this up here - ocfs2_extend_dir might need the dx_root */
lookup->dl_dx_root_bh = dx_root_bh;
free_dx_root = 0;
if (ret == -ENOSPC) {
ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We make the assumption here that new leaf blocks are added
* to the front of our free list.
*/
lookup->dl_prev_leaf_bh = NULL;
lookup->dl_leaf_bh = leaf_bh;
}
out:
if (free_dx_root)
brelse(dx_root_bh);
return ret;
}
/*
* Get a directory ready for insert. Any directory allocation required
* happens here. Success returns zero, and enough context in the dir
* lookup result that ocfs2_add_entry() will be able to complete the task
* with minimal performance impact.
*/
int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
const char *name,
int namelen,
struct ocfs2_dir_lookup_result *lookup)
{
int ret;
unsigned int blocks_wanted = 1;
struct buffer_head *bh = NULL;
trace_ocfs2_prepare_dir_for_insert(
(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
if (!namelen) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
}
/*
* Do this up front to reduce confusion.
*
* The directory might start inline, then be turned into an
* indexed one, in which case we'd need to hash deep inside
* ocfs2_find_dir_space_id(). Since
* ocfs2_prepare_dx_dir_for_insert() also needs this hash
* done, there seems no point in spreading out the calls. We
* can optimize away the case where the file system doesn't
* support indexing.
*/
if (ocfs2_supports_indexed_dirs(osb))
ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
if (ocfs2_dir_indexed(dir)) {
ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
name, namelen, lookup);
if (ret)
mlog_errno(ret);
goto out;
}
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
namelen, &bh, &blocks_wanted);
} else
ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
if (ret && ret != -ENOSPC) {
mlog_errno(ret);
goto out;
}
if (ret == -ENOSPC) {
/*
* We have to expand the directory to add this name.
*/
BUG_ON(bh);
ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
lookup, &bh);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
BUG_ON(!bh);
}
lookup->dl_leaf_bh = bh;
bh = NULL;
out:
brelse(bh);
return ret;
}
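/*
* Drop the index from a directory: clear OCFS2_INDEXED_DIR_FL on the
* inode, zero out i_dx_root, and return the dx root block to the
* extent allocator it was suballocated from.
*/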
static int ocfs2_dx_dir_remove_index(struct inode *dir,
struct buffer_head *di_bh,
struct buffer_head *dx_root_bh)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_dx_root_block *dx_root;
struct inode *dx_alloc_inode = NULL;
struct buffer_head *dx_alloc_bh = NULL;
handle_t *handle;
u64 blk;
u16 bit;
u64 bg_blkno;
dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
dx_alloc_inode = ocfs2_get_system_file_inode(osb,
EXTENT_ALLOC_SYSTEM_INODE,
le16_to_cpu(dx_root->dr_suballoc_slot));
if (!dx_alloc_inode) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
inode_lock(dx_alloc_inode);
ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
if (ret) {
mlog_errno(ret);
goto out_mutex;
}
handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_unlock;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
spin_lock(&OCFS2_I(dir)->ip_lock);
OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
spin_unlock(&OCFS2_I(dir)->ip_lock);
di->i_dx_root = cpu_to_le64(0ULL);
ocfs2_update_inode_fsync_trans(handle, dir, 1);
ocfs2_journal_dirty(handle, di_bh);
blk = le64_to_cpu(dx_root->dr_blkno);
bit = le16_to_cpu(dx_root->dr_suballoc_bit);
if (dx_root->dr_suballoc_loc)
bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
else
bg_blkno = ocfs2_which_suballoc_group(blk, bit);
ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
bit, bg_blkno, 1);
if (ret)
mlog_errno(ret);
out_commit:
ocfs2_commit_trans(osb, handle);
out_unlock:
ocfs2_inode_unlock(dx_alloc_inode, 1);
out_mutex:
inode_unlock(dx_alloc_inode);
brelse(dx_alloc_bh);
out:
iput(dx_alloc_inode);
return ret;
}
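/*
* Tear down a directory index ahead of inode deletion. For an extent
* based root, remove every allocated cluster from the btree (walking
* records from the highest major hash downwards) before freeing the
* root block itself via ocfs2_dx_dir_remove_index().
*/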
int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
{
int ret;
unsigned int clen;
u32 major_hash = UINT_MAX, p_cpos, cpos;
u64 blkno;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_extent_tree et;
ocfs2_init_dealloc_ctxt(&dealloc);
if (!ocfs2_dir_indexed(dir))
return 0;
ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
if (ocfs2_dx_root_inline(dx_root))
goto remove_index;
ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
/* XXX: What if dr_clusters is too large? */
while (le32_to_cpu(dx_root->dr_clusters)) {
ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
major_hash, &cpos, &blkno, &clen);
if (ret) {
mlog_errno(ret);
goto out;
}
p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
&dealloc, 0, false);
if (ret) {
mlog_errno(ret);
goto out;
}
if (cpos == 0)
break;
major_hash = cpos - 1;
}
remove_index:
ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
out:
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
brelse(dx_root_bh);
return ret;
}
| linux-master | fs/ocfs2/dir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* locks.c
*
* Userspace file locking support
*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/fcntl.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "locks.h"
static int ocfs2_do_flock(struct file *file, struct inode *inode,
int cmd, struct file_lock *fl)
{
int ret = 0, level = 0, trylock = 0;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
if (fl->fl_type == F_WRLCK)
level = 1;
if (!IS_SETLKW(cmd))
trylock = 1;
mutex_lock(&fp->fp_mutex);
if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
lockres->l_level > LKM_NLMODE) {
int old_level = 0;
struct file_lock request;
if (lockres->l_level == LKM_EXMODE)
old_level = 1;
if (level == old_level)
goto out;
/*
* Converting an existing lock is not guaranteed to be
* atomic, so we can get away with simply unlocking
* here and allowing the lock code to try at the new
* level.
*/
locks_init_lock(&request);
request.fl_type = F_UNLCK;
request.fl_flags = FL_FLOCK;
locks_lock_file_wait(file, &request);
ocfs2_file_unlock(file);
}
ret = ocfs2_file_lock(file, level, trylock);
if (ret) {
if (ret == -EAGAIN && trylock)
ret = -EWOULDBLOCK;
else
mlog_errno(ret);
goto out;
}
ret = locks_lock_file_wait(file, fl);
if (ret)
ocfs2_file_unlock(file);
out:
mutex_unlock(&fp->fp_mutex);
return ret;
}
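/* Drop the cluster lock first, then release the local VFS lock. */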
static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl)
{
int ret;
struct ocfs2_file_private *fp = file->private_data;
mutex_lock(&fp->fp_mutex);
ocfs2_file_unlock(file);
ret = locks_lock_file_wait(file, fl);
mutex_unlock(&fp->fp_mutex);
return ret;
}
/*
* Overall flow of ocfs2_flock() was influenced by gfs2_flock().
*/
int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
ocfs2_mount_local(osb))
return locks_lock_file_wait(file, fl);
if (fl->fl_type == F_UNLCK)
return ocfs2_do_funlock(file, cmd, fl);
else
return ocfs2_do_flock(file, inode, cmd, fl);
}
int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
}
| linux-master | fs/ocfs2/locks.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inode.c
*
* vfs' aops, fops, dops and iops
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
#include <asm/byteorder.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "sysfile.h"
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "filecheck.h"
#include "buffer_head_io.h"
struct ocfs2_find_inode_args
{
u64 fi_blkno;
unsigned long fi_ino;
unsigned int fi_flags;
unsigned int fi_sysfile_type;
};
static struct lock_class_key ocfs2_sysfile_lock_key[NUM_SYSTEM_INODES];
static int ocfs2_read_locked_inode(struct inode *inode,
struct ocfs2_find_inode_args *args);
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque);
static int ocfs2_find_actor(struct inode *inode, void *opaque);
static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh);
static int ocfs2_filecheck_read_inode_block_full(struct inode *inode,
struct buffer_head **bh,
int flags, int type);
static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
struct buffer_head *bh);
static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
struct buffer_head *bh);
void ocfs2_set_inode_flags(struct inode *inode)
{
unsigned int flags = OCFS2_I(inode)->ip_attr;
inode->i_flags &= ~(S_IMMUTABLE |
S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
if (flags & OCFS2_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
if (flags & OCFS2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & OCFS2_APPEND_FL)
inode->i_flags |= S_APPEND;
if (flags & OCFS2_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & OCFS2_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
}
/* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
{
unsigned int flags = oi->vfs_inode.i_flags;
oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL|
OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL);
if (flags & S_SYNC)
oi->ip_attr |= OCFS2_SYNC_FL;
if (flags & S_APPEND)
oi->ip_attr |= OCFS2_APPEND_FL;
if (flags & S_IMMUTABLE)
oi->ip_attr |= OCFS2_IMMUTABLE_FL;
if (flags & S_NOATIME)
oi->ip_attr |= OCFS2_NOATIME_FL;
if (flags & S_DIRSYNC)
oi->ip_attr |= OCFS2_DIRSYNC_FL;
}
struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
{
struct ocfs2_find_inode_args args;
args.fi_blkno = blkno;
args.fi_flags = 0;
args.fi_ino = ino_from_blkno(sb, blkno);
args.fi_sysfile_type = 0;
return ilookup5(sb, blkno, ocfs2_find_actor, &args);
}
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
int sysfile_type)
{
int rc = -ESTALE;
struct inode *inode = NULL;
struct super_block *sb = osb->sb;
struct ocfs2_find_inode_args args;
journal_t *journal = osb->journal->j_journal;
trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
sysfile_type);
/* Ok. By now we've either got the offsets passed to us by the
* caller, or we just pulled them off the bh. Let's do some
* sanity checks to make sure they're OK. */
if (blkno == 0) {
inode = ERR_PTR(-EINVAL);
mlog_errno(PTR_ERR(inode));
goto bail;
}
args.fi_blkno = blkno;
args.fi_flags = flags;
args.fi_ino = ino_from_blkno(sb, blkno);
args.fi_sysfile_type = sysfile_type;
inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
ocfs2_init_locked_inode, &args);
/* inode was *not* in the inode cache. 2.6.x requires
* us to do our own read_inode call and unlock it
* afterwards. */
if (inode == NULL) {
inode = ERR_PTR(-ENOMEM);
mlog_errno(PTR_ERR(inode));
goto bail;
}
trace_ocfs2_iget5_locked(inode->i_state);
if (inode->i_state & I_NEW) {
rc = ocfs2_read_locked_inode(inode, &args);
unlock_new_inode(inode);
}
if (is_bad_inode(inode)) {
iput(inode);
inode = ERR_PTR(rc);
goto bail;
}
/*
* Set the transaction ids of transactions that have to be committed
* to finish f[data]sync. We set them to the currently running transaction
* as we cannot be sure that the inode or some of its metadata isn't
* part of the transaction - the inode could have been reclaimed and
* now it is reread from disk.
*/
if (journal) {
transaction_t *transaction;
tid_t tid;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
transaction = journal->j_committing_transaction;
if (transaction)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
read_unlock(&journal->j_state_lock);
oi->i_sync_tid = tid;
oi->i_datasync_tid = tid;
}
bail:
if (!IS_ERR(inode)) {
trace_ocfs2_iget_end(inode,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
}
return inode;
}
/*
* here's how inodes get read from disk:
* iget5_locked -> find_actor -> OCFS2_FIND_ACTOR
* found? : return the in-memory inode
* not found? : get_new_inode -> OCFS2_INIT_LOCKED_INODE
*/
static int ocfs2_find_actor(struct inode *inode, void *opaque)
{
struct ocfs2_find_inode_args *args = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
args = opaque;
mlog_bug_on_msg(!inode, "No inode in find actor!\n");
trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno);
if (oi->ip_blkno != args->fi_blkno)
goto bail;
ret = 1;
bail:
return ret;
}
/*
* Initialize the new inode, but don't do anything that would cause
* us to sleep.
* Always returns 0; a nonzero return would make iget5_locked() fail.
*/
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
struct ocfs2_find_inode_args *args = opaque;
static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
ocfs2_file_ip_alloc_sem_key;
inode->i_ino = args->fi_ino;
OCFS2_I(inode)->ip_blkno = args->fi_blkno;
if (args->fi_sysfile_type != 0)
lockdep_set_class(&inode->i_rwsem,
&ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE)
lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
&ocfs2_quota_ip_alloc_sem_key);
else
lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
&ocfs2_file_ip_alloc_sem_key);
return 0;
}
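/*
* Fill a struct inode from its on-disk dinode: copy attributes, sizes
* and timestamps, pick the inode/file/address-space operations that
* match the file type, and initialize the per-inode cluster lock
* resources.
*/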
void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
int create_ino)
{
struct super_block *sb;
struct ocfs2_super *osb;
int use_plocks = 1;
sb = inode->i_sb;
osb = OCFS2_SB(sb);
if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks())
use_plocks = 0;
/*
* These have all been checked by ocfs2_read_inode_block() or set
* by ocfs2_mknod_locked(), so a failure is a code bug.
*/
BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); /* This means that read_inode
cannot create a superblock
inode today. Change this if
that is ever needed. */
BUG_ON(!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)));
BUG_ON(le32_to_cpu(fe->i_fs_generation) != osb->fs_generation);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
inode_set_iversion(inode, 1);
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
inode->i_mode = le16_to_cpu(fe->i_mode);
i_uid_write(inode, le32_to_cpu(fe->i_uid));
i_gid_write(inode, le32_to_cpu(fe->i_gid));
/* Fast symlinks will have i_size but no allocated clusters. */
if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
inode->i_blocks = 0;
inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
} else {
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_mapping->a_ops = &ocfs2_aops;
}
inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
mlog(ML_ERROR,
"ip_blkno %llu != i_blkno %llu!\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)le64_to_cpu(fe->i_blkno));
set_nlink(inode, ocfs2_read_links_count(fe));
trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
le32_to_cpu(fe->i_flags));
if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
inode->i_flags |= S_NOQUOTA;
}
if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
inode->i_flags |= S_NOQUOTA;
} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
/* we can't actually hit this as read_inode can't
* handle superblocks today ;-) */
BUG();
}
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
if (use_plocks)
inode->i_fop = &ocfs2_fops;
else
inode->i_fop = &ocfs2_fops_no_plocks;
inode->i_op = &ocfs2_file_iops;
i_size_write(inode, le64_to_cpu(fe->i_size));
break;
case S_IFDIR:
inode->i_op = &ocfs2_dir_iops;
if (use_plocks)
inode->i_fop = &ocfs2_dops;
else
inode->i_fop = &ocfs2_dops_no_plocks;
i_size_write(inode, le64_to_cpu(fe->i_size));
OCFS2_I(inode)->ip_dir_lock_gen = 1;
break;
case S_IFLNK:
inode->i_op = &ocfs2_symlink_inode_operations;
inode_nohighmem(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
break;
default:
inode->i_op = &ocfs2_special_file_iops;
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
break;
}
if (create_ino) {
inode->i_ino = ino_from_blkno(inode->i_sb,
le64_to_cpu(fe->i_blkno));
/*
* If we ever want to create system files from kernel,
* the generation argument to
* ocfs2_inode_lock_res_init() will have to change.
*/
BUG_ON(le32_to_cpu(fe->i_flags) & OCFS2_SYSTEM_FL);
ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
OCFS2_LOCK_TYPE_META, 0, inode);
ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
OCFS2_LOCK_TYPE_OPEN, 0, inode);
}
ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
OCFS2_LOCK_TYPE_RW, inode->i_generation,
inode);
ocfs2_set_inode_flags(inode);
OCFS2_I(inode)->ip_last_used_slot = 0;
OCFS2_I(inode)->ip_last_used_group = 0;
if (S_ISDIR(inode->i_mode))
ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
OCFS2_RESV_FLAG_DIR);
}
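/*
* Read and validate the dinode backing a freshly allocated in-memory
* inode, taking the cluster lock first when it is safe to do so (see
* the comment below on when it is not).
*/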
static int ocfs2_read_locked_inode(struct inode *inode,
struct ocfs2_find_inode_args *args)
{
struct super_block *sb;
struct ocfs2_super *osb;
struct ocfs2_dinode *fe;
struct buffer_head *bh = NULL;
int status, can_lock, lock_level = 0;
u32 generation = 0;
status = -EINVAL;
sb = inode->i_sb;
osb = OCFS2_SB(sb);
/*
* To improve performance of cold-cache inode stats, we take
* the cluster lock here if possible.
*
* Generally, OCFS2 never trusts the contents of an inode
* unless it's holding a cluster lock, so taking it here isn't
* a correctness issue as much as it is a performance
* improvement.
*
* There are three times when taking the lock is not a good idea:
*
* 1) During startup, before we have initialized the DLM.
*
* 2) If we are reading certain system files which never get
* cluster locks (local alloc, truncate log).
*
* 3) If the process doing the iget() is responsible for
* orphan dir recovery. We're holding the orphan dir lock and
* can get into a deadlock with another process on another
* node in ->delete_inode().
*
* #1 and #2 can be simply solved by never taking the lock
* here for system files (which are the only type we read
* during mount). It's a heavier approach, but our main
* concern is user-accessible files anyway.
*
* #3 works itself out because we'll eventually take the
* cluster lock before trusting anything anyway.
*/
can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
&& !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
&& !ocfs2_mount_local(osb);
trace_ocfs2_read_locked_inode(
(unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock);
/*
* To maintain backwards compatibility with older versions of
* ocfs2-tools, we still store the generation value for system
* files. The only ones that actually matter to userspace are
* the journals, but it's easier and inexpensive to just flag
* all system files similarly.
*/
if (args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
generation = osb->fs_generation;
ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
OCFS2_LOCK_TYPE_META,
generation, inode);
ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
OCFS2_LOCK_TYPE_OPEN,
0, inode);
if (can_lock) {
status = ocfs2_open_lock(inode);
if (status) {
make_bad_inode(inode);
mlog_errno(status);
return status;
}
status = ocfs2_inode_lock(inode, NULL, lock_level);
if (status) {
make_bad_inode(inode);
mlog_errno(status);
return status;
}
}
if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
status = ocfs2_try_open_lock(inode, 0);
if (status) {
make_bad_inode(inode);
return status;
}
}
if (can_lock) {
if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_CHK)
status = ocfs2_filecheck_read_inode_block_full(inode,
&bh, OCFS2_BH_IGNORE_CACHE, 0);
else if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_FIX)
status = ocfs2_filecheck_read_inode_block_full(inode,
&bh, OCFS2_BH_IGNORE_CACHE, 1);
else
status = ocfs2_read_inode_block_full(inode,
&bh, OCFS2_BH_IGNORE_CACHE);
} else {
status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
/*
* If buffer is in jbd, then its checksum may not have been
* computed as yet.
*/
if (!status && !buffer_jbd(bh)) {
if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_CHK)
status = ocfs2_filecheck_validate_inode_block(
osb->sb, bh);
else if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_FIX)
status = ocfs2_filecheck_repair_inode_block(
osb->sb, bh);
else
status = ocfs2_validate_inode_block(
osb->sb, bh);
}
}
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = -EINVAL;
fe = (struct ocfs2_dinode *) bh->b_data;
/*
* This is a code bug. Right now the caller needs to
* understand whether it is asking for a system file inode or
* not so the proper lock names can be built.
*/
mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) !=
!!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE),
"Inode %llu: system file state is ambiguous\n",
(unsigned long long)args->fi_blkno);
if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
S_ISBLK(le16_to_cpu(fe->i_mode)))
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
ocfs2_populate_inode(inode, fe, 0);
BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno));
if (buffer_dirty(bh) && !buffer_jbd(bh)) {
if (can_lock) {
ocfs2_inode_unlock(inode, lock_level);
lock_level = 1;
ocfs2_inode_lock(inode, NULL, lock_level);
}
status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
status = 0;
bail:
if (can_lock)
ocfs2_inode_unlock(inode, lock_level);
if (status < 0)
make_bad_inode(inode);
brelse(bh);
return status;
}
void ocfs2_sync_blockdev(struct super_block *sb)
{
sync_blockdev(sb->s_bdev);
}
static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh)
{
int status = 0;
struct ocfs2_dinode *fe;
handle_t *handle = NULL;
fe = (struct ocfs2_dinode *) fe_bh->b_data;
/*
* This check will also skip truncate of inodes with inline
* data and fast symlinks.
*/
if (fe->i_clusters) {
if (ocfs2_should_order_data(inode))
ocfs2_begin_ordered_truncate(inode, 0);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto out;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out;
}
i_size_write(inode, 0);
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
mlog_errno(status);
goto out;
}
ocfs2_commit_trans(osb, handle);
handle = NULL;
status = ocfs2_commit_truncate(osb, inode, fe_bh);
if (status < 0)
mlog_errno(status);
}
out:
if (handle)
ocfs2_commit_trans(osb, handle);
return status;
}
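/*
* Final removal of an inode's on-disk structures: delete its orphan
* dir entry (unless that was skipped at unlink time), stamp i_dtime,
* clear the valid and orphaned flags, and give the dinode back to the
* inode allocator of its slot.
*/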
static int ocfs2_remove_inode(struct inode *inode,
struct buffer_head *di_bh,
struct inode *orphan_dir_inode,
struct buffer_head *orphan_dir_bh)
{
int status;
struct inode *inode_alloc_inode = NULL;
struct buffer_head *inode_alloc_bh = NULL;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
inode_alloc_inode =
ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
le16_to_cpu(di->i_suballoc_slot));
if (!inode_alloc_inode) {
status = -ENOENT;
mlog_errno(status);
goto bail;
}
inode_lock(inode_alloc_inode);
status = ocfs2_inode_lock(inode_alloc_inode, &inode_alloc_bh, 1);
if (status < 0) {
inode_unlock(inode_alloc_inode);
mlog_errno(status);
goto bail;
}
handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS +
ocfs2_quota_trans_credits(inode->i_sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail_unlock;
}
if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
orphan_dir_bh, false);
if (status < 0) {
mlog_errno(status);
goto bail_commit;
}
}
/* Set the inode's dtime. */
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail_commit;
}
di->i_dtime = cpu_to_le64(ktime_get_real_seconds());
di->i_flags &= cpu_to_le32(~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
ocfs2_journal_dirty(handle, di_bh);
ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
dquot_free_inode(inode);
status = ocfs2_free_dinode(handle, inode_alloc_inode,
inode_alloc_bh, di);
if (status < 0)
mlog_errno(status);
bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock:
ocfs2_inode_unlock(inode_alloc_inode, 1);
inode_unlock(inode_alloc_inode);
brelse(inode_alloc_bh);
bail:
iput(inode_alloc_inode);
return status;
}
/*
* Serialize with orphan dir recovery. If the process doing
* recovery on this orphan dir does an iget() with the dir
* i_rwsem held, we'll deadlock here. Instead we detect this
* and exit early - recovery will wipe this inode for us.
*/
static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
int slot)
{
int ret = 0;
spin_lock(&osb->osb_lock);
if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) {
ret = -EDEADLK;
goto out;
}
/* This signals to the orphan recovery process that it should
* wait for us to handle the wipe. */
osb->osb_orphan_wipes[slot]++;
out:
spin_unlock(&osb->osb_lock);
trace_ocfs2_check_orphan_recovery_state(slot, ret);
return ret;
}
static void ocfs2_signal_wipe_completion(struct ocfs2_super *osb,
int slot)
{
spin_lock(&osb->osb_lock);
osb->osb_orphan_wipes[slot]--;
spin_unlock(&osb->osb_lock);
wake_up(&osb->osb_wipe_event);
}
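/*
* Wipe an inode from disk. The orphan dir lock is held across the
* whole operation so that recovery on another node cannot race with
* us; data, the dir index, xattrs and the refcount tree are all torn
* down before the dinode itself is removed.
*/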
static int ocfs2_wipe_inode(struct inode *inode,
struct buffer_head *di_bh)
{
int status, orphaned_slot = -1;
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
orphaned_slot = le16_to_cpu(di->i_orphaned_slot);
status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot);
if (status)
return status;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
orphaned_slot);
if (!orphan_dir_inode) {
status = -ENOENT;
mlog_errno(status);
goto bail;
}
/* Lock the orphan dir. The lock will be held for the entire
* delete_inode operation. We do this now to avoid races with
* recovery completion on other nodes. */
inode_lock(orphan_dir_inode);
status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
if (status < 0) {
inode_unlock(orphan_dir_inode);
mlog_errno(status);
goto bail;
}
}
/* we do this while holding the orphan dir lock because we
* don't want recovery being run from another node to try an
* inode delete underneath us -- this will result in two nodes
* truncating the same file! */
status = ocfs2_truncate_for_delete(osb, inode, di_bh);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_dir;
}
/* Remove any dir index tree */
if (S_ISDIR(inode->i_mode)) {
status = ocfs2_dx_dir_truncate(inode, di_bh);
if (status) {
mlog_errno(status);
goto bail_unlock_dir;
}
}
/* Free extended attribute resources associated with this inode. */
status = ocfs2_xattr_remove(inode, di_bh);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_dir;
}
status = ocfs2_remove_refcount_tree(inode, di_bh);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_dir;
}
status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode,
orphan_dir_bh);
if (status < 0)
mlog_errno(status);
bail_unlock_dir:
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)
return status;
ocfs2_inode_unlock(orphan_dir_inode, 1);
inode_unlock(orphan_dir_inode);
brelse(orphan_dir_bh);
bail:
iput(orphan_dir_inode);
ocfs2_signal_wipe_completion(osb, orphaned_slot);
return status;
}
/* There is a series of simple checks that should be done before a
* trylock is even considered. Encapsulate those in this function. */
static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
{
int ret = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task,
(unsigned long long)oi->ip_blkno,
oi->ip_flags);
/* We shouldn't be getting here for the root directory
* inode. */
if (inode == osb->root_inode) {
mlog(ML_ERROR, "Skipping delete of root inode.\n");
goto bail;
}
/*
* If we're coming from downconvert_thread we can't go into our own
* voting [hello, deadlock city!] so we cannot delete the inode. But
* since we dropped the last inode ref when downconverting the dentry
* lock, we cannot have the file open, and thus the node doing the
* unlink will take care of deleting the inode.
*/
if (current == osb->dc_task)
goto bail;
spin_lock(&oi->ip_lock);
/* OCFS2 *never* deletes system files. This should technically
* never get here as system file inodes should always have a
* positive link count. */
if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
mlog(ML_ERROR, "Skipping delete of system file %llu\n",
(unsigned long long)oi->ip_blkno);
goto bail_unlock;
}
ret = 1;
bail_unlock:
spin_unlock(&oi->ip_lock);
bail:
return ret;
}
/* Query the cluster to determine whether we should wipe an inode from
* disk or not.
*
* Requires the inode to have the cluster lock. */
static int ocfs2_query_inode_wipe(struct inode *inode,
struct buffer_head *di_bh,
int *wipe)
{
int status = 0, reason = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di;
*wipe = 0;
trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
inode->i_nlink);
/* While we were waiting for the cluster lock in
* ocfs2_delete_inode, another node might have asked to delete
* the inode. Recheck our flags to catch this. */
if (!ocfs2_inode_is_valid_to_delete(inode)) {
reason = 1;
goto bail;
}
/* Now that we have an up to date inode, we can double check
* the link count. */
if (inode->i_nlink)
goto bail;
/* Do some basic inode verification... */
di = (struct ocfs2_dinode *) di_bh->b_data;
if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL)) &&
!(oi->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
/*
* Inodes in the orphan dir must have ORPHANED_FL. The only
* inodes that come back out of the orphan dir are reflink
* targets. A reflink target may be moved out of the orphan
* dir between the time we scan the directory and the time we
* process it. This would lead to HAS_REFCOUNT_FL being set but
* ORPHANED_FL not.
*/
if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
reason = 2;
goto bail;
}
/* for lack of a better error? */
status = -EEXIST;
mlog(ML_ERROR,
"Inode %llu (on-disk %llu) not orphaned! "
"Disk flags 0x%x, inode flags 0x%x\n",
(unsigned long long)oi->ip_blkno,
(unsigned long long)le64_to_cpu(di->i_blkno),
le32_to_cpu(di->i_flags), oi->ip_flags);
goto bail;
}
/* has someone already deleted us?! baaad... */
if (di->i_dtime) {
status = -EEXIST;
mlog_errno(status);
goto bail;
}
/*
* This is how ocfs2 determines whether an inode is still live
* within the cluster. Every node takes a shared read lock on
* the inode open lock in ocfs2_read_locked_inode(). When we
* get to ->delete_inode(), each node tries to convert its
* lock to an exclusive. Trylocks are serialized by the inode
* meta data lock. If the upconvert succeeds, we know the inode
* is no longer live and can be deleted.
*
* Though we call this with the meta data lock held, the
* trylock keeps us from ABBA deadlock.
*/
status = ocfs2_try_open_lock(inode, 1);
if (status == -EAGAIN) {
status = 0;
reason = 3;
goto bail;
}
if (status < 0) {
mlog_errno(status);
goto bail;
}
*wipe = 1;
trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));
bail:
trace_ocfs2_query_inode_wipe_end(status, reason);
return status;
}
/* Support function for ocfs2_delete_inode. Will help us keep the
* inode data in a consistent state for clear_inode. Always truncates
* pages, optionally syncing them first. */
static void ocfs2_cleanup_delete_inode(struct inode *inode,
int sync_data)
{
trace_ocfs2_cleanup_delete_inode(
(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
filemap_write_and_wait(inode->i_mapping);
truncate_inode_pages_final(&inode->i_data);
}
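/*
* Decide whether this node should wipe the inode from disk and, if
* the rest of the cluster agrees, do so. Runs with signals blocked so
* that an -ERESTARTSYS from the locking paths cannot leave the inode
* orphaned forever.
*/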
static void ocfs2_delete_inode(struct inode *inode)
{
int wipe, status;
sigset_t oldset;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di = NULL;
trace_ocfs2_delete_inode(inode->i_ino,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
is_bad_inode(inode));
/* When we fail in read_inode() we mark inode as bad. The second test
* catches the case when inode allocation fails before allocating
* a block for the inode. */
if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
goto bail;
if (!ocfs2_inode_is_valid_to_delete(inode)) {
/* It's probably not necessary to truncate_inode_pages
* here, but we do it for safety (it will most likely
* be a no-op anyway). */
ocfs2_cleanup_delete_inode(inode, 0);
goto bail;
}
dquot_initialize(inode);
/* We want to block signals in delete_inode as the lock and
* messaging paths may return us -ERESTARTSYS. Which would
* cause us to exit early, resulting in inodes being orphaned
* forever. */
ocfs2_block_signals(&oldset);
/*
* Synchronize us against ocfs2_get_dentry. We take this in
* shared mode so that all nodes can still concurrently
* process deletes.
*/
status = ocfs2_nfs_sync_lock(OCFS2_SB(inode->i_sb), 0);
if (status < 0) {
mlog(ML_ERROR, "getting nfs sync lock(PR) failed %d\n", status);
ocfs2_cleanup_delete_inode(inode, 0);
goto bail_unblock;
}
/* Lock down the inode. This gives us an up to date view of
* its metadata (for verification), and allows us to
* serialize delete_inode on multiple nodes.
*
* Even though we might be doing a truncate, we don't take the
* allocation lock here as it won't be needed - nobody will
* have the file open.
*/
status = ocfs2_inode_lock(inode, &di_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
ocfs2_cleanup_delete_inode(inode, 0);
goto bail_unlock_nfs_sync;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
/* Skip inode deletion and wait for the dio orphan entry to be
* recovered first */
if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
ocfs2_cleanup_delete_inode(inode, 0);
goto bail_unlock_inode;
}
/* Query the cluster. This will be the final decision made
* before we go ahead and wipe the inode. */
status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
if (!wipe || status < 0) {
/* Error and remote inode busy both mean we won't be
* removing the inode, so they take almost the same
* path. */
if (status < 0)
mlog_errno(status);
/* Someone in the cluster has disallowed a wipe of
* this inode, or it was never completely
* orphaned. Write out the pages and exit now. */
ocfs2_cleanup_delete_inode(inode, 1);
goto bail_unlock_inode;
}
ocfs2_cleanup_delete_inode(inode, 0);
status = ocfs2_wipe_inode(inode, di_bh);
if (status < 0) {
if (status != -EDEADLK)
mlog_errno(status);
goto bail_unlock_inode;
}
/*
* Mark the inode as successfully deleted.
*
* This is important for ocfs2_clear_inode() as it will check
* this flag and skip any checkpointing work
*
* ocfs2_stuff_meta_lvb() also uses this flag to invalidate
* the LVB for other nodes.
*/
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED;
bail_unlock_inode:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
bail_unlock_nfs_sync:
ocfs2_nfs_sync_unlock(OCFS2_SB(inode->i_sb), 0);
bail_unblock:
ocfs2_unblock_signals(&oldset);
bail:
return;
}
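/*
* Final teardown of the in-memory inode: drop quota, release the open
* lock, checkpoint any metadata still in the journal (unless the
* inode was wiped), and free the cluster lock resources. The
* mlog_bug_on_msg() checks assert that nothing is still using the
* inode.
*/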
static void ocfs2_clear_inode(struct inode *inode)
{
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
clear_inode(inode);
trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink);
mlog_bug_on_msg(osb == NULL,
"Inode=%lu\n", inode->i_ino);
dquot_drop(inode);
/* To prevent remote deletes we held the open lock earlier; now it
* is time to unlock the PR and EX open locks. */
ocfs2_open_unlock(inode);
/* Do these before all the other work so that we don't bounce
* the downconvert thread while waiting to destroy the locks. */
ocfs2_mark_lockres_freeing(osb, &oi->ip_rw_lockres);
ocfs2_mark_lockres_freeing(osb, &oi->ip_inode_lockres);
ocfs2_mark_lockres_freeing(osb, &oi->ip_open_lockres);
ocfs2_resv_discard(&osb->osb_la_resmap,
&oi->ip_la_data_resv);
ocfs2_resv_init_once(&oi->ip_la_data_resv);
/* We very well may get a clear_inode before all of an inode's
* metadata has hit disk. Of course, we can't drop any cluster
* locks until the journal has finished with it. The only
* exception here are successfully wiped inodes - their
* metadata can now be considered to be part of the system
* inodes from which it came. */
if (!(oi->ip_flags & OCFS2_INODE_DELETED))
ocfs2_checkpoint_inode(inode);
mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
"Clear inode of %llu, inode has io markers\n",
(unsigned long long)oi->ip_blkno);
mlog_bug_on_msg(!list_empty(&oi->ip_unwritten_list),
"Clear inode of %llu, inode has unwritten extents\n",
(unsigned long long)oi->ip_blkno);
ocfs2_extent_map_trunc(inode, 0);
status = ocfs2_drop_inode_locks(inode);
if (status < 0)
mlog_errno(status);
ocfs2_lock_res_free(&oi->ip_rw_lockres);
ocfs2_lock_res_free(&oi->ip_inode_lockres);
ocfs2_lock_res_free(&oi->ip_open_lockres);
ocfs2_metadata_cache_exit(INODE_CACHE(inode));
mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
"Clear inode of %llu, inode has %u cache items\n",
(unsigned long long)oi->ip_blkno,
INODE_CACHE(inode)->ci_num_cached);
mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
"Clear inode of %llu, inode has a bad flag\n",
(unsigned long long)oi->ip_blkno);
mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
"Clear inode of %llu, inode is locked\n",
(unsigned long long)oi->ip_blkno);
mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
"Clear inode of %llu, io_mutex is locked\n",
(unsigned long long)oi->ip_blkno);
mutex_unlock(&oi->ip_io_mutex);
/*
* Note the inconsistency: down_trylock() returns 0 on success while
* down_write_trylock() returns 1 on success.
*/
mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
"Clear inode of %llu, alloc_sem is locked\n",
(unsigned long long)oi->ip_blkno);
up_write(&oi->ip_alloc_sem);
mlog_bug_on_msg(oi->ip_open_count,
"Clear inode of %llu has open count %d\n",
(unsigned long long)oi->ip_blkno, oi->ip_open_count);
/* Clear all other flags. */
oi->ip_flags = 0;
oi->ip_dir_start_lookup = 0;
oi->ip_blkno = 0ULL;
/*
* ip_jinode is used to track txns against this inode. We ensure that
* the journal is flushed before journal shutdown. Thus it is safe to
* have inodes get cleaned up after journal shutdown.
*/
jbd2_journal_release_jbd_inode(osb->journal->j_journal,
&oi->ip_jinode);
}
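/*
* ->evict_inode: unlinked or possibly orphaned inodes go through the
* full delete path; everything else just has its page cache dropped
* before the common clear_inode teardown.
*/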
void ocfs2_evict_inode(struct inode *inode)
{
if (!inode->i_nlink ||
(OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) {
ocfs2_delete_inode(inode);
} else {
truncate_inode_pages_final(&inode->i_data);
}
ocfs2_clear_inode(inode);
}
/* Called with i_lock held, with no more references on the
* struct inode, so it's safe here to check the flags field
* and to manipulate i_nlink without any other locks. */
int ocfs2_drop_inode(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink, oi->ip_flags);
assert_spin_locked(&inode->i_lock);
inode->i_state |= I_WILL_FREE;
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
return 1;
}
/*
* This is called from our getattr.
*/
int ocfs2_inode_revalidate(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
int status = 0;
trace_ocfs2_inode_revalidate(inode,
inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);
if (!inode) {
status = -ENOENT;
goto bail;
}
spin_lock(&OCFS2_I(inode)->ip_lock);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
status = -ENOENT;
goto bail;
}
spin_unlock(&OCFS2_I(inode)->ip_lock);
/* Let ocfs2_inode_lock do the work of updating our struct
* inode for us. */
status = ocfs2_inode_lock(inode, NULL, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
ocfs2_inode_unlock(inode, 0);
bail:
return status;
}
/*
* Updates a disk inode from a struct inode.
* Only takes ip_lock.
*/
int ocfs2_mark_inode_dirty(handle_t *handle,
struct inode *inode,
struct buffer_head *bh)
{
int status;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
spin_lock(&OCFS2_I(inode)->ip_lock);
fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
ocfs2_get_inode_flags(OCFS2_I(inode));
fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
spin_unlock(&OCFS2_I(inode)->ip_lock);
fe->i_size = cpu_to_le64(i_size_read(inode));
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_uid = cpu_to_le32(i_uid_read(inode));
fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
ocfs2_journal_dirty(handle, bh);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
leave:
return status;
}
/*
* Updates a struct inode from a disk inode.
* Does no I/O, only takes ip_lock.
*/
void ocfs2_refresh_inode(struct inode *inode,
struct ocfs2_dinode *fe)
{
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
set_nlink(inode, ocfs2_read_links_count(fe));
i_uid_write(inode, le32_to_cpu(fe->i_uid));
i_gid_write(inode, le32_to_cpu(fe->i_gid));
inode->i_mode = le16_to_cpu(fe->i_mode);
if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
spin_unlock(&OCFS2_I(inode)->ip_lock);
}
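/*
* Validate a dinode block read from disk: check the metadata ecc
* first (a failure there is local to the block), then the signature,
* block number, valid flag and filesystem generation.
*/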
int ocfs2_validate_inode_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
if (rc) {
mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
(unsigned long long)bh->b_blocknr);
goto bail;
}
/*
* Errors after here are fatal.
*/
rc = -EINVAL;
if (!OCFS2_IS_VALID_DINODE(di)) {
rc = ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
di->i_signature);
goto bail;
}
if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
rc = ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(di->i_blkno));
goto bail;
}
if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
rc = ocfs2_error(sb,
"Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
(unsigned long long)bh->b_blocknr);
goto bail;
}
if (le32_to_cpu(di->i_fs_generation) !=
OCFS2_SB(sb)->fs_generation) {
rc = ocfs2_error(sb,
"Invalid dinode #%llu: fs_generation is %u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(di->i_fs_generation));
goto bail;
}
rc = 0;
bail:
return rc;
}
static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
struct buffer_head *bh)
{
int rc = 0;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
trace_ocfs2_filecheck_validate_inode_block(
(unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* Call ocfs2_validate_meta_ecc() first since it has an ecc repair
* function, but we should not return an error immediately when ecc
* validation fails, because the cause is quite likely an invalid
* inode number having been passed in.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
if (rc) {
mlog(ML_ERROR,
"Filecheck: checksum failed for dinode %llu\n",
(unsigned long long)bh->b_blocknr);
rc = -OCFS2_FILECHECK_ERR_BLOCKECC;
}
if (!OCFS2_IS_VALID_DINODE(di)) {
mlog(ML_ERROR,
"Filecheck: invalid dinode #%llu: signature = %.*s\n",
(unsigned long long)bh->b_blocknr, 7, di->i_signature);
rc = -OCFS2_FILECHECK_ERR_INVALIDINO;
goto bail;
} else if (rc)
goto bail;
if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
mlog(ML_ERROR,
"Filecheck: invalid dinode #%llu: i_blkno is %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(di->i_blkno));
rc = -OCFS2_FILECHECK_ERR_BLOCKNO;
goto bail;
}
if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
mlog(ML_ERROR,
"Filecheck: invalid dinode #%llu: OCFS2_VALID_FL "
"not set\n",
(unsigned long long)bh->b_blocknr);
rc = -OCFS2_FILECHECK_ERR_VALIDFLAG;
goto bail;
}
if (le32_to_cpu(di->i_fs_generation) !=
OCFS2_SB(sb)->fs_generation) {
mlog(ML_ERROR,
"Filecheck: invalid dinode #%llu: fs_generation is %u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(di->i_fs_generation));
rc = -OCFS2_FILECHECK_ERR_GENERATION;
}
bail:
return rc;
}
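/*
* Best-effort repair of a dinode block: only the self-referential
* fields (i_blkno, i_fs_generation) and the ecc can be fixed here; a
* bad signature or a missing OCFS2_VALID_FL is beyond repair.
*/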
static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
struct buffer_head *bh)
{
int changed = 0;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
if (!ocfs2_filecheck_validate_inode_block(sb, bh))
return 0;
trace_ocfs2_filecheck_repair_inode_block(
(unsigned long long)bh->b_blocknr);
if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) ||
ocfs2_is_soft_readonly(OCFS2_SB(sb))) {
mlog(ML_ERROR,
"Filecheck: cannot repair dinode #%llu "
"on readonly filesystem\n",
(unsigned long long)bh->b_blocknr);
return -OCFS2_FILECHECK_ERR_READONLY;
}
if (buffer_jbd(bh)) {
mlog(ML_ERROR,
"Filecheck: cannot repair dinode #%llu, "
"its buffer is in jbd\n",
(unsigned long long)bh->b_blocknr);
return -OCFS2_FILECHECK_ERR_INJBD;
}
if (!OCFS2_IS_VALID_DINODE(di)) {
/* Cannot fix invalid inode block */
return -OCFS2_FILECHECK_ERR_INVALIDINO;
}
if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
/* Cannot just add VALID_FL flag back as a fix,
* need more things to check here.
*/
return -OCFS2_FILECHECK_ERR_VALIDFLAG;
}
if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
di->i_blkno = cpu_to_le64(bh->b_blocknr);
changed = 1;
mlog(ML_ERROR,
"Filecheck: reset dinode #%llu: i_blkno to %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(di->i_blkno));
}
if (le32_to_cpu(di->i_fs_generation) !=
OCFS2_SB(sb)->fs_generation) {
di->i_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
changed = 1;
mlog(ML_ERROR,
"Filecheck: reset dinode #%llu: fs_generation to %u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(di->i_fs_generation));
}
if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) {
ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check);
mark_buffer_dirty(bh);
mlog(ML_ERROR,
"Filecheck: reset dinode #%llu: compute meta ecc\n",
(unsigned long long)bh->b_blocknr);
}
return 0;
}
static int
ocfs2_filecheck_read_inode_block_full(struct inode *inode,
struct buffer_head **bh,
int flags, int type)
{
int rc;
struct buffer_head *tmp = *bh;
if (!type) /* Check inode block */
rc = ocfs2_read_blocks(INODE_CACHE(inode),
OCFS2_I(inode)->ip_blkno,
1, &tmp, flags,
ocfs2_filecheck_validate_inode_block);
else /* Repair inode block */
rc = ocfs2_read_blocks(INODE_CACHE(inode),
OCFS2_I(inode)->ip_blkno,
1, &tmp, flags,
ocfs2_filecheck_repair_inode_block);
/* If ocfs2_read_blocks() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh,
int flags)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_blocks(INODE_CACHE(inode), OCFS2_I(inode)->ip_blkno,
1, &tmp, flags, ocfs2_validate_inode_block);
/* If ocfs2_read_blocks() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh)
{
return ocfs2_read_inode_block_full(inode, bh, 0);
}
static u64 ocfs2_inode_cache_owner(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
return oi->ip_blkno;
}
static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
return oi->vfs_inode.i_sb;
}
static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
spin_lock(&oi->ip_lock);
}
static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
spin_unlock(&oi->ip_lock);
}
static void ocfs2_inode_cache_io_lock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
mutex_lock(&oi->ip_io_mutex);
}
static void ocfs2_inode_cache_io_unlock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
mutex_unlock(&oi->ip_io_mutex);
}
const struct ocfs2_caching_operations ocfs2_inode_caching_ops = {
.co_owner = ocfs2_inode_cache_owner,
.co_get_super = ocfs2_inode_cache_get_super,
.co_cache_lock = ocfs2_inode_cache_lock,
.co_cache_unlock = ocfs2_inode_cache_unlock,
.co_io_lock = ocfs2_inode_cache_io_lock,
.co_io_unlock = ocfs2_inode_cache_io_unlock,
};
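/*
 * Illustrative sketch (not part of the original file): the ops table
 * above plugs inode-specific locking into ocfs2's generic metadata
 * cache, which dispatches through it instead of knowing about inodes.
 * The ci_ops field name is an assumption based on uptodate.h, where
 * the real dispatch helpers live.
 */
static inline void example_inode_cache_lock(struct ocfs2_caching_info *ci)
{
	/* Ends up in ocfs2_inode_cache_lock() for inode-backed caches. */
	ci->ci_ops->co_cache_lock(ci);
}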
| linux-master | fs/ocfs2/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/ocfs2/ioctl.c
*
* Copyright (C) 2006 Herbert Poetzl
* adapted from Remy Card's ext2/ioctl.c
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/fileattr.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "ioctl.h"
#include "resize.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "move_extents.h"
#define o2info_from_user(a, b) \
copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b) \
copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
/*
 * This is just a best-effort attempt to tell userspace which request
 * caused the error.
*/
static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
struct ocfs2_info_request __user *req)
{
kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
}
static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
{
req->ir_flags |= OCFS2_INFO_FL_FILLED;
}
static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
{
req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
}
static inline int o2info_coherent(struct ocfs2_info_request *req)
{
return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
}
int ocfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
unsigned int flags;
int status;
status = ocfs2_inode_lock(inode, NULL, 0);
if (status < 0) {
mlog_errno(status);
return status;
}
ocfs2_get_inode_flags(OCFS2_I(inode));
flags = OCFS2_I(inode)->ip_attr;
ocfs2_inode_unlock(inode, 0);
fileattr_fill_flags(fa, flags & OCFS2_FL_VISIBLE);
return status;
}
int ocfs2_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
unsigned int flags = fa->flags;
struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
struct buffer_head *bh = NULL;
unsigned oldflags;
int status;
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
status = ocfs2_inode_lock(inode, &bh, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (!S_ISDIR(inode->i_mode))
flags &= ~OCFS2_DIRSYNC_FL;
oldflags = ocfs2_inode->ip_attr;
flags = flags & OCFS2_FL_MODIFIABLE;
flags |= oldflags & ~OCFS2_FL_MODIFIABLE;
/* Check already done by VFS, but repeat with ocfs lock */
status = -EPERM;
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
!capable(CAP_LINUX_IMMUTABLE))
goto bail_unlock;
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail_unlock;
}
ocfs2_inode->ip_attr = flags;
ocfs2_set_inode_flags(inode);
status = ocfs2_mark_inode_dirty(handle, inode, bh);
if (status < 0)
mlog_errno(status);
ocfs2_commit_trans(osb, handle);
bail_unlock:
ocfs2_inode_unlock(inode, 1);
bail:
brelse(bh);
return status;
}
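/*
 * Hypothetical userspace sketch (illustrative only, not part of this
 * file): the two handlers above are reached through the generic VFS
 * fileattr path, e.g. the chattr-style FS_IOC_GETFLAGS/FS_IOC_SETFLAGS
 * ioctls from <linux/fs.h>. Toggling the append-only flag requires
 * CAP_LINUX_IMMUTABLE, mirroring the capability check above.
 */
#if 0	/* example, never compiled */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/fs.h>

static int set_append_only(const char *path)
{
	int attr, ret = -1;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (!ioctl(fd, FS_IOC_GETFLAGS, &attr)) {
		attr |= FS_APPEND_FL;
		ret = ioctl(fd, FS_IOC_SETFLAGS, &attr);
	}
	close(fd);
	return ret;
}
#endif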
static int ocfs2_info_handle_blocksize(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_blocksize oib;
if (o2info_from_user(oib, req))
return -EFAULT;
oib.ib_blocksize = inode->i_sb->s_blocksize;
o2info_set_request_filled(&oib.ib_req);
if (o2info_to_user(oib, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_clustersize(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_clustersize oic;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oic, req))
return -EFAULT;
oic.ic_clustersize = osb->s_clustersize;
o2info_set_request_filled(&oic.ic_req);
if (o2info_to_user(oic, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_maxslots(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_maxslots oim;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oim, req))
return -EFAULT;
oim.im_max_slots = osb->max_slots;
o2info_set_request_filled(&oim.im_req);
if (o2info_to_user(oim, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_label(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_label oil;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oil, req))
return -EFAULT;
memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
o2info_set_request_filled(&oil.il_req);
if (o2info_to_user(oil, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_uuid(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_uuid oiu;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oiu, req))
return -EFAULT;
memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
o2info_set_request_filled(&oiu.iu_req);
if (o2info_to_user(oiu, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_fs_features(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_fs_features oif;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oif, req))
return -EFAULT;
oif.if_compat_features = osb->s_feature_compat;
oif.if_incompat_features = osb->s_feature_incompat;
oif.if_ro_compat_features = osb->s_feature_ro_compat;
o2info_set_request_filled(&oif.if_req);
if (o2info_to_user(oif, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_handle_journal_size(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_journal_size oij;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (o2info_from_user(oij, req))
return -EFAULT;
oij.ij_journal_size = i_size_read(osb->journal->j_inode);
o2info_set_request_filled(&oij.ij_req);
if (o2info_to_user(oij, req))
return -EFAULT;
return 0;
}
static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
struct inode *inode_alloc, u64 blkno,
struct ocfs2_info_freeinode *fi,
u32 slot)
{
int status = 0, unlock = 0;
struct buffer_head *bh = NULL;
struct ocfs2_dinode *dinode_alloc = NULL;
if (inode_alloc)
inode_lock(inode_alloc);
if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
unlock = 1;
} else {
status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
dinode_alloc = (struct ocfs2_dinode *)bh->b_data;
fi->ifi_stat[slot].lfi_total =
le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
fi->ifi_stat[slot].lfi_free =
le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);
bail:
if (unlock)
ocfs2_inode_unlock(inode_alloc, 0);
if (inode_alloc)
inode_unlock(inode_alloc);
brelse(bh);
return status;
}
static int ocfs2_info_handle_freeinode(struct inode *inode,
struct ocfs2_info_request __user *req)
{
u32 i;
u64 blkno = -1;
char namebuf[40];
int status, type = INODE_ALLOC_SYSTEM_INODE;
struct ocfs2_info_freeinode *oifi = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *inode_alloc = NULL;
oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
if (!oifi) {
status = -ENOMEM;
mlog_errno(status);
goto out_err;
}
if (o2info_from_user(*oifi, req)) {
status = -EFAULT;
goto out_free;
}
oifi->ifi_slotnum = osb->max_slots;
for (i = 0; i < oifi->ifi_slotnum; i++) {
if (o2info_coherent(&oifi->ifi_req)) {
inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
if (!inode_alloc) {
mlog(ML_ERROR, "unable to get alloc inode in "
"slot %u\n", i);
status = -EIO;
goto bail;
}
} else {
ocfs2_sprintf_system_inode_name(namebuf,
sizeof(namebuf),
type, i);
status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
namebuf,
strlen(namebuf),
&blkno);
if (status < 0) {
status = -ENOENT;
goto bail;
}
}
status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);
iput(inode_alloc);
inode_alloc = NULL;
if (status < 0)
goto bail;
}
o2info_set_request_filled(&oifi->ifi_req);
if (o2info_to_user(*oifi, req)) {
status = -EFAULT;
goto out_free;
}
status = 0;
bail:
if (status)
o2info_set_request_error(&oifi->ifi_req, req);
out_free:
kfree(oifi);
out_err:
return status;
}
static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
unsigned int chunksize)
{
u32 index;
index = __ilog2_u32(chunksize);
if (index >= OCFS2_INFO_MAX_HIST)
index = OCFS2_INFO_MAX_HIST - 1;
hist->fc_chunks[index]++;
hist->fc_clusters[index] += chunksize;
}
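/*
 * Worked example (hypothetical helper, not in the original): since
 * __ilog2_u32() yields floor(log2(chunksize)), free chunks of 8..15
 * clusters all land in bucket 3 and 16..31 in bucket 4, with anything
 * at or beyond 2^(OCFS2_INFO_MAX_HIST - 1) clamped into the last
 * bucket, exactly as above.
 */
static inline u32 example_hist_bucket(unsigned int chunksize)
{
	return min_t(u32, __ilog2_u32(chunksize), OCFS2_INFO_MAX_HIST - 1);
}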
static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
unsigned int chunksize)
{
if (chunksize > stats->ffs_max)
stats->ffs_max = chunksize;
if (chunksize < stats->ffs_min)
stats->ffs_min = chunksize;
stats->ffs_avg += chunksize;
stats->ffs_free_chunks_real++;
}
static void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
unsigned int chunksize)
{
o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
}
static int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
struct inode *gb_inode,
struct ocfs2_dinode *gb_dinode,
struct ocfs2_chain_rec *rec,
struct ocfs2_info_freefrag *ffg,
u32 chunks_in_group)
{
int status = 0, used;
u64 blkno;
struct buffer_head *bh = NULL;
struct ocfs2_group_desc *bg = NULL;
unsigned int max_bits, num_clusters;
unsigned int offset = 0, cluster, chunk;
unsigned int chunk_free, last_chunksize = 0;
if (!le32_to_cpu(rec->c_free))
goto bail;
do {
if (!bg)
blkno = le64_to_cpu(rec->c_blkno);
else
blkno = le64_to_cpu(bg->bg_next_group);
if (bh) {
brelse(bh);
bh = NULL;
}
if (o2info_coherent(&ffg->iff_req))
status = ocfs2_read_group_descriptor(gb_inode,
gb_dinode,
blkno, &bh);
else
status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
if (status < 0) {
mlog(ML_ERROR, "Can't read the group descriptor # "
"%llu from device.", (unsigned long long)blkno);
status = -EIO;
goto bail;
}
bg = (struct ocfs2_group_desc *)bh->b_data;
if (!le16_to_cpu(bg->bg_free_bits_count))
continue;
max_bits = le16_to_cpu(bg->bg_bits);
offset = 0;
for (chunk = 0; chunk < chunks_in_group; chunk++) {
/*
			 * the last chunk may not be an entire one.
*/
if ((offset + ffg->iff_chunksize) > max_bits)
num_clusters = max_bits - offset;
else
num_clusters = ffg->iff_chunksize;
chunk_free = 0;
for (cluster = 0; cluster < num_clusters; cluster++) {
used = ocfs2_test_bit(offset,
(unsigned long *)bg->bg_bitmap);
/*
			 * - chunk_free counts the free clusters in the
			 *   current chunk.
			 * - last_chunksize records the size (in clusters)
			 *   of the last real free chunk being counted.
*/
if (!used) {
last_chunksize++;
chunk_free++;
}
if (used && last_chunksize) {
ocfs2_info_update_ffg(ffg,
last_chunksize);
last_chunksize = 0;
}
offset++;
}
if (chunk_free == ffg->iff_chunksize)
ffg->iff_ffs.ffs_free_chunks++;
}
/*
	 * Need to update the info for the last free chunk.
*/
if (last_chunksize)
ocfs2_info_update_ffg(ffg, last_chunksize);
} while (le64_to_cpu(bg->bg_next_group));
bail:
brelse(bh);
return status;
}
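/*
 * Self-contained sketch (illustrative only) of the run-length
 * accounting in the loop above: walk a plain bitmap where a clear bit
 * means "free cluster" and record every maximal free run, the same way
 * last_chunksize is flushed on each used bit and once more at the end.
 * The whole-chunk (chunk_free) bookkeeping is omitted here.
 */
static void __maybe_unused
example_scan_free_runs(const unsigned long *bitmap, unsigned int nbits,
		       struct ocfs2_info_freefrag *ffg)
{
	unsigned int bit, run = 0;

	for (bit = 0; bit < nbits; bit++) {
		if (!test_bit(bit, bitmap)) {
			run++;		/* extend the current free run */
			continue;
		}
		if (run)		/* a used bit ends the run */
			ocfs2_info_update_ffg(ffg, run);
		run = 0;
	}
	if (run)			/* trailing free run */
		ocfs2_info_update_ffg(ffg, run);
}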
static int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
struct inode *gb_inode, u64 blkno,
struct ocfs2_info_freefrag *ffg)
{
u32 chunks_in_group;
int status = 0, unlock = 0, i;
struct buffer_head *bh = NULL;
struct ocfs2_chain_list *cl = NULL;
struct ocfs2_chain_rec *rec = NULL;
struct ocfs2_dinode *gb_dinode = NULL;
if (gb_inode)
inode_lock(gb_inode);
if (o2info_coherent(&ffg->iff_req)) {
status = ocfs2_inode_lock(gb_inode, &bh, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
unlock = 1;
} else {
status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
gb_dinode = (struct ocfs2_dinode *)bh->b_data;
cl = &(gb_dinode->id2.i_chain);
/*
	 * The chunksize (in clusters) from userspace must not be
	 * larger than the number of clusters in a group.
*/
if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
status = -EINVAL;
goto bail;
}
memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));
ffg->iff_ffs.ffs_min = ~0U;
ffg->iff_ffs.ffs_clusters =
le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
le32_to_cpu(gb_dinode->id1.bitmap1.i_used);
chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
rec = &(cl->cl_recs[i]);
status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
gb_dinode,
rec, ffg,
chunks_in_group);
if (status)
goto bail;
}
if (ffg->iff_ffs.ffs_free_chunks_real)
ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
ffg->iff_ffs.ffs_free_chunks_real);
bail:
if (unlock)
ocfs2_inode_unlock(gb_inode, 0);
if (gb_inode)
inode_unlock(gb_inode);
iput(gb_inode);
brelse(bh);
return status;
}
static int ocfs2_info_handle_freefrag(struct inode *inode,
struct ocfs2_info_request __user *req)
{
u64 blkno = -1;
char namebuf[40];
int status, type = GLOBAL_BITMAP_SYSTEM_INODE;
struct ocfs2_info_freefrag *oiff;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *gb_inode = NULL;
oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
if (!oiff) {
status = -ENOMEM;
mlog_errno(status);
goto out_err;
}
if (o2info_from_user(*oiff, req)) {
status = -EFAULT;
goto out_free;
}
/*
	 * The chunksize from userspace must be a nonzero power of 2.
*/
if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
(!oiff->iff_chunksize)) {
status = -EINVAL;
goto bail;
}
if (o2info_coherent(&oiff->iff_req)) {
gb_inode = ocfs2_get_system_file_inode(osb, type,
OCFS2_INVALID_SLOT);
if (!gb_inode) {
mlog(ML_ERROR, "unable to get global_bitmap inode\n");
status = -EIO;
goto bail;
}
} else {
ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
OCFS2_INVALID_SLOT);
status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
namebuf,
strlen(namebuf),
&blkno);
if (status < 0) {
status = -ENOENT;
goto bail;
}
}
status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
if (status < 0)
goto bail;
o2info_set_request_filled(&oiff->iff_req);
if (o2info_to_user(*oiff, req)) {
status = -EFAULT;
goto out_free;
}
status = 0;
bail:
if (status)
o2info_set_request_error(&oiff->iff_req, req);
out_free:
kfree(oiff);
out_err:
return status;
}
static int ocfs2_info_handle_unknown(struct inode *inode,
struct ocfs2_info_request __user *req)
{
struct ocfs2_info_request oir;
if (o2info_from_user(oir, req))
return -EFAULT;
o2info_clear_request_filled(&oir);
if (o2info_to_user(oir, req))
return -EFAULT;
return 0;
}
/*
* Validate and distinguish OCFS2_IOC_INFO requests.
*
* - validate the magic number.
 * - distinguish the different request types.
 * - validate the size of each request type.
*/
static int ocfs2_info_handle_request(struct inode *inode,
struct ocfs2_info_request __user *req)
{
int status = -EFAULT;
struct ocfs2_info_request oir;
if (o2info_from_user(oir, req))
goto bail;
status = -EINVAL;
if (oir.ir_magic != OCFS2_INFO_MAGIC)
goto bail;
switch (oir.ir_code) {
case OCFS2_INFO_BLOCKSIZE:
if (oir.ir_size == sizeof(struct ocfs2_info_blocksize))
status = ocfs2_info_handle_blocksize(inode, req);
break;
case OCFS2_INFO_CLUSTERSIZE:
if (oir.ir_size == sizeof(struct ocfs2_info_clustersize))
status = ocfs2_info_handle_clustersize(inode, req);
break;
case OCFS2_INFO_MAXSLOTS:
if (oir.ir_size == sizeof(struct ocfs2_info_maxslots))
status = ocfs2_info_handle_maxslots(inode, req);
break;
case OCFS2_INFO_LABEL:
if (oir.ir_size == sizeof(struct ocfs2_info_label))
status = ocfs2_info_handle_label(inode, req);
break;
case OCFS2_INFO_UUID:
if (oir.ir_size == sizeof(struct ocfs2_info_uuid))
status = ocfs2_info_handle_uuid(inode, req);
break;
case OCFS2_INFO_FS_FEATURES:
if (oir.ir_size == sizeof(struct ocfs2_info_fs_features))
status = ocfs2_info_handle_fs_features(inode, req);
break;
case OCFS2_INFO_JOURNAL_SIZE:
if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
status = ocfs2_info_handle_journal_size(inode, req);
break;
case OCFS2_INFO_FREEINODE:
if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
status = ocfs2_info_handle_freeinode(inode, req);
break;
case OCFS2_INFO_FREEFRAG:
if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
status = ocfs2_info_handle_freefrag(inode, req);
break;
default:
status = ocfs2_info_handle_unknown(inode, req);
break;
}
bail:
return status;
}
static int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
u64 *req_addr, int compat_flag)
{
int status = -EFAULT;
u64 __user *bp = NULL;
if (compat_flag) {
#ifdef CONFIG_COMPAT
/*
		 * pointer bp stores the base address of a pointer array,
		 * which collects the addresses of the separate requests.
*/
bp = (u64 __user *)(unsigned long)compat_ptr(info->oi_requests);
#else
BUG();
#endif
} else
bp = (u64 __user *)(unsigned long)(info->oi_requests);
if (o2info_from_user(*req_addr, bp + idx))
goto bail;
status = 0;
bail:
return status;
}
/*
* OCFS2_IOC_INFO handles an array of requests passed from userspace.
*
 * ocfs2_info_handle() receives a large info aggregation, grabs and
 * validates the request count from the header, then breaks it into
 * small pieces so that the specific handlers can process them one by
 * one.
 *
 * The idea here is to keep each separate request small enough to
 * ensure better backward and forward compatibility, since a small
 * request is less likely to break if the disk layout changes.
*/
static noinline_for_stack int
ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info, int compat_flag)
{
int i, status = 0;
u64 req_addr;
struct ocfs2_info_request __user *reqp;
if ((info->oi_count > OCFS2_INFO_MAX_REQUEST) ||
(!info->oi_requests)) {
status = -EINVAL;
goto bail;
}
for (i = 0; i < info->oi_count; i++) {
status = ocfs2_get_request_ptr(info, i, &req_addr, compat_flag);
if (status)
break;
reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
if (!reqp) {
status = -EINVAL;
goto bail;
}
status = ocfs2_info_handle_request(inode, reqp);
if (status)
break;
}
bail:
return status;
}
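/*
 * Hypothetical userspace sketch (illustrative only): build one
 * ocfs2_info_blocksize request, point struct ocfs2_info at an array
 * holding its address, and issue OCFS2_IOC_INFO. The names follow the
 * structures handled above; error handling is elided.
 */
#if 0	/* example, never compiled */
static int example_query_blocksize(int fd, __u32 *blocksize)
{
	struct ocfs2_info_blocksize oib = {
		.ib_req = {
			.ir_magic = OCFS2_INFO_MAGIC,
			.ir_code  = OCFS2_INFO_BLOCKSIZE,
			.ir_size  = sizeof(oib),
		},
	};
	__u64 reqs[1] = { (__u64)(unsigned long)&oib };
	struct ocfs2_info info = {
		.oi_requests = (__u64)(unsigned long)reqs,
		.oi_count = 1,
	};

	if (ioctl(fd, OCFS2_IOC_INFO, &info) < 0)
		return -1;
	if (!(oib.ib_req.ir_flags & OCFS2_INFO_FL_FILLED))
		return -1;
	*blocksize = oib.ib_blocksize;
	return 0;
}
#endif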
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
void __user *argp = (void __user *)arg;
int status;
switch (cmd) {
case OCFS2_IOC_RESVSP:
case OCFS2_IOC_RESVSP64:
case OCFS2_IOC_UNRESVSP:
case OCFS2_IOC_UNRESVSP64:
{
struct ocfs2_space_resv sr;
if (copy_from_user(&sr, (int __user *) arg, sizeof(sr)))
return -EFAULT;
return ocfs2_change_file_space(filp, cmd, &sr);
}
case OCFS2_IOC_GROUP_EXTEND:
{
int new_clusters;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (get_user(new_clusters, (int __user *)arg))
return -EFAULT;
status = mnt_want_write_file(filp);
if (status)
return status;
status = ocfs2_group_extend(inode, new_clusters);
mnt_drop_write_file(filp);
return status;
}
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
{
struct ocfs2_new_group_input input;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
return -EFAULT;
status = mnt_want_write_file(filp);
if (status)
return status;
status = ocfs2_group_add(inode, &input);
mnt_drop_write_file(filp);
return status;
}
case OCFS2_IOC_REFLINK:
{
struct reflink_arguments args;
const char __user *old_path;
const char __user *new_path;
bool preserve;
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
old_path = (const char __user *)(unsigned long)args.old_path;
new_path = (const char __user *)(unsigned long)args.new_path;
preserve = (args.preserve != 0);
return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
}
case OCFS2_IOC_INFO:
{
struct ocfs2_info info;
if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
return -EFAULT;
return ocfs2_info_handle(inode, &info, 0);
}
case FITRIM:
{
struct super_block *sb = inode->i_sb;
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!bdev_max_discard_sectors(sb->s_bdev))
return -EOPNOTSUPP;
if (copy_from_user(&range, argp, sizeof(range)))
return -EFAULT;
range.minlen = max_t(u64, bdev_discard_granularity(sb->s_bdev),
range.minlen);
ret = ocfs2_trim_fs(sb, &range);
if (ret < 0)
return ret;
if (copy_to_user(argp, &range, sizeof(range)))
return -EFAULT;
return 0;
}
case OCFS2_IOC_MOVE_EXT:
return ocfs2_ioctl_move_extents(filp, argp);
default:
return -ENOTTY;
}
}
#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
bool preserve;
struct reflink_arguments args;
struct inode *inode = file_inode(file);
struct ocfs2_info info;
void __user *argp = (void __user *)arg;
switch (cmd) {
case OCFS2_IOC_RESVSP:
case OCFS2_IOC_RESVSP64:
case OCFS2_IOC_UNRESVSP:
case OCFS2_IOC_UNRESVSP64:
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
break;
case OCFS2_IOC_REFLINK:
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
preserve = (args.preserve != 0);
return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
compat_ptr(args.new_path), preserve);
case OCFS2_IOC_INFO:
if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
return -EFAULT;
return ocfs2_info_handle(inode, &info, 1);
case FITRIM:
case OCFS2_IOC_MOVE_EXT:
break;
default:
return -ENOIOCTLCMD;
}
return ocfs2_ioctl(file, cmd, arg);
}
#endif
| linux-master | fs/ocfs2/ioctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* slot_map.c
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
struct ocfs2_slot {
int sl_valid;
unsigned int sl_node_num;
};
struct ocfs2_slot_info {
int si_extended;
int si_slots_per_block;
struct inode *si_inode;
unsigned int si_blocks;
struct buffer_head **si_bh;
unsigned int si_num_slots;
struct ocfs2_slot si_slots[];
};
static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
unsigned int node_num);
static void ocfs2_invalidate_slot(struct ocfs2_slot_info *si,
int slot_num)
{
BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots));
si->si_slots[slot_num].sl_valid = 0;
}
static void ocfs2_set_slot(struct ocfs2_slot_info *si,
int slot_num, unsigned int node_num)
{
BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots));
si->si_slots[slot_num].sl_valid = 1;
si->si_slots[slot_num].sl_node_num = node_num;
}
/* This version is for the extended slot map */
static void ocfs2_update_slot_info_extended(struct ocfs2_slot_info *si)
{
int b, i, slotno;
struct ocfs2_slot_map_extended *se;
slotno = 0;
for (b = 0; b < si->si_blocks; b++) {
se = (struct ocfs2_slot_map_extended *)si->si_bh[b]->b_data;
for (i = 0;
(i < si->si_slots_per_block) &&
(slotno < si->si_num_slots);
i++, slotno++) {
if (se->se_slots[i].es_valid)
ocfs2_set_slot(si, slotno,
le32_to_cpu(se->se_slots[i].es_node_num));
else
ocfs2_invalidate_slot(si, slotno);
}
}
}
/*
 * Copy the on-disk slot information into our slot_info struct.
* Must be protected by osb_lock.
*/
static void ocfs2_update_slot_info_old(struct ocfs2_slot_info *si)
{
int i;
struct ocfs2_slot_map *sm;
sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
for (i = 0; i < si->si_num_slots; i++) {
if (le16_to_cpu(sm->sm_slots[i]) == (u16)OCFS2_INVALID_SLOT)
ocfs2_invalidate_slot(si, i);
else
ocfs2_set_slot(si, i, le16_to_cpu(sm->sm_slots[i]));
}
}
static void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
{
/*
* The slot data will have been refreshed when ocfs2_super_lock
* was taken.
*/
if (si->si_extended)
ocfs2_update_slot_info_extended(si);
else
ocfs2_update_slot_info_old(si);
}
int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
{
int ret;
struct ocfs2_slot_info *si = osb->slot_info;
if (si == NULL)
return 0;
BUG_ON(si->si_blocks == 0);
BUG_ON(si->si_bh == NULL);
trace_ocfs2_refresh_slot_info(si->si_blocks);
/*
* We pass -1 as blocknr because we expect all of si->si_bh to
* be !NULL. Thus, ocfs2_read_blocks() will ignore blocknr. If
* this is not true, the read of -1 (UINT64_MAX) will fail.
*/
ret = ocfs2_read_blocks(INODE_CACHE(si->si_inode), -1, si->si_blocks,
si->si_bh, OCFS2_BH_IGNORE_CACHE, NULL);
if (ret == 0) {
spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si);
spin_unlock(&osb->osb_lock);
}
return ret;
}
/* Post our slot info into its destination bh and write it
 * out. */
static void ocfs2_update_disk_slot_extended(struct ocfs2_slot_info *si,
int slot_num,
struct buffer_head **bh)
{
int blkind = slot_num / si->si_slots_per_block;
int slotno = slot_num % si->si_slots_per_block;
struct ocfs2_slot_map_extended *se;
BUG_ON(blkind >= si->si_blocks);
se = (struct ocfs2_slot_map_extended *)si->si_bh[blkind]->b_data;
se->se_slots[slotno].es_valid = si->si_slots[slot_num].sl_valid;
if (si->si_slots[slot_num].sl_valid)
se->se_slots[slotno].es_node_num =
cpu_to_le32(si->si_slots[slot_num].sl_node_num);
*bh = si->si_bh[blkind];
}
static void ocfs2_update_disk_slot_old(struct ocfs2_slot_info *si,
int slot_num,
struct buffer_head **bh)
{
int i;
struct ocfs2_slot_map *sm;
sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
for (i = 0; i < si->si_num_slots; i++) {
if (si->si_slots[i].sl_valid)
sm->sm_slots[i] =
cpu_to_le16(si->si_slots[i].sl_node_num);
else
sm->sm_slots[i] = cpu_to_le16(OCFS2_INVALID_SLOT);
}
*bh = si->si_bh[0];
}
static int ocfs2_update_disk_slot(struct ocfs2_super *osb,
struct ocfs2_slot_info *si,
int slot_num)
{
int status;
struct buffer_head *bh;
spin_lock(&osb->osb_lock);
if (si->si_extended)
ocfs2_update_disk_slot_extended(si, slot_num, &bh);
else
ocfs2_update_disk_slot_old(si, slot_num, &bh);
spin_unlock(&osb->osb_lock);
status = ocfs2_write_block(osb, bh, INODE_CACHE(si->si_inode));
if (status < 0)
mlog_errno(status);
return status;
}
/*
* Calculate how many bytes are needed by the slot map. Returns
* an error if the slot map file is too small.
*/
static int ocfs2_slot_map_physical_size(struct ocfs2_super *osb,
struct inode *inode,
unsigned long long *bytes)
{
unsigned long long bytes_needed;
if (ocfs2_uses_extended_slot_map(osb)) {
bytes_needed = osb->max_slots *
sizeof(struct ocfs2_extended_slot);
} else {
bytes_needed = osb->max_slots * sizeof(__le16);
}
if (bytes_needed > i_size_read(inode)) {
mlog(ML_ERROR,
"Slot map file is too small! (size %llu, needed %llu)\n",
i_size_read(inode), bytes_needed);
return -ENOSPC;
}
*bytes = bytes_needed;
return 0;
}
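/*
 * Worked example (hypothetical helper, mirroring the sizing above):
 * with max_slots = 8, the old format needs 8 * sizeof(__le16) =
 * 16 bytes, while the extended format needs
 * 8 * sizeof(struct ocfs2_extended_slot); ocfs2_map_slot_buffers()
 * below then rounds the byte count up to whole blocks.
 */
static inline unsigned long long example_slot_map_bytes(unsigned int max_slots,
							int extended)
{
	return extended ? max_slots * sizeof(struct ocfs2_extended_slot)
			: max_slots * sizeof(__le16);
}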
/* Try to find the given node in the slot info. Returns -ENOENT
 * if nothing is found. */
static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
unsigned int node_num)
{
int i, ret = -ENOENT;
for(i = 0; i < si->si_num_slots; i++) {
if (si->si_slots[i].sl_valid &&
(node_num == si->si_slots[i].sl_node_num)) {
ret = i;
break;
}
}
return ret;
}
static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
int preferred)
{
int i, ret = -ENOSPC;
if ((preferred >= 0) && (preferred < si->si_num_slots)) {
if (!si->si_slots[preferred].sl_valid) {
ret = preferred;
goto out;
}
}
for(i = 0; i < si->si_num_slots; i++) {
if (!si->si_slots[i].sl_valid) {
ret = i;
break;
}
}
out:
return ret;
}
int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num)
{
int slot;
struct ocfs2_slot_info *si = osb->slot_info;
spin_lock(&osb->osb_lock);
slot = __ocfs2_node_num_to_slot(si, node_num);
spin_unlock(&osb->osb_lock);
return slot;
}
int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num,
unsigned int *node_num)
{
struct ocfs2_slot_info *si = osb->slot_info;
assert_spin_locked(&osb->osb_lock);
BUG_ON(slot_num < 0);
BUG_ON(slot_num >= osb->max_slots);
if (!si->si_slots[slot_num].sl_valid)
return -ENOENT;
*node_num = si->si_slots[slot_num].sl_node_num;
return 0;
}
static void __ocfs2_free_slot_info(struct ocfs2_slot_info *si)
{
unsigned int i;
if (si == NULL)
return;
iput(si->si_inode);
if (si->si_bh) {
for (i = 0; i < si->si_blocks; i++) {
if (si->si_bh[i]) {
brelse(si->si_bh[i]);
si->si_bh[i] = NULL;
}
}
kfree(si->si_bh);
}
kfree(si);
}
int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num)
{
struct ocfs2_slot_info *si = osb->slot_info;
if (si == NULL)
return 0;
spin_lock(&osb->osb_lock);
ocfs2_invalidate_slot(si, slot_num);
spin_unlock(&osb->osb_lock);
return ocfs2_update_disk_slot(osb, osb->slot_info, slot_num);
}
static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
struct ocfs2_slot_info *si)
{
int status = 0;
u64 blkno;
unsigned long long blocks, bytes = 0;
unsigned int i;
struct buffer_head *bh;
status = ocfs2_slot_map_physical_size(osb, si->si_inode, &bytes);
if (status)
goto bail;
blocks = ocfs2_blocks_for_bytes(si->si_inode->i_sb, bytes);
BUG_ON(blocks > UINT_MAX);
si->si_blocks = blocks;
if (!si->si_blocks)
goto bail;
if (si->si_extended)
si->si_slots_per_block =
(osb->sb->s_blocksize /
sizeof(struct ocfs2_extended_slot));
else
si->si_slots_per_block = osb->sb->s_blocksize / sizeof(__le16);
/* The size checks above should ensure this */
BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);
trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
si->si_bh = kcalloc(si->si_blocks, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!si->si_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
for (i = 0; i < si->si_blocks; i++) {
status = ocfs2_extent_map_get_blocks(si->si_inode, i,
&blkno, NULL, NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i);
bh = NULL; /* Acquire a fresh bh */
status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
1, &bh, OCFS2_BH_IGNORE_CACHE, NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
}
si->si_bh[i] = bh;
}
bail:
return status;
}
int ocfs2_init_slot_info(struct ocfs2_super *osb)
{
int status;
struct inode *inode = NULL;
struct ocfs2_slot_info *si;
si = kzalloc(struct_size(si, si_slots, osb->max_slots), GFP_KERNEL);
if (!si) {
status = -ENOMEM;
mlog_errno(status);
return status;
}
si->si_extended = ocfs2_uses_extended_slot_map(osb);
si->si_num_slots = osb->max_slots;
inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
goto bail;
}
si->si_inode = inode;
status = ocfs2_map_slot_buffers(osb, si);
if (status < 0) {
mlog_errno(status);
goto bail;
}
osb->slot_info = (struct ocfs2_slot_info *)si;
bail:
if (status < 0)
__ocfs2_free_slot_info(si);
return status;
}
void ocfs2_free_slot_info(struct ocfs2_super *osb)
{
struct ocfs2_slot_info *si = osb->slot_info;
osb->slot_info = NULL;
__ocfs2_free_slot_info(si);
}
int ocfs2_find_slot(struct ocfs2_super *osb)
{
int status;
int slot;
struct ocfs2_slot_info *si;
si = osb->slot_info;
spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si);
	/* Search for ourselves first and take the slot if it already
	 * exists. Perhaps we need to mark this in a variable for our
	 * own journal recovery? Possibly not, though we certainly
	 * need to warn the user. */
slot = __ocfs2_node_num_to_slot(si, osb->node_num);
if (slot < 0) {
		/* If we have no slot yet, just take the first
		 * available one. */
slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
if (slot < 0) {
spin_unlock(&osb->osb_lock);
mlog(ML_ERROR, "no free slots available!\n");
status = -EINVAL;
goto bail;
}
} else
printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
"allocated to this node!\n", slot, osb->dev_str);
ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot;
spin_unlock(&osb->osb_lock);
trace_ocfs2_find_slot(osb->slot_num);
status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
if (status < 0) {
mlog_errno(status);
/*
		 * If the block write failed, invalidate the slot to avoid
		 * overwriting it during dismount, in case another node has
		 * rightfully claimed it.
*/
spin_lock(&osb->osb_lock);
ocfs2_invalidate_slot(si, osb->slot_num);
osb->slot_num = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
}
bail:
return status;
}
void ocfs2_put_slot(struct ocfs2_super *osb)
{
int status, slot_num;
struct ocfs2_slot_info *si = osb->slot_info;
if (!si)
return;
spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si);
slot_num = osb->slot_num;
ocfs2_invalidate_slot(si, osb->slot_num);
osb->slot_num = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
status = ocfs2_update_disk_slot(osb, si, slot_num);
if (status < 0)
mlog_errno(status);
ocfs2_free_slot_info(osb);
}
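/*
 * Hypothetical mount-path excerpt (illustrative only) tying this
 * file's helpers together: the slot map is mapped at mount, a slot is
 * claimed, and it is released again at dismount. Error handling and
 * the surrounding super.c context are elided.
 */
#if 0	/* example, never compiled */
	status = ocfs2_init_slot_info(osb);	/* map the slot_map blocks */
	/* ... */
	status = ocfs2_find_slot(osb);		/* claim osb->slot_num */
	/* ... */
	/* at dismount time: */
	ocfs2_put_slot(osb);			/* invalidate and write back */
#endif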
| linux-master | fs/ocfs2/slot_map.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* namei.c
*
* Create and rename file, directory, symlinks
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* Portions of this code from linux/fs/ext3/dir.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card ([email protected])
* Laboratoire MASI - Institut Blaise pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/dir.c
*
 * Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dcache.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "sysfile.h"
#include "uptodate.h"
#include "xattr.h"
#include "acl.h"
#include "ocfs2_trace.h"
#include "ioctl.h"
#include "buffer_head_io.h"
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct inode *inode,
dev_t dev,
struct buffer_head **new_fe_bh,
struct buffer_head *parent_fe_bh,
handle_t *handle,
struct ocfs2_alloc_context *inode_ac);
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup,
bool dio);
static int ocfs2_orphan_add(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
struct inode *orphan_dir_inode,
bool dio);
static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
const char *symname);
static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **bh1,
struct inode *inode1,
struct buffer_head **bh2,
struct inode *inode2,
int rename);
static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
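/*
 * Hypothetical sketch (illustrative only): an orphan entry name is
 * the inode's block number rendered as 16 zero-padded hex digits, so
 * blkno 513 becomes "0000000000000201". The real formatting is
 * assumed to live in ocfs2_blkno_stringify(); this mirrors it.
 */
static inline void example_orphan_name(char name[OCFS2_ORPHAN_NAMELEN + 1],
				       u64 blkno)
{
	snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx",
		 (unsigned long long)blkno);
}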
static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
int status;
u64 blkno;
struct inode *inode = NULL;
struct dentry *ret;
struct ocfs2_inode_info *oi;
trace_ocfs2_lookup(dir, dentry, dentry->d_name.len,
dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno, 0);
if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) {
ret = ERR_PTR(-ENAMETOOLONG);
goto bail;
}
status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
ret = ERR_PTR(status);
goto bail;
}
status = ocfs2_lookup_ino_from_name(dir, dentry->d_name.name,
dentry->d_name.len, &blkno);
if (status < 0)
goto bail_add;
inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0);
if (IS_ERR(inode)) {
ret = ERR_PTR(-EACCES);
goto bail_unlock;
}
oi = OCFS2_I(inode);
/* Clear any orphaned state... If we were able to look up the
* inode from a directory, it certainly can't be orphaned. We
* might have the bad state from a node which intended to
* orphan this inode but crashed before it could commit the
* unlink. */
spin_lock(&oi->ip_lock);
oi->ip_flags &= ~OCFS2_INODE_MAYBE_ORPHANED;
spin_unlock(&oi->ip_lock);
bail_add:
ret = d_splice_alias(inode, dentry);
if (inode) {
/*
* If d_splice_alias() finds a DCACHE_DISCONNECTED
		 * dentry, it will d_move() it on top of ours. The
* return value will indicate this however, so in
* those cases, we switch them around for the locking
* code.
*
* NOTE: This dentry already has ->d_op set from
* ocfs2_get_parent() and ocfs2_get_dentry()
*/
if (!IS_ERR_OR_NULL(ret))
dentry = ret;
status = ocfs2_dentry_attach_lock(dentry, inode,
OCFS2_I(dir)->ip_blkno);
if (status) {
mlog_errno(status);
ret = ERR_PTR(status);
goto bail_unlock;
}
} else
ocfs2_dentry_attach_gen(dentry);
bail_unlock:
/* Don't drop the cluster lock until *after* the d_add --
* unlink on another node will message us to remove that
* dentry under this lock so otherwise we can race this with
* the downconvert thread and have a stale dentry. */
ocfs2_inode_unlock(dir, 0);
bail:
trace_ocfs2_lookup_ret(ret);
return ret;
}
static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
{
struct inode *inode;
int status;
inode = new_inode(dir->i_sb);
if (!inode) {
mlog(ML_ERROR, "new_inode failed!\n");
return ERR_PTR(-ENOMEM);
}
/* populate as many fields early on as possible - many of
* these are used by the support functions here and in
* callers. */
if (S_ISDIR(mode))
set_nlink(inode, 2);
mode = mode_strip_sgid(&nop_mnt_idmap, dir, mode);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
status = dquot_initialize(inode);
if (status)
return ERR_PTR(status);
return inode;
}
static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
struct dentry *dentry, struct inode *inode)
{
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
ocfs2_lock_res_free(&dl->dl_lockres);
BUG_ON(dl->dl_count != 1);
spin_lock(&dentry_attach_lock);
dentry->d_fsdata = NULL;
spin_unlock(&dentry_attach_lock);
kfree(dl);
iput(inode);
}
static int ocfs2_mknod(struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
umode_t mode,
dev_t dev)
{
int status = 0;
struct buffer_head *parent_fe_bh = NULL;
handle_t *handle = NULL;
struct ocfs2_super *osb;
struct ocfs2_dinode *dirfe;
struct ocfs2_dinode *fe = NULL;
struct buffer_head *new_fe_bh = NULL;
struct inode *inode = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
int want_clusters = 0;
int want_meta = 0;
int xattr_credits = 0;
struct ocfs2_security_xattr_info si = {
.name = NULL,
.enable = 1,
};
int did_quota_inode = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
int did_block_signals = 0;
struct ocfs2_dentry_lock *dl = NULL;
trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long)dev, mode);
status = dquot_initialize(dir);
if (status) {
mlog_errno(status);
return status;
}
/* get our super block */
osb = OCFS2_SB(dir->i_sb);
status = ocfs2_inode_lock(dir, &parent_fe_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
if (S_ISDIR(mode) && (dir->i_nlink >= ocfs2_link_max(osb))) {
status = -EMLINK;
goto leave;
}
dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
if (!ocfs2_read_links_count(dirfe)) {
/* can't make a file in a deleted directory. */
status = -ENOENT;
goto leave;
}
status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (status)
goto leave;
/* get a spot inside the dir. */
status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
dentry->d_name.name,
dentry->d_name.len, &lookup);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* reserve an inode spot */
status = ocfs2_reserve_new_inode(osb, &inode_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
inode = ocfs2_get_init_inode(dir, mode);
if (IS_ERR(inode)) {
status = PTR_ERR(inode);
inode = NULL;
mlog_errno(status);
goto leave;
}
/* get security xattr */
status = ocfs2_init_security_get(inode, dir, &dentry->d_name, &si);
if (status) {
if (status == -EOPNOTSUPP)
si.enable = 0;
else {
mlog_errno(status);
goto leave;
}
}
/* calculate meta data/clusters for setting security and acl xattr */
status = ocfs2_calc_xattr_init(dir, parent_fe_bh, mode,
&si, &want_clusters,
&xattr_credits, &want_meta);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* Reserve a cluster if creating an extent based directory. */
if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) {
want_clusters += 1;
/* Dir indexing requires extra space as well */
if (ocfs2_supports_indexed_dirs(osb))
want_meta++;
}
status = ocfs2_reserve_new_metadata_blocks(osb, want_meta, &meta_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
status = ocfs2_reserve_clusters(osb, want_clusters, &data_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode),
xattr_credits));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto leave;
}
/* Starting to change things, restart is no longer possible. */
ocfs2_block_signals(&oldset);
did_block_signals = 1;
status = dquot_alloc_inode(inode);
if (status)
goto leave;
did_quota_inode = 1;
/* do the real work now. */
status = ocfs2_mknod_locked(osb, dir, inode, dev,
&new_fe_bh, parent_fe_bh, handle,
inode_ac);
if (status < 0) {
mlog_errno(status);
goto leave;
}
fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
if (S_ISDIR(mode)) {
status = ocfs2_fill_new_dir(osb, handle, dir, inode,
new_fe_bh, data_ac, meta_ac);
if (status < 0) {
mlog_errno(status);
goto leave;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(dir),
parent_fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
ocfs2_add_links_count(dirfe, 1);
ocfs2_journal_dirty(handle, parent_fe_bh);
inc_nlink(dir);
}
status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
meta_ac, data_ac);
if (status < 0) {
mlog_errno(status);
goto roll_back;
}
if (si.enable) {
status = ocfs2_init_security_set(handle, inode, new_fe_bh, &si,
meta_ac, data_ac);
if (status < 0) {
mlog_errno(status);
goto roll_back;
}
}
/*
	 * Do this before adding the entry to the directory. We also
	 * set d_op after success so that ->d_iput() will clean up
	 * the dentry lock even if ocfs2_add_entry() fails below.
*/
status = ocfs2_dentry_attach_lock(dentry, inode,
OCFS2_I(dir)->ip_blkno);
if (status) {
mlog_errno(status);
goto roll_back;
}
dl = dentry->d_fsdata;
status = ocfs2_add_entry(handle, dentry, inode,
OCFS2_I(inode)->ip_blkno, parent_fe_bh,
&lookup);
if (status < 0) {
mlog_errno(status);
goto roll_back;
}
insert_inode_hash(inode);
d_instantiate(dentry, inode);
status = 0;
roll_back:
if (status < 0 && S_ISDIR(mode)) {
ocfs2_add_links_count(dirfe, -1);
drop_nlink(dir);
}
leave:
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle) {
if (status < 0 && fe)
ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
}
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)
ocfs2_unblock_signals(&oldset);
brelse(new_fe_bh);
brelse(parent_fe_bh);
kfree(si.value);
ocfs2_free_dir_lookup_result(&lookup);
if (inode_ac)
ocfs2_free_alloc_context(inode_ac);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
/*
	 * We should call iput after the i_rwsem of the bitmap has been
	 * unlocked in ocfs2_free_alloc_context(), or
	 * ocfs2_delete_inode() will take the mutex again.
*/
if ((status < 0) && inode) {
if (dl)
ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
clear_nlink(inode);
iput(inode);
}
if (status)
mlog_errno(status);
return status;
}
static int __ocfs2_mknod_locked(struct inode *dir,
struct inode *inode,
dev_t dev,
struct buffer_head **new_fe_bh,
struct buffer_head *parent_fe_bh,
handle_t *handle,
struct ocfs2_alloc_context *inode_ac,
u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
{
int status = 0;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dinode *fe = NULL;
struct ocfs2_extent_list *fel;
u16 feat;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct timespec64 ts;
*new_fe_bh = NULL;
/* populate as many fields early on as possible - many of
* these are used by the support functions here and in
* callers. */
inode->i_ino = ino_from_blkno(osb->sb, fe_blkno);
oi->ip_blkno = fe_blkno;
spin_lock(&osb->osb_lock);
inode->i_generation = osb->s_next_generation++;
spin_unlock(&osb->osb_lock);
*new_fe_bh = sb_getblk(osb->sb, fe_blkno);
if (!*new_fe_bh) {
status = -ENOMEM;
mlog_errno(status);
goto leave;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), *new_fe_bh);
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
*new_fe_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
fe = (struct ocfs2_dinode *) (*new_fe_bh)->b_data;
memset(fe, 0, osb->sb->s_blocksize);
fe->i_generation = cpu_to_le32(inode->i_generation);
fe->i_fs_generation = cpu_to_le32(osb->fs_generation);
fe->i_blkno = cpu_to_le64(fe_blkno);
fe->i_suballoc_loc = cpu_to_le64(suballoc_loc);
fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
fe->i_uid = cpu_to_le32(i_uid_read(inode));
fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_last_eb_blk = 0;
strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
ktime_get_real_ts64(&ts);
fe->i_atime = fe->i_ctime = fe->i_mtime =
cpu_to_le64(ts.tv_sec);
fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
cpu_to_le32(ts.tv_nsec);
fe->i_dtime = 0;
/*
* If supported, directories start with inline data. If inline
* isn't supported, but indexing is, we start them as indexed.
*/
feat = le16_to_cpu(fe->i_dyn_features);
if (S_ISDIR(inode->i_mode) && ocfs2_supports_inline_data(osb)) {
fe->i_dyn_features = cpu_to_le16(feat | OCFS2_INLINE_DATA_FL);
fe->id2.i_data.id_count = cpu_to_le16(
ocfs2_max_inline_data_with_xattr(osb->sb, fe));
} else {
fel = &fe->id2.i_list;
fel->l_tree_depth = 0;
fel->l_next_free_rec = 0;
fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
}
ocfs2_journal_dirty(handle, *new_fe_bh);
ocfs2_populate_inode(inode, fe, 1);
ocfs2_ci_set_new(osb, INODE_CACHE(inode));
if (!ocfs2_mount_local(osb)) {
status = ocfs2_create_new_inode_locks(inode);
if (status < 0)
mlog_errno(status);
}
ocfs2_update_inode_fsync_trans(handle, inode, 1);
leave:
if (status < 0) {
if (*new_fe_bh) {
brelse(*new_fe_bh);
*new_fe_bh = NULL;
}
}
if (status)
mlog_errno(status);
return status;
}
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct inode *inode,
dev_t dev,
struct buffer_head **new_fe_bh,
struct buffer_head *parent_fe_bh,
handle_t *handle,
struct ocfs2_alloc_context *inode_ac)
{
int status = 0;
u64 suballoc_loc, fe_blkno = 0;
u16 suballoc_bit;
*new_fe_bh = NULL;
status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
inode_ac, &suballoc_loc,
&suballoc_bit, &fe_blkno);
if (status < 0) {
mlog_errno(status);
return status;
}
return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
parent_fe_bh, handle, inode_ac,
fe_blkno, suballoc_loc, suballoc_bit);
}
static int ocfs2_mkdir(struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
int ret;
trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name,
OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0);
if (ret)
mlog_errno(ret);
return ret;
}
static int ocfs2_create(struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool excl)
{
int ret;
trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
if (ret)
mlog_errno(ret);
return ret;
}
static int ocfs2_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *dentry)
{
handle_t *handle;
struct inode *inode = d_inode(old_dentry);
struct inode *old_dir = d_inode(old_dentry->d_parent);
int err;
struct buffer_head *fe_bh = NULL;
struct buffer_head *old_dir_bh = NULL;
struct buffer_head *parent_fe_bh = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
u64 old_de_ino;
trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
old_dentry->d_name.len, old_dentry->d_name.name,
dentry->d_name.len, dentry->d_name.name);
if (S_ISDIR(inode->i_mode))
return -EPERM;
err = dquot_initialize(dir);
if (err) {
mlog_errno(err);
return err;
}
err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
&parent_fe_bh, dir, 0);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
return err;
}
	/* Make sure both dirs have bhs;
	 * get an extra ref on old_dir_bh if old==new. */
if (!parent_fe_bh) {
if (old_dir_bh) {
parent_fe_bh = old_dir_bh;
get_bh(parent_fe_bh);
} else {
mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
err = -EIO;
goto out;
}
}
if (!dir->i_nlink) {
err = -ENOENT;
goto out;
}
err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
old_dentry->d_name.len, &old_de_ino);
if (err) {
err = -ENOENT;
goto out;
}
/*
* Check whether another node removed the source inode while we
* were in the vfs.
*/
if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
err = -ENOENT;
goto out;
}
err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (err)
goto out;
err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
dentry->d_name.name,
dentry->d_name.len, &lookup);
if (err < 0) {
mlog_errno(err);
goto out;
}
err = ocfs2_inode_lock(inode, &fe_bh, 1);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
goto out;
}
fe = (struct ocfs2_dinode *) fe_bh->b_data;
if (ocfs2_read_links_count(fe) >= ocfs2_link_max(osb)) {
err = -EMLINK;
goto out_unlock_inode;
}
handle = ocfs2_start_trans(osb, ocfs2_link_credits(osb->sb));
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
handle = NULL;
mlog_errno(err);
goto out_unlock_inode;
}
/* Starting to change things, restart is no longer possible. */
ocfs2_block_signals(&oldset);
err = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (err < 0) {
mlog_errno(err);
goto out_commit;
}
inc_nlink(inode);
inode_set_ctime_current(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
OCFS2_I(inode)->ip_blkno,
parent_fe_bh, &lookup);
if (err) {
ocfs2_add_links_count(fe, -1);
drop_nlink(inode);
mlog_errno(err);
goto out_commit;
}
err = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
if (err) {
mlog_errno(err);
goto out_commit;
}
ihold(inode);
d_instantiate(dentry, inode);
out_commit:
ocfs2_commit_trans(osb, handle);
ocfs2_unblock_signals(&oldset);
out_unlock_inode:
ocfs2_inode_unlock(inode, 1);
out:
ocfs2_double_unlock(old_dir, dir);
brelse(fe_bh);
brelse(parent_fe_bh);
brelse(old_dir_bh);
ocfs2_free_dir_lookup_result(&lookup);
if (err)
mlog_errno(err);
return err;
}
/*
 * Takes and drops an exclusive lock on the given dentry. This will
 * force other nodes to drop the dentry from their caches.
*/
static int ocfs2_remote_dentry_delete(struct dentry *dentry)
{
int ret;
ret = ocfs2_dentry_lock(dentry, 1);
if (ret)
mlog_errno(ret);
else
ocfs2_dentry_unlock(dentry, 1);
return ret;
}
static inline int ocfs2_inode_is_unlinkable(struct inode *inode)
{
if (S_ISDIR(inode->i_mode)) {
if (inode->i_nlink == 2)
return 1;
return 0;
}
if (inode->i_nlink == 1)
return 1;
return 0;
}
static int ocfs2_unlink(struct inode *dir,
struct dentry *dentry)
{
int status;
int child_locked = 0;
bool is_unlinkable = false;
struct inode *inode = d_inode(dentry);
struct inode *orphan_dir = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
u64 blkno;
struct ocfs2_dinode *fe = NULL;
struct buffer_head *fe_bh = NULL;
struct buffer_head *parent_node_bh = NULL;
handle_t *handle = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
struct ocfs2_dir_lookup_result lookup = { NULL, };
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
trace_ocfs2_unlink(dir, dentry, dentry->d_name.len,
dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
status = dquot_initialize(dir);
if (status) {
mlog_errno(status);
return status;
}
BUG_ON(d_inode(dentry->d_parent) != dir);
if (inode == osb->root_inode)
return -EPERM;
status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
status = ocfs2_find_files_on_disk(dentry->d_name.name,
dentry->d_name.len, &blkno, dir,
&lookup);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto leave;
}
if (OCFS2_I(inode)->ip_blkno != blkno) {
status = -ENOENT;
trace_ocfs2_unlink_noent(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)blkno,
OCFS2_I(inode)->ip_flags);
goto leave;
}
status = ocfs2_inode_lock(inode, &fe_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto leave;
}
child_locked = 1;
if (S_ISDIR(inode->i_mode)) {
if (inode->i_nlink != 2 || !ocfs2_empty_dir(inode)) {
status = -ENOTEMPTY;
goto leave;
}
}
status = ocfs2_remote_dentry_delete(dentry);
if (status < 0) {
/* This remote delete should succeed under all normal
* circumstances. */
mlog_errno(status);
goto leave;
}
if (ocfs2_inode_is_unlinkable(inode)) {
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
OCFS2_I(inode)->ip_blkno,
orphan_name, &orphan_insert,
false);
if (status < 0) {
mlog_errno(status);
goto leave;
}
is_unlinkable = true;
}
handle = ocfs2_start_trans(osb, ocfs2_unlink_credits(osb->sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto leave;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
fe = (struct ocfs2_dinode *) fe_bh->b_data;
/* delete the name from the parent dir */
status = ocfs2_delete_entry(handle, dir, &lookup);
if (status < 0) {
mlog_errno(status);
goto leave;
}
if (S_ISDIR(inode->i_mode))
drop_nlink(inode);
drop_nlink(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
ocfs2_journal_dirty(handle, fe_bh);
dir->i_mtime = inode_set_ctime_current(dir);
if (S_ISDIR(inode->i_mode))
drop_nlink(dir);
status = ocfs2_mark_inode_dirty(handle, dir, parent_node_bh);
if (status < 0) {
mlog_errno(status);
if (S_ISDIR(inode->i_mode))
inc_nlink(dir);
goto leave;
}
if (is_unlinkable) {
status = ocfs2_orphan_add(osb, handle, inode, fe_bh,
orphan_name, &orphan_insert, orphan_dir, false);
if (status < 0)
mlog_errno(status);
}
leave:
if (handle)
ocfs2_commit_trans(osb, handle);
if (orphan_dir) {
/* This was locked for us in ocfs2_prepare_orphan_dir() */
ocfs2_inode_unlock(orphan_dir, 1);
inode_unlock(orphan_dir);
iput(orphan_dir);
}
if (child_locked)
ocfs2_inode_unlock(inode, 1);
ocfs2_inode_unlock(dir, 1);
brelse(fe_bh);
brelse(parent_node_bh);
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_free_dir_lookup_result(&lookup);
if (status && (status != -ENOTEMPTY) && (status != -ENOENT))
mlog_errno(status);
return status;
}
static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
u64 src_inode_no, u64 dest_inode_no)
{
int ret = 0, i = 0;
u64 parent_inode_no = 0;
u64 child_inode_no = src_inode_no;
struct inode *child_inode;
#define MAX_LOOKUP_TIMES 32
while (1) {
child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
if (IS_ERR(child_inode)) {
ret = PTR_ERR(child_inode);
break;
}
ret = ocfs2_inode_lock(child_inode, NULL, 0);
if (ret < 0) {
iput(child_inode);
if (ret != -ENOENT)
mlog_errno(ret);
break;
}
ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
&parent_inode_no);
ocfs2_inode_unlock(child_inode, 0);
iput(child_inode);
if (ret < 0) {
ret = -ENOENT;
break;
}
if (parent_inode_no == dest_inode_no) {
ret = 1;
break;
}
if (parent_inode_no == osb->root_inode->i_ino) {
ret = 0;
break;
}
child_inode_no = parent_inode_no;
if (++i >= MAX_LOOKUP_TIMES) {
mlog_ratelimited(ML_NOTICE, "max lookup times reached, "
"filesystem may have nested directories, "
"src inode: %llu, dest inode: %llu.\n",
(unsigned long long)src_inode_no,
(unsigned long long)dest_inode_no);
ret = 0;
break;
}
}
return ret;
}
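/*
 * Self-contained sketch (illustrative only) of the bounded ".."-walk
 * above: follow parent links from src upward for at most
 * MAX_LOOKUP_TIMES hops, returning 1 when dest is reached and 0 at
 * the root or once the hop budget is exhausted. parent_of() is a
 * hypothetical stand-in for the locked ".." lookup performed above.
 */
static int __maybe_unused
example_is_ancestor(u64 src, u64 dest, u64 root, u64 (*parent_of)(u64 ino))
{
	u64 cur = src;
	int hops;

	for (hops = 0; hops < MAX_LOOKUP_TIMES; hops++) {
		u64 parent = parent_of(cur);

		if (parent == dest)
			return 1;	/* dest is an ancestor of src */
		if (parent == root)
			return 0;	/* reached the root first */
		cur = parent;
	}
	return 0;	/* give up, as the ratelimited warning above does */
}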
/*
 * The only places this should be used are rename and link!
 * If the two inodes have the same id, then only the first one is locked.
*/
static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **bh1,
struct inode *inode1,
struct buffer_head **bh2,
struct inode *inode2,
int rename)
{
int status;
int inode1_is_ancestor, inode2_is_ancestor;
struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
if (*bh1)
*bh1 = NULL;
if (*bh2)
*bh2 = NULL;
/* We always want to lock the one with the lower lockid first;
 * if the inodes are nested, we lock the ancestor first. */
if (oi1->ip_blkno != oi2->ip_blkno) {
inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
oi1->ip_blkno);
if (inode1_is_ancestor < 0) {
status = inode1_is_ancestor;
goto bail;
}
inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
oi2->ip_blkno);
if (inode2_is_ancestor < 0) {
status = inode2_is_ancestor;
goto bail;
}
if ((inode1_is_ancestor == 1) ||
(oi1->ip_blkno < oi2->ip_blkno &&
inode2_is_ancestor == 0)) {
/* switch id1 and id2 around */
swap(bh2, bh1);
swap(inode2, inode1);
}
/* lock id2 */
status = ocfs2_inode_lock_nested(inode2, bh2, 1,
rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
}
/* lock id1 */
status = ocfs2_inode_lock_nested(inode1, bh1, 1,
rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
if (status < 0) {
/*
* An error return must mean that no cluster locks
* were held on function exit.
*/
if (oi1->ip_blkno != oi2->ip_blkno) {
ocfs2_inode_unlock(inode2, 1);
brelse(*bh2);
*bh2 = NULL;
}
if (status != -ENOENT)
mlog_errno(status);
}
trace_ocfs2_double_lock_end(
(unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
bail:
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
{
ocfs2_inode_unlock(inode1, 1);
if (inode1 != inode2)
ocfs2_inode_unlock(inode2, 1);
}
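/*
 * The deadlock-avoidance rule in ocfs2_double_lock() is the classic
 * "acquire two locks in one global order" pattern. A minimal userspace
 * sketch with POSIX mutexes, ordering by address where ocfs2 orders by
 * block number and ancestor-ship; purely illustrative.
 */
#include <pthread.h>
#include <stdint.h>

static void sketch_double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);	/* same object: take it once */
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *t = a;	/* impose the total order */
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}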
static int ocfs2_rename(struct mnt_idmap *idmap,
struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
int status = 0, rename_lock = 0, parents_locked = 0, target_exists = 0;
int old_child_locked = 0, new_child_locked = 0, update_dot_dot = 0;
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct inode *orphan_dir = NULL;
struct ocfs2_dinode *newfe = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
struct buffer_head *newfe_bh = NULL;
struct buffer_head *old_inode_bh = NULL;
struct ocfs2_super *osb = NULL;
u64 newfe_blkno, old_de_ino;
handle_t *handle = NULL;
struct buffer_head *old_dir_bh = NULL;
struct buffer_head *new_dir_bh = NULL;
u32 old_dir_nlink = old_dir->i_nlink;
struct ocfs2_dinode *old_di;
struct ocfs2_dir_lookup_result old_inode_dot_dot_res = { NULL, };
struct ocfs2_dir_lookup_result target_lookup_res = { NULL, };
struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
struct ocfs2_dir_lookup_result target_insert = { NULL, };
bool should_add_orphan = false;
if (flags)
return -EINVAL;
/* At some point it might be nice to break this function up a
* bit. */
trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry,
old_dentry->d_name.len, old_dentry->d_name.name,
new_dentry->d_name.len, new_dentry->d_name.name);
status = dquot_initialize(old_dir);
if (status) {
mlog_errno(status);
goto bail;
}
status = dquot_initialize(new_dir);
if (status) {
mlog_errno(status);
goto bail;
}
osb = OCFS2_SB(old_dir->i_sb);
if (new_inode) {
if (!igrab(new_inode))
BUG();
}
/* Assume a directory hierarchy thusly:
* a/b/c
* a/d
* a,b,c, and d are all directories.
*
* from cwd of 'a' on both nodes:
* node1: mv b/c d
* node2: mv d b/c
*
* And that's why, just like the VFS, we need a file system
* rename lock. */
if (old_dir != new_dir && S_ISDIR(old_inode->i_mode)) {
status = ocfs2_rename_lock(osb);
if (status < 0) {
mlog_errno(status);
goto bail;
}
rename_lock = 1;
/* here we cannot guarantee the inodes haven't just been
* changed, so check if they are nested again */
status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
old_inode->i_ino);
if (status < 0) {
mlog_errno(status);
goto bail;
} else if (status == 1) {
status = -EPERM;
trace_ocfs2_rename_not_permitted(
(unsigned long long)old_inode->i_ino,
(unsigned long long)new_dir->i_ino);
goto bail;
}
}
/* if old and new are the same, this'll just do one lock. */
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
&new_dir_bh, new_dir, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
parents_locked = 1;
if (!new_dir->i_nlink) {
status = -EACCES;
goto bail;
}
/* make sure both dirs have bhs;
 * get an extra ref on old_dir_bh if old==new */
if (!new_dir_bh) {
if (old_dir_bh) {
new_dir_bh = old_dir_bh;
get_bh(new_dir_bh);
} else {
mlog(ML_ERROR, "no old_dir_bh!\n");
status = -EIO;
goto bail;
}
}
/*
* Aside from allowing a meta data update, the locking here
* also ensures that the downconvert thread on other nodes
* won't have to concurrently downconvert the inode and the
* dentry locks.
*/
status = ocfs2_inode_lock_nested(old_inode, &old_inode_bh, 1,
OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
old_child_locked = 1;
status = ocfs2_remote_dentry_delete(old_dentry);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (S_ISDIR(old_inode->i_mode)) {
u64 old_inode_parent;
update_dot_dot = 1;
status = ocfs2_find_files_on_disk("..", 2, &old_inode_parent,
old_inode,
&old_inode_dot_dot_res);
if (status) {
status = -EIO;
goto bail;
}
if (old_inode_parent != OCFS2_I(old_dir)->ip_blkno) {
status = -EIO;
goto bail;
}
if (!new_inode && new_dir != old_dir &&
new_dir->i_nlink >= ocfs2_link_max(osb)) {
status = -EMLINK;
goto bail;
}
}
status = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
old_dentry->d_name.len,
&old_de_ino);
if (status) {
status = -ENOENT;
goto bail;
}
/*
* The check of the inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
if (old_de_ino != OCFS2_I(old_inode)->ip_blkno) {
status = -ENOENT;
goto bail;
}
/* check if the target already exists (in which case we need
 * to delete it) */
status = ocfs2_find_files_on_disk(new_dentry->d_name.name,
new_dentry->d_name.len,
&newfe_blkno, new_dir,
&target_lookup_res);
/* The only error we allow here is -ENOENT because the new
* file not existing is perfectly valid. */
if ((status < 0) && (status != -ENOENT)) {
/* If we cannot find the file specified we should just
 * return the error... */
mlog_errno(status);
goto bail;
}
if (status == 0)
target_exists = 1;
if (!target_exists && new_inode) {
/*
* Target was unlinked by another node while we were
* waiting to get to ocfs2_rename(). There isn't
* anything we can do here to help the situation, so
* bubble up the appropriate error.
*/
status = -ENOENT;
goto bail;
}
/* In case we need to overwrite an existing file, we blow it
* away first */
if (target_exists) {
/* VFS didn't think there existed an inode here, but
* someone else in the cluster must have raced our
* rename to create one. Today we error cleanly, in
* the future we should consider calling iget to build
* a new struct inode for this entry. */
if (!new_inode) {
status = -EACCES;
trace_ocfs2_rename_target_exists(new_dentry->d_name.len,
new_dentry->d_name.name);
goto bail;
}
if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) {
status = -EACCES;
trace_ocfs2_rename_disagree(
(unsigned long long)OCFS2_I(new_inode)->ip_blkno,
(unsigned long long)newfe_blkno,
OCFS2_I(new_inode)->ip_flags);
goto bail;
}
status = ocfs2_inode_lock(new_inode, &newfe_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
new_child_locked = 1;
status = ocfs2_remote_dentry_delete(new_dentry);
if (status < 0) {
mlog_errno(status);
goto bail;
}
newfe = (struct ocfs2_dinode *) newfe_bh->b_data;
trace_ocfs2_rename_over_existing(
(unsigned long long)newfe_blkno, newfe_bh, newfe_bh ?
(unsigned long long)newfe_bh->b_blocknr : 0ULL);
if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
OCFS2_I(new_inode)->ip_blkno,
orphan_name, &orphan_insert,
false);
if (status < 0) {
mlog_errno(status);
goto bail;
}
should_add_orphan = true;
}
} else {
BUG_ON(d_inode(new_dentry->d_parent) != new_dir);
status = ocfs2_check_dir_for_entry(new_dir,
new_dentry->d_name.name,
new_dentry->d_name.len);
if (status)
goto bail;
status = ocfs2_prepare_dir_for_insert(osb, new_dir, new_dir_bh,
new_dentry->d_name.name,
new_dentry->d_name.len,
&target_insert);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
handle = ocfs2_start_trans(osb, ocfs2_rename_credits(osb->sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
if (target_exists) {
if (S_ISDIR(new_inode->i_mode)) {
if (new_inode->i_nlink != 2 ||
!ocfs2_empty_dir(new_inode)) {
status = -ENOTEMPTY;
goto bail;
}
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(new_inode),
newfe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* change the dirent to point to the correct inode */
status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
old_inode);
if (status < 0) {
mlog_errno(status);
goto bail;
}
inode_inc_iversion(new_dir);
if (S_ISDIR(new_inode->i_mode))
ocfs2_set_links_count(newfe, 0);
else
ocfs2_add_links_count(newfe, -1);
ocfs2_journal_dirty(handle, newfe_bh);
if (should_add_orphan) {
status = ocfs2_orphan_add(osb, handle, new_inode,
newfe_bh, orphan_name,
&orphan_insert, orphan_dir, false);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
} else {
/* if the name was not found in new_dir, add it now */
status = ocfs2_add_entry(handle, new_dentry, old_inode,
OCFS2_I(old_inode)->ip_blkno,
new_dir_bh, &target_insert);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
status = ocfs2_journal_access_di(handle, INODE_CACHE(old_inode),
old_inode_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status >= 0) {
old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
old_di->i_ctime = cpu_to_le64(inode_get_ctime(old_inode).tv_sec);
old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(old_inode).tv_nsec);
ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
/*
* Now that the name has been added to new_dir, remove the old name.
*
* We don't keep any directory entry context around until now
* because the insert might have changed the type of directory
* we're dealing with.
*/
status = ocfs2_find_entry(old_dentry->d_name.name,
old_dentry->d_name.len, old_dir,
&old_entry_lookup);
if (status) {
if (!is_journal_aborted(osb->journal->j_journal)) {
ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
"is not deleted.",
new_dentry->d_name.len, new_dentry->d_name.name,
old_dentry->d_name.len, old_dentry->d_name.name);
}
goto bail;
}
status = ocfs2_delete_entry(handle, old_dir, &old_entry_lookup);
if (status < 0) {
mlog_errno(status);
if (!is_journal_aborted(osb->journal->j_journal)) {
ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
"is not deleted.",
new_dentry->d_name.len, new_dentry->d_name.name,
old_dentry->d_name.len, old_dentry->d_name.name);
}
goto bail;
}
if (new_inode) {
drop_nlink(new_inode);
inode_set_ctime_current(new_inode);
}
old_dir->i_mtime = inode_set_ctime_current(old_dir);
if (update_dot_dot) {
status = ocfs2_update_entry(old_inode, handle,
&old_inode_dot_dot_res, new_dir);
drop_nlink(old_dir);
if (new_inode) {
drop_nlink(new_inode);
} else {
inc_nlink(new_dir);
mark_inode_dirty(new_dir);
}
}
mark_inode_dirty(old_dir);
ocfs2_mark_inode_dirty(handle, old_dir, old_dir_bh);
if (new_inode) {
mark_inode_dirty(new_inode);
ocfs2_mark_inode_dirty(handle, new_inode, newfe_bh);
}
if (old_dir != new_dir) {
/* Keep the same times on both directories.*/
new_dir->i_mtime = inode_set_ctime_to_ts(new_dir,
inode_get_ctime(old_dir));
/*
* This will also pick up the i_nlink change from the
* block above.
*/
ocfs2_mark_inode_dirty(handle, new_dir, new_dir_bh);
}
if (old_dir_nlink != old_dir->i_nlink) {
if (!old_dir_bh) {
mlog(ML_ERROR, "need to change nlink for old dir "
"%llu from %d to %d but bh is NULL!\n",
(unsigned long long)OCFS2_I(old_dir)->ip_blkno,
(int)old_dir_nlink, old_dir->i_nlink);
} else {
struct ocfs2_dinode *fe;
status = ocfs2_journal_access_di(handle,
INODE_CACHE(old_dir),
old_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
ocfs2_set_links_count(fe, old_dir->i_nlink);
ocfs2_journal_dirty(handle, old_dir_bh);
}
}
ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir);
status = 0;
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
if (orphan_dir) {
/* This was locked for us in ocfs2_prepare_orphan_dir() */
ocfs2_inode_unlock(orphan_dir, 1);
inode_unlock(orphan_dir);
iput(orphan_dir);
}
if (new_child_locked)
ocfs2_inode_unlock(new_inode, 1);
if (old_child_locked)
ocfs2_inode_unlock(old_inode, 1);
if (parents_locked)
ocfs2_double_unlock(old_dir, new_dir);
if (rename_lock)
ocfs2_rename_unlock(osb);
if (new_inode)
sync_mapping_buffers(old_inode->i_mapping);
iput(new_inode);
ocfs2_free_dir_lookup_result(&target_lookup_res);
ocfs2_free_dir_lookup_result(&old_entry_lookup);
ocfs2_free_dir_lookup_result(&old_inode_dot_dot_res);
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_free_dir_lookup_result(&target_insert);
brelse(newfe_bh);
brelse(old_inode_bh);
brelse(old_dir_bh);
brelse(new_dir_bh);
if (status)
mlog_errno(status);
return status;
}
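/*
 * Why the whole-filesystem rename lock matters: two nodes concurrently
 * doing "mv b/c d" and "mv d b/c" could each pass a local ancestor check
 * and then stitch the tree into a cycle. A hedged userspace sketch of the
 * serialized scheme, reusing sketch_is_ancestor() from above; the helpers
 * are hypothetical, not ocfs2 calls.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t sketch_rename_mutex = PTHREAD_MUTEX_INITIALIZER;

static int sketch_rename_dir(uint64_t *parent, uint64_t root,
			     uint64_t src_dir, uint64_t dst_dir)
{
	int ret = 0;

	pthread_mutex_lock(&sketch_rename_mutex);
	/* re-check nesting now that no other rename can race us */
	if (sketch_is_ancestor(parent, root, dst_dir, src_dir)) {
		ret = -EPERM;	/* moving a dir under its own subtree */
		goto out;
	}
	parent[src_dir] = dst_dir;	/* the ".." update */
out:
	pthread_mutex_unlock(&sketch_rename_mutex);
	return ret;
}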
/*
* we expect i_size = strlen(symname). Copy symname into the file
* data, including the null terminator.
*/
static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
const char *symname)
{
struct buffer_head **bhs = NULL;
const char *c;
struct super_block *sb = osb->sb;
u64 p_blkno, p_blocks;
int virtual, blocks, status, i, bytes_left;
bytes_left = i_size_read(inode) + 1;
/* we can't trust i_blocks because we're actually going to
* write i_size + 1 bytes. */
blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks,
i_size_read(inode), blocks);
/* Sanity check -- make sure we're going to fit. */
if (bytes_left >
ocfs2_clusters_to_bytes(sb, OCFS2_I(inode)->ip_clusters)) {
status = -EIO;
mlog_errno(status);
goto bail;
}
bhs = kcalloc(blocks, sizeof(struct buffer_head *), GFP_KERNEL);
if (!bhs) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
status = ocfs2_extent_map_get_blocks(inode, 0, &p_blkno, &p_blocks,
NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* links can never be larger than one cluster so we know this
* is all going to be contiguous, but do a sanity check
* anyway. */
if ((p_blocks << sb->s_blocksize_bits) < bytes_left) {
status = -EIO;
mlog_errno(status);
goto bail;
}
virtual = 0;
while (bytes_left > 0) {
c = &symname[virtual * sb->s_blocksize];
bhs[virtual] = sb_getblk(sb, p_blkno);
if (!bhs[virtual]) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode),
bhs[virtual]);
status = ocfs2_journal_access(handle, INODE_CACHE(inode),
bhs[virtual],
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(bhs[virtual]->b_data, 0, sb->s_blocksize);
memcpy(bhs[virtual]->b_data, c,
(bytes_left > sb->s_blocksize) ? sb->s_blocksize :
bytes_left);
ocfs2_journal_dirty(handle, bhs[virtual]);
virtual++;
p_blkno++;
bytes_left -= sb->s_blocksize;
}
status = 0;
bail:
if (bhs) {
for (i = 0; i < blocks; i++)
brelse(bhs[i]);
kfree(bhs);
}
if (status)
mlog_errno(status);
return status;
}
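/*
 * The block math above in one place: we store strlen(symname) + 1 bytes
 * (NUL included) rounded up to whole blocks, zeroing each block before
 * the copy. A self-contained userspace sketch writing into a flat buffer
 * instead of journaled buffer heads; names are illustrative.
 */
#include <stddef.h>
#include <string.h>

static size_t sketch_write_symlink(char *blocks, size_t blocksize,
				   const char *symname)
{
	size_t bytes_left = strlen(symname) + 1;	/* include the NUL */
	size_t nblocks = (bytes_left + blocksize - 1) / blocksize;
	size_t v;

	for (v = 0; v < nblocks; v++) {
		size_t chunk = bytes_left > blocksize ? blocksize : bytes_left;

		memset(blocks + v * blocksize, 0, blocksize);
		memcpy(blocks + v * blocksize, symname + v * blocksize, chunk);
		bytes_left -= chunk;
	}
	return nblocks;
}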
static int ocfs2_symlink(struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
const char *symname)
{
int status, l, credits;
u64 newsize;
struct ocfs2_super *osb = NULL;
struct inode *inode = NULL;
struct super_block *sb;
struct buffer_head *new_fe_bh = NULL;
struct buffer_head *parent_fe_bh = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_dinode *dirfe;
handle_t *handle = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *xattr_ac = NULL;
int want_clusters = 0;
int xattr_credits = 0;
struct ocfs2_security_xattr_info si = {
.name = NULL,
.enable = 1,
};
int did_quota = 0, did_quota_inode = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
int did_block_signals = 0;
struct ocfs2_dentry_lock *dl = NULL;
trace_ocfs2_symlink_begin(dir, dentry, symname,
dentry->d_name.len, dentry->d_name.name);
status = dquot_initialize(dir);
if (status) {
mlog_errno(status);
goto bail;
}
sb = dir->i_sb;
osb = OCFS2_SB(sb);
l = strlen(symname) + 1;
credits = ocfs2_calc_symlink_credits(sb);
/* lock the parent directory */
status = ocfs2_inode_lock(dir, &parent_fe_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
if (!ocfs2_read_links_count(dirfe)) {
/* can't make a file in a deleted directory. */
status = -ENOENT;
goto bail;
}
status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (status)
goto bail;
status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
dentry->d_name.name,
dentry->d_name.len, &lookup);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_reserve_new_inode(osb, &inode_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
inode = ocfs2_get_init_inode(dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode)) {
status = PTR_ERR(inode);
inode = NULL;
mlog_errno(status);
goto bail;
}
/* get security xattr */
status = ocfs2_init_security_get(inode, dir, &dentry->d_name, &si);
if (status) {
if (status == -EOPNOTSUPP)
si.enable = 0;
else {
mlog_errno(status);
goto bail;
}
}
/* calculate meta data/clusters for setting security xattr */
if (si.enable) {
status = ocfs2_calc_security_init(dir, &si, &want_clusters,
&xattr_credits, &xattr_ac);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
/* don't reserve bitmap space for fast symlinks. */
if (l > ocfs2_fast_symlink_chars(sb))
want_clusters += 1;
status = ocfs2_reserve_clusters(osb, want_clusters, &data_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
handle = ocfs2_start_trans(osb, credits + xattr_credits);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
/* Starting to change things, restart is no longer possible. */
ocfs2_block_signals(&oldset);
did_block_signals = 1;
status = dquot_alloc_inode(inode);
if (status)
goto bail;
did_quota_inode = 1;
trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len,
dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
inode->i_mode);
status = ocfs2_mknod_locked(osb, dir, inode,
0, &new_fe_bh, parent_fe_bh, handle,
inode_ac);
if (status < 0) {
mlog_errno(status);
goto bail;
}
fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
inode->i_rdev = 0;
newsize = l - 1;
inode->i_op = &ocfs2_symlink_inode_operations;
inode_nohighmem(inode);
if (l > ocfs2_fast_symlink_chars(sb)) {
u32 offset = 0;
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status)
goto bail;
did_quota = 1;
inode->i_mapping->a_ops = &ocfs2_aops;
status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
new_fe_bh,
handle, data_ac, NULL,
NULL);
if (status < 0) {
if (status != -ENOSPC && status != -EINTR) {
mlog(ML_ERROR,
"Failed to extend file to %llu\n",
(unsigned long long)newsize);
mlog_errno(status);
status = -ENOSPC;
}
goto bail;
}
i_size_write(inode, newsize);
inode->i_blocks = ocfs2_inode_sector_count(inode);
} else {
inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
memcpy((char *) fe->id2.i_symlink, symname, l);
i_size_write(inode, newsize);
inode->i_blocks = 0;
}
status = ocfs2_mark_inode_dirty(handle, inode, new_fe_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (!ocfs2_inode_is_fast_symlink(inode)) {
status = ocfs2_create_symlink_data(osb, handle, inode,
symname);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
if (si.enable) {
status = ocfs2_init_security_set(handle, inode, new_fe_bh, &si,
xattr_ac, data_ac);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
/*
* Do this before adding the entry to the directory. We also
* set d_op after success so that ->d_iput() will clean up
* the dentry lock even if ocfs2_add_entry() fails below.
*/
status = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
if (status) {
mlog_errno(status);
goto bail;
}
dl = dentry->d_fsdata;
status = ocfs2_add_entry(handle, dentry, inode,
le64_to_cpu(fe->i_blkno), parent_fe_bh,
&lookup);
if (status < 0) {
mlog_errno(status);
goto bail;
}
insert_inode_hash(inode);
d_instantiate(dentry, inode);
bail:
if (status < 0 && did_quota)
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle) {
if (status < 0 && fe)
ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
}
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)
ocfs2_unblock_signals(&oldset);
brelse(new_fe_bh);
brelse(parent_fe_bh);
kfree(si.value);
ocfs2_free_dir_lookup_result(&lookup);
if (inode_ac)
ocfs2_free_alloc_context(inode_ac);
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (xattr_ac)
ocfs2_free_alloc_context(xattr_ac);
if ((status < 0) && inode) {
if (dl)
ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
clear_nlink(inode);
iput(inode);
}
if (status)
mlog_errno(status);
return status;
}
static int ocfs2_blkno_stringify(u64 blkno, char *name)
{
int status, namelen;
namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx",
(unsigned long long)blkno);
if (namelen <= 0) {
if (namelen)
status = namelen;
else
status = -EINVAL;
mlog_errno(status);
goto bail;
}
if (namelen != OCFS2_ORPHAN_NAMELEN) {
status = -EINVAL;
mlog_errno(status);
goto bail;
}
trace_ocfs2_blkno_stringify(blkno, name, namelen);
status = 0;
bail:
if (status < 0)
mlog_errno(status);
return status;
}
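/*
 * Orphan dirents are named by printing the inode's block number as
 * fixed-width hex, so a valid name is always exactly OCFS2_ORPHAN_NAMELEN
 * (16) characters. A standalone sketch of the same formatting and sanity
 * check; e.g. blkno 0x1234 becomes "0000000000001234".
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ORPHAN_NAMELEN 16

static int sketch_blkno_stringify(uint64_t blkno, char *name)
{
	int n = snprintf(name, SKETCH_ORPHAN_NAMELEN + 1, "%016llx",
			 (unsigned long long)blkno);

	return (n == SKETCH_ORPHAN_NAMELEN) ? 0 : -1;
}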
static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
struct buffer_head **ret_orphan_dir_bh)
{
struct inode *orphan_dir_inode;
struct buffer_head *orphan_dir_bh = NULL;
int ret = 0;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
osb->slot_num);
if (!orphan_dir_inode) {
ret = -ENOENT;
mlog_errno(ret);
return ret;
}
inode_lock(orphan_dir_inode);
ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
if (ret < 0) {
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
mlog_errno(ret);
return ret;
}
*ret_orphan_dir = orphan_dir_inode;
*ret_orphan_dir_bh = orphan_dir_bh;
return 0;
}
static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
struct buffer_head *orphan_dir_bh,
u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup,
bool dio)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
int namelen = dio ?
(OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) :
OCFS2_ORPHAN_NAMELEN;
if (dio) {
ret = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s",
OCFS2_DIO_ORPHAN_PREFIX);
if (ret != OCFS2_DIO_ORPHAN_PREFIX_LEN) {
ret = -EINVAL;
mlog_errno(ret);
return ret;
}
ret = ocfs2_blkno_stringify(blkno,
name + OCFS2_DIO_ORPHAN_PREFIX_LEN);
} else
ret = ocfs2_blkno_stringify(blkno, name);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
orphan_dir_bh, name,
namelen, lookup);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
return 0;
}
/**
* ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
* insertion of an orphan.
* @osb: ocfs2 file system
* @ret_orphan_dir: Orphan dir inode - returned locked!
* @blkno: Actual block number of the inode to be inserted into orphan dir.
* @name: Buffer to be filled with the orphan dirent name.
* @lookup: dir lookup result, to be passed back into functions like
* ocfs2_orphan_add
* @dio: set when orphaning for an in-flight append direct io write (the
* name then carries the dio orphan prefix).
*
* Returns zero on success and the ret_orphan_dir, name and lookup
* fields will be populated.
*
* Returns non-zero on failure.
*/
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup,
bool dio)
{
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
int ret = 0;
ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
&orphan_dir_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
blkno, name, lookup, dio);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
*ret_orphan_dir = orphan_dir_inode;
out:
brelse(orphan_dir_bh);
if (ret) {
ocfs2_inode_unlock(orphan_dir_inode, 1);
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
}
if (ret)
mlog_errno(ret);
return ret;
}
static int ocfs2_orphan_add(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
struct inode *orphan_dir_inode,
bool dio)
{
struct buffer_head *orphan_dir_bh = NULL;
int status = 0;
struct ocfs2_dinode *orphan_fe;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
int namelen = dio ?
(OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) :
OCFS2_ORPHAN_NAMELEN;
trace_ocfs2_orphan_add_begin(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh);
if (status < 0) {
mlog_errno(status);
goto leave;
}
status = ocfs2_journal_access_di(handle,
INODE_CACHE(orphan_dir_inode),
orphan_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/*
* We're going to journal the change of i_flags and i_orphaned_slot.
* It's safe anyway, though some callers may duplicate the journaling.
* Journaling within this function just makes the logic look more
* straightforward.
*/
status = ocfs2_journal_access_di(handle,
INODE_CACHE(inode),
fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* we're a cluster filesystem, and nlink can change on disk from
* underneath us... */
orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data;
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, 1);
set_nlink(orphan_dir_inode, ocfs2_read_links_count(orphan_fe));
ocfs2_journal_dirty(handle, orphan_dir_bh);
status = __ocfs2_add_entry(handle, orphan_dir_inode, name,
namelen, inode,
OCFS2_I(inode)->ip_blkno,
orphan_dir_bh, lookup);
if (status < 0) {
mlog_errno(status);
goto rollback;
}
if (dio) {
/* Update flag OCFS2_DIO_ORPHANED_FL and record the orphan
* slot.
*/
fe->i_flags |= cpu_to_le32(OCFS2_DIO_ORPHANED_FL);
fe->i_dio_orphaned_slot = cpu_to_le16(osb->slot_num);
} else {
fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL);
OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR;
/* Record which orphan dir our inode now resides
* in. delete_inode will use this to determine which orphan
* dir to lock. */
fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
}
ocfs2_journal_dirty(handle, fe_bh);
trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
osb->slot_num);
rollback:
if (status < 0) {
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, -1);
set_nlink(orphan_dir_inode, ocfs2_read_links_count(orphan_fe));
}
leave:
brelse(orphan_dir_bh);
return status;
}
/* unlike orphan_add, we expect the orphan dir to already be locked here. */
int ocfs2_orphan_del(struct ocfs2_super *osb,
handle_t *handle,
struct inode *orphan_dir_inode,
struct inode *inode,
struct buffer_head *orphan_dir_bh,
bool dio)
{
char name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1];
struct ocfs2_dinode *orphan_fe;
int status = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
if (dio) {
status = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s",
OCFS2_DIO_ORPHAN_PREFIX);
if (status != OCFS2_DIO_ORPHAN_PREFIX_LEN) {
status = -EINVAL;
mlog_errno(status);
return status;
}
status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno,
name + OCFS2_DIO_ORPHAN_PREFIX_LEN);
} else
status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
if (status < 0) {
mlog_errno(status);
goto leave;
}
trace_ocfs2_orphan_del(
(unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
name, strlen(name));
status = ocfs2_journal_access_di(handle,
INODE_CACHE(orphan_dir_inode),
orphan_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* find its spot in the orphan directory */
status = ocfs2_find_entry(name, strlen(name), orphan_dir_inode,
&lookup);
if (status) {
mlog_errno(status);
goto leave;
}
/* remove it from the orphan directory */
status = ocfs2_delete_entry(handle, orphan_dir_inode, &lookup);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* do the i_nlink dance! :) */
orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data;
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, -1);
set_nlink(orphan_dir_inode, ocfs2_read_links_count(orphan_fe));
ocfs2_journal_dirty(handle, orphan_dir_bh);
leave:
ocfs2_free_dir_lookup_result(&lookup);
if (status)
mlog_errno(status);
return status;
}
/**
* ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
* allocated file. This is different from the typical 'add to orphan dir'
* operation in that the inode does not yet exist. This is a problem because
* the orphan dir stringifies the inode block number to come up with it's
* dirent. Obviously if the inode does not yet exist we have a chicken and egg
* problem. This function works around it by calling deeper into the orphan
* and suballoc code than other callers. Use this only by necessity.
* @dir: The directory which this inode will ultimately wind up under - not the
* orphan dir!
* @dir_bh: buffer_head the @dir inode block
* @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
* with the string to be used for orphan dirent. Pass back to the orphan dir
* code.
* @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
* dir code.
* @ret_di_blkno: block number where the new inode will be allocated.
* @orphan_insert: Dir insert context to be passed back into orphan dir code.
* @ret_inode_ac: Inode alloc context to be passed back to the allocator.
*
* Returns zero on success and the ret_orphan_dir, name and lookup
* fields will be populated.
*
* Returns non-zero on failure.
*/
static int ocfs2_prep_new_orphaned_file(struct inode *dir,
struct buffer_head *dir_bh,
char *orphan_name,
struct inode **ret_orphan_dir,
u64 *ret_di_blkno,
struct ocfs2_dir_lookup_result *orphan_insert,
struct ocfs2_alloc_context **ret_inode_ac)
{
int ret;
u64 di_blkno;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct inode *orphan_dir = NULL;
struct buffer_head *orphan_dir_bh = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
/* reserve an inode spot */
ret = ocfs2_reserve_new_inode(osb, &inode_ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
&di_blkno);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
di_blkno, orphan_name, orphan_insert,
false);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
out:
if (ret == 0) {
*ret_orphan_dir = orphan_dir;
*ret_di_blkno = di_blkno;
*ret_inode_ac = inode_ac;
/*
* orphan_name and orphan_insert are already up to
* date via prepare_orphan_dir
*/
} else {
/* Unroll reserve_new_inode* */
if (inode_ac)
ocfs2_free_alloc_context(inode_ac);
/* Unroll orphan dir locking */
inode_unlock(orphan_dir);
ocfs2_inode_unlock(orphan_dir, 1);
iput(orphan_dir);
}
brelse(orphan_dir_bh);
return ret;
}
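/*
 * The chicken-and-egg workaround above, in outline: reserve where the
 * inode *will* live, derive the orphan name from that block number,
 * insert the dirent, and only then allocate the inode at the promised
 * location. A hedged sketch with stub helpers standing in for the
 * suballoc and orphan-dir calls; none of these names are real ocfs2 APIs.
 */
#include <stdint.h>

static uint64_t sketch_reserve_blkno(void) { return 0x1234; }
static void sketch_name_from_blkno(uint64_t b, char *n) { (void)b; n[0] = '\0'; }
static int sketch_insert_orphan_dirent(const char *n) { (void)n; return 0; }
static int sketch_alloc_inode_at(uint64_t b) { (void)b; return 0; }

static int sketch_create_orphaned_inode(void)
{
	char name[17];
	uint64_t blkno = sketch_reserve_blkno();	/* 1: pick the location */

	sketch_name_from_blkno(blkno, name);		/* 2: name follows blkno */
	if (sketch_insert_orphan_dirent(name))		/* 3: dirent before inode */
		return -1;
	return sketch_alloc_inode_at(blkno);		/* 4: inode last */
}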
int ocfs2_create_inode_in_orphan(struct inode *dir,
int mode,
struct inode **new_inode)
{
int status, did_quota_inode = 0;
struct inode *inode = NULL;
struct inode *orphan_dir = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
handle_t *handle = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
struct buffer_head *parent_di_bh = NULL;
struct buffer_head *new_di_bh = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
u64 di_blkno, suballoc_loc;
u16 suballoc_bit;
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
orphan_name, &orphan_dir,
&di_blkno, &orphan_insert, &inode_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
inode = ocfs2_get_init_inode(dir, mode);
if (IS_ERR(inode)) {
status = PTR_ERR(inode);
inode = NULL;
mlog_errno(status);
goto leave;
}
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 0, 0));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto leave;
}
status = dquot_alloc_inode(inode);
if (status)
goto leave;
did_quota_inode = 1;
status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
&suballoc_loc,
&suballoc_bit, di_blkno);
if (status < 0) {
mlog_errno(status);
goto leave;
}
clear_nlink(inode);
/* do the real work now. */
status = __ocfs2_mknod_locked(dir, inode,
0, &new_di_bh, parent_di_bh, handle,
inode_ac, di_blkno, suballoc_loc,
suballoc_bit);
if (status < 0) {
mlog_errno(status);
goto leave;
}
status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
&orphan_insert, orphan_dir, false);
if (status < 0) {
mlog_errno(status);
goto leave;
}
/* take the open lock so that other nodes can't remove it from the orphan dir. */
status = ocfs2_open_lock(inode);
if (status < 0)
mlog_errno(status);
insert_inode_hash(inode);
leave:
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
if (handle)
ocfs2_commit_trans(osb, handle);
if (orphan_dir) {
/* This was locked for us in ocfs2_prepare_orphan_dir() */
ocfs2_inode_unlock(orphan_dir, 1);
inode_unlock(orphan_dir);
iput(orphan_dir);
}
if ((status < 0) && inode) {
clear_nlink(inode);
iput(inode);
}
if (inode_ac)
ocfs2_free_alloc_context(inode_ac);
brelse(new_di_bh);
if (!status)
*new_inode = inode;
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_inode_unlock(dir, 1);
brelse(parent_di_bh);
return status;
}
int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
struct inode *inode)
{
char orphan_name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1];
struct inode *orphan_dir_inode = NULL;
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
struct buffer_head *di_bh = NULL;
int status = 0;
handle_t *handle = NULL;
struct ocfs2_dinode *di = NULL;
status = ocfs2_inode_lock(inode, &di_bh, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
di = (struct ocfs2_dinode *) di_bh->b_data;
/*
* Another append dio crashed?
* If so, manually recover it first.
*/
if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
status = ocfs2_truncate_file(inode, di_bh, i_size_read(inode));
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail_unlock_inode;
}
status = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_inode;
}
}
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode,
OCFS2_I(inode)->ip_blkno,
orphan_name,
&orphan_insert,
true);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_inode;
}
handle = ocfs2_start_trans(osb,
OCFS2_INODE_ADD_TO_ORPHAN_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
goto bail_unlock_orphan;
}
status = ocfs2_orphan_add(osb, handle, inode, di_bh, orphan_name,
&orphan_insert, orphan_dir_inode, true);
if (status)
mlog_errno(status);
ocfs2_commit_trans(osb, handle);
bail_unlock_orphan:
ocfs2_inode_unlock(orphan_dir_inode, 1);
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
ocfs2_free_dir_lookup_result(&orphan_insert);
bail_unlock_inode:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
bail:
return status;
}
int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
struct inode *inode, struct buffer_head *di_bh,
int update_isize, loff_t end)
{
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
handle_t *handle = NULL;
int status = 0;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
le16_to_cpu(di->i_dio_orphaned_slot));
if (!orphan_dir_inode) {
status = -ENOENT;
mlog_errno(status);
goto bail;
}
inode_lock(orphan_dir_inode);
status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
if (status < 0) {
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
mlog_errno(status);
goto bail;
}
handle = ocfs2_start_trans(osb,
OCFS2_INODE_DEL_FROM_ORPHAN_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
goto bail_unlock_orphan;
}
BUG_ON(!(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)));
status = ocfs2_orphan_del(osb, handle, orphan_dir_inode,
inode, orphan_dir_bh, true);
if (status < 0) {
mlog_errno(status);
goto bail_commit;
}
status = ocfs2_journal_access_di(handle,
INODE_CACHE(inode),
di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail_commit;
}
di->i_flags &= ~cpu_to_le32(OCFS2_DIO_ORPHANED_FL);
di->i_dio_orphaned_slot = 0;
if (update_isize) {
status = ocfs2_set_inode_size(handle, inode, di_bh, end);
if (status)
mlog_errno(status);
} else
ocfs2_journal_dirty(handle, di_bh);
bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock_orphan:
ocfs2_inode_unlock(orphan_dir_inode, 1);
inode_unlock(orphan_dir_inode);
brelse(orphan_dir_bh);
iput(orphan_dir_inode);
bail:
return status;
}
int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
struct inode *inode,
struct dentry *dentry)
{
int status = 0;
struct buffer_head *parent_di_bh = NULL;
handle_t *handle = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dinode *dir_di, *di;
struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
struct buffer_head *di_bh = NULL;
struct ocfs2_dir_lookup_result lookup = { NULL, };
trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry,
dentry->d_name.len, dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
dir_di = (struct ocfs2_dinode *) parent_di_bh->b_data;
if (!dir_di->i_links_count) {
/* can't make a file in a deleted directory. */
status = -ENOENT;
goto leave;
}
status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (status)
goto leave;
/* get a spot inside the dir. */
status = ocfs2_prepare_dir_for_insert(osb, dir, parent_di_bh,
dentry->d_name.name,
dentry->d_name.len, &lookup);
if (status < 0) {
mlog_errno(status);
goto leave;
}
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
osb->slot_num);
if (!orphan_dir_inode) {
status = -ENOENT;
mlog_errno(status);
goto leave;
}
inode_lock(orphan_dir_inode);
status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
if (status < 0) {
mlog_errno(status);
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
goto leave;
}
status = ocfs2_read_inode_block(inode, &di_bh);
if (status < 0) {
mlog_errno(status);
goto orphan_unlock;
}
handle = ocfs2_start_trans(osb, ocfs2_rename_credits(osb->sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto orphan_unlock;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
orphan_dir_bh, false);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
di->i_flags &= ~cpu_to_le32(OCFS2_ORPHANED_FL);
di->i_orphaned_slot = 0;
set_nlink(inode, 1);
ocfs2_set_links_count(di, inode->i_nlink);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, di_bh);
status = ocfs2_add_entry(handle, dentry, inode,
OCFS2_I(inode)->ip_blkno, parent_di_bh,
&lookup);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
status = ocfs2_dentry_attach_lock(dentry, inode,
OCFS2_I(dir)->ip_blkno);
if (status) {
mlog_errno(status);
goto out_commit;
}
d_instantiate(dentry, inode);
status = 0;
out_commit:
ocfs2_commit_trans(osb, handle);
orphan_unlock:
ocfs2_inode_unlock(orphan_dir_inode, 1);
inode_unlock(orphan_dir_inode);
iput(orphan_dir_inode);
leave:
ocfs2_inode_unlock(dir, 1);
brelse(di_bh);
brelse(parent_di_bh);
brelse(orphan_dir_bh);
ocfs2_free_dir_lookup_result(&lookup);
if (status)
mlog_errno(status);
return status;
}
const struct inode_operations ocfs2_dir_iops = {
.create = ocfs2_create,
.lookup = ocfs2_lookup,
.link = ocfs2_link,
.unlink = ocfs2_unlink,
.rmdir = ocfs2_unlink,
.symlink = ocfs2_symlink,
.mkdir = ocfs2_mkdir,
.mknod = ocfs2_mknod,
.rename = ocfs2_rename,
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
.permission = ocfs2_permission,
.listxattr = ocfs2_listxattr,
.fiemap = ocfs2_fiemap,
.get_inode_acl = ocfs2_iop_get_acl,
.set_acl = ocfs2_iop_set_acl,
.fileattr_get = ocfs2_fileattr_get,
.fileattr_set = ocfs2_fileattr_set,
};
| linux-master | fs/ocfs2/namei.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* localalloc.c
*
* Node local data allocation
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc,
u32 *numbits,
struct ocfs2_alloc_reservation *resv);
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_dinode *alloc,
struct inode *main_bm_inode,
struct buffer_head *main_bm_bh);
static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac,
struct inode **bitmap_inode,
struct buffer_head **bitmap_bh);
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac);
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct inode *local_alloc_inode);
/*
* ocfs2_la_default_mb() - determine a default size, in megabytes, of
* the local alloc.
*
* Generally, we'd like to pick as large a local alloc as
* possible. Performance on large workloads tends to scale
* proportionally to la size. In addition to that, the reservations
* code functions more efficiently as it can reserve more windows for
* write.
*
* Some things work against us when trying to choose a large local alloc:
*
* - We need to ensure our sizing is picked to leave enough space in
* group descriptors for other allocations (such as block groups,
* etc). Picking default sizes which are a multiple of 4 could help
* - block groups are allocated in 2mb and 4mb chunks.
*
* - Likewise, we don't want to starve other nodes of bits on small
* file systems. This can easily be taken care of by limiting our
* default to a reasonable size (256M) on larger cluster sizes.
*
* - Some file systems can't support very large sizes - 4k and 8k in
* particular are limited to less than 128 and 256 megabytes respectively.
*
* The following reference table shows group descriptor and local
* alloc maximums at various cluster sizes (4k blocksize)
*
* csize: 4K group: 126M la: 121M
* csize: 8K group: 252M la: 243M
* csize: 16K group: 504M la: 486M
* csize: 32K group: 1008M la: 972M
* csize: 64K group: 2016M la: 1944M
* csize: 128K group: 4032M la: 3888M
* csize: 256K group: 8064M la: 7776M
* csize: 512K group: 16128M la: 15552M
* csize: 1024K group: 32256M la: 31104M
*/
#define OCFS2_LA_MAX_DEFAULT_MB 256
#define OCFS2_LA_OLD_DEFAULT 8
unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
{
unsigned int la_mb;
unsigned int gd_mb;
unsigned int la_max_mb;
unsigned int megs_per_slot;
struct super_block *sb = osb->sb;
gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));
/*
* This takes care of file systems with very small group
* descriptors - 512 byte blocksize at cluster sizes lower
* than 16K and also 1k blocksize with 4k cluster size.
*/
if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
|| (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
return OCFS2_LA_OLD_DEFAULT;
/*
* Leave enough room for some block groups and make the final
* value we work from a multiple of 4.
*/
gd_mb -= 16;
gd_mb &= 0xFFFFFFFC;
la_mb = gd_mb;
/*
* Keep window sizes down to a reasonable default
*/
if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
/*
* Some clustersize / blocksize combinations will have
* given us a larger than OCFS2_LA_MAX_DEFAULT_MB
* default size, but get poor distribution when
* limited to exactly 256 megabytes.
*
* As an example, 16K clustersize at 4K blocksize
* gives us a cluster group size of 504M. Paring the
* local alloc size down to 256 however, would give us
* only one window and around 200MB left in the
* cluster group. Instead, find the first size below
* 256 which would give us an even distribution.
*
* Larger cluster group sizes actually work out pretty
* well when pared to 256, so we don't have to do this
* for any group that fits more than two
* OCFS2_LA_MAX_DEFAULT_MB windows.
*/
if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
	la_mb = OCFS2_LA_MAX_DEFAULT_MB;
else {
	unsigned int gd_mult = gd_mb;
	while (gd_mult > OCFS2_LA_MAX_DEFAULT_MB)
		gd_mult = gd_mult >> 1;
	la_mb = gd_mult;
}
}
megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
/* Too many nodes, too few disk clusters. */
if (megs_per_slot < la_mb)
la_mb = megs_per_slot;
/* We can't store more bits than we can in a block. */
la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
ocfs2_local_alloc_size(sb) * 8);
if (la_mb > la_max_mb)
la_mb = la_max_mb;
return la_mb;
}
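/*
 * The sizing heuristic above, restated as a self-contained sketch (all
 * values in megabytes): groups that hold at most two cap-sized windows
 * are halved until they fit under the cap so windows divide the group
 * evenly (e.g. a 504M group yields 252M), then the result is clamped by
 * the per-slot share and the bitmap-size maximum. Illustrative only.
 */
#define SKETCH_LA_CAP_MB 256

static unsigned int sketch_la_default_mb(unsigned int gd_mb,
					 unsigned int megs_per_slot,
					 unsigned int la_max_mb)
{
	unsigned int la_mb = gd_mb;

	if (la_mb > SKETCH_LA_CAP_MB) {
		if (gd_mb > 2 * SKETCH_LA_CAP_MB) {
			la_mb = SKETCH_LA_CAP_MB;
		} else {
			while (la_mb > SKETCH_LA_CAP_MB)
				la_mb >>= 1;	/* 504 -> 252 */
		}
	}
	if (la_mb > megs_per_slot)
		la_mb = megs_per_slot;	/* don't starve other slots */
	if (la_mb > la_max_mb)
		la_mb = la_max_mb;	/* can't exceed one block of bits */
	return la_mb;
}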
void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
{
struct super_block *sb = osb->sb;
unsigned int la_default_mb = ocfs2_la_default_mb(osb);
unsigned int la_max_mb;
la_max_mb = ocfs2_clusters_to_megabytes(sb,
ocfs2_local_alloc_size(sb) * 8);
trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
if (requested_mb == -1) {
/* No user request - use defaults */
osb->local_alloc_default_bits =
ocfs2_megabytes_to_clusters(sb, la_default_mb);
} else if (requested_mb > la_max_mb) {
/* Request is too big, we give the maximum available */
osb->local_alloc_default_bits =
ocfs2_megabytes_to_clusters(sb, la_max_mb);
} else {
osb->local_alloc_default_bits =
ocfs2_megabytes_to_clusters(sb, requested_mb);
}
osb->local_alloc_bits = osb->local_alloc_default_bits;
}
static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
osb->local_alloc_state == OCFS2_LA_ENABLED);
}
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
unsigned int num_clusters)
{
spin_lock(&osb->osb_lock);
if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
osb->local_alloc_state == OCFS2_LA_THROTTLED)
if (num_clusters >= osb->local_alloc_default_bits) {
cancel_delayed_work(&osb->la_enable_wq);
osb->local_alloc_state = OCFS2_LA_ENABLED;
}
spin_unlock(&osb->osb_lock);
}
void ocfs2_la_enable_worker(struct work_struct *work)
{
struct ocfs2_super *osb =
container_of(work, struct ocfs2_super,
la_enable_wq.work);
spin_lock(&osb->osb_lock);
osb->local_alloc_state = OCFS2_LA_ENABLED;
spin_unlock(&osb->osb_lock);
}
/*
* Tell us whether a given allocation should use the local alloc
* file. Otherwise, it has to go to the main bitmap.
*
* This function does semi-dirty reads of local alloc size and state!
* This is ok however, as the values are re-checked once under mutex.
*/
int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
{
int ret = 0;
int la_bits;
spin_lock(&osb->osb_lock);
la_bits = osb->local_alloc_bits;
if (!ocfs2_la_state_enabled(osb))
goto bail;
/* la_bits should be at least twice the size (in clusters) of
* a new block group. We want to be sure block group
* allocations go through the local alloc, so allow an
* allocation to take up to half the bitmap. */
if (bits > (la_bits / 2))
goto bail;
ret = 1;
bail:
trace_ocfs2_alloc_should_use_local(
(unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
spin_unlock(&osb->osb_lock);
return ret;
}
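/*
 * The admission rule above, isolated: the local alloc serves a request
 * only while enabled and only if it consumes at most half the window,
 * which keeps room for block-group-sized allocations. A trivial sketch:
 */
static int sketch_should_use_local(int enabled, unsigned long long bits,
				   int la_bits)
{
	return enabled && bits <= (unsigned long long)(la_bits / 2);
}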
int ocfs2_load_local_alloc(struct ocfs2_super *osb)
{
int status = 0;
struct ocfs2_dinode *alloc = NULL;
struct buffer_head *alloc_bh = NULL;
u32 num_used;
struct inode *inode = NULL;
struct ocfs2_local_alloc *la;
if (osb->local_alloc_bits == 0)
goto bail;
if (osb->local_alloc_bits >= osb->bitmap_cpg) {
mlog(ML_NOTICE, "Requested local alloc window %d is larger "
"than max possible %u. Using defaults.\n",
osb->local_alloc_bits, (osb->bitmap_cpg - 1));
osb->local_alloc_bits =
ocfs2_megabytes_to_clusters(osb->sb,
ocfs2_la_default_mb(osb));
}
/* read the alloc off disk */
inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
osb->slot_num);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
goto bail;
}
status = ocfs2_read_inode_block_full(inode, &alloc_bh,
OCFS2_BH_IGNORE_CACHE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
if (!(le32_to_cpu(alloc->i_flags) &
(OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
status = -EINVAL;
goto bail;
}
if ((la->la_size == 0) ||
(le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
le16_to_cpu(la->la_size));
status = -EINVAL;
goto bail;
}
/* do a little verification. */
num_used = ocfs2_local_alloc_count_bits(alloc);
/* hopefully the local alloc has always been recovered before
* we load it. */
if (num_used
|| alloc->id1.bitmap1.i_used
|| alloc->id1.bitmap1.i_total
|| la->la_bm_off) {
mlog(ML_ERROR, "inconsistent detected, clean journal with"
" unrecovered local alloc, please run fsck.ocfs2!\n"
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
le32_to_cpu(alloc->id1.bitmap1.i_total),
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
status = -EINVAL;
goto bail;
}
osb->local_alloc_bh = alloc_bh;
osb->local_alloc_state = OCFS2_LA_ENABLED;
bail:
if (status < 0)
brelse(alloc_bh);
iput(inode);
trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
if (status)
mlog_errno(status);
return status;
}
/*
* return any unused bits to the bitmap and write out a clean
* local_alloc.
*
* The local alloc buffer head is taken from osb->local_alloc_bh; be
* warned that it *will* be brelse'd and NULL'd out here. */
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
{
int status;
handle_t *handle;
struct inode *local_alloc_inode = NULL;
struct buffer_head *bh = NULL;
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode = NULL;
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_dinode *alloc = NULL;
cancel_delayed_work(&osb->la_enable_wq);
if (osb->ocfs2_wq)
flush_workqueue(osb->ocfs2_wq);
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
local_alloc_inode =
ocfs2_get_system_file_inode(osb,
LOCAL_ALLOC_SYSTEM_INODE,
osb->slot_num);
if (!local_alloc_inode) {
status = -ENOENT;
mlog_errno(status);
goto out;
}
osb->local_alloc_state = OCFS2_LA_DISABLED;
ocfs2_resmap_uninit(&osb->osb_la_resmap);
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
status = -EINVAL;
mlog_errno(status);
goto out;
}
inode_lock(main_bm_inode);
status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
if (status < 0) {
mlog_errno(status);
goto out_mutex;
}
/* WINDOW_MOVE_CREDITS is a bit heavy... */
handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
if (IS_ERR(handle)) {
mlog_errno(PTR_ERR(handle));
handle = NULL;
goto out_unlock;
}
bh = osb->local_alloc_bh;
alloc = (struct ocfs2_dinode *) bh->b_data;
alloc_copy = kmemdup(alloc, bh->b_size, GFP_NOFS);
if (!alloc_copy) {
status = -ENOMEM;
goto out_commit;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
ocfs2_clear_local_alloc(alloc);
ocfs2_journal_dirty(handle, bh);
brelse(bh);
osb->local_alloc_bh = NULL;
osb->local_alloc_state = OCFS2_LA_UNUSED;
status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
main_bm_inode, main_bm_bh);
if (status < 0)
mlog_errno(status);
out_commit:
ocfs2_commit_trans(osb, handle);
out_unlock:
brelse(main_bm_bh);
ocfs2_inode_unlock(main_bm_inode, 1);
out_mutex:
inode_unlock(main_bm_inode);
iput(main_bm_inode);
out:
iput(local_alloc_inode);
kfree(alloc_copy);
}
/*
* We want to free the bitmap bits outside of any recovery context as
* we'll need a cluster lock to do so, but we must clear the local
* alloc before giving up the recovered nodes journal. To solve this,
* we kmalloc a copy of the local alloc before it is changed, for the
* caller to process with ocfs2_complete_local_alloc_recovery().
*/
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
int slot_num,
struct ocfs2_dinode **alloc_copy)
{
int status = 0;
struct buffer_head *alloc_bh = NULL;
struct inode *inode = NULL;
struct ocfs2_dinode *alloc;
trace_ocfs2_begin_local_alloc_recovery(slot_num);
*alloc_copy = NULL;
inode = ocfs2_get_system_file_inode(osb,
LOCAL_ALLOC_SYSTEM_INODE,
slot_num);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
goto bail;
}
inode_lock(inode);
status = ocfs2_read_inode_block_full(inode, &alloc_bh,
OCFS2_BH_IGNORE_CACHE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
*alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
if (!(*alloc_copy)) {
status = -ENOMEM;
goto bail;
}
memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);
alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
ocfs2_clear_local_alloc(alloc);
ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
if (status < 0)
mlog_errno(status);
bail:
if (status < 0) {
kfree(*alloc_copy);
*alloc_copy = NULL;
}
brelse(alloc_bh);
if (inode) {
inode_unlock(inode);
iput(inode);
}
if (status)
mlog_errno(status);
return status;
}
/*
* Step 2: By now, we've completed the journal recovery, we've stamped
* a clean local alloc on disk and dropped the node out of the
* recovery map. Dlm locks will no longer stall, so lets clear out the
* main bitmap.
*/
int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc)
{
int status;
handle_t *handle;
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode;
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
status = -EINVAL;
mlog_errno(status);
goto out;
}
inode_lock(main_bm_inode);
status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
if (status < 0) {
mlog_errno(status);
goto out_mutex;
}
handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto out_unlock;
}
/* we want the bitmap change to be recorded on disk asap */
handle->h_sync = 1;
status = ocfs2_sync_local_to_main(osb, handle, alloc,
main_bm_inode, main_bm_bh);
if (status < 0)
mlog_errno(status);
ocfs2_commit_trans(osb, handle);
out_unlock:
ocfs2_inode_unlock(main_bm_inode, 1);
out_mutex:
inode_unlock(main_bm_inode);
brelse(main_bm_bh);
iput(main_bm_inode);
out:
if (!status)
ocfs2_init_steal_slots(osb);
if (status)
mlog_errno(status);
return status;
}
/*
* make sure we've got at least bits_wanted contiguous bits in the
* local alloc. You lose them when you drop i_rwsem.
*
* We will add ourselves to the transaction passed in, but may start
* our own in order to shift windows.
*/
int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
u32 bits_wanted,
struct ocfs2_alloc_context *ac)
{
int status;
struct ocfs2_dinode *alloc;
struct inode *local_alloc_inode;
unsigned int free_bits;
BUG_ON(!ac);
local_alloc_inode =
ocfs2_get_system_file_inode(osb,
LOCAL_ALLOC_SYSTEM_INODE,
osb->slot_num);
if (!local_alloc_inode) {
status = -ENOENT;
mlog_errno(status);
goto bail;
}
inode_lock(local_alloc_inode);
/*
* We must double check state and allocator bits because
* another process may have changed them while holding i_rwsem.
*/
spin_lock(&osb->osb_lock);
if (!ocfs2_la_state_enabled(osb) ||
(bits_wanted > osb->local_alloc_bits)) {
spin_unlock(&osb->osb_lock);
status = -ENOSPC;
goto bail;
}
spin_unlock(&osb->osb_lock);
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
#ifdef CONFIG_OCFS2_DEBUG_FS
if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
ocfs2_local_alloc_count_bits(alloc)) {
status = ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
(unsigned long long)le64_to_cpu(alloc->i_blkno),
le32_to_cpu(alloc->id1.bitmap1.i_used),
ocfs2_local_alloc_count_bits(alloc));
goto bail;
}
#endif
free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
le32_to_cpu(alloc->id1.bitmap1.i_used);
if (bits_wanted > free_bits) {
/* uhoh, window change time. */
status =
ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
/*
* Under certain conditions, the window slide code
* might have reduced the number of bits available or
* disabled the local alloc entirely. Re-check
* here and return -ENOSPC if necessary.
*/
status = -ENOSPC;
if (!ocfs2_la_state_enabled(osb))
goto bail;
free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
le32_to_cpu(alloc->id1.bitmap1.i_used);
if (bits_wanted > free_bits)
goto bail;
}
ac->ac_inode = local_alloc_inode;
/* We should never use localalloc from another slot */
ac->ac_alloc_slot = osb->slot_num;
ac->ac_which = OCFS2_AC_USE_LOCAL;
get_bh(osb->local_alloc_bh);
ac->ac_bh = osb->local_alloc_bh;
status = 0;
bail:
if (status < 0 && local_alloc_inode) {
inode_unlock(local_alloc_inode);
iput(local_alloc_inode);
}
trace_ocfs2_reserve_local_alloc_bits(
(unsigned long long)ac->ac_max_block,
bits_wanted, osb->slot_num, status);
if (status)
mlog_errno(status);
return status;
}
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bits_wanted,
u32 *bit_off,
u32 *num_bits)
{
int status, start;
struct inode *local_alloc_inode;
void *bitmap;
struct ocfs2_dinode *alloc;
struct ocfs2_local_alloc *la;
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
local_alloc_inode = ac->ac_inode;
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
ac->ac_resv);
if (start == -1) {
/* TODO: Shouldn't we just BUG here? */
status = -ENOSPC;
mlog_errno(status);
goto bail;
}
bitmap = la->la_bitmap;
*bit_off = le32_to_cpu(la->la_bm_off) + start;
*num_bits = bits_wanted;
status = ocfs2_journal_access_di(handle,
INODE_CACHE(local_alloc_inode),
osb->local_alloc_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
bits_wanted);
	while (bits_wanted--)
ocfs2_set_bit(start++, bitmap);
le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
ocfs2_journal_dirty(handle, osb->local_alloc_bh);
bail:
if (status)
mlog_errno(status);
return status;
}
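/*
 * Worked example with illustrative numbers: if the current window starts
 * at main-bitmap cluster 1000 (la_bm_off == 1000) and the search finds a
 * run at local bit 5 with bits_wanted == 3, then *bit_off becomes 1005,
 * local bits 5..7 get set and i_used grows by 3. The local bit offset is
 * purely window-relative; la_bm_off translates it back to a global
 * cluster number.
 */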
int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bit_off,
u32 num_bits)
{
int status, start;
u32 clear_bits;
struct inode *local_alloc_inode;
void *bitmap;
struct ocfs2_dinode *alloc;
struct ocfs2_local_alloc *la;
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
local_alloc_inode = ac->ac_inode;
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
bitmap = la->la_bitmap;
start = bit_off - le32_to_cpu(la->la_bm_off);
clear_bits = num_bits;
status = ocfs2_journal_access_di(handle,
INODE_CACHE(local_alloc_inode),
osb->local_alloc_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
while (clear_bits--)
ocfs2_clear_bit(start++, bitmap);
le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
ocfs2_journal_dirty(handle, osb->local_alloc_bh);
bail:
return status;
}
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
u32 count;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));
trace_ocfs2_local_alloc_count_bits(count);
return count;
}
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc,
u32 *numbits,
struct ocfs2_alloc_reservation *resv)
{
int numfound = 0, bitoff, left, startoff;
int local_resv = 0;
struct ocfs2_alloc_reservation r;
void *bitmap = NULL;
struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
if (!alloc->id1.bitmap1.i_total) {
bitoff = -1;
goto bail;
}
if (!resv) {
local_resv = 1;
ocfs2_resv_init_once(&r);
ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
resv = &r;
}
numfound = *numbits;
if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
if (numfound < *numbits)
*numbits = numfound;
goto bail;
}
/*
* Code error. While reservations are enabled, local
* allocation should _always_ go through them.
*/
BUG_ON(osb->osb_resv_level != 0);
/*
* Reservations are disabled. Handle this the old way.
*/
bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
numfound = bitoff = startoff = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
if (bitoff == left) {
/* mlog(0, "bitoff (%d) == left", bitoff); */
break;
}
/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
"numfound = %d\n", bitoff, startoff, numfound);*/
/* Ok, we found a zero bit... is it contig. or do we
* start over?*/
if (bitoff == startoff) {
/* we found a zero */
numfound++;
startoff++;
} else {
/* got a zero after some ones */
numfound = 1;
startoff = bitoff+1;
}
/* we got everything we needed */
if (numfound == *numbits) {
/* mlog(0, "Found it all!\n"); */
break;
}
}
trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
if (numfound == *numbits)
bitoff = startoff - numfound;
else
bitoff = -1;
bail:
if (local_resv)
ocfs2_resv_discard(resmap, resv);
trace_ocfs2_local_alloc_find_clear_bits(*numbits,
le32_to_cpu(alloc->id1.bitmap1.i_total),
bitoff, numfound);
return bitoff;
}
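/*
 * Worked example of the fallback scan above (reservations disabled,
 * numbers illustrative): with bits 0, 1 and 4 set and bits 2, 3, 5, 6, 7
 * clear, a request for 3 bits skips the two-bit run at 2..3, accumulates
 * the run at 5..7 and returns bitoff == 5 (startoff - numfound == 8 - 3).
 */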
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
int i;
alloc->id1.bitmap1.i_total = 0;
alloc->id1.bitmap1.i_used = 0;
la->la_bm_off = 0;
	for (i = 0; i < le16_to_cpu(la->la_size); i++)
la->la_bitmap[i] = 0;
}
#if 0
/* turn this on and uncomment below to aid debugging window shifts. */
static void ocfs2_verify_zero_bits(unsigned long *bitmap,
unsigned int start,
unsigned int count)
{
unsigned int tmp = count;
	while (tmp--) {
if (ocfs2_test_bit(start + tmp, bitmap)) {
printk("ocfs2_verify_zero_bits: start = %u, count = "
"%u\n", start, count);
printk("ocfs2_verify_zero_bits: bit %u is set!",
start + tmp);
BUG();
}
}
}
#endif
/*
* sync the local alloc to main bitmap.
*
* assumes you've already locked the main bitmap -- the bitmap inode
* passed is used for caching.
*/
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_dinode *alloc,
struct inode *main_bm_inode,
struct buffer_head *main_bm_bh)
{
int status = 0;
int bit_off, left, count, start;
u64 la_start_blk;
u64 blkno;
void *bitmap;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
trace_ocfs2_sync_local_to_main(
le32_to_cpu(alloc->id1.bitmap1.i_total),
le32_to_cpu(alloc->id1.bitmap1.i_used));
if (!alloc->id1.bitmap1.i_total) {
goto bail;
}
if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
le32_to_cpu(alloc->id1.bitmap1.i_total)) {
goto bail;
}
la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
le32_to_cpu(la->la_bm_off));
bitmap = la->la_bitmap;
start = count = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
!= -1) {
if ((bit_off < left) && (bit_off == start)) {
count++;
start++;
continue;
}
if (count) {
blkno = la_start_blk +
ocfs2_clusters_to_blocks(osb->sb,
start - count);
trace_ocfs2_sync_local_to_main_free(
count, start - count,
(unsigned long long)la_start_blk,
(unsigned long long)blkno);
status = ocfs2_release_clusters(handle,
main_bm_inode,
main_bm_bh, blkno,
count);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
if (bit_off >= left)
break;
count = 1;
start = bit_off + 1;
}
bail:
if (status)
mlog_errno(status);
return status;
}
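/*
 * Worked example (illustrative): for a window of 8 bits where bits 0..2
 * are set (handed out to files) and bits 3..7 are clear, the scan above
 * accumulates one run of 5 zero bits and releases 5 clusters starting at
 * cluster la_bm_off + 3 back to the global bitmap. Set bits are never
 * released here - they describe space the local alloc actually consumed.
 */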
enum ocfs2_la_event {
OCFS2_LA_EVENT_SLIDE, /* Normal window slide. */
OCFS2_LA_EVENT_FRAGMENTED, /* The global bitmap has
* enough bits theoretically
* free, but a contiguous
* allocation could not be
* found. */
OCFS2_LA_EVENT_ENOSPC, /* Global bitmap doesn't have
* enough bits free to satisfy
* our request. */
};
#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
/*
* Given an event, calculate the size of our next local alloc window.
*
* This should always be called under i_rwsem of the local alloc inode
* so that local alloc disabling doesn't race with processes trying to
* use the allocator.
*
* Returns the state which the local alloc was left in. This value can
* be ignored by some paths.
*/
static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
enum ocfs2_la_event event)
{
unsigned int bits;
int state;
spin_lock(&osb->osb_lock);
	if (WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED))
		goto out_unlock;
/*
* ENOSPC and fragmentation are treated similarly for now.
*/
if (event == OCFS2_LA_EVENT_ENOSPC ||
event == OCFS2_LA_EVENT_FRAGMENTED) {
/*
* We ran out of contiguous space in the primary
* bitmap. Drastically reduce the number of bits used
* by local alloc until we have to disable it.
*/
bits = osb->local_alloc_bits >> 1;
if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
/*
* By setting state to THROTTLED, we'll keep
* the number of local alloc bits used down
* until an event occurs which would give us
* reason to assume the bitmap situation might
* have changed.
*/
osb->local_alloc_state = OCFS2_LA_THROTTLED;
osb->local_alloc_bits = bits;
} else {
osb->local_alloc_state = OCFS2_LA_DISABLED;
}
queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq,
OCFS2_LA_ENABLE_INTERVAL);
goto out_unlock;
}
/*
* Don't increase the size of the local alloc window until we
* know we might be able to fulfill the request. Otherwise, we
* risk bouncing around the global bitmap during periods of
* low space.
*/
if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
osb->local_alloc_bits = osb->local_alloc_default_bits;
out_unlock:
state = osb->local_alloc_state;
spin_unlock(&osb->osb_lock);
return state;
}
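/*
 * Example of the throttling math (numbers illustrative): with
 * local_alloc_bits == 2048, an ENOSPC or fragmentation event halves the
 * window to 1024 bits and marks the allocator THROTTLED, as long as the
 * halved value still exceeds one megabyte worth of clusters; otherwise
 * the allocator is disabled outright. Either way, la_enable_wq
 * re-evaluates the situation after OCFS2_LA_ENABLE_INTERVAL (30s).
 */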
static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac,
struct inode **bitmap_inode,
struct buffer_head **bitmap_bh)
{
int status;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
retry_enospc:
(*ac)->ac_bits_wanted = osb->local_alloc_bits;
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
if (status == -ENOSPC) {
if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
OCFS2_LA_DISABLED)
goto bail;
ocfs2_free_ac_resource(*ac);
memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
goto retry_enospc;
}
if (status < 0) {
mlog_errno(status);
goto bail;
}
*bitmap_inode = (*ac)->ac_inode;
igrab(*bitmap_inode);
*bitmap_bh = (*ac)->ac_bh;
get_bh(*bitmap_bh);
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
/*
* pass it the bitmap lock in lock_bh if you have it.
*/
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac)
{
int status = 0;
u32 cluster_off, cluster_count;
struct ocfs2_dinode *alloc = NULL;
struct ocfs2_local_alloc *la;
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
trace_ocfs2_local_alloc_new_window(
le32_to_cpu(alloc->id1.bitmap1.i_total),
osb->local_alloc_bits);
/* Instruct the allocation code to try the most recently used
* cluster group. We'll re-record the group used this pass
* below. */
ac->ac_last_group = osb->la_last_gd;
/* we used the generic suballoc reserve function, but we set
* everything up nicely, so there's no reason why we can't use
* the more specific cluster api to claim bits. */
status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
&cluster_off, &cluster_count);
if (status == -ENOSPC) {
retry_enospc:
/*
* Note: We could also try syncing the journal here to
* allow use of any free bits which the current
* transaction can't give us access to. --Mark
*/
if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
OCFS2_LA_DISABLED)
goto bail;
ac->ac_bits_wanted = osb->local_alloc_bits;
status = ocfs2_claim_clusters(handle, ac,
osb->local_alloc_bits,
&cluster_off,
&cluster_count);
if (status == -ENOSPC)
goto retry_enospc;
/*
		 * We only shrunk the *minimum* number of bits in our
* request - it's entirely possible that the allocator
* might give us more than we asked for.
*/
if (status == 0) {
spin_lock(&osb->osb_lock);
osb->local_alloc_bits = cluster_count;
spin_unlock(&osb->osb_lock);
}
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
osb->la_last_gd = ac->ac_last_group;
la->la_bm_off = cpu_to_le32(cluster_off);
alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
/* just in case... In the future when we find space ourselves,
* we don't have to get all contiguous -- but we'll have to
* set all previously used bits in bitmap and update
* la_bits_set before setting the bits in the main bitmap. */
alloc->id1.bitmap1.i_used = 0;
memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
le16_to_cpu(la->la_size));
ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
trace_ocfs2_local_alloc_new_window_result(
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
le32_to_cpu(alloc->id1.bitmap1.i_total));
bail:
if (status)
mlog_errno(status);
return status;
}
/* Note that we do *NOT* lock the local alloc inode here as
* it's been locked already for us. */
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct inode *local_alloc_inode)
{
int status = 0;
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode = NULL;
handle_t *handle = NULL;
struct ocfs2_dinode *alloc;
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_alloc_context *ac = NULL;
ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
/* This will lock the main bitmap for us. */
status = ocfs2_local_alloc_reserve_for_window(osb,
&ac,
&main_bm_inode,
&main_bm_bh);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
/* We want to clear the local alloc before doing anything
* else, so that if we error later during this operation,
* local alloc shutdown won't try to double free main bitmap
* bits. Make a copy so the sync function knows which bits to
* free. */
alloc_copy = kmemdup(alloc, osb->local_alloc_bh->b_size, GFP_NOFS);
if (!alloc_copy) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
status = ocfs2_journal_access_di(handle,
INODE_CACHE(local_alloc_inode),
osb->local_alloc_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
ocfs2_clear_local_alloc(alloc);
ocfs2_journal_dirty(handle, osb->local_alloc_bh);
status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
main_bm_inode, main_bm_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_local_alloc_new_window(osb, handle, ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
atomic_inc(&osb->alloc_stats.moves);
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
brelse(main_bm_bh);
iput(main_bm_inode);
kfree(alloc_copy);
if (ac)
ocfs2_free_alloc_context(ac);
if (status)
mlog_errno(status);
return status;
}
| linux-master | fs/ocfs2/localalloc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int err = -EIO;
int status;
struct ocfs2_dinode *fe = NULL;
struct buffer_head *bh = NULL;
struct buffer_head *buffer_cache_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
void *kaddr;
trace_ocfs2_symlink_get_block(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)iblock, bh_result, create);
BUG_ON(ocfs2_inode_is_fast_symlink(inode));
if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
(unsigned long long)iblock);
goto bail;
}
status = ocfs2_read_inode_block(inode, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
fe = (struct ocfs2_dinode *) bh->b_data;
if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
le32_to_cpu(fe->i_clusters))) {
err = -ENOMEM;
mlog(ML_ERROR, "block offset is outside the allocated size: "
"%llu\n", (unsigned long long)iblock);
goto bail;
}
/* We don't use the page cache to create symlink data, so if
* need be, copy it over from the buffer cache. */
if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
iblock;
buffer_cache_bh = sb_getblk(osb->sb, blkno);
if (!buffer_cache_bh) {
err = -ENOMEM;
mlog(ML_ERROR, "couldn't getblock for symlink!\n");
goto bail;
}
/* we haven't locked out transactions, so a commit
* could've happened. Since we've got a reference on
* the bh, even if it commits while we're doing the
* copy, the data is still good. */
if (buffer_jbd(buffer_cache_bh)
&& ocfs2_inode_is_new(inode)) {
kaddr = kmap_atomic(bh_result->b_page);
if (!kaddr) {
mlog(ML_ERROR, "couldn't kmap!\n");
goto bail;
}
memcpy(kaddr + (bh_result->b_size * iblock),
buffer_cache_bh->b_data,
bh_result->b_size);
kunmap_atomic(kaddr);
set_buffer_uptodate(bh_result);
}
brelse(buffer_cache_bh);
}
map_bh(bh_result, inode->i_sb,
le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
err = 0;
bail:
brelse(bh);
return err;
}
static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
down_read(&oi->ip_alloc_sem);
ret = ocfs2_get_block(inode, iblock, bh_result, create);
up_read(&oi->ip_alloc_sem);
return ret;
}
int ocfs2_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int err = 0;
unsigned int ext_flags;
u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
u64 p_blkno, count, past_eof;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)iblock, bh_result, create);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
inode, inode->i_ino);
if (S_ISLNK(inode->i_mode)) {
/* this always does I/O for some reason. */
err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
goto bail;
}
err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
&ext_flags);
if (err) {
mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
"%llu, NULL)\n", err, inode, (unsigned long long)iblock,
(unsigned long long)p_blkno);
goto bail;
}
if (max_blocks < count)
count = max_blocks;
/*
* ocfs2 never allocates in this function - the only time we
* need to use BH_New is when we're extending i_size on a file
* system which doesn't support holes, in which case BH_New
* allows __block_write_begin() to zero.
*
* If we see this on a sparse file system, then a truncate has
* raced us and removed the cluster. In this case, we clear
* the buffers dirty and uptodate bits and let the buffer code
* ignore it as a hole.
*/
if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
clear_buffer_dirty(bh_result);
clear_buffer_uptodate(bh_result);
goto bail;
}
/* Treat the unwritten extent as a hole for zeroing purposes. */
if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
map_bh(bh_result, inode->i_sb, p_blkno);
bh_result->b_size = count << inode->i_blkbits;
if (!ocfs2_sparse_alloc(osb)) {
if (p_blkno == 0) {
err = -EIO;
mlog(ML_ERROR,
"iblock = %llu p_blkno = %llu blkno=(%llu)\n",
(unsigned long long)iblock,
(unsigned long long)p_blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
dump_stack();
goto bail;
}
}
past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)past_eof);
if (create && (iblock >= past_eof))
set_buffer_new(bh_result);
bail:
if (err < 0)
err = -EIO;
return err;
}
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
struct buffer_head *di_bh)
{
void *kaddr;
loff_t size;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
return -EROFS;
}
size = i_size_read(inode);
if (size > PAGE_SIZE ||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
ocfs2_error(inode->i_sb,
"Inode %llu has with inline data has bad size: %Lu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)size);
return -EROFS;
}
kaddr = kmap_atomic(page);
if (size)
memcpy(kaddr, di->id2.i_data.id_data, size);
/* Clear the remaining part of the page */
memset(kaddr + size, 0, PAGE_SIZE - size);
flush_dcache_page(page);
kunmap_atomic(kaddr);
SetPageUptodate(page);
return 0;
}
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
int ret;
struct buffer_head *di_bh = NULL;
BUG_ON(!PageLocked(page));
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
unlock_page(page);
brelse(di_bh);
return ret;
}
static int ocfs2_read_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio->mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
loff_t start = folio_pos(folio);
int ret, unlock = 1;
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
if (ret != 0) {
if (ret == AOP_TRUNCATED_PAGE)
unlock = 0;
mlog_errno(ret);
goto out;
}
if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
/*
* Unlock the folio and cycle ip_alloc_sem so that we don't
* busyloop waiting for ip_alloc_sem to unlock
*/
ret = AOP_TRUNCATED_PAGE;
folio_unlock(folio);
unlock = 0;
down_read(&oi->ip_alloc_sem);
up_read(&oi->ip_alloc_sem);
goto out_inode_unlock;
}
/*
	 * i_size might have just been updated as we grabbed the meta lock. We
* might now be discovering a truncate that hit on another node.
* block_read_full_folio->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
* (generic_file_read, vm_ops->fault) are clever enough to check i_size
* and notice that the folio they just read isn't needed.
*
* XXX sys_readahead() seems to get that wrong?
*/
if (start >= i_size_read(inode)) {
folio_zero_segment(folio, 0, folio_size(folio));
folio_mark_uptodate(folio);
ret = 0;
goto out_alloc;
}
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
ret = ocfs2_readpage_inline(inode, &folio->page);
else
ret = block_read_full_folio(folio, ocfs2_get_block);
unlock = 0;
out_alloc:
up_read(&oi->ip_alloc_sem);
out_inode_unlock:
ocfs2_inode_unlock(inode, 0);
out:
if (unlock)
folio_unlock(folio);
return ret;
}
/*
* This is used only for read-ahead. Failures or difficult to handle
* situations are safe to ignore.
*
* Right now, we don't bother with BH_Boundary - in-inode extent lists
* are quite large (243 extents on 4k blocks), so most inodes don't
* grow out to a tree. If need be, detecting boundary extents could
* trivially be added in a future version of ocfs2_get_block().
*/
static void ocfs2_readahead(struct readahead_control *rac)
{
int ret;
struct inode *inode = rac->mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
/*
* Use the nonblocking flag for the dlm code to avoid page
* lock inversion, but don't bother with retrying.
*/
ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
if (ret)
return;
if (down_read_trylock(&oi->ip_alloc_sem) == 0)
goto out_unlock;
/*
* Don't bother with inline-data. There isn't anything
* to read-ahead in that case anyway...
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
goto out_up;
/*
* Check whether a remote node truncated this file - we just
* drop out in that case as it's not worth handling here.
*/
if (readahead_pos(rac) >= i_size_read(inode))
goto out_up;
mpage_readahead(rac, ocfs2_get_block);
out_up:
up_read(&oi->ip_alloc_sem);
out_unlock:
ocfs2_inode_unlock(inode, 0);
}
/* Note: Because we don't support holes, our allocation has
* already happened (allocation writes zeros to the file data)
* so we don't have to worry about ordered writes in
* ocfs2_writepage.
*
* ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
* mapping can't have disappeared under the dirty pages that it is
* being asked to write back.
*/
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
trace_ocfs2_writepage(
(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
page->index);
return block_write_full_page(page, ocfs2_get_block, wbc);
}
/* Taken from ext3. We don't necessarily need the full blown
* functionality yet, but IMHO it's better to cut and paste the whole
* thing so we can avoid introducing our own bugs (and easily pick up
* their fixes when they happen) --Mark */
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
unsigned from,
unsigned to,
int *partial,
int (*fn)( handle_t *handle,
struct buffer_head *bh))
{
struct buffer_head *bh;
unsigned block_start, block_end;
unsigned blocksize = head->b_size;
int err, ret = 0;
struct buffer_head *next;
for ( bh = head, block_start = 0;
ret == 0 && (bh != head || !block_start);
block_start = block_end, bh = next)
{
next = bh->b_this_page;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (partial && !buffer_uptodate(bh))
*partial = 1;
continue;
}
err = (*fn)(handle, bh);
if (!ret)
ret = err;
}
return ret;
}
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
sector_t status;
u64 p_blkno = 0;
int err = 0;
struct inode *inode = mapping->host;
trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)block);
/*
* The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on refcounted inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
*/
if (ocfs2_is_refcount_inode(inode))
return 0;
/* We don't need to lock journal system files, since they aren't
* accessed concurrently from multiple nodes.
*/
if (!INODE_JOURNAL(inode)) {
err = ocfs2_inode_lock(inode, NULL, 0);
if (err) {
if (err != -ENOENT)
mlog_errno(err);
goto bail;
}
down_read(&OCFS2_I(inode)->ip_alloc_sem);
}
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
NULL);
if (!INODE_JOURNAL(inode)) {
up_read(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_inode_unlock(inode, 0);
}
if (err) {
mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
(unsigned long long)block);
mlog_errno(err);
goto bail;
}
bail:
status = err ? 0 : p_blkno;
return status;
}
static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
{
if (!folio_buffers(folio))
return false;
return try_to_free_buffers(folio);
}
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
u32 cpos,
unsigned int *start,
unsigned int *end)
{
unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
unsigned int cpp;
cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
cluster_start = cpos % cpp;
cluster_start = cluster_start << osb->s_clustersize_bits;
cluster_end = cluster_start + osb->s_clustersize;
}
BUG_ON(cluster_start > PAGE_SIZE);
BUG_ON(cluster_end > PAGE_SIZE);
if (start)
*start = cluster_start;
if (end)
*end = cluster_end;
}
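/*
 * Worked example (illustrative): with 64K pages (PAGE_SHIFT == 16) and
 * 4K clusters (s_clustersize_bits == 12), cpp == 16 clusters per page.
 * For cpos == 37, cluster_start == (37 % 16) << 12 == 20480 and
 * cluster_end == 24576, i.e. the sixth 4K slot within the page.
 */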
/*
* 'from' and 'to' are the region in the page to avoid zeroing.
*
* If pagesize > clustersize, this function will avoid zeroing outside
* of the cluster boundary.
*
* from == to == 0 is code for "zero the entire cluster region"
*/
static void ocfs2_clear_page_regions(struct page *page,
struct ocfs2_super *osb, u32 cpos,
unsigned from, unsigned to)
{
void *kaddr;
unsigned int cluster_start, cluster_end;
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
kaddr = kmap_atomic(page);
if (from || to) {
if (from > cluster_start)
memset(kaddr + cluster_start, 0, from - cluster_start);
if (to < cluster_end)
memset(kaddr + to, 0, cluster_end - to);
} else {
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
}
kunmap_atomic(kaddr);
}
/*
* Nonsparse file systems fully allocate before we get to the write
* code. This prevents ocfs2_write() from tagging the write as an
* allocating one, which means ocfs2_map_page_blocks() might try to
* read-in the blocks at the tail of our file. Avoid reading them by
* testing i_size against each block offset.
*/
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
unsigned int block_start)
{
u64 offset = page_offset(page) + block_start;
if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
return 1;
if (i_size_read(inode) > offset)
return 1;
return 0;
}
/*
* Some of this taken from __block_write_begin(). We already have our
* mapping by now though, and the entire write will be allocating or
* it won't, so not much need to use BH_New.
*
* This will also skip zeroing, which is handled externally.
*/
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new)
{
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
unsigned int bsize = i_blocksize(inode);
if (!page_has_buffers(page))
create_empty_buffers(page, bsize, 0);
head = page_buffers(page);
for (bh = head, block_start = 0; bh != head || !block_start;
bh = bh->b_this_page, block_start += bsize) {
block_end = block_start + bsize;
clear_buffer_new(bh);
/*
* Ignore blocks outside of our i/o range -
* they may belong to unallocated clusters.
*/
if (block_start >= to || block_end <= from) {
if (PageUptodate(page))
set_buffer_uptodate(bh);
continue;
}
/*
* For an allocating write with cluster size >= page
* size, we always write the entire page.
*/
if (new)
set_buffer_new(bh);
if (!buffer_mapped(bh)) {
map_bh(bh, inode->i_sb, *p_blkno);
clean_bdev_bh_alias(bh);
}
if (PageUptodate(page)) {
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_new(bh) &&
ocfs2_should_read_blk(inode, page, block_start) &&
(block_start < from || block_end > to)) {
bh_read_nowait(bh, 0);
			*wait_bh++ = bh;
}
*p_blkno = *p_blkno + 1;
}
/*
* If we issued read requests - let them complete.
*/
	while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
ret = -EIO;
}
if (ret == 0 || !new)
return ret;
/*
* If we get -EIO above, zero out any newly allocated blocks
* to avoid exposing stale data.
*/
bh = head;
block_start = 0;
do {
block_end = block_start + bsize;
if (block_end <= from)
goto next_bh;
if (block_start >= to)
break;
zero_user(page, block_start, bh->b_size);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
next_bh:
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
return ret;
}
#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES 1
#else
#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif
#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
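/*
 * With the commonly assumed limits (4K pages, OCFS2_MIN_CLUSTERSIZE of
 * 4K and OCFS2_MAX_CLUSTERSIZE of 1M), this works out to
 * OCFS2_MAX_CTXT_PAGES == 256 and OCFS2_MAX_CLUSTERS_PER_PAGE == 1:
 * a write context never spans more than one cluster's worth of pages,
 * and a page never spans more than PAGE_SIZE / min-cluster-size clusters.
 */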
struct ocfs2_unwritten_extent {
struct list_head ue_node;
struct list_head ue_ip_node;
u32 ue_cpos;
u32 ue_phys;
};
/*
* Describe the state of a single cluster to be written to.
*/
struct ocfs2_write_cluster_desc {
u32 c_cpos;
u32 c_phys;
/*
* Give this a unique field because c_phys eventually gets
* filled.
*/
unsigned c_new;
unsigned c_clear_unwritten;
unsigned c_needs_zero;
};
struct ocfs2_write_ctxt {
/* Logical cluster position / len of write */
u32 w_cpos;
u32 w_clen;
/* First cluster allocated in a nonsparse extend */
u32 w_first_new_cpos;
/* Type of caller. Must be one of buffer, mmap, direct. */
ocfs2_write_type_t w_type;
struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
/*
* This is true if page_size > cluster_size.
*
* It triggers a set of special cases during write which might
* have to deal with allocating writes to partial pages.
*/
unsigned int w_large_pages;
/*
* Pages involved in this write.
*
* w_target_page is the page being written to by the user.
*
* w_pages is an array of pages which always contains
* w_target_page, and in the case of an allocating write with
* page_size < cluster size, it will contain zero'd and mapped
* pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get
	 * zeros.
*/
unsigned int w_num_pages;
struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
struct page *w_target_page;
/*
* w_target_locked is used for page_mkwrite path indicating no unlocking
* against w_target_page in ocfs2_write_end_nolock.
*/
unsigned int w_target_locked:1;
/*
* ocfs2_write_end() uses this to know what the real range to
* write in the target should be.
*/
unsigned int w_target_from;
unsigned int w_target_to;
/*
* We could use journal_current_handle() but this is cleaner,
* IMHO -Mark
*/
handle_t *w_handle;
struct buffer_head *w_di_bh;
struct ocfs2_cached_dealloc_ctxt w_dealloc;
struct list_head w_unwritten_list;
unsigned int w_unwritten_count;
};
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
int i;
	for (i = 0; i < num_pages; i++) {
if (pages[i]) {
unlock_page(pages[i]);
mark_page_accessed(pages[i]);
put_page(pages[i]);
}
}
}
static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
int i;
/*
* w_target_locked is only set to true in the page_mkwrite() case.
* The intent is to allow us to lock the target page from write_begin()
* to write_end(). The caller must hold a ref on w_target_page.
*/
if (wc->w_target_locked) {
BUG_ON(!wc->w_target_page);
for (i = 0; i < wc->w_num_pages; i++) {
if (wc->w_target_page == wc->w_pages[i]) {
wc->w_pages[i] = NULL;
break;
}
}
mark_page_accessed(wc->w_target_page);
put_page(wc->w_target_page);
}
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}
static void ocfs2_free_unwritten_list(struct inode *inode,
struct list_head *head)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;
list_for_each_entry_safe(ue, tmp, head, ue_node) {
list_del(&ue->ue_node);
spin_lock(&oi->ip_lock);
list_del(&ue->ue_ip_node);
spin_unlock(&oi->ip_lock);
kfree(ue);
}
}
static void ocfs2_free_write_ctxt(struct inode *inode,
struct ocfs2_write_ctxt *wc)
{
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
ocfs2_unlock_pages(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
struct ocfs2_super *osb, loff_t pos,
unsigned len, ocfs2_write_type_t type,
struct buffer_head *di_bh)
{
u32 cend;
struct ocfs2_write_ctxt *wc;
wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
if (!wc)
return -ENOMEM;
wc->w_cpos = pos >> osb->s_clustersize_bits;
wc->w_first_new_cpos = UINT_MAX;
cend = (pos + len - 1) >> osb->s_clustersize_bits;
wc->w_clen = cend - wc->w_cpos + 1;
get_bh(di_bh);
wc->w_di_bh = di_bh;
wc->w_type = type;
if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
wc->w_large_pages = 1;
else
wc->w_large_pages = 0;
ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
INIT_LIST_HEAD(&wc->w_unwritten_list);
*wcp = wc;
return 0;
}
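/*
 * Worked example (illustrative): with 4K clusters
 * (s_clustersize_bits == 12), pos == 5000 and len == 8000 give
 * w_cpos == 1, cend == (12999 >> 12) == 3 and w_clen == 3 - the write
 * touches logical clusters 1, 2 and 3 even though it is only 8000
 * bytes long.
 */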
/*
* If a page has any new buffers, zero them out here, and mark them uptodate
* and dirty so they'll be written out (in order to prevent uninitialised
* block data from leaking). And clear the new bit.
*/
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
unsigned int block_start, block_end;
struct buffer_head *head, *bh;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
return;
bh = head = page_buffers(page);
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
if (!PageUptodate(page)) {
unsigned start, end;
start = max(from, block_start);
end = min(to, block_end);
zero_user_segment(page, start, end);
set_buffer_uptodate(bh);
}
clear_buffer_new(bh);
mark_buffer_dirty(bh);
}
}
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
}
/*
* Only called when we have a failure during allocating write to write
* zero's to the newly allocated region.
*/
static void ocfs2_write_failure(struct inode *inode,
struct ocfs2_write_ctxt *wc,
loff_t user_pos, unsigned user_len)
{
int i;
unsigned from = user_pos & (PAGE_SIZE - 1),
to = user_pos + user_len;
struct page *tmppage;
if (wc->w_target_page)
ocfs2_zero_new_buffers(wc->w_target_page, from, to);
	for (i = 0; i < wc->w_num_pages; i++) {
tmppage = wc->w_pages[i];
if (tmppage && page_has_buffers(tmppage)) {
if (ocfs2_should_order_data(inode))
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
block_commit_write(tmppage, from, to);
}
}
}
static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
struct ocfs2_write_ctxt *wc,
struct page *page, u32 cpos,
loff_t user_pos, unsigned user_len,
int new)
{
int ret;
unsigned int map_from = 0, map_to = 0;
unsigned int cluster_start, cluster_end;
unsigned int user_data_from = 0, user_data_to = 0;
ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
&cluster_start, &cluster_end);
	/* treat the write as new if a hole/lseek spanned across
* the page boundary.
*/
new = new | ((i_size_read(inode) <= page_offset(page)) &&
(page_offset(page) <= user_pos));
if (page == wc->w_target_page) {
map_from = user_pos & (PAGE_SIZE - 1);
map_to = map_from + user_len;
if (new)
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
cluster_start, cluster_end,
new);
else
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
map_from, map_to, new);
if (ret) {
mlog_errno(ret);
goto out;
}
user_data_from = map_from;
user_data_to = map_to;
if (new) {
map_from = cluster_start;
map_to = cluster_end;
}
} else {
/*
* If we haven't allocated the new page yet, we
* shouldn't be writing it out without copying user
* data. This is likely a math error from the caller.
*/
BUG_ON(!new);
map_from = cluster_start;
map_to = cluster_end;
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
cluster_start, cluster_end, new);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* Parts of newly allocated pages need to be zero'd.
*
* Above, we have also rewritten 'to' and 'from' - as far as
* the rest of the function is concerned, the entire cluster
* range inside of a page needs to be written.
*
* We can skip this if the page is up to date - it's already
* been zero'd from being read in as a hole.
*/
if (new && !PageUptodate(page))
ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
cpos, user_data_from, user_data_to);
flush_dcache_page(page);
out:
return ret;
}
/*
 * This function will only grab one cluster's worth of pages.
*/
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
struct ocfs2_write_ctxt *wc,
u32 cpos, loff_t user_pos,
unsigned user_len, int new,
struct page *mmap_page)
{
int ret = 0, i;
unsigned long start, target_index, end_index, index;
struct inode *inode = mapping->host;
loff_t last_byte;
target_index = user_pos >> PAGE_SHIFT;
/*
* Figure out how many pages we'll be manipulating here. For
	 * non-allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth. If we're
* writing past i_size, we only need enough pages to cover the
* last page of the write.
*/
if (new) {
wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
/*
* We need the index *past* the last page we could possibly
* touch. This is the page past the end of the write or
* i_size, whichever is greater.
*/
last_byte = max(user_pos + user_len, i_size_read(inode));
BUG_ON(last_byte < 1);
end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
if ((start + wc->w_num_pages) > end_index)
wc->w_num_pages = end_index - start;
} else {
wc->w_num_pages = 1;
start = target_index;
}
end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
	for (i = 0; i < wc->w_num_pages; i++) {
index = start + i;
if (index >= target_index && index <= end_index &&
wc->w_type == OCFS2_WRITE_MMAP) {
/*
* ocfs2_pagemkwrite() is a little different
* and wants us to directly use the page
* passed in.
*/
lock_page(mmap_page);
/* Exit and let the caller retry */
if (mmap_page->mapping != mapping) {
WARN_ON(mmap_page->mapping);
unlock_page(mmap_page);
ret = -EAGAIN;
goto out;
}
get_page(mmap_page);
wc->w_pages[i] = mmap_page;
wc->w_target_locked = true;
} else if (index >= target_index && index <= end_index &&
wc->w_type == OCFS2_WRITE_DIRECT) {
/* Direct write has no mapping page. */
wc->w_pages[i] = NULL;
continue;
} else {
wc->w_pages[i] = find_or_create_page(mapping, index,
GFP_NOFS);
if (!wc->w_pages[i]) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
}
wait_for_stable_page(wc->w_pages[i]);
if (index == target_index)
wc->w_target_page = wc->w_pages[i];
}
out:
if (ret)
wc->w_target_locked = false;
return ret;
}
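/*
 * Worked example (illustrative): for an allocating write with 4K pages
 * and 64K clusters, ocfs2_pages_per_cluster() yields 16, so up to 16
 * pages are locked per cluster; the count is then trimmed so that no
 * page past max(write end, i_size) is ever grabbed.
 */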
/*
 * Prepare a single cluster of the file for writing.
*/
static int ocfs2_write_cluster(struct address_space *mapping,
u32 *phys, unsigned int new,
unsigned int clear_unwritten,
unsigned int should_zero,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_write_ctxt *wc, u32 cpos,
loff_t user_pos, unsigned user_len)
{
int ret, i;
u64 p_blkno;
struct inode *inode = mapping->host;
struct ocfs2_extent_tree et;
int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
if (new) {
u32 tmp_pos;
/*
* This is safe to call with the page locks - it won't take
* any additional semaphores or cluster locks.
*/
tmp_pos = cpos;
ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
&tmp_pos, 1, !clear_unwritten,
wc->w_di_bh, wc->w_handle,
data_ac, meta_ac, NULL);
/*
* This shouldn't happen because we must have already
* calculated the correct meta data allocation required. The
* internal tree allocation code should know how to increase
* transaction credits itself.
*
* If need be, we could handle -EAGAIN for a
* RESTART_TRANS here.
*/
mlog_bug_on_msg(ret == -EAGAIN,
"Inode %llu: EAGAIN return during allocation.\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
} else if (clear_unwritten) {
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
wc->w_di_bh);
ret = ocfs2_mark_extent_written(inode, &et,
wc->w_handle, cpos, 1, *phys,
meta_ac, &wc->w_dealloc);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
/*
* The only reason this should fail is due to an inability to
* find the extent added.
*/
ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
if (ret < 0) {
mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
"at logical cluster %u",
(unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
goto out;
}
BUG_ON(*phys == 0);
p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
if (!should_zero)
p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
	for (i = 0; i < wc->w_num_pages; i++) {
int tmpret;
/* This is the direct io target page. */
if (wc->w_pages[i] == NULL) {
p_blkno++;
continue;
}
tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
wc->w_pages[i], cpos,
user_pos, user_len,
should_zero);
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
ret = tmpret;
}
}
/*
* We only have cleanup to do in case of allocating write.
*/
if (ret && new)
ocfs2_write_failure(inode, wc, user_pos, user_len);
out:
return ret;
}
static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_write_ctxt *wc,
loff_t pos, unsigned len)
{
int ret, i;
loff_t cluster_off;
unsigned int local_len = len;
struct ocfs2_write_cluster_desc *desc;
struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
for (i = 0; i < wc->w_clen; i++) {
desc = &wc->w_desc[i];
/*
* We have to make sure that the total write passed in
* doesn't extend past a single cluster.
*/
local_len = len;
cluster_off = pos & (osb->s_clustersize - 1);
if ((cluster_off + local_len) > osb->s_clustersize)
local_len = osb->s_clustersize - cluster_off;
ret = ocfs2_write_cluster(mapping, &desc->c_phys,
desc->c_new,
desc->c_clear_unwritten,
desc->c_needs_zero,
data_ac, meta_ac,
wc, desc->c_cpos, pos, local_len);
if (ret) {
mlog_errno(ret);
goto out;
}
len -= local_len;
pos += local_len;
}
ret = 0;
out:
return ret;
}
/*
* ocfs2_write_end() wants to know which parts of the target page it
* should complete the write on. It's easiest to compute them ahead of
* time when a more complete view of the write is available.
*/
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
struct ocfs2_write_ctxt *wc,
loff_t pos, unsigned len, int alloc)
{
struct ocfs2_write_cluster_desc *desc;
wc->w_target_from = pos & (PAGE_SIZE - 1);
wc->w_target_to = wc->w_target_from + len;
if (alloc == 0)
return;
/*
* Allocating write - we may have different boundaries based
* on page size and cluster size.
*
* NOTE: We can no longer compute one value from the other as
* the actual write length and user provided length may be
* different.
*/
if (wc->w_large_pages) {
/*
* We only care about the 1st and last cluster within
* our range and whether they should be zero'd or not. Either
* value may be extended out to the start/end of a
* newly allocated cluster.
*/
desc = &wc->w_desc[0];
if (desc->c_needs_zero)
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
&wc->w_target_from,
NULL);
desc = &wc->w_desc[wc->w_clen - 1];
if (desc->c_needs_zero)
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
NULL,
&wc->w_target_to);
} else {
wc->w_target_from = 0;
wc->w_target_to = PAGE_SIZE;
}
}
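/*
 * Worked example (illustrative): with 4K pages and 1M clusters
 * (w_large_pages == 0), an allocating write always rewrites the whole
 * page, so the target range widens to [0, PAGE_SIZE). With clusters
 * smaller than a page (w_large_pages == 1), the range only widens to
 * the cluster boundaries of the first/last descriptors that need
 * zeroing.
 */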
/*
 * Check if this extent is marked UNWRITTEN by direct io. If so, we need not
 * do the zeroing work, and should not clear UNWRITTEN since it will be
 * cleared by the direct io procedure.
 * If this is a new extent allocated by direct io, we should mark it in
 * the ip_unwritten_list.
*/
static int ocfs2_unwritten_check(struct inode *inode,
struct ocfs2_write_ctxt *wc,
struct ocfs2_write_cluster_desc *desc)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
int ret = 0;
if (!desc->c_needs_zero)
return 0;
retry:
spin_lock(&oi->ip_lock);
	/* No need to zero, no matter whether this is a buffered or direct
	 * write: whoever is zeroing the cluster does the zeroing, and will
	 * clear the unwritten flag once all cluster I/O has finished. */
list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
if (desc->c_cpos == ue->ue_cpos) {
BUG_ON(desc->c_new);
desc->c_needs_zero = 0;
desc->c_clear_unwritten = 0;
goto unlock;
}
}
if (wc->w_type != OCFS2_WRITE_DIRECT)
goto unlock;
if (new == NULL) {
spin_unlock(&oi->ip_lock);
new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
GFP_NOFS);
if (new == NULL) {
ret = -ENOMEM;
goto out;
}
goto retry;
}
	/* This direct write will do the zeroing. */
new->ue_cpos = desc->c_cpos;
new->ue_phys = desc->c_phys;
desc->c_clear_unwritten = 0;
list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
list_add_tail(&new->ue_node, &wc->w_unwritten_list);
wc->w_unwritten_count++;
new = NULL;
unlock:
spin_unlock(&oi->ip_lock);
out:
kfree(new);
return ret;
}
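/*
 * The retry dance above is the usual drop-lock-to-allocate pattern:
 * kmalloc(GFP_NOFS) may sleep, so ip_lock is released before the
 * allocation and the unwritten list is re-scanned once the lock is
 * re-taken, since another thread may have inserted the same cpos in
 * the meantime.
 */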
/*
* Populate each single-cluster write descriptor in the write context
* with information about the i/o to be done.
*
* Returns the number of clusters that will have to be allocated, as
* well as a worst case estimate of the number of extent records that
* would have to be created during a write to an unwritten region.
*/
static int ocfs2_populate_write_desc(struct inode *inode,
struct ocfs2_write_ctxt *wc,
unsigned int *clusters_to_alloc,
unsigned int *extents_to_split)
{
int ret;
struct ocfs2_write_cluster_desc *desc;
unsigned int num_clusters = 0;
unsigned int ext_flags = 0;
u32 phys = 0;
int i;
*clusters_to_alloc = 0;
*extents_to_split = 0;
for (i = 0; i < wc->w_clen; i++) {
desc = &wc->w_desc[i];
desc->c_cpos = wc->w_cpos + i;
if (num_clusters == 0) {
/*
* Need to look up the next extent record.
*/
ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
&num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
			/* We should have already CoWed the refcounted extent. */
BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
/*
* Assume worst case - that we're writing in
* the middle of the extent.
*
* We can assume that the write proceeds from
* left to right, in which case the extent
* insert code is smart enough to coalesce the
* next splits into the previous records created.
*/
if (ext_flags & OCFS2_EXT_UNWRITTEN)
*extents_to_split = *extents_to_split + 2;
} else if (phys) {
/*
* Only increment phys if it doesn't describe
* a hole.
*/
phys++;
}
/*
* If w_first_new_cpos is < UINT_MAX, we have a non-sparse
* file that got extended. w_first_new_cpos tells us
* where the newly allocated clusters are so we can
* zero them.
*/
if (desc->c_cpos >= wc->w_first_new_cpos) {
BUG_ON(phys == 0);
desc->c_needs_zero = 1;
}
desc->c_phys = phys;
if (phys == 0) {
desc->c_new = 1;
desc->c_needs_zero = 1;
desc->c_clear_unwritten = 1;
*clusters_to_alloc = *clusters_to_alloc + 1;
}
if (ext_flags & OCFS2_EXT_UNWRITTEN) {
desc->c_clear_unwritten = 1;
desc->c_needs_zero = 1;
}
ret = ocfs2_unwritten_check(inode, wc, desc);
if (ret) {
mlog_errno(ret);
goto out;
}
num_clusters--;
}
ret = 0;
out:
return ret;
}
static int ocfs2_write_begin_inline(struct address_space *mapping,
struct inode *inode,
struct ocfs2_write_ctxt *wc)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct page *page;
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
page = find_or_create_page(mapping, 0, GFP_NOFS);
if (!page) {
ocfs2_commit_trans(osb, handle);
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
/*
* If we don't set w_num_pages then this page won't get unlocked
* and freed on cleanup of the write context.
*/
wc->w_pages[0] = wc->w_target_page = page;
wc->w_num_pages = 1;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
ocfs2_commit_trans(osb, handle);
mlog_errno(ret);
goto out;
}
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
ocfs2_set_inode_data_inline(inode, di);
if (!PageUptodate(page)) {
ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
if (ret) {
ocfs2_commit_trans(osb, handle);
goto out;
}
}
wc->w_handle = handle;
out:
return ret;
}
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
return 1;
return 0;
}
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode, loff_t pos,
unsigned len, struct page *mmap_page,
struct ocfs2_write_ctxt *wc)
{
int ret, written = 0;
loff_t end = pos + len;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = NULL;
trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
len, (unsigned long long)pos,
oi->ip_dyn_features);
/*
	 * Handle inodes which already have inline data first.
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
if (mmap_page == NULL &&
ocfs2_size_fits_inline_data(wc->w_di_bh, end))
goto do_inline_write;
/*
* The write won't fit - we have to give this inode an
* inline extent list now.
*/
ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
if (ret)
mlog_errno(ret);
goto out;
}
/*
* Check whether the inode can accept inline data.
*/
if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
return 0;
/*
* Check whether the write can fit.
*/
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
if (mmap_page ||
end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
return 0;
do_inline_write:
ret = ocfs2_write_begin_inline(mapping, inode, wc);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* This signals to the caller that the data can be written
* inline.
*/
written = 1;
out:
return written ? written : ret;
}
/*
* This function only does anything for file systems which can't
* handle sparse files.
*
* What we want to do here is fill in any hole between the current end
* of allocation and the end of our write. That way the rest of the
* write path can treat it as an non-allocating write, which has no
* special case code for sparse/nonsparse files.
*/
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
struct buffer_head *di_bh,
loff_t pos, unsigned len,
struct ocfs2_write_ctxt *wc)
{
int ret;
loff_t newsize = pos + len;
BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
if (newsize <= i_size_read(inode))
return 0;
ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
if (ret)
mlog_errno(ret);
	/* There is no wc if this is called from direct I/O. */
if (wc)
wc->w_first_new_cpos =
ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
return ret;
}
static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
loff_t pos)
{
int ret = 0;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
if (pos > i_size_read(inode))
ret = ocfs2_zero_extend(inode, di_bh, pos);
return ret;
}
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
struct ocfs2_write_ctxt *wc;
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle;
struct ocfs2_extent_tree et;
int try_free = 1, ret1;
try_again:
ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
if (ret) {
mlog_errno(ret);
return ret;
}
if (ocfs2_supports_inline_data(osb)) {
ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
mmap_page, wc);
if (ret == 1) {
ret = 0;
goto success;
}
if (ret < 0) {
mlog_errno(ret);
goto out;
}
}
	/* Direct I/O changes i_size late, so do not zero the tail here. */
if (type != OCFS2_WRITE_DIRECT) {
if (ocfs2_sparse_alloc(osb))
ret = ocfs2_zero_tail(inode, di_bh, pos);
else
ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
len, wc);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_check_range_for_refcount(inode, pos, len);
if (ret < 0) {
mlog_errno(ret);
goto out;
} else if (ret == 1) {
clusters_need = wc->w_clen;
ret = ocfs2_refcount_cow(inode, di_bh,
wc->w_cpos, wc->w_clen, UINT_MAX);
if (ret) {
mlog_errno(ret);
goto out;
}
}
ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
&extents_to_split);
if (ret) {
mlog_errno(ret);
goto out;
}
clusters_need += clusters_to_alloc;
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
trace_ocfs2_write_begin_nolock(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(long long)i_size_read(inode),
le32_to_cpu(di->i_clusters),
pos, len, type, mmap_page,
clusters_to_alloc, extents_to_split);
/*
* We set w_target_from, w_target_to here so that
* ocfs2_write_end() knows which range in the target page to
* write out. An allocation requires that we write the entire
* cluster range.
*/
if (clusters_to_alloc || extents_to_split) {
/*
* XXX: We are stretching the limits of
* ocfs2_lock_allocators(). It greatly over-estimates
* the work to be done.
*/
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
wc->w_di_bh);
ret = ocfs2_lock_allocators(inode, &et,
clusters_to_alloc, extents_to_split,
&data_ac, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
if (data_ac)
data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
credits = ocfs2_calc_extend_credits(inode->i_sb,
&di->id2.i_list);
} else if (type == OCFS2_WRITE_DIRECT)
		/* A direct write need not start a trans if no extents are allocated. */
goto success;
/*
* We have to zero sparse allocated clusters, unwritten extent clusters,
* and non-sparse clusters we just extended. For non-sparse writes,
* we know zeros will only be needed in the first and/or last cluster.
*/
if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
wc->w_desc[wc->w_clen - 1].c_needs_zero))
cluster_of_pages = 1;
else
cluster_of_pages = 0;
ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
wc->w_handle = handle;
if (clusters_to_alloc) {
ret = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
if (ret)
goto out_commit;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_quota;
}
/*
* Fill our page array first. That way we've grabbed enough so
* that we can zero and flush if we error after adding the
* extent.
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
if (ret) {
/*
* ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
* the target page. In this case, we exit with no error and no target
* page. This will trigger the caller, page_mkwrite(), to re-try
* the operation.
*/
if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
BUG_ON(wc->w_target_page);
ret = 0;
goto out_quota;
}
mlog_errno(ret);
goto out_quota;
}
ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
len);
if (ret) {
mlog_errno(ret);
goto out_quota;
}
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
success:
if (pagep)
*pagep = wc->w_target_page;
*fsdata = wc;
return 0;
out_quota:
if (clusters_to_alloc)
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
ocfs2_commit_trans(osb, handle);
out:
/*
* The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
* even in case of error here like ENOSPC and ENOMEM. So, we need
* to unlock the target page manually to prevent deadlocks when
	 * retrying on ENOSPC, or when returning non-VM_FAULT_LOCKED
* to VM code.
*/
if (wc->w_target_locked)
unlock_page(mmap_page);
ocfs2_free_write_ctxt(inode, wc);
if (data_ac) {
ocfs2_free_alloc_context(data_ac);
data_ac = NULL;
}
if (meta_ac) {
ocfs2_free_alloc_context(meta_ac);
meta_ac = NULL;
}
if (ret == -ENOSPC && try_free) {
/*
* Try to free some truncate log so that we can have enough
* clusters to allocate.
*/
try_free = 0;
ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
if (ret1 == 1)
goto try_again;
if (ret1 < 0)
mlog_errno(ret1);
}
return ret;
}
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
struct buffer_head *di_bh = NULL;
struct inode *inode = mapping->host;
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
return ret;
}
/*
* Take alloc sem here to prevent concurrent lookups. That way
* the mapping, zeroing and tree manipulation within
* ocfs2_write() will be safe against ->read_folio(). This
* should also serve to lock out allocation from a shared
* writeable region.
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
pagep, fsdata, di_bh, NULL);
if (ret) {
mlog_errno(ret);
goto out_fail;
}
brelse(di_bh);
return 0;
out_fail:
up_write(&OCFS2_I(inode)->ip_alloc_sem);
brelse(di_bh);
ocfs2_inode_unlock(inode, 1);
return ret;
}
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
unsigned len, unsigned *copied,
struct ocfs2_dinode *di,
struct ocfs2_write_ctxt *wc)
{
void *kaddr;
if (unlikely(*copied < len)) {
if (!PageUptodate(wc->w_target_page)) {
*copied = 0;
return;
}
}
kaddr = kmap_atomic(wc->w_target_page);
memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
kunmap_atomic(kaddr);
trace_ocfs2_write_end_inline(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)pos, *copied,
le16_to_cpu(di->id2.i_data.id_count),
le16_to_cpu(di->i_dyn_features));
}
int ocfs2_write_end_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, void *fsdata)
{
int i, ret;
unsigned from, to, start = pos & (PAGE_SIZE - 1);
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_write_ctxt *wc = fsdata;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
handle_t *handle = wc->w_handle;
struct page *tmppage;
BUG_ON(!list_empty(&wc->w_unwritten_list));
if (handle) {
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
copied = ret;
mlog_errno(ret);
goto out;
}
}
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
goto out_write_size;
}
if (unlikely(copied < len) && wc->w_target_page) {
loff_t new_isize;
if (!PageUptodate(wc->w_target_page))
copied = 0;
new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
if (new_isize > page_offset(wc->w_target_page))
ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
start+len);
else {
/*
			 * When the page is fully beyond the new isize (data copy
			 * failed), do not bother zeroing the page. Invalidate
			 * it instead so that writeback does not get confused and
			 * put the page and buffer dirty bits into an inconsistent
			 * state.
*/
block_invalidate_folio(page_folio(wc->w_target_page),
0, PAGE_SIZE);
}
}
if (wc->w_target_page)
flush_dcache_page(wc->w_target_page);
	for (i = 0; i < wc->w_num_pages; i++) {
tmppage = wc->w_pages[i];
/* This is the direct io target page. */
if (tmppage == NULL)
continue;
if (tmppage == wc->w_target_page) {
from = wc->w_target_from;
to = wc->w_target_to;
BUG_ON(from > PAGE_SIZE ||
to > PAGE_SIZE ||
to < from);
} else {
/*
* Pages adjacent to the target (if any) imply
* a hole-filling write in which case we want
* to flush their entire range.
*/
from = 0;
to = PAGE_SIZE;
}
if (page_has_buffers(tmppage)) {
if (handle && ocfs2_should_order_data(inode)) {
loff_t start_byte =
((loff_t)tmppage->index << PAGE_SHIFT) +
from;
loff_t length = to - from;
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
block_commit_write(tmppage, from, to);
}
}
out_write_size:
	/* Direct I/O does not update i_size here. */
if (wc->w_type != OCFS2_WRITE_DIRECT) {
pos += copied;
if (pos > i_size_read(inode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
inode->i_mtime = inode_set_ctime_current(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
if (handle)
ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
if (handle)
ocfs2_journal_dirty(handle, wc->w_di_bh);
out:
	/* Unlock pages before dealloc since it needs to acquire the
	 * j_trans_barrier lock; otherwise this can deadlock because the
	 * journal commit thread holds that lock and will ask for the page
	 * lock when flushing the data. Keep it here to preserve the unlock
	 * order.
	 */
ocfs2_unlock_pages(wc);
if (handle)
ocfs2_commit_trans(osb, handle);
ocfs2_run_deallocs(osb, &wc->w_dealloc);
brelse(wc->w_di_bh);
kfree(wc);
return copied;
}
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
int ret;
struct inode *inode = mapping->host;
ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_inode_unlock(inode, 1);
return ret;
}
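/*
 * Sketch of the lock pairing across the buffered write path (comment
 * only; matches ocfs2_write_begin() and ocfs2_write_end() above):
 *
 *   ocfs2_inode_lock(inode, &di_bh, 1)    taken in ocfs2_write_begin()
 *     down_write(&oi->ip_alloc_sem)       taken in ocfs2_write_begin()
 *       ... data is copied into the pagecache pages ...
 *     up_write(&oi->ip_alloc_sem)         dropped in ocfs2_write_end()
 *   ocfs2_inode_unlock(inode, 1)          dropped in ocfs2_write_end()
 */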
struct ocfs2_dio_write_ctxt {
struct list_head dw_zero_list;
unsigned dw_zero_count;
int dw_orphaned;
pid_t dw_writer_pid;
};
static struct ocfs2_dio_write_ctxt *
ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
{
struct ocfs2_dio_write_ctxt *dwc = NULL;
if (bh->b_private)
return bh->b_private;
dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
if (dwc == NULL)
return NULL;
INIT_LIST_HEAD(&dwc->dw_zero_list);
dwc->dw_zero_count = 0;
dwc->dw_orphaned = 0;
dwc->dw_writer_pid = task_pid_nr(current);
bh->b_private = dwc;
*alloc = 1;
return dwc;
}
static void ocfs2_dio_free_write_ctx(struct inode *inode,
struct ocfs2_dio_write_ctxt *dwc)
{
ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
kfree(dwc);
}
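/*
 * Illustrative lifecycle of the per-bh direct I/O write context above
 * (comment only): the first call to ocfs2_dio_wr_get_block() allocates
 * a ctxt and stashes it in bh->b_private, later calls for the same dio
 * reuse it, and ocfs2_dio_end_io_write() (reached via ocfs2_dio_end_io())
 * finally frees it together with its unwritten-extent list.
 */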
/*
* TODO: Make this into a generic get_blocks function.
*
* From do_direct_io in direct-io.c:
* "So what we do is to permit the ->get_blocks function to populate
* bh.b_size with the size of IO which is permitted at this offset and
* this i_blkbits."
*
* This function is called directly from get_more_blocks in direct-io.c.
*
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
*/
static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_write_ctxt *wc;
struct ocfs2_write_cluster_desc *desc = NULL;
struct ocfs2_dio_write_ctxt *dwc = NULL;
struct buffer_head *di_bh = NULL;
u64 p_blkno;
unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
loff_t pos = iblock << i_blkbits;
sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
unsigned len, total_len = bh_result->b_size;
int ret = 0, first_get_block = 0;
len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
len = min(total_len, len);
/*
	 * bh_result->b_size is computed in get_more_blocks() from the write
	 * "pos" and "end". We may need to map twice to return different
	 * buffer states:
	 * 1. an area within the file size does not get NEW set;
	 * 2. an area beyond the file size gets NEW set.
*
* iblock endblk
* |--------|---------|---------|---------
* |<-------area in file------->|
*/
if ((iblock <= endblk) &&
((iblock + ((len - 1) >> i_blkbits)) > endblk))
len = (endblk - iblock + 1) << i_blkbits;
mlog(0, "get block of %lu at %llu:%u req %u\n",
inode->i_ino, pos, len, total_len);
/*
	 * Because we may need to change the file size in
	 * ocfs2_dio_end_io_write() or add the inode to the orphan dir, we
	 * cannot take the fast path when the file size will change.
*/
if (pos + total_len <= i_size_read(inode)) {
		/* This is the fast path for rewrites. */
ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
if (buffer_mapped(bh_result) &&
!buffer_new(bh_result) &&
ret == 0)
goto out;
/* Clear state set by ocfs2_get_block. */
bh_result->b_state = 0;
}
dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
if (unlikely(dwc == NULL)) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
!dwc->dw_orphaned) {
/*
		 * When we are going to allocate extents beyond the file size,
		 * add the inode to the orphan dir so we can reclaim that
		 * space if the system crashes during the write.
*/
ret = ocfs2_add_inode_to_orphan(osb, inode);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
dwc->dw_orphaned = 1;
}
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
goto out;
}
down_write(&oi->ip_alloc_sem);
if (first_get_block) {
if (ocfs2_sparse_alloc(osb))
ret = ocfs2_zero_tail(inode, di_bh, pos);
else
ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
total_len, NULL);
if (ret < 0) {
mlog_errno(ret);
goto unlock;
}
}
ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
OCFS2_WRITE_DIRECT, NULL,
(void **)&wc, di_bh, NULL);
if (ret) {
mlog_errno(ret);
goto unlock;
}
desc = &wc->w_desc[0];
p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
BUG_ON(p_blkno == 0);
p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
map_bh(bh_result, inode->i_sb, p_blkno);
bh_result->b_size = len;
if (desc->c_needs_zero)
set_buffer_new(bh_result);
if (iblock > endblk)
set_buffer_new(bh_result);
	/* end_io may sleep, which must not happen in an irq context, so
	 * defer it to the dio work queue. */
set_buffer_defer_completion(bh_result);
if (!list_empty(&wc->w_unwritten_list)) {
struct ocfs2_unwritten_extent *ue = NULL;
ue = list_first_entry(&wc->w_unwritten_list,
struct ocfs2_unwritten_extent,
ue_node);
BUG_ON(ue->ue_cpos != desc->c_cpos);
/* The physical address may be 0, fill it. */
ue->ue_phys = desc->c_phys;
list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
dwc->dw_zero_count += wc->w_unwritten_count;
}
ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
BUG_ON(ret != len);
ret = 0;
unlock:
up_write(&oi->ip_alloc_sem);
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out:
if (ret < 0)
ret = -EIO;
return ret;
}
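/*
 * Worked example of the in-file/out-of-file split above, with assumed
 * numbers: a 4K block size (i_blkbits = 12) and i_size = 10K give
 * endblk = (10K - 1) >> 12 = 2. A request mapping iblock = 2 for 8K
 * crosses endblk, so len is clamped to (endblk - iblock + 1) << 12 = 4K;
 * the blocks beyond i_size are mapped by a later call, which sets the
 * NEW flag on the buffer.
 */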
static int ocfs2_dio_end_io_write(struct inode *inode,
struct ocfs2_dio_write_ctxt *dwc,
loff_t offset,
ssize_t bytes)
{
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_extent_tree et;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_unwritten_extent *ue = NULL;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle = NULL;
loff_t end = offset + bytes;
int ret = 0, credits = 0;
ocfs2_init_dealloc_ctxt(&dealloc);
	/* We clear unwritten extents, delete the orphan, and change i_size
	 * here. If none of these are needed, we can skip all this. */
if (list_empty(&dwc->dw_zero_list) &&
end <= i_size_read(inode) &&
!dwc->dw_orphaned)
goto out;
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
down_write(&oi->ip_alloc_sem);
	/* Delete the orphan before acquiring i_rwsem. */
if (dwc->dw_orphaned) {
BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
end = end > i_size_read(inode) ? end : 0;
ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
!!end, end);
if (ret < 0)
mlog_errno(ret);
}
di = (struct ocfs2_dinode *)di_bh->b_data;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	/* Attach dealloc to the extent tree in case we reuse extents that
	 * were already unlinked from the current extent tree due to extent
	 * rotation and merging.
*/
et.et_dealloc = &dealloc;
ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
&data_ac, &meta_ac);
if (ret) {
mlog_errno(ret);
goto unlock;
}
credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto unlock;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto commit;
}
list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
ret = ocfs2_mark_extent_written(inode, &et, handle,
ue->ue_cpos, 1,
ue->ue_phys,
meta_ac, &dealloc);
if (ret < 0) {
mlog_errno(ret);
break;
}
}
if (end > i_size_read(inode)) {
ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
if (ret < 0)
mlog_errno(ret);
}
commit:
ocfs2_commit_trans(osb, handle);
unlock:
up_write(&oi->ip_alloc_sem);
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
ocfs2_run_deallocs(osb, &dealloc);
ocfs2_dio_free_write_ctx(inode, dwc);
return ret;
}
/*
* ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
* particularly interested in the aio/dio case. We use the rw_lock DLM lock
* to protect io on one node from truncation on another.
*/
static int ocfs2_dio_end_io(struct kiocb *iocb,
loff_t offset,
ssize_t bytes,
void *private)
{
struct inode *inode = file_inode(iocb->ki_filp);
int level;
int ret = 0;
/* this io's submitter should not have unlocked this before we could */
BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
if (bytes <= 0)
mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
(long long)bytes);
if (private) {
if (bytes > 0)
ret = ocfs2_dio_end_io_write(inode, private, offset,
bytes);
else
ocfs2_dio_free_write_ctx(inode, private);
}
ocfs2_iocb_clear_rw_locked(iocb);
level = ocfs2_iocb_rw_locked_level(iocb);
ocfs2_rw_unlock(inode, level);
return ret;
}
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
get_block_t *get_block;
/*
	 * Fall back to buffered I/O if we see an inode without
	 * extents (i.e. inline data).
*/
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
	/* Fall back to buffered I/O if we do not support append dio. */
if (iocb->ki_pos + iter->count > i_size_read(inode) &&
!ocfs2_supports_append_dio(osb))
return 0;
if (iov_iter_rw(iter) == READ)
get_block = ocfs2_lock_get_block;
else
get_block = ocfs2_dio_wr_get_block;
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, get_block,
ocfs2_dio_end_io, 0);
}
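/*
 * Note on the 0 returns above (comment only): a 0 (or short) return from
 * ->direct_IO makes the generic file read/write paths fall back to
 * buffered I/O, which is how inline-data inodes and, without append-dio
 * support, size-extending writes end up being serviced.
 */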
const struct address_space_operations ocfs2_aops = {
.dirty_folio = block_dirty_folio,
.read_folio = ocfs2_read_folio,
.readahead = ocfs2_readahead,
.writepage = ocfs2_writepage,
.write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
.direct_IO = ocfs2_direct_IO,
.invalidate_folio = block_invalidate_folio,
.release_folio = ocfs2_release_folio,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
| linux-master | fs/ocfs2/aops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* io.c
*
* Buffer cache handling
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"
/*
* Bits on bh->b_state used by ocfs2.
*
* These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
*/
enum ocfs2_state_bits {
BH_NeedsValidate = BH_JBDPrivateStart,
};
/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);
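/*
 * For reference, BUFFER_FNS(NeedsValidate, needs_validate) expands to
 * roughly the following inline helpers (simplified sketch of the generic
 * macro from <linux/buffer_head.h>):
 */
#if 0
static inline void set_buffer_needs_validate(struct buffer_head *bh)
{
	set_bit(BH_NeedsValidate, &bh->b_state);
}
static inline void clear_buffer_needs_validate(struct buffer_head *bh)
{
	clear_bit(BH_NeedsValidate, &bh->b_state);
}
static inline int buffer_needs_validate(const struct buffer_head *bh)
{
	return test_bit(BH_NeedsValidate, &bh->b_state);
}
#endif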
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
struct ocfs2_caching_info *ci)
{
int ret = 0;
trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
BUG_ON(buffer_jbd(bh));
	/* No need to check for a soft readonly file system here. Non-
	 * journalled writes are only ever done on system files, which
	 * can get modified during recovery even if read-only. */
if (ocfs2_is_hard_readonly(osb)) {
ret = -EROFS;
mlog_errno(ret);
goto out;
}
ocfs2_metadata_cache_io_lock(ci);
lock_buffer(bh);
set_buffer_uptodate(bh);
/* remove from dirty list before I/O. */
clear_buffer_dirty(bh);
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
submit_bh(REQ_OP_WRITE, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh)) {
ocfs2_set_buffer_uptodate(ci, bh);
} else {
/* We don't need to remove the clustered uptodate
* information for this bh as it's not marked locally
* uptodate. */
ret = -EIO;
mlog_errno(ret);
}
ocfs2_metadata_cache_io_unlock(ci);
out:
return ret;
}
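/*
 * Minimal sketch of the synchronous buffer_head write pattern used above
 * (generic buffer-cache API, not ocfs2-specific; error handling reduced
 * to the final uptodate check):
 */
#if 0
static int sync_write_bh_sketch(struct buffer_head *bh)
{
	lock_buffer(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);		/* off the dirty list before I/O */
	get_bh(bh);			/* ref for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif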
/* Caller must provide a bhs[] whose entries are either all NULL or all
 * non-NULL, so that read failures are easier to handle.
*/
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int nr, struct buffer_head *bhs[])
{
int status = 0;
unsigned int i;
struct buffer_head *bh;
int new_bh = 0;
trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
if (!nr)
goto bail;
	/* Don't put the buffer head and reassign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
*/
new_bh = (bhs[0] == NULL);
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(osb->sb, block++);
if (bhs[i] == NULL) {
status = -ENOMEM;
mlog_errno(status);
break;
}
}
bh = bhs[i];
if (buffer_jbd(bh)) {
trace_ocfs2_read_blocks_sync_jbd(
(unsigned long long)bh->b_blocknr);
continue;
}
if (buffer_dirty(bh)) {
/* This should probably be a BUG, or
* at least return an error. */
mlog(ML_ERROR,
"trying to sync read a dirty "
"buffer! (blocknr = %llu), skipping\n",
(unsigned long long)bh->b_blocknr);
continue;
}
lock_buffer(bh);
if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
mlog(ML_ERROR,
"block %llu had the JBD bit set "
"while I was in lock_buffer!",
(unsigned long long)bh->b_blocknr);
BUG();
#else
unlock_buffer(bh);
continue;
#endif
}
get_bh(bh); /* for end_buffer_read_sync() */
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, bh);
}
read_failure:
for (i = nr; i > 0; i--) {
bh = bhs[i - 1];
if (unlikely(status)) {
if (new_bh && bh) {
			/* If a middle bh fails, let the previous bh
			 * finish its read and then put it to
			 * avoid a bh leak
*/
if (!buffer_jbd(bh))
wait_on_buffer(bh);
put_bh(bh);
bhs[i - 1] = NULL;
} else if (bh && buffer_uptodate(bh)) {
clear_buffer_uptodate(bh);
}
continue;
}
/* No need to wait on the buffer if it's managed by JBD. */
if (!buffer_jbd(bh))
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* Status won't be cleared from here on out,
* so we can safely record this and loop back
* to cleanup the other buffers. */
status = -EIO;
goto read_failure;
}
}
bail:
return status;
}
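/*
 * Illustrative caller of ocfs2_read_blocks_sync() (hypothetical blkno;
 * shows the "all NULL" variant of the bhs[] contract, where the helper
 * allocates the buffer head and the caller releases it):
 */
#if 0
	struct buffer_head *bh = NULL;

	status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
	if (!status) {
		/* ... inspect bh->b_data ... */
		brelse(bh);
	}
#endif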
/* Caller must provide a bhs[] whose entries are either all NULL or all
 * non-NULL, so that read failures are easier to handle.
*/
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
struct buffer_head *bh))
{
int status = 0;
int i, ignore_cache = 0;
struct buffer_head *bh;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
int new_bh = 0;
trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
BUG_ON(!ci);
BUG_ON((flags & OCFS2_BH_READAHEAD) &&
(flags & OCFS2_BH_IGNORE_CACHE));
if (bhs == NULL) {
status = -EINVAL;
mlog_errno(status);
goto bail;
}
if (nr < 0) {
mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
status = -EINVAL;
mlog_errno(status);
goto bail;
}
if (nr == 0) {
status = 0;
goto bail;
}
	/* Don't put the buffer head and reassign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
*/
new_bh = (bhs[0] == NULL);
ocfs2_metadata_cache_io_lock(ci);
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(sb, block++);
if (bhs[i] == NULL) {
ocfs2_metadata_cache_io_unlock(ci);
status = -ENOMEM;
mlog_errno(status);
/* Don't forget to put previous bh! */
break;
}
}
bh = bhs[i];
ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);
/* There are three read-ahead cases here which we need to
* be concerned with. All three assume a buffer has
* previously been submitted with OCFS2_BH_READAHEAD
* and it hasn't yet completed I/O.
*
* 1) The current request is sync to disk. This rarely
* happens these days, and never when performance
* matters - the code can just wait on the buffer
* lock and re-submit.
*
* 2) The current request is cached, but not
* readahead. ocfs2_buffer_uptodate() will return
* false anyway, so we'll wind up waiting on the
* buffer lock to do I/O. We re-check the request
	 * after getting the lock to avoid a re-submit.
*
* 3) The current request is readahead (and so must
* also be a caching one). We short circuit if the
* buffer is locked (under I/O) and if it's in the
* uptodate cache. The re-check from #2 catches the
* case that the previous read-ahead completes just
* before our is-it-in-flight check.
*/
if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
trace_ocfs2_read_blocks_from_disk(
(unsigned long long)bh->b_blocknr,
(unsigned long long)ocfs2_metadata_cache_owner(ci));
/* We're using ignore_cache here to say
* "go to disk" */
ignore_cache = 1;
}
trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
if (buffer_jbd(bh)) {
continue;
}
if (ignore_cache) {
if (buffer_dirty(bh)) {
/* This should probably be a BUG, or
* at least return an error. */
continue;
}
/* A read-ahead request was made - if the
* buffer is already under read-ahead from a
		 * previously submitted request then we are
* done here. */
if ((flags & OCFS2_BH_READAHEAD)
&& ocfs2_buffer_read_ahead(ci, bh))
continue;
lock_buffer(bh);
if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
mlog(ML_ERROR, "block %llu had the JBD bit set "
"while I was in lock_buffer!",
(unsigned long long)bh->b_blocknr);
BUG();
#else
unlock_buffer(bh);
continue;
#endif
}
/* Re-check ocfs2_buffer_uptodate() as a
* previously read-ahead buffer may have
* completed I/O while we were waiting for the
* buffer lock. */
if (!(flags & OCFS2_BH_IGNORE_CACHE)
&& !(flags & OCFS2_BH_READAHEAD)
&& ocfs2_buffer_uptodate(ci, bh)) {
unlock_buffer(bh);
continue;
}
get_bh(bh); /* for end_buffer_read_sync() */
if (validate)
set_buffer_needs_validate(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, bh);
continue;
}
}
read_failure:
for (i = (nr - 1); i >= 0; i--) {
bh = bhs[i];
if (!(flags & OCFS2_BH_READAHEAD)) {
if (unlikely(status)) {
				/* Clear the buffers on error, including
				 * those that succeeded in reading
*/
if (new_bh && bh) {
				/* If a middle bh fails, let the previous bh
				 * finish its read and then put it to
				 * avoid a bh leak
*/
if (!buffer_jbd(bh))
wait_on_buffer(bh);
put_bh(bh);
bhs[i] = NULL;
} else if (bh && buffer_uptodate(bh)) {
clear_buffer_uptodate(bh);
}
continue;
}
/* We know this can't have changed as we hold the
* owner sem. Avoid doing any work on the bh if the
* journal has it. */
if (!buffer_jbd(bh))
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* Status won't be cleared from here on out,
* so we can safely record this and loop back
* to cleanup the other buffers. Don't need to
* remove the clustered uptodate information
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
clear_buffer_needs_validate(bh);
goto read_failure;
}
if (buffer_needs_validate(bh)) {
/* We never set NeedsValidate if the
* buffer was held by the journal, so
* that better not have changed */
BUG_ON(buffer_jbd(bh));
clear_buffer_needs_validate(bh);
status = validate(sb, bh);
if (status)
goto read_failure;
}
}
/* Always set the buffer in the cache, even if it was
* a forced read, or read-ahead which hasn't yet
* completed. */
ocfs2_set_buffer_uptodate(ci, bh);
}
ocfs2_metadata_cache_io_unlock(ci);
trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
flags, ignore_cache);
bail:
return status;
}
/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
sector_t blkno)
{
int i;
u64 backup_blkno;
if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
return;
for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
backup_blkno = ocfs2_backup_super_blkno(sb, i);
if (backup_blkno == blkno)
return;
}
BUG();
}
/*
 * Writing the super block and backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need
 * to be passed into this function.
* into this function.
*/
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
struct buffer_head *bh)
{
int ret = 0;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
BUG_ON(buffer_jbd(bh));
ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
ret = -EROFS;
mlog_errno(ret);
goto out;
}
lock_buffer(bh);
set_buffer_uptodate(bh);
/* remove from dirty list before I/O. */
clear_buffer_dirty(bh);
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
submit_bh(REQ_OP_WRITE, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
ret = -EIO;
mlog_errno(ret);
}
out:
return ret;
}
| linux-master | fs/ocfs2/buffer_head_io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of operations over global quota file
*/
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
#include <linux/iversion.h>
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"
/*
* Locking of quotas with OCFS2 is rather complex. Here are rules that
* should be obeyed by all the functions:
* - any write of quota structure (either to local or global file) is protected
* by dqio_sem or dquot->dq_lock.
* - any modification of global quota file holds inode cluster lock, i_rwsem,
* and ip_alloc_sem of the global quota file (achieved by
* ocfs2_lock_global_qf). It also has to hold qinfo_lock.
* - an allocation of new blocks for local quota file is protected by
* its ip_alloc_sem
*
* A rough sketch of locking dependencies (lf = local file, gf = global file):
* Normal filesystem operation:
* start_trans -> dqio_sem -> write to lf
* Syncing of local and global file:
* ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
* write to gf
* -> write to lf
* Acquire dquot for the first time:
* dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
* -> alloc space for gf
* -> start_trans -> qinfo_lock -> write to gf
* -> ip_alloc_sem of lf -> alloc space for lf
* -> write to lf
* Release last reference to dquot:
* dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
* -> write to lf
* Note that all the above operations also hold the inode cluster lock of lf.
* Recovery:
* inode cluster lock of recovered lf
* -> read bitmaps -> ip_alloc_sem of lf
* -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
* write to gf
*/
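/*
 * Illustrative sketch of the "syncing of local and global file" ordering
 * described above (error unwinding elided; the real code following this
 * pattern is ocfs2_sync_dquot_helper() below):
 */
#if 0
	status = ocfs2_lock_global_qf(oinfo, 1);	/* gf cluster lock */
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	down_write(&sb_dqopt(sb)->dqio_sem);
	status = ocfs2_sync_dquot(dquot);	/* takes qinfo_lock, writes gf */
	status = ocfs2_local_write_dquot(dquot);	/* writes lf */
	up_write(&sb_dqopt(sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
	ocfs2_unlock_global_qf(oinfo, 1);
#endif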
static void qsync_work_fn(struct work_struct *work);
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
struct ocfs2_global_disk_dqblk *d = dp;
struct mem_dqblk *m = &dquot->dq_dqb;
/* Update from disk only entries not set by the admin */
if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
}
if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
}
if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
m->dqb_btime = le64_to_cpu(d->dqb_btime);
if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
m->dqb_itime = le64_to_cpu(d->dqb_itime);
OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
struct ocfs2_global_disk_dqblk *d = dp;
struct mem_dqblk *m = &dquot->dq_dqb;
d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
d->dqb_btime = cpu_to_le64(m->dqb_btime);
d->dqb_itime = cpu_to_le64(m->dqb_itime);
d->dqb_pad1 = d->dqb_pad2 = 0;
}
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
struct ocfs2_global_disk_dqblk *d = dp;
struct ocfs2_mem_dqinfo *oinfo =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
if (qtree_entry_unused(&oinfo->dqi_gi, dp))
return 0;
return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
le32_to_cpu(d->dqb_id)),
dquot->dq_id);
}
const struct qtree_fmt_operations ocfs2_global_ops = {
.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
.disk2mem_dqblk = ocfs2_global_disk2memdqb,
.is_id = ocfs2_global_is_id,
};
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
struct ocfs2_disk_dqtrailer *dqt =
ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
struct buffer_head **bhp)
{
int rc;
*bhp = NULL;
rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
ocfs2_validate_quota_block);
if (rc)
mlog_errno(rc);
return rc;
}
/* Read data from the global quota file - avoid the pagecache and such
 * because we cannot afford to acquire the locks... We use the quota
 * cluster lock to serialize operations. Caller is responsible for
 * acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
struct inode *gqinode = oinfo->dqi_gqinode;
loff_t i_size = i_size_read(gqinode);
int offset = off & (sb->s_blocksize - 1);
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0;
struct buffer_head *bh;
size_t toread, tocopy;
u64 pblock = 0, pcount = 0;
if (off > i_size)
return 0;
if (off + len > i_size)
len = i_size - off;
toread = len;
while (toread > 0) {
tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
if (!pcount) {
err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
&pcount, NULL);
if (err) {
mlog_errno(err);
return err;
}
} else {
pcount--;
pblock++;
}
bh = NULL;
err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
if (err) {
mlog_errno(err);
return err;
}
memcpy(data, bh->b_data + offset, tocopy);
brelse(bh);
offset = 0;
toread -= tocopy;
data += tocopy;
blk++;
}
return len;
}
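/*
 * Worked example for the offset arithmetic above, assuming a 4K block
 * size: off = 6000 gives blk = 6000 >> 12 = 1 and offset = 6000 & 4095
 * = 1904, so the first iteration copies tocopy = min(4096 - 1904,
 * toread) bytes; offset then resets to 0 and subsequent iterations copy
 * whole blocks.
 */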
/* Write to quotafile (we know the transaction is already started and has
* enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct inode *gqinode = oinfo->dqi_gqinode;
int offset = off & (sb->s_blocksize - 1);
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0, new = 0, ja_type;
struct buffer_head *bh = NULL;
handle_t *handle = journal_current_handle();
u64 pblock, pcount;
if (!handle) {
mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
"because transaction was not started.\n",
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
WARN_ON(1);
len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
}
if (i_size_read(gqinode) < off + len) {
loff_t rounded_end =
ocfs2_align_bytes_to_blocks(sb, off + len);
/* Space is already allocated in ocfs2_acquire_dquot() */
err = ocfs2_simple_size_update(gqinode,
oinfo->dqi_gqi_bh,
rounded_end);
if (err < 0)
goto out;
new = 1;
}
err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
if (err) {
mlog_errno(err);
goto out;
}
/* Not rewriting whole block? */
if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
!new) {
err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
} else {
bh = sb_getblk(sb, pblock);
if (!bh)
err = -ENOMEM;
ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
}
if (err) {
mlog_errno(err);
goto out;
}
lock_buffer(bh);
if (new)
memset(bh->b_data, 0, sb->s_blocksize);
memcpy(bh->b_data + offset, data, len);
flush_dcache_page(bh->b_page);
set_buffer_uptodate(bh);
unlock_buffer(bh);
ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
ja_type);
if (err < 0) {
brelse(bh);
goto out;
}
ocfs2_journal_dirty(handle, bh);
brelse(bh);
out:
if (err) {
mlog_errno(err);
return err;
}
inode_inc_iversion(gqinode);
ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
return len;
}
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
int status;
struct buffer_head *bh = NULL;
status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
if (status < 0)
return status;
spin_lock(&dq_data_lock);
if (!oinfo->dqi_gqi_count++)
oinfo->dqi_gqi_bh = bh;
else
WARN_ON(bh != oinfo->dqi_gqi_bh);
spin_unlock(&dq_data_lock);
if (ex) {
inode_lock(oinfo->dqi_gqinode);
down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
} else {
down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
}
return 0;
}
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
if (ex) {
up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
inode_unlock(oinfo->dqi_gqinode);
} else {
up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
}
ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
brelse(oinfo->dqi_gqi_bh);
spin_lock(&dq_data_lock);
if (!--oinfo->dqi_gqi_count)
oinfo->dqi_gqi_bh = NULL;
spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
GROUP_QUOTA_SYSTEM_INODE };
struct ocfs2_global_disk_dqinfo dinfo;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
u64 pcount;
int status;
oinfo->dqi_gi.dqi_sb = sb;
oinfo->dqi_gi.dqi_type = type;
ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
oinfo->dqi_gqi_bh = NULL;
oinfo->dqi_gqi_count = 0;
/* Read global header */
oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
OCFS2_INVALID_SLOT);
if (!oinfo->dqi_gqinode) {
mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
type);
status = -EINVAL;
goto out_err;
}
status = ocfs2_lock_global_qf(oinfo, 0);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
&pcount, NULL);
if (status < 0)
goto out_unlock;
status = ocfs2_qinfo_lock(oinfo, 0);
if (status < 0)
goto out_unlock;
status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct ocfs2_global_disk_dqinfo),
OCFS2_GLOBAL_INFO_OFF);
ocfs2_qinfo_unlock(oinfo, 0);
ocfs2_unlock_global_qf(oinfo, 0);
if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
status);
if (status >= 0)
status = -EIO;
mlog_errno(status);
goto out_err;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
OCFS2_QBLK_RESERVED_SPACE;
oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
schedule_delayed_work(&oinfo->dqi_sync_work,
msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
return status;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
mlog_errno(status);
goto out_err;
}
/* Write information to global quota file. Expects exclusive lock on quota
* file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_global_disk_dqinfo dinfo;
ssize_t size;
spin_lock(&dq_data_lock);
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
spin_unlock(&dq_data_lock);
dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
sizeof(struct ocfs2_global_disk_dqinfo),
OCFS2_GLOBAL_INFO_OFF);
if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
mlog(ML_ERROR, "Cannot write global quota info structure\n");
if (size >= 0)
size = -EIO;
return size;
}
return 0;
}
int ocfs2_global_write_info(struct super_block *sb, int type)
{
int err;
struct quota_info *dqopt = sb_dqopt(sb);
struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv;
down_write(&dqopt->dqio_sem);
err = ocfs2_qinfo_lock(info, 1);
if (err < 0)
goto out_sem;
err = __ocfs2_global_write_info(sb, type);
ocfs2_qinfo_unlock(info, 1);
out_sem:
up_write(&dqopt->dqio_sem);
return err;
}
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
/*
* We may need to allocate tree blocks and a leaf block but not the
* root block
*/
return oinfo->dqi_gi.dqi_qtree_depth;
}
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
/* We modify all the allocated blocks, tree root, info block and
* the inode */
return (ocfs2_global_qinit_alloc(sb, type) + 2) *
OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}
/* Sync local information about quota modifications with global quota file.
* Caller must have started the transaction and obtained exclusive lock for
* global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
int err, err2;
struct super_block *sb = dquot->dq_sb;
int type = dquot->dq_id.type;
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
struct ocfs2_global_disk_dqblk dqblk;
s64 spacechange, inodechange;
time64_t olditime, oldbtime;
err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct ocfs2_global_disk_dqblk),
dquot->dq_off);
if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
if (err >= 0) {
mlog(ML_ERROR, "Short read from global quota file "
"(%u read)\n", err);
err = -EIO;
}
goto out;
}
	/* Update space and inode usage. Also get other information from the
	 * global quota file so that we don't overwrite any changes there. */
spin_lock(&dquot->dq_dqb_lock);
spacechange = dquot->dq_dqb.dqb_curspace -
OCFS2_DQUOT(dquot)->dq_origspace;
inodechange = dquot->dq_dqb.dqb_curinodes -
OCFS2_DQUOT(dquot)->dq_originodes;
olditime = dquot->dq_dqb.dqb_itime;
oldbtime = dquot->dq_dqb.dqb_btime;
ocfs2_global_disk2memdqb(dquot, &dqblk);
trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_dqb.dqb_curspace,
(long long)spacechange,
dquot->dq_dqb.dqb_curinodes,
(long long)inodechange);
if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
dquot->dq_dqb.dqb_curspace += spacechange;
if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Properly set the space grace time... */
if (dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
oldbtime > 0) {
if (dquot->dq_dqb.dqb_btime > 0)
dquot->dq_dqb.dqb_btime =
min(dquot->dq_dqb.dqb_btime, oldbtime);
else
dquot->dq_dqb.dqb_btime = oldbtime;
}
} else {
dquot->dq_dqb.dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
	/* Properly set the inode grace time... */
if (dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
olditime > 0) {
if (dquot->dq_dqb.dqb_itime > 0)
dquot->dq_dqb.dqb_itime =
min(dquot->dq_dqb.dqb_itime, olditime);
else
dquot->dq_dqb.dqb_itime = olditime;
}
} else {
dquot->dq_dqb.dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
/* All information is properly updated, clear the flags */
__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
spin_unlock(&dquot->dq_dqb_lock);
err = ocfs2_qinfo_lock(info, freeing);
if (err < 0) {
mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
" (type=%d, id=%u)\n", dquot->dq_id.type,
(unsigned)from_kqid(&init_user_ns, dquot->dq_id));
goto out;
}
if (freeing)
OCFS2_DQUOT(dquot)->dq_use_count--;
err = qtree_write_dquot(&info->dqi_gi, dquot);
if (err < 0)
goto out_qlock;
if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
err = qtree_release_dquot(&info->dqi_gi, dquot);
if (info_dirty(sb_dqinfo(sb, type))) {
err2 = __ocfs2_global_write_info(sb, type);
if (!err)
err = err2;
}
}
out_qlock:
ocfs2_qinfo_unlock(info, freeing);
out:
if (err < 0)
mlog_errno(err);
return err;
}
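/*
 * Example of the grace-time merge above, with assumed values: if this
 * node computed dqb_btime = 1000 while the on-disk copy carried
 * oldbtime = 900, the earlier deadline wins and dqb_btime becomes 900;
 * a local dqb_btime of 0 simply inherits oldbtime.
 */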
/*
* Functions for periodic syncing of dquots with global file
*/
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
handle_t *handle;
struct super_block *sb = dquot->dq_sb;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
struct ocfs2_super *osb = OCFS2_SB(sb);
int status = 0;
trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_id.type,
type, sb->s_id);
if (type != dquot->dq_id.type)
goto out;
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_ilock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
status = ocfs2_sync_dquot(dquot);
if (status < 0)
mlog_errno(status);
/* We have to write local structure as well... */
status = ocfs2_local_write_dquot(dquot);
if (status < 0)
mlog_errno(status);
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
return status;
}
static void qsync_work_fn(struct work_struct *work)
{
struct ocfs2_mem_dqinfo *oinfo = container_of(work,
struct ocfs2_mem_dqinfo,
dqi_sync_work.work);
struct super_block *sb = oinfo->dqi_gqinode->i_sb;
/*
* We have to be careful here not to deadlock on s_umount as umount
* disabling quotas may be in progress and it waits for this work to
* complete. If trylock fails, we'll do the sync next time...
*/
if (down_read_trylock(&sb->s_umount)) {
dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
up_read(&sb->s_umount);
}
schedule_delayed_work(&oinfo->dqi_sync_work,
msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
* Wrappers for generic quota functions
*/
static int ocfs2_write_dquot(struct dquot *dquot)
{
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_id.type);
handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out;
}
down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
status = ocfs2_local_write_dquot(dquot);
up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out:
return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
/*
* We modify tree, leaf block, global info, local chunk header,
* global and local inode; OCFS2_QINFO_WRITE_CREDITS already
* accounts for inode update
*/
return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
OCFS2_QINFO_WRITE_CREDITS +
OCFS2_INODE_UPDATE_CREDITS;
}
void ocfs2_drop_dquot_refs(struct work_struct *work)
{
struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
dquot_drop_work);
struct llist_node *list;
struct ocfs2_dquot *odquot, *next_odquot;
list = llist_del_all(&osb->dquot_drop_list);
llist_for_each_entry_safe(odquot, next_odquot, list, list) {
/* Drop the reference we acquired in ocfs2_dquot_release() */
dqput(&odquot->dq_dquot);
}
}
/*
 * Called when the last reference to a dquot is dropped. If we are called
 * from the downconvert thread, we cannot do all the handling here because
 * grabbing the quota lock could deadlock (the node holding the quota lock
 * could need some other cluster lock to proceed, but with a blocked
 * downconvert thread we cannot release any lock).
*/
static int ocfs2_release_dquot(struct dquot *dquot)
{
handle_t *handle;
struct ocfs2_mem_dqinfo *oinfo =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_id.type);
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
if (dquot_is_busy(dquot))
goto out;
/* Running from downconvert thread? Postpone quota processing to wq */
if (current == osb->dc_task) {
/*
* Grab our own reference to dquot and queue it for delayed
* dropping. Quota code rechecks after calling
* ->release_dquot() and won't free dquot structure.
*/
dqgrab(dquot);
/* First entry on list -> queue work */
if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
queue_work(osb->ocfs2_wq, &osb->dquot_drop_work);
goto out;
}
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
handle = ocfs2_start_trans(osb,
ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_ilock;
}
status = ocfs2_global_release_dquot(dquot);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
status = ocfs2_local_release_dquot(handle, dquot);
/*
* If we fail here, we cannot do much as global structure is
* already released. So just complain...
*/
if (status < 0)
mlog_errno(status);
/*
* Clear dq_off so that we search for the structure in quota file next
* time we acquire it. The structure might be deleted and reallocated
* elsewhere by another node while our dquot structure is on freelist.
*/
dquot->dq_off = 0;
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
mutex_unlock(&dquot->dq_lock);
if (status)
mlog_errno(status);
return status;
}
/*
* Read global dquot structure from disk or create it if it does
* not exist. Also update use count of the global structure and
* create structure in node-local quota file.
*/
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
int status = 0, err;
int ex = 0;
struct super_block *sb = dquot->dq_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
int type = dquot->dq_id.type;
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
struct inode *gqinode = info->dqi_gqinode;
int need_alloc = ocfs2_global_qinit_alloc(sb, type);
handle_t *handle;
trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
type);
mutex_lock(&dquot->dq_lock);
/*
	 * We need an exclusive lock, because we're going to update the use
	 * count and possibly instantiate a new dquot structure
*/
status = ocfs2_lock_global_qf(info, 1);
if (status < 0)
goto out;
status = ocfs2_qinfo_lock(info, 0);
if (status < 0)
goto out_dq;
/*
* We always want to read dquot structure from disk because we don't
* know what happened with it while it was on freelist.
*/
status = qtree_read_dquot(&info->dqi_gi, dquot);
ocfs2_qinfo_unlock(info, 0);
if (status < 0)
goto out_dq;
OCFS2_DQUOT(dquot)->dq_use_count++;
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
if (!dquot->dq_off) { /* No real quota entry? */
ex = 1;
/*
* Add blocks to quota file before we start a transaction since
* locking allocators ranks above a transaction start
*/
WARN_ON(journal_current_handle());
status = ocfs2_extend_no_holes(gqinode, NULL,
i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
i_size_read(gqinode));
if (status < 0)
goto out_dq;
}
handle = ocfs2_start_trans(osb,
ocfs2_calc_global_qinit_credits(sb, type));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
goto out_dq;
}
status = ocfs2_qinfo_lock(info, ex);
if (status < 0)
goto out_trans;
status = qtree_write_dquot(&info->dqi_gi, dquot);
if (ex && info_dirty(sb_dqinfo(sb, type))) {
err = __ocfs2_global_write_info(sb, type);
if (!status)
status = err;
}
ocfs2_qinfo_unlock(info, ex);
out_trans:
ocfs2_commit_trans(osb, handle);
out_dq:
ocfs2_unlock_global_qf(info, 1);
if (status < 0)
goto out;
status = ocfs2_create_local_dquot(dquot);
if (status < 0)
goto out;
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
mutex_unlock(&dquot->dq_lock);
if (status)
mlog_errno(status);
return status;
}
static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
{
int type = qid->type;
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
int status = 0;
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
if (!sb_has_quota_loaded(sb, type)) {
status = -ESRCH;
goto out;
}
status = ocfs2_lock_global_qf(info, 0);
if (status < 0)
goto out;
status = ocfs2_qinfo_lock(info, 0);
if (status < 0)
goto out_global;
status = qtree_get_next_id(&info->dqi_gi, qid);
ocfs2_qinfo_unlock(info, 0);
out_global:
ocfs2_unlock_global_qf(info, 0);
out:
/*
	 * Avoid logging ENOENT, which just means there is no next ID, and
	 * ESRCH, which means quota isn't enabled for the filesystem.
*/
if (status && status != -ENOENT && status != -ESRCH)
mlog_errno(status);
return status;
}
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
(1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
(1 << (DQ_LASTSET_B + QIF_INODES_B)) |
(1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
(1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
(1 << (DQ_LASTSET_B + QIF_ITIME_B));
int sync = 0;
int status;
struct super_block *sb = dquot->dq_sb;
int type = dquot->dq_id.type;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(sb);
trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
type);
	/* In case the user set some limits, sync the dquot immediately to
	 * the global quota file so that the information propagates more
	 * quickly */
spin_lock(&dquot->dq_dqb_lock);
if (dquot->dq_flags & mask)
sync = 1;
spin_unlock(&dquot->dq_dqb_lock);
	/* This is a slight hack, but we can't afford to take the global
	 * quota lock if we already have a transaction started. */
if (!sync || journal_current_handle()) {
status = ocfs2_write_dquot(dquot);
goto out;
}
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_ilock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
status = ocfs2_sync_dquot(dquot);
if (status < 0) {
mlog_errno(status);
goto out_dlock;
}
/* Now write updated local dquot structure */
status = ocfs2_local_write_dquot(dquot);
out_dlock:
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
if (status)
mlog_errno(status);
return status;
}
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
handle_t *handle;
int status = 0;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_ilock;
}
status = dquot_commit_info(sb, type);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
if (status)
mlog_errno(status);
return status;
}
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
struct ocfs2_dquot *dquot =
kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);
if (!dquot)
return NULL;
return &dquot->dq_dquot;
}
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
const struct dquot_operations ocfs2_quota_operations = {
/* We never make dquot dirty so .write_dquot is never called */
.acquire_dquot = ocfs2_acquire_dquot,
.release_dquot = ocfs2_release_dquot,
.mark_dirty = ocfs2_mark_dquot_dirty,
.write_info = ocfs2_write_info,
.alloc_dquot = ocfs2_alloc_dquot,
.destroy_dquot = ocfs2_destroy_dquot,
.get_next_id = ocfs2_get_next_id,
};
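/*
 * Usage note (a sketch; the actual hookup lives elsewhere in ocfs2):
 * at mount time the table above is wired into the VFS quota core,
 * roughly as follows.
 */
#if 0
	sb->dq_op = &ocfs2_quota_operations;
	sb->s_qcop = &dquot_quotactl_sysfile_ops;
#endif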
| linux-master | fs/ocfs2/quota_global.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* export.c
*
* Functions to facilitate NFS exporting
*
* Copyright (C) 2002, 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "dlmglue.h"
#include "dcache.h"
#include "export.h"
#include "inode.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "ocfs2_trace.h"
struct ocfs2_inode_handle
{
u64 ih_blkno;
u32 ih_generation;
};
static struct dentry *ocfs2_get_dentry(struct super_block *sb,
struct ocfs2_inode_handle *handle)
{
struct inode *inode;
struct ocfs2_super *osb = OCFS2_SB(sb);
u64 blkno = handle->ih_blkno;
int status, set;
struct dentry *result;
trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno);
if (blkno == 0) {
result = ERR_PTR(-ESTALE);
goto bail;
}
inode = ocfs2_ilookup(sb, blkno);
/*
 * If the inode exists in memory, we only need to check its
 * generation number.
 */
if (inode)
goto check_gen;
/*
* This will synchronize us against ocfs2_delete_inode() on
* all nodes
*/
status = ocfs2_nfs_sync_lock(osb, 1);
if (status < 0) {
mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
goto check_err;
}
status = ocfs2_test_inode_bit(osb, blkno, &set);
if (status < 0) {
if (status == -EINVAL) {
/*
 * The blkno NFS gave us doesn't even show up
 * as an inode, so we return -ESTALE to be
 * nice.
 */
status = -ESTALE;
} else
mlog(ML_ERROR, "test inode bit failed %d\n", status);
goto unlock_nfs_sync;
}
trace_ocfs2_get_dentry_test_bit(status, set);
/* If the inode allocator bit is clear, this inode must be stale */
if (!set) {
status = -ESTALE;
goto unlock_nfs_sync;
}
inode = ocfs2_iget(osb, blkno, 0, 0);
unlock_nfs_sync:
ocfs2_nfs_sync_unlock(osb, 1);
check_err:
if (status < 0) {
if (status == -ESTALE) {
trace_ocfs2_get_dentry_stale((unsigned long long)blkno,
handle->ih_generation);
}
result = ERR_PTR(status);
goto bail;
}
if (IS_ERR(inode)) {
mlog_errno(PTR_ERR(inode));
result = ERR_CAST(inode);
goto bail;
}
check_gen:
if (handle->ih_generation != inode->i_generation) {
trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
handle->ih_generation,
inode->i_generation);
iput(inode);
result = ERR_PTR(-ESTALE);
goto bail;
}
result = d_obtain_alias(inode);
if (IS_ERR(result))
mlog_errno(PTR_ERR(result));
bail:
trace_ocfs2_get_dentry_end(result);
return result;
}
static struct dentry *ocfs2_get_parent(struct dentry *child)
{
int status;
u64 blkno;
struct dentry *parent;
struct inode *dir = d_inode(child);
int set;
trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno);
status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
if (status < 0) {
mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
parent = ERR_PTR(status);
goto bail;
}
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
parent = ERR_PTR(status);
goto unlock_nfs_sync;
}
status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
if (status < 0) {
parent = ERR_PTR(-ENOENT);
goto bail_unlock;
}
status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
if (status < 0) {
if (status == -EINVAL) {
status = -ESTALE;
} else
mlog(ML_ERROR, "test inode bit failed %d\n", status);
parent = ERR_PTR(status);
goto bail_unlock;
}
trace_ocfs2_get_dentry_test_bit(status, set);
if (!set) {
status = -ESTALE;
parent = ERR_PTR(status);
goto bail_unlock;
}
parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
bail_unlock:
ocfs2_inode_unlock(dir, 0);
unlock_nfs_sync:
ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
bail:
trace_ocfs2_get_parent_end(parent);
return parent;
}
static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
struct inode *parent)
{
int len = *max_len;
int type = 1;
u64 blkno;
u32 generation;
__le32 *fh = (__force __le32 *) fh_in;
#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
#error "You go ahead and fix that mess, then. Somehow"
trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
dentry->d_name.name,
fh, len, connectable);
#endif
if (parent && (len < 6)) {
*max_len = 6;
type = FILEID_INVALID;
goto bail;
} else if (len < 3) {
*max_len = 3;
type = FILEID_INVALID;
goto bail;
}
blkno = OCFS2_I(inode)->ip_blkno;
generation = inode->i_generation;
trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation);
len = 3;
fh[0] = cpu_to_le32((u32)(blkno >> 32));
fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
fh[2] = cpu_to_le32(generation);
if (parent) {
blkno = OCFS2_I(parent)->ip_blkno;
generation = parent->i_generation;
fh[3] = cpu_to_le32((u32)(blkno >> 32));
fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
fh[5] = cpu_to_le32(generation);
len = 6;
type = 2;
trace_ocfs2_encode_fh_parent((unsigned long long)blkno,
generation);
}
*max_len = len;
bail:
trace_ocfs2_encode_fh_type(type);
return type;
}
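/*
 * Resulting file handle layout (derived from the code above):
 *
 *   fh[0]  inode blkno, high 32 bits    \
 *   fh[1]  inode blkno, low 32 bits      > type 1 (3 words)
 *   fh[2]  inode generation             /
 *   fh[3]  parent blkno, high 32 bits   \
 *   fh[4]  parent blkno, low 32 bits     > type 2 adds these (6 words)
 *   fh[5]  parent generation            /
 */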
static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct ocfs2_inode_handle handle;
if (fh_len < 3 || fh_type > 2)
return NULL;
handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
handle.ih_generation = le32_to_cpu(fid->raw[2]);
return ocfs2_get_dentry(sb, &handle);
}
static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct ocfs2_inode_handle parent;
if (fh_type != 2 || fh_len < 6)
return NULL;
parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
parent.ih_generation = le32_to_cpu(fid->raw[5]);
return ocfs2_get_dentry(sb, &parent);
}
const struct export_operations ocfs2_export_ops = {
.encode_fh = ocfs2_encode_fh,
.fh_to_dentry = ocfs2_fh_to_dentry,
.fh_to_parent = ocfs2_fh_to_parent,
.get_parent = ocfs2_get_parent,
};
| linux-master | fs/ocfs2/export.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* stackglue.c
*
* Code which implements an OCFS2 specific interface to underlying
* cluster stacks.
*
* Copyright (C) 2007, 2009 Oracle. All rights reserved.
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/sysctl.h>
#include "ocfs2_fs.h"
#include "stackglue.h"
#define OCFS2_STACK_PLUGIN_O2CB "o2cb"
#define OCFS2_STACK_PLUGIN_USER "user"
#define OCFS2_MAX_HB_CTL_PATH 256
static struct ocfs2_protocol_version locking_max_version;
static DEFINE_SPINLOCK(ocfs2_stack_lock);
static LIST_HEAD(ocfs2_stack_list);
static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
/*
* The stack currently in use. If not null, active_stack->sp_count > 0,
* the module is pinned, and the locking protocol cannot be changed.
*/
static struct ocfs2_stack_plugin *active_stack;
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
assert_spin_locked(&ocfs2_stack_lock);
list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
if (!strcmp(p->sp_name, name))
return p;
}
return NULL;
}
static int ocfs2_stack_driver_request(const char *stack_name,
const char *plugin_name)
{
int rc;
struct ocfs2_stack_plugin *p;
spin_lock(&ocfs2_stack_lock);
/*
* If the stack passed by the filesystem isn't the selected one,
* we can't continue.
*/
if (strcmp(stack_name, cluster_stack_name)) {
rc = -EBUSY;
goto out;
}
if (active_stack) {
/*
* If the active stack isn't the one we want, it cannot
* be selected right now.
*/
if (!strcmp(active_stack->sp_name, plugin_name))
rc = 0;
else
rc = -EBUSY;
goto out;
}
p = ocfs2_stack_lookup(plugin_name);
if (!p || !try_module_get(p->sp_owner)) {
rc = -ENOENT;
goto out;
}
active_stack = p;
rc = 0;
out:
/* If we found it, pin it */
if (!rc)
active_stack->sp_count++;
spin_unlock(&ocfs2_stack_lock);
return rc;
}
/*
* This function looks up the appropriate stack and makes it active. If
* there is no stack, it tries to load it. It will fail if the stack still
* cannot be found. It will also fail if a different stack is in use.
*/
static int ocfs2_stack_driver_get(const char *stack_name)
{
int rc;
char *plugin_name = OCFS2_STACK_PLUGIN_O2CB;
/*
 * The classic stack does not pass in a stack name. This also keeps
 * compatibility with older tools.
 */
if (!stack_name || !*stack_name)
stack_name = OCFS2_STACK_PLUGIN_O2CB;
if (strlen(stack_name) != OCFS2_STACK_LABEL_LEN) {
printk(KERN_ERR
"ocfs2 passed an invalid cluster stack label: \"%s\"\n",
stack_name);
return -EINVAL;
}
/* Anything that isn't the classic stack is a user stack */
if (strcmp(stack_name, OCFS2_STACK_PLUGIN_O2CB))
plugin_name = OCFS2_STACK_PLUGIN_USER;
rc = ocfs2_stack_driver_request(stack_name, plugin_name);
if (rc == -ENOENT) {
request_module("ocfs2_stack_%s", plugin_name);
rc = ocfs2_stack_driver_request(stack_name, plugin_name);
}
if (rc == -ENOENT) {
printk(KERN_ERR
"ocfs2: Cluster stack driver \"%s\" cannot be found\n",
plugin_name);
} else if (rc == -EBUSY) {
printk(KERN_ERR
"ocfs2: A different cluster stack is in use\n");
}
return rc;
}
static void ocfs2_stack_driver_put(void)
{
spin_lock(&ocfs2_stack_lock);
BUG_ON(active_stack == NULL);
BUG_ON(active_stack->sp_count == 0);
active_stack->sp_count--;
if (!active_stack->sp_count) {
module_put(active_stack->sp_owner);
active_stack = NULL;
}
spin_unlock(&ocfs2_stack_lock);
}
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin)
{
int rc;
spin_lock(&ocfs2_stack_lock);
if (!ocfs2_stack_lookup(plugin->sp_name)) {
plugin->sp_count = 0;
plugin->sp_max_proto = locking_max_version;
list_add(&plugin->sp_list, &ocfs2_stack_list);
printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
plugin->sp_name);
rc = 0;
} else {
printk(KERN_ERR "ocfs2: Stack \"%s\" already registered\n",
plugin->sp_name);
rc = -EEXIST;
}
spin_unlock(&ocfs2_stack_lock);
return rc;
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_register);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin)
{
struct ocfs2_stack_plugin *p;
spin_lock(&ocfs2_stack_lock);
p = ocfs2_stack_lookup(plugin->sp_name);
if (p) {
BUG_ON(p != plugin);
BUG_ON(plugin == active_stack);
BUG_ON(plugin->sp_count != 0);
list_del_init(&plugin->sp_list);
printk(KERN_INFO "ocfs2: Unregistered cluster interface %s\n",
plugin->sp_name);
} else {
printk(KERN_ERR "Stack \"%s\" is not registered\n",
plugin->sp_name);
}
spin_unlock(&ocfs2_stack_lock);
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto)
{
struct ocfs2_stack_plugin *p;
spin_lock(&ocfs2_stack_lock);
if (memcmp(max_proto, &locking_max_version,
sizeof(struct ocfs2_protocol_version))) {
BUG_ON(locking_max_version.pv_major != 0);
locking_max_version = *max_proto;
list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
p->sp_max_proto = locking_max_version;
}
}
spin_unlock(&ocfs2_stack_lock);
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version);
/*
* The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument
* for the ast and bast functions. They will pass the lksb to the ast
* and bast. The caller can wrap the lksb with their own structure to
* get more information.
*/
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
unsigned int namelen)
{
if (!lksb->lksb_conn)
lksb->lksb_conn = conn;
else
BUG_ON(lksb->lksb_conn != conn);
return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
name, namelen);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
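/*
 * Illustrative sketch (an assumption, not from this file): as the
 * comment above says, a caller embeds the lksb in its own structure and
 * recovers it in the ast with container_of(). All example_* names are
 * hypothetical.
 */
#if 0
struct example_lock {
	struct ocfs2_dlm_lksb el_lksb;
	int el_granted;
};

static void example_lock_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct example_lock *el =
		container_of(lksb, struct example_lock, el_lksb);

	/* Mark the lock granted if the stack reports success. */
	if (!ocfs2_dlm_lock_status(lksb))
		el->el_granted = 1;
}
#endif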
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
struct ocfs2_dlm_lksb *lksb,
u32 flags)
{
BUG_ON(lksb->lksb_conn == NULL);
return active_stack->sp_ops->dlm_unlock(conn, lksb, flags);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_status(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lvb_valid(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_lvb(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
active_stack->sp_ops->dump_lksb(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_dump_lksb);
int ocfs2_stack_supports_plocks(void)
{
return active_stack && active_stack->sp_ops->plock;
}
EXPORT_SYMBOL_GPL(ocfs2_stack_supports_plocks);
/*
* ocfs2_plock() can only be safely called if
* ocfs2_stack_supports_plocks() returned true
*/
int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino,
struct file *file, int cmd, struct file_lock *fl)
{
WARN_ON_ONCE(active_stack->sp_ops->plock == NULL);
if (active_stack->sp_ops->plock)
return active_stack->sp_ops->plock(conn, ino, file, cmd, fl);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(ocfs2_plock);
int ocfs2_cluster_connect(const char *stack_name,
const char *cluster_name,
int cluster_name_len,
const char *group,
int grouplen,
struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
struct ocfs2_cluster_connection **conn)
{
int rc = 0;
struct ocfs2_cluster_connection *new_conn;
BUG_ON(group == NULL);
BUG_ON(conn == NULL);
BUG_ON(recovery_handler == NULL);
if (grouplen > GROUP_NAME_MAX) {
rc = -EINVAL;
goto out;
}
if (memcmp(&lproto->lp_max_version, &locking_max_version,
sizeof(struct ocfs2_protocol_version))) {
rc = -EINVAL;
goto out;
}
new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
GFP_KERNEL);
if (!new_conn) {
rc = -ENOMEM;
goto out;
}
strscpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
new_conn->cc_namelen = grouplen;
if (cluster_name_len)
strscpy(new_conn->cc_cluster_name, cluster_name,
CLUSTER_NAME_MAX + 1);
new_conn->cc_cluster_name_len = cluster_name_len;
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
new_conn->cc_proto = lproto;
/* Start the new connection at our maximum compatibility level */
new_conn->cc_version = lproto->lp_max_version;
/* This will pin the stack driver if successful */
rc = ocfs2_stack_driver_get(stack_name);
if (rc)
goto out_free;
rc = active_stack->sp_ops->connect(new_conn);
if (rc) {
ocfs2_stack_driver_put();
goto out_free;
}
*conn = new_conn;
out_free:
if (rc)
kfree(new_conn);
out:
return rc;
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
/* The caller will ensure all nodes have the same cluster stack */
int ocfs2_cluster_connect_agnostic(const char *group,
int grouplen,
struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
struct ocfs2_cluster_connection **conn)
{
char *stack_name = NULL;
if (cluster_stack_name[0])
stack_name = cluster_stack_name;
return ocfs2_cluster_connect(stack_name, NULL, 0, group, grouplen,
lproto, recovery_handler, recovery_data,
conn);
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic);
/* If hangup_pending is 0, the stack driver will be dropped */
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending)
{
int ret;
BUG_ON(conn == NULL);
ret = active_stack->sp_ops->disconnect(conn);
/* XXX Should we free it anyway? */
if (!ret) {
kfree(conn);
if (!hangup_pending)
ocfs2_stack_driver_put();
}
return ret;
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect);
/*
* Leave the group for this filesystem. This is executed by a userspace
* program (stored in ocfs2_hb_ctl_path).
*/
static void ocfs2_leave_group(const char *group)
{
int ret;
char *argv[5], *envp[3];
argv[0] = ocfs2_hb_ctl_path;
argv[1] = "-K";
argv[2] = "-u";
argv[3] = (char *)group;
argv[4] = NULL;
/* minimal command environment taken from cpu_run_sbin_hotplug */
envp[0] = "HOME=/";
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (ret < 0) {
printk(KERN_ERR
"ocfs2: Error %d running user helper "
"\"%s %s %s %s\"\n",
ret, argv[0], argv[1], argv[2], argv[3]);
}
}
/*
 * Hangup is a required post-umount step. ocfs2-tools expects the
 * filesystem to call "ocfs2_hb_ctl" during unmount. This happens
 * regardless of whether the DLM got started, so we can't do it
 * in ocfs2_cluster_disconnect(). The ocfs2_leave_group() function does
 * the actual work.
 */
void ocfs2_cluster_hangup(const char *group, int grouplen)
{
BUG_ON(group == NULL);
BUG_ON(group[grouplen] != '\0');
ocfs2_leave_group(group);
/* cluster_disconnect() was called with hangup_pending==1 */
ocfs2_stack_driver_put();
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_hangup);
int ocfs2_cluster_this_node(struct ocfs2_cluster_connection *conn,
unsigned int *node)
{
return active_stack->sp_ops->this_node(conn, node);
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_this_node);
/*
* Sysfs bits
*/
static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
ssize_t ret = 0;
spin_lock(&ocfs2_stack_lock);
if (locking_max_version.pv_major)
ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
locking_max_version.pv_major,
locking_max_version.pv_minor);
spin_unlock(&ocfs2_stack_lock);
return ret;
}
static struct kobj_attribute ocfs2_attr_max_locking_protocol =
__ATTR(max_locking_protocol, S_IRUGO,
ocfs2_max_locking_protocol_show, NULL);
static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
struct ocfs2_stack_plugin *p;
spin_lock(&ocfs2_stack_lock);
list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
ret = snprintf(buf, remain, "%s\n",
p->sp_name);
if (ret >= remain) {
/* snprintf() didn't fit */
total = -E2BIG;
break;
}
total += ret;
remain -= ret;
}
spin_unlock(&ocfs2_stack_lock);
return total;
}
static struct kobj_attribute ocfs2_attr_loaded_cluster_plugins =
__ATTR(loaded_cluster_plugins, S_IRUGO,
ocfs2_loaded_cluster_plugins_show, NULL);
static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
ssize_t ret = 0;
spin_lock(&ocfs2_stack_lock);
if (active_stack) {
ret = snprintf(buf, PAGE_SIZE, "%s\n",
active_stack->sp_name);
if (ret >= PAGE_SIZE)
ret = -E2BIG;
}
spin_unlock(&ocfs2_stack_lock);
return ret;
}
static struct kobj_attribute ocfs2_attr_active_cluster_plugin =
__ATTR(active_cluster_plugin, S_IRUGO,
ocfs2_active_cluster_plugin_show, NULL);
static ssize_t ocfs2_cluster_stack_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
ssize_t ret;
spin_lock(&ocfs2_stack_lock);
ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name);
spin_unlock(&ocfs2_stack_lock);
return ret;
}
static ssize_t ocfs2_cluster_stack_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
size_t len = count;
ssize_t ret;
if (len == 0)
return len;
if (buf[len - 1] == '\n')
len--;
if ((len != OCFS2_STACK_LABEL_LEN) ||
(strnlen(buf, len) != len))
return -EINVAL;
spin_lock(&ocfs2_stack_lock);
if (active_stack) {
if (!strncmp(buf, cluster_stack_name, len))
ret = count;
else
ret = -EBUSY;
} else {
memcpy(cluster_stack_name, buf, len);
ret = count;
}
spin_unlock(&ocfs2_stack_lock);
return ret;
}
static struct kobj_attribute ocfs2_attr_cluster_stack =
__ATTR(cluster_stack, S_IRUGO | S_IWUSR,
ocfs2_cluster_stack_show,
ocfs2_cluster_stack_store);
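/*
 * Usage note (assumed admin flow, consistent with the store handler
 * above): ocfs2-tools selects the stack before the first mount with e.g.
 *
 *     echo "o2cb" > /sys/fs/ocfs2/cluster_stack
 *
 * The label must be exactly OCFS2_STACK_LABEL_LEN characters; a trailing
 * newline is stripped, and the write fails with -EBUSY if a different
 * stack is already active.
 */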
static ssize_t ocfs2_dlm_recover_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "1\n");
}
static struct kobj_attribute ocfs2_attr_dlm_recover_support =
__ATTR(dlm_recover_callback_support, S_IRUGO,
ocfs2_dlm_recover_show, NULL);
static struct attribute *ocfs2_attrs[] = {
&ocfs2_attr_max_locking_protocol.attr,
&ocfs2_attr_loaded_cluster_plugins.attr,
&ocfs2_attr_active_cluster_plugin.attr,
&ocfs2_attr_cluster_stack.attr,
&ocfs2_attr_dlm_recover_support.attr,
NULL,
};
static const struct attribute_group ocfs2_attr_group = {
.attrs = ocfs2_attrs,
};
struct kset *ocfs2_kset;
EXPORT_SYMBOL_GPL(ocfs2_kset);
static void ocfs2_sysfs_exit(void)
{
kset_unregister(ocfs2_kset);
}
static int ocfs2_sysfs_init(void)
{
int ret;
ocfs2_kset = kset_create_and_add("ocfs2", NULL, fs_kobj);
if (!ocfs2_kset)
return -ENOMEM;
ret = sysfs_create_group(&ocfs2_kset->kobj, &ocfs2_attr_group);
if (ret)
goto error;
return 0;
error:
kset_unregister(ocfs2_kset);
return ret;
}
/*
* Sysctl bits
*
* The sysctl lives at /proc/sys/fs/ocfs2/nm/hb_ctl_path. The 'nm' doesn't
* make as much sense in a multiple cluster stack world, but it's safer
* and easier to preserve the name.
*/
static struct ctl_table ocfs2_nm_table[] = {
{
.procname = "hb_ctl_path",
.data = ocfs2_hb_ctl_path,
.maxlen = OCFS2_MAX_HB_CTL_PATH,
.mode = 0644,
.proc_handler = proc_dostring,
},
{ }
};
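/*
 * Usage note (derived from the table above): the helper path can be
 * changed at runtime with e.g.
 *
 *     echo /usr/sbin/ocfs2_hb_ctl > /proc/sys/fs/ocfs2/nm/hb_ctl_path
 */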
static struct ctl_table_header *ocfs2_table_header;
/*
* Initialization
*/
static int __init ocfs2_stack_glue_init(void)
{
int ret;
strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
if (!ocfs2_table_header) {
printk(KERN_ERR
"ocfs2 stack glue: unable to register sysctl\n");
return -ENOMEM; /* or something. */
}
ret = ocfs2_sysfs_init();
if (ret)
unregister_sysctl_table(ocfs2_table_header);
return ret;
}
static void __exit ocfs2_stack_glue_exit(void)
{
memset(&locking_max_version, 0,
sizeof(struct ocfs2_protocol_version));
ocfs2_sysfs_exit();
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
}
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("ocfs2 cluster stack glue layer");
MODULE_LICENSE("GPL");
module_init(ocfs2_stack_glue_init);
module_exit(ocfs2_stack_glue_exit);
| linux-master | fs/ocfs2/stackglue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* acl.c
*
* Copyright (C) 2004, 2008 Oracle. All rights reserved.
*
* CREDITS:
* Lots of code in this file is copied from linux/fs/ext3/acl.c.
* Copyright (C) 2001-2003 Andreas Gruenbacher, <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "xattr.h"
#include "acl.h"
/*
* Convert from xattr value to acl struct.
*/
static struct posix_acl *ocfs2_acl_from_xattr(const void *value, size_t size)
{
int n, count;
struct posix_acl *acl;
if (!value)
return NULL;
if (size < sizeof(struct posix_acl_entry))
return ERR_PTR(-EINVAL);
count = size / sizeof(struct posix_acl_entry);
acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
for (n = 0; n < count; n++) {
struct ocfs2_acl_entry *entry =
(struct ocfs2_acl_entry *)value;
acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag);
acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
switch(acl->a_entries[n].e_tag) {
case ACL_USER:
acl->a_entries[n].e_uid =
make_kuid(&init_user_ns,
le32_to_cpu(entry->e_id));
break;
case ACL_GROUP:
acl->a_entries[n].e_gid =
make_kgid(&init_user_ns,
le32_to_cpu(entry->e_id));
break;
default:
break;
}
value += sizeof(struct posix_acl_entry);
}
return acl;
}
/*
* Convert acl struct to xattr value.
*/
static void *ocfs2_acl_to_xattr(const struct posix_acl *acl, size_t *size)
{
struct ocfs2_acl_entry *entry = NULL;
char *ocfs2_acl;
size_t n;
*size = acl->a_count * sizeof(struct posix_acl_entry);
ocfs2_acl = kmalloc(*size, GFP_NOFS);
if (!ocfs2_acl)
return ERR_PTR(-ENOMEM);
entry = (struct ocfs2_acl_entry *)ocfs2_acl;
for (n = 0; n < acl->a_count; n++, entry++) {
entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag);
entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
switch(acl->a_entries[n].e_tag) {
case ACL_USER:
entry->e_id = cpu_to_le32(
from_kuid(&init_user_ns,
acl->a_entries[n].e_uid));
break;
case ACL_GROUP:
entry->e_id = cpu_to_le32(
from_kgid(&init_user_ns,
acl->a_entries[n].e_gid));
break;
default:
entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
break;
}
}
return ocfs2_acl;
}
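/*
 * Illustrative sketch (not part of the original file): round-tripping an
 * ACL through the two converters above.
 */
#if 0
	size_t size;
	void *value = ocfs2_acl_to_xattr(acl, &size);
	struct posix_acl *copy;

	if (!IS_ERR(value)) {
		copy = ocfs2_acl_from_xattr(value, size);
		kfree(value);
	}
#endif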
static struct posix_acl *ocfs2_get_acl_nolock(struct inode *inode,
int type,
struct buffer_head *di_bh)
{
int name_index;
char *value = NULL;
struct posix_acl *acl;
int retval;
switch (type) {
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
break;
default:
return ERR_PTR(-EINVAL);
}
retval = ocfs2_xattr_get_nolock(inode, di_bh, name_index, "", NULL, 0);
if (retval > 0) {
value = kmalloc(retval, GFP_NOFS);
if (!value)
return ERR_PTR(-ENOMEM);
retval = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
"", value, retval);
}
if (retval > 0)
acl = ocfs2_acl_from_xattr(value, retval);
else if (retval == -ENODATA || retval == 0)
acl = NULL;
else
acl = ERR_PTR(retval);
kfree(value);
return acl;
}
/*
 * Helper function to set i_mode in memory and on disk. Some call paths
 * will not have di_bh or a journal handle to pass, in which case it
 * will create its own.
 */
static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
handle_t *handle, umode_t new_mode)
{
int ret, commit_handle = 0;
struct ocfs2_dinode *di;
if (di_bh == NULL) {
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
} else
get_bh(di_bh);
if (handle == NULL) {
handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_brelse;
}
commit_handle = 1;
}
di = (struct ocfs2_dinode *)di_bh->b_data;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
inode->i_mode = new_mode;
inode_set_ctime_current(inode);
di->i_mode = cpu_to_le16(inode->i_mode);
di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
out_commit:
if (commit_handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_brelse:
brelse(di_bh);
out:
return ret;
}
/*
* Set the access or default ACL of an inode.
*/
static int ocfs2_set_acl(handle_t *handle,
struct inode *inode,
struct buffer_head *di_bh,
int type,
struct posix_acl *acl,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac)
{
int name_index;
void *value = NULL;
size_t size = 0;
int ret;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
switch (type) {
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
break;
default:
return -EINVAL;
}
if (acl) {
value = ocfs2_acl_to_xattr(acl, &size);
if (IS_ERR(value))
return (int)PTR_ERR(value);
}
if (handle)
ret = ocfs2_xattr_set_handle(handle, inode, di_bh, name_index,
"", value, size, 0,
meta_ac, data_ac);
else
ret = ocfs2_xattr_set(inode, name_index, "", value, size, 0);
kfree(value);
if (!ret)
set_cached_acl(inode, type, acl);
return ret;
}
int ocfs2_iop_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
struct buffer_head *bh = NULL;
int status, had_lock;
struct ocfs2_lock_holder oh;
struct inode *inode = d_inode(dentry);
had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
if (had_lock < 0)
return had_lock;
if (type == ACL_TYPE_ACCESS && acl) {
umode_t mode;
status = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode,
&acl);
if (status)
goto unlock;
status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
if (status)
goto unlock;
}
status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
unlock:
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
brelse(bh);
return status;
}
struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type, bool rcu)
{
struct ocfs2_super *osb;
struct buffer_head *di_bh = NULL;
struct posix_acl *acl;
int had_lock;
struct ocfs2_lock_holder oh;
if (rcu)
return ERR_PTR(-ECHILD);
osb = OCFS2_SB(inode->i_sb);
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return NULL;
had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
if (had_lock < 0)
return ERR_PTR(had_lock);
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, type, di_bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
brelse(di_bh);
return acl;
}
int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl;
int ret;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return 0;
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
if (IS_ERR_OR_NULL(acl))
return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
acl, NULL, NULL);
posix_acl_release(acl);
return ret;
}
/*
 * Initialize the ACLs of a new inode. If the parent directory has a
 * default ACL, clone it to the new inode. Called from ocfs2_mknod.
 */
int ocfs2_init_acl(handle_t *handle,
struct inode *inode,
struct inode *dir,
struct buffer_head *di_bh,
struct buffer_head *dir_bh,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0, ret2;
umode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
down_read(&OCFS2_I(dir)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
dir_bh);
up_read(&OCFS2_I(dir)->ip_xattr_sem);
if (IS_ERR(acl))
return PTR_ERR(acl);
}
if (!acl) {
mode = inode->i_mode & ~current_umask();
ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret) {
mlog_errno(ret);
goto cleanup;
}
}
}
if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
if (S_ISDIR(inode->i_mode)) {
ret = ocfs2_set_acl(handle, inode, di_bh,
ACL_TYPE_DEFAULT, acl,
meta_ac, data_ac);
if (ret)
goto cleanup;
}
mode = inode->i_mode;
ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
if (ret < 0)
return ret;
ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret2) {
mlog_errno(ret2);
ret = ret2;
goto cleanup;
}
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
acl, meta_ac, data_ac);
}
}
cleanup:
posix_acl_release(acl);
return ret;
}
| linux-master | fs/ocfs2/acl.c |
/*
* linux/cluster/ssi/cfs/symlink.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE
* or NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Questions/Comments/Bugfixes to [email protected]
*
* Copyright (C) 1992 Rick Sladkey
*
* Optimization changes Copyright (C) 1994 Florian La Roche
*
* Jun 7 1999, cache symlink lookups in the page cache. -DaveM
*
* Portions Copyright (C) 2001 Compaq Computer Corporation
*
* ocfs2 symlink handling code.
*
* Copyright (C) 2004, 2005 Oracle.
*
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "symlink.h"
#include "xattr.h"
#include "buffer_head_io.h"
static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
int status = ocfs2_read_inode_block(inode, &bh);
struct ocfs2_dinode *fe;
const char *link;
void *kaddr;
size_t len;
if (status < 0) {
mlog_errno(status);
return status;
}
fe = (struct ocfs2_dinode *) bh->b_data;
link = (char *) fe->id2.i_symlink;
/* will be less than a page size */
len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
kaddr = kmap_atomic(page);
memcpy(kaddr, link, len + 1);
kunmap_atomic(kaddr);
SetPageUptodate(page);
unlock_page(page);
brelse(bh);
return 0;
}
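/*
 * Note (an assumption based on ocfs2 conventions, not stated in this
 * file): only "fast" symlinks, whose target fits inline in the inode
 * block (fe->id2.i_symlink), use this read_folio; longer targets are
 * stored in regular data extents and go through the normal aops.
 */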
const struct address_space_operations ocfs2_fast_symlink_aops = {
.read_folio = ocfs2_fast_symlink_read_folio,
};
const struct inode_operations ocfs2_symlink_inode_operations = {
.get_link = page_get_link,
.getattr = ocfs2_getattr,
.setattr = ocfs2_setattr,
.listxattr = ocfs2_listxattr,
.fiemap = ocfs2_fiemap,
};
| linux-master | fs/ocfs2/symlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* stack_o2cb.c
*
* Code which interfaces ocfs2 with the o2cb stack.
*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/module.h>
/* Needed for AOP_TRUNCATED_PAGE in mlog_errno() */
#include <linux/fs.h>
#include "cluster/masklog.h"
#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
#include "cluster/tcp.h"
#include "stackglue.h"
struct o2dlm_private {
struct dlm_eviction_cb op_eviction_cb;
};
static struct ocfs2_stack_plugin o2cb_stack;
/* These should be identical */
#if (DLM_LOCK_IV != LKM_IVMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_NL != LKM_NLMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_CR != LKM_CRMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_CW != LKM_CWMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_PR != LKM_PRMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_PW != LKM_PWMODE)
# error Lock modes do not match
#endif
#if (DLM_LOCK_EX != LKM_EXMODE)
# error Lock modes do not match
#endif
static inline int mode_to_o2dlm(int mode)
{
BUG_ON(mode > LKM_MAXMODE);
return mode;
}
static int flags_to_o2dlm(u32 flags)
{
int o2dlm_flags = 0;
if (flags & DLM_LKF_NOQUEUE)
o2dlm_flags |= LKM_NOQUEUE;
if (flags & DLM_LKF_CANCEL)
o2dlm_flags |= LKM_CANCEL;
if (flags & DLM_LKF_CONVERT)
o2dlm_flags |= LKM_CONVERT;
if (flags & DLM_LKF_VALBLK)
o2dlm_flags |= LKM_VALBLK;
if (flags & DLM_LKF_IVVALBLK)
o2dlm_flags |= LKM_INVVALBLK;
if (flags & DLM_LKF_ORPHAN)
o2dlm_flags |= LKM_ORPHAN;
if (flags & DLM_LKF_FORCEUNLOCK)
o2dlm_flags |= LKM_FORCE;
if (flags & DLM_LKF_TIMEOUT)
o2dlm_flags |= LKM_TIMEOUT;
if (flags & DLM_LKF_LOCAL)
o2dlm_flags |= LKM_LOCAL;
return o2dlm_flags;
}
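/*
 * Worked example (illustrative): a non-blocking lock conversion request
 * translates as
 *
 *     flags_to_o2dlm(DLM_LKF_CONVERT | DLM_LKF_NOQUEUE)
 *         == (LKM_CONVERT | LKM_NOQUEUE)
 */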
/*
* Map an o2dlm status to standard errno values.
*
* o2dlm only uses a handful of these, and returns even fewer to the
* caller. Still, we try to assign sane values to each error.
*
* The following value pairs have special meanings to dlmglue, thus
* the right hand side needs to stay unique - never duplicate the
* mapping elsewhere in the table!
*
* DLM_NORMAL: 0
* DLM_NOTQUEUED: -EAGAIN
* DLM_CANCELGRANT: -EBUSY
* DLM_CANCEL: -DLM_ECANCEL
*/
/* Keep in sync with dlmapi.h */
static int status_map[] = {
[DLM_NORMAL] = 0, /* Success */
[DLM_GRANTED] = -EINVAL,
[DLM_DENIED] = -EACCES,
[DLM_DENIED_NOLOCKS] = -EACCES,
[DLM_WORKING] = -EACCES,
[DLM_BLOCKED] = -EINVAL,
[DLM_BLOCKED_ORPHAN] = -EINVAL,
[DLM_DENIED_GRACE_PERIOD] = -EACCES,
[DLM_SYSERR] = -ENOMEM, /* It is what it is */
[DLM_NOSUPPORT] = -EPROTO,
[DLM_CANCELGRANT] = -EBUSY, /* Cancel after grant */
[DLM_IVLOCKID] = -EINVAL,
[DLM_SYNC] = -EINVAL,
[DLM_BADTYPE] = -EINVAL,
[DLM_BADRESOURCE] = -EINVAL,
[DLM_MAXHANDLES] = -ENOMEM,
[DLM_NOCLINFO] = -EINVAL,
[DLM_NOLOCKMGR] = -EINVAL,
[DLM_NOPURGED] = -EINVAL,
[DLM_BADARGS] = -EINVAL,
[DLM_VOID] = -EINVAL,
[DLM_NOTQUEUED] = -EAGAIN, /* Trylock failed */
[DLM_IVBUFLEN] = -EINVAL,
[DLM_CVTUNGRANT] = -EPERM,
[DLM_BADPARAM] = -EINVAL,
[DLM_VALNOTVALID] = -EINVAL,
[DLM_REJECTED] = -EPERM,
[DLM_ABORT] = -EINVAL,
[DLM_CANCEL] = -DLM_ECANCEL, /* Successful cancel */
[DLM_IVRESHANDLE] = -EINVAL,
[DLM_DEADLOCK] = -EDEADLK,
[DLM_DENIED_NOASTS] = -EINVAL,
[DLM_FORWARD] = -EINVAL,
[DLM_TIMEOUT] = -ETIMEDOUT,
[DLM_IVGROUPID] = -EINVAL,
[DLM_VERS_CONFLICT] = -EOPNOTSUPP,
[DLM_BAD_DEVICE_PATH] = -ENOENT,
[DLM_NO_DEVICE_PERMISSION] = -EPERM,
[DLM_NO_CONTROL_DEVICE] = -ENOENT,
[DLM_RECOVERING] = -ENOTCONN,
[DLM_MIGRATING] = -ERESTART,
[DLM_MAXSTATS] = -EINVAL,
};
static int dlm_status_to_errno(enum dlm_status status)
{
BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map));
return status_map[status];
}
static void o2dlm_lock_ast_wrapper(void *astarg)
{
struct ocfs2_dlm_lksb *lksb = astarg;
lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
{
struct ocfs2_dlm_lksb *lksb = astarg;
lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
{
struct ocfs2_dlm_lksb *lksb = astarg;
int error = dlm_status_to_errno(status);
/*
* In o2dlm, you can get both the lock_ast() for the lock being
* granted and the unlock_ast() for the CANCEL failing. A
* successful cancel sends DLM_NORMAL here. If the
* lock grant happened before the cancel arrived, you get
* DLM_CANCELGRANT.
*
* There's no need for the double-ast. If we see DLM_CANCELGRANT,
* we just ignore it. We expect the lock_ast() to handle the
* granted lock.
*/
if (status == DLM_CANCELGRANT)
return;
lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error);
}
static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
unsigned int namelen)
{
enum dlm_status status;
int o2dlm_mode = mode_to_o2dlm(mode);
int o2dlm_flags = flags_to_o2dlm(flags);
int ret;
status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
o2dlm_flags, name, namelen,
o2dlm_lock_ast_wrapper, lksb,
o2dlm_blocking_ast_wrapper);
ret = dlm_status_to_errno(status);
return ret;
}
static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
struct ocfs2_dlm_lksb *lksb,
u32 flags)
{
enum dlm_status status;
int o2dlm_flags = flags_to_o2dlm(flags);
int ret;
status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb);
ret = dlm_status_to_errno(status);
return ret;
}
static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return dlm_status_to_errno(lksb->lksb_o2dlm.status);
}
/*
 * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
 * contents, it will zero out the LVB. Thus the caller can always trust
 * the contents.
 */
static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return 1;
}
static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return (void *)(lksb->lksb_o2dlm.lvb);
}
static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
}
/*
* Check if this node is heartbeating and is connected to all other
* heartbeating nodes.
*/
static int o2cb_cluster_check(void)
{
u8 node_num;
int i;
unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
node_num = o2nm_this_node();
if (node_num == O2NM_MAX_NODES) {
printk(KERN_ERR "o2cb: This node has not been configured.\n");
return -EINVAL;
}
/*
* o2dlm expects o2net sockets to be created. If not, then
* dlm_join_domain() fails with a stack of errors which are both cryptic
* and incomplete. The idea here is to detect upfront whether we have
* managed to connect to all nodes or not. If not, then list the nodes
* to allow the user to check the configuration (incorrect IP, firewall,
* etc.). Yes, this is racy. But it's not the end of the world.
*/
#define O2CB_MAP_STABILIZE_COUNT 60
for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
o2hb_fill_node_map(hbmap, O2NM_MAX_NODES);
if (!test_bit(node_num, hbmap)) {
printk(KERN_ERR "o2cb: %s heartbeat has not been "
"started.\n", (o2hb_global_heartbeat_active() ?
"Global" : "Local"));
return -EINVAL;
}
o2net_fill_node_map(netmap, O2NM_MAX_NODES);
/* Force set the current node to allow easy compare */
set_bit(node_num, netmap);
if (bitmap_equal(hbmap, netmap, O2NM_MAX_NODES))
return 0;
if (i < O2CB_MAP_STABILIZE_COUNT - 1)
msleep(1000);
}
printk(KERN_ERR "o2cb: This node could not connect to nodes:");
i = -1;
while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
i + 1)) < O2NM_MAX_NODES) {
if (!test_bit(i, netmap))
printk(" %u", i);
}
printk(".\n");
return -ENOTCONN;
}
/*
* Called from the dlm when it's about to evict a node. This is how the
* classic stack signals node death.
*/
static void o2dlm_eviction_cb(int node_num, void *data)
{
struct ocfs2_cluster_connection *conn = data;
printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
node_num, conn->cc_namelen, conn->cc_name);
conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
}
static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
{
int rc = 0;
u32 dlm_key;
struct dlm_ctxt *dlm;
struct o2dlm_private *priv;
struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
BUG_ON(conn->cc_proto == NULL);
/* Ensure cluster stack is up and all nodes are connected */
rc = o2cb_cluster_check();
if (rc) {
printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
"before retrying.\n");
goto out;
}
priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL);
if (!priv) {
rc = -ENOMEM;
goto out_free;
}
/* This just fills the structure in. It is safe to pass conn. */
dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb,
conn);
conn->cc_private = priv;
/* Used by the dlm code to make message headers unique; each
 * node in this domain must agree on this. */
dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
fs_version.pv_major = conn->cc_version.pv_major;
fs_version.pv_minor = conn->cc_version.pv_minor;
dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
if (IS_ERR(dlm)) {
rc = PTR_ERR(dlm);
mlog_errno(rc);
goto out_free;
}
conn->cc_version.pv_major = fs_version.pv_major;
conn->cc_version.pv_minor = fs_version.pv_minor;
conn->cc_lockspace = dlm;
dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
out_free:
if (rc)
kfree(conn->cc_private);
out:
return rc;
}
static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn)
{
struct dlm_ctxt *dlm = conn->cc_lockspace;
struct o2dlm_private *priv = conn->cc_private;
dlm_unregister_eviction_cb(&priv->op_eviction_cb);
conn->cc_private = NULL;
kfree(priv);
dlm_unregister_domain(dlm);
conn->cc_lockspace = NULL;
return 0;
}
static int o2cb_cluster_this_node(struct ocfs2_cluster_connection *conn,
unsigned int *node)
{
int node_num;
node_num = o2nm_this_node();
if (node_num == O2NM_INVALID_NODE_NUM)
return -ENOENT;
if (node_num >= O2NM_MAX_NODES)
return -EOVERFLOW;
*node = node_num;
return 0;
}
static struct ocfs2_stack_operations o2cb_stack_ops = {
.connect = o2cb_cluster_connect,
.disconnect = o2cb_cluster_disconnect,
.this_node = o2cb_cluster_this_node,
.dlm_lock = o2cb_dlm_lock,
.dlm_unlock = o2cb_dlm_unlock,
.lock_status = o2cb_dlm_lock_status,
.lvb_valid = o2cb_dlm_lvb_valid,
.lock_lvb = o2cb_dlm_lvb,
.dump_lksb = o2cb_dump_lksb,
};
static struct ocfs2_stack_plugin o2cb_stack = {
.sp_name = "o2cb",
.sp_ops = &o2cb_stack_ops,
.sp_owner = THIS_MODULE,
};
static int __init o2cb_stack_init(void)
{
return ocfs2_stack_glue_register(&o2cb_stack);
}
static void __exit o2cb_stack_exit(void)
{
ocfs2_stack_glue_unregister(&o2cb_stack);
}
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("ocfs2 driver for the classic o2cb stack");
MODULE_LICENSE("GPL");
module_init(o2cb_stack_init);
module_exit(o2cb_stack_exit);
| linux-master | fs/ocfs2/stack_o2cb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* reservations.c
*
* Allocation reservations implementation
*
* Some code borrowed from fs/ext3/balloc.c and is:
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card ([email protected])
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* The rest is copyright (C) 2010 Novell. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_trace.h"
#ifdef CONFIG_OCFS2_DEBUG_FS
#define OCFS2_CHECK_RESERVATIONS
#endif
static DEFINE_SPINLOCK(resv_lock);
int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
{
return (osb->osb_resv_level && osb->osb_dir_resv_level);
}
static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv)
{
struct ocfs2_super *osb = resmap->m_osb;
unsigned int bits;
if (!(resv->r_flags & OCFS2_RESV_FLAG_DIR)) {
/* 8, 16, 32, 64, 128, 256, 512, 1024 */
bits = 4 << osb->osb_resv_level;
} else {
bits = 4 << osb->osb_dir_resv_level;
}
return bits;
}
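/*
 * Worked example: with a resv_level of 2 (an assumption; this is
 * ocfs2's default), a regular file reservation window is 4 << 2 = 16
 * bits, while resv_level 8 yields the 1024-bit maximum listed in the
 * comment above.
 */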
static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
{
if (resv->r_len)
return resv->r_start + resv->r_len - 1;
return resv->r_start;
}
static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
{
return !!(resv->r_len == 0);
}
static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap)
{
if (resmap->m_osb->osb_resv_level == 0)
return 1;
return 0;
}
static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap)
{
struct ocfs2_super *osb = resmap->m_osb;
struct rb_node *node;
struct ocfs2_alloc_reservation *resv;
int i = 0;
mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n",
osb->dev_str, resmap->m_bitmap_len);
node = rb_first(&resmap->m_reservations);
while (node) {
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u"
"\tlast_len: %u\n", resv->r_start,
ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
resv->r_last_len);
node = rb_next(node);
i++;
}
mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i);
i = 0;
list_for_each_entry(resv, &resmap->m_lru, r_lru) {
mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t"
"last_start: %u\tlast_len: %u\n", i, resv->r_start,
ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
resv->r_last_len);
i++;
}
}
#ifdef OCFS2_CHECK_RESERVATIONS
static int ocfs2_validate_resmap_bits(struct ocfs2_reservation_map *resmap,
int i,
struct ocfs2_alloc_reservation *resv)
{
char *disk_bitmap = resmap->m_disk_bitmap;
unsigned int start = resv->r_start;
unsigned int end = ocfs2_resv_end(resv);
while (start <= end) {
if (ocfs2_test_bit(start, disk_bitmap)) {
mlog(ML_ERROR,
"reservation %d covers an allocated area "
"starting at bit %u!\n", i, start);
return 1;
}
start++;
}
return 0;
}
static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{
unsigned int off = 0;
int i = 0;
struct rb_node *node;
struct ocfs2_alloc_reservation *resv;
node = rb_first(&resmap->m_reservations);
while (node) {
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
if (i > 0 && resv->r_start <= off) {
mlog(ML_ERROR, "reservation %d has bad start off!\n",
i);
goto bad;
}
if (resv->r_len == 0) {
mlog(ML_ERROR, "reservation %d has no length!\n",
i);
goto bad;
}
if (resv->r_start > ocfs2_resv_end(resv)) {
mlog(ML_ERROR, "reservation %d has invalid range!\n",
i);
goto bad;
}
if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) {
mlog(ML_ERROR, "reservation %d extends past bitmap!\n",
i);
goto bad;
}
if (ocfs2_validate_resmap_bits(resmap, i, resv))
goto bad;
off = ocfs2_resv_end(resv);
node = rb_next(node);
i++;
}
return;
bad:
ocfs2_dump_resv(resmap);
BUG();
}
#else
static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{
}
#endif
void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv)
{
memset(resv, 0, sizeof(*resv));
INIT_LIST_HEAD(&resv->r_lru);
}
void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
unsigned int flags)
{
BUG_ON(flags & ~OCFS2_RESV_TYPES);
resv->r_flags |= flags;
}
void ocfs2_resmap_init(struct ocfs2_super *osb,
struct ocfs2_reservation_map *resmap)
{
memset(resmap, 0, sizeof(*resmap));
resmap->m_osb = osb;
resmap->m_reservations = RB_ROOT;
/* m_bitmap_len is initialized to zero by the above memset. */
INIT_LIST_HEAD(&resmap->m_lru);
}
static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv)
{
assert_spin_locked(&resv_lock);
if (!list_empty(&resv->r_lru))
list_del_init(&resv->r_lru);
list_add_tail(&resv->r_lru, &resmap->m_lru);
}
static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv)
{
resv->r_len = 0;
resv->r_start = 0;
}
static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv)
{
if (resv->r_flags & OCFS2_RESV_FLAG_INUSE) {
list_del_init(&resv->r_lru);
rb_erase(&resv->r_node, &resmap->m_reservations);
resv->r_flags &= ~OCFS2_RESV_FLAG_INUSE;
}
}
static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv)
{
assert_spin_locked(&resv_lock);
__ocfs2_resv_trunc(resv);
/*
* last_len and last_start no longer make sense if
* we're changing the range of our allocations.
*/
resv->r_last_len = resv->r_last_start = 0;
ocfs2_resv_remove(resmap, resv);
}
/* does nothing if 'resv' is null */
void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv)
{
if (resv) {
spin_lock(&resv_lock);
__ocfs2_resv_discard(resmap, resv);
spin_unlock(&resv_lock);
}
}
static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap)
{
struct rb_node *node;
struct ocfs2_alloc_reservation *resv;
assert_spin_locked(&resv_lock);
while ((node = rb_last(&resmap->m_reservations)) != NULL) {
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
__ocfs2_resv_discard(resmap, resv);
}
}
void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
unsigned int clen, char *disk_bitmap)
{
if (ocfs2_resmap_disabled(resmap))
return;
spin_lock(&resv_lock);
ocfs2_resmap_clear_all_resv(resmap);
resmap->m_bitmap_len = clen;
resmap->m_disk_bitmap = disk_bitmap;
spin_unlock(&resv_lock);
}
void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap)
{
/* Does nothing for now. Keep this around for API symmetry */
}
static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *new)
{
struct rb_root *root = &resmap->m_reservations;
struct rb_node *parent = NULL;
struct rb_node **p = &root->rb_node;
struct ocfs2_alloc_reservation *tmp;
assert_spin_locked(&resv_lock);
trace_ocfs2_resv_insert(new->r_start, new->r_len);
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);
if (new->r_start < tmp->r_start) {
p = &(*p)->rb_left;
/*
* This is a good place to check for
* overlapping reservations.
*/
BUG_ON(ocfs2_resv_end(new) >= tmp->r_start);
} else if (new->r_start > ocfs2_resv_end(tmp)) {
p = &(*p)->rb_right;
} else {
/* This should never happen! */
mlog(ML_ERROR, "Duplicate reservation window!\n");
BUG();
}
}
rb_link_node(&new->r_node, parent, p);
rb_insert_color(&new->r_node, root);
new->r_flags |= OCFS2_RESV_FLAG_INUSE;
ocfs2_resv_mark_lru(resmap, new);
ocfs2_check_resmap(resmap);
}
/**
* ocfs2_find_resv_lhs() - find the window which contains goal
* @resmap: reservation map to search
* @goal: which bit to search for
*
* If a window containing that goal is not found, we return the window
* which comes before goal. Returns NULL on empty rbtree or no window
* before goal.
*/
static struct ocfs2_alloc_reservation *
ocfs2_find_resv_lhs(struct ocfs2_reservation_map *resmap, unsigned int goal)
{
struct ocfs2_alloc_reservation *resv = NULL;
struct ocfs2_alloc_reservation *prev_resv = NULL;
struct rb_node *node = resmap->m_reservations.rb_node;
assert_spin_locked(&resv_lock);
if (!node)
return NULL;
node = rb_first(&resmap->m_reservations);
while (node) {
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
if (resv->r_start <= goal && ocfs2_resv_end(resv) >= goal)
break;
/* We passed goal; the previous window (if any) is the one we want. */
if (resv->r_start > goal) {
resv = prev_resv;
break;
}
prev_resv = resv;
node = rb_next(node);
}
return resv;
}
/*
* We are given a range within the bitmap, which corresponds to a gap
* inside the reservations tree (search_start, search_len). The range
* can be anything from the whole bitmap, to a gap between
* reservations.
*
* The start value of *rstart is insignificant.
*
* This function searches the bitmap range starting at search_start
* with length search_len for a set of contiguous free bits. We try
* to find up to 'wanted' bits, but may return fewer.
*
* Returns the length of the allocation, or 0 if no free bits are found.
*
* *cstart and *clen will also be populated with the result.
*/
static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
unsigned int wanted,
unsigned int search_start,
unsigned int search_len,
unsigned int *rstart,
unsigned int *rlen)
{
void *bitmap = resmap->m_disk_bitmap;
unsigned int best_start, best_len = 0;
int offset, start, found;
trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
wanted, resmap->m_bitmap_len);
found = best_start = best_len = 0;
start = search_start;
while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
start)) != -1) {
/* Search reached end of the region */
if (offset >= (search_start + search_len))
break;
if (offset == start) {
/* we found a zero */
found++;
/* move start to the next bit to test */
start++;
} else {
/* got a zero after some ones */
found = 1;
start = offset + 1;
}
if (found > best_len) {
best_len = found;
best_start = start - found;
}
if (found >= wanted)
break;
}
if (best_len == 0)
return 0;
if (best_len >= wanted)
best_len = wanted;
*rlen = best_len;
*rstart = best_start;
trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);
return *rlen;
}
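/*
 * Worked example (illustrative): suppose the window contains free runs
 * of length 2 and then 3. With wanted == 8 the whole window is scanned
 * and the 3-bit run wins (*rlen == 3). With wanted == 2 the scan stops
 * as soon as found >= wanted, so the first 2-bit run is returned
 * immediately.
 */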
static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
unsigned int goal, unsigned int wanted)
{
struct rb_root *root = &resmap->m_reservations;
unsigned int gap_start, gap_end, gap_len;
struct ocfs2_alloc_reservation *prev_resv, *next_resv;
struct rb_node *prev, *next;
unsigned int cstart, clen;
unsigned int best_start = 0, best_len = 0;
/*
* Nasty cases to consider:
*
* - rbtree is empty
* - our window should be first in all reservations
* - our window should be last in all reservations
* - need to make sure we don't go past end of bitmap
*/
trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
goal, wanted, RB_EMPTY_ROOT(root));
assert_spin_locked(&resv_lock);
if (RB_EMPTY_ROOT(root)) {
/*
* Easiest case - empty tree. We can just take
* whatever window of free bits we want.
*/
clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
resmap->m_bitmap_len - goal,
&cstart, &clen);
/*
* This should never happen - the local alloc window
* will always have free bits when we're called.
*/
BUG_ON(goal == 0 && clen == 0);
if (clen == 0)
return;
resv->r_start = cstart;
resv->r_len = clen;
ocfs2_resv_insert(resmap, resv);
return;
}
prev_resv = ocfs2_find_resv_lhs(resmap, goal);
if (prev_resv == NULL) {
/*
* A NULL here means that the search code couldn't
* find a window that starts before goal.
*
* However, we can take the first window after goal,
* which is also, by definition, the leftmost window in
* the entire tree. If we can find free bits in the
* gap between goal and the LHS window, then the
* reservation can safely be placed there.
*
* Otherwise we fall back to a linear search, checking
* the gaps in between windows for a place to
* allocate.
*/
next = rb_first(root);
next_resv = rb_entry(next, struct ocfs2_alloc_reservation,
r_node);
/*
 * The search should never return such a window. (See
 * the comment above.)
 */
if (next_resv->r_start <= goal) {
mlog(ML_ERROR, "goal: %u next_resv: start %u len %u\n",
goal, next_resv->r_start, next_resv->r_len);
ocfs2_dump_resv(resmap);
BUG();
}
clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
next_resv->r_start - goal,
&cstart, &clen);
if (clen) {
best_len = clen;
best_start = cstart;
if (best_len == wanted)
goto out_insert;
}
prev_resv = next_resv;
next_resv = NULL;
}
trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
ocfs2_resv_end(prev_resv));
prev = &prev_resv->r_node;
/* Now we do a linear search for a window, starting at 'prev_rsv' */
while (1) {
next = rb_next(prev);
if (next) {
next_resv = rb_entry(next,
struct ocfs2_alloc_reservation,
r_node);
gap_start = ocfs2_resv_end(prev_resv) + 1;
gap_end = next_resv->r_start - 1;
gap_len = gap_end - gap_start + 1;
} else {
/*
* We're at the rightmost edge of the
* tree. See if a reservation between this
* window and the end of the bitmap will work.
*/
gap_start = ocfs2_resv_end(prev_resv) + 1;
gap_len = resmap->m_bitmap_len - gap_start;
gap_end = resmap->m_bitmap_len - 1;
}
trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1,
next ? ocfs2_resv_end(next_resv) : -1);
/*
* No need to check this gap if we have already found
* a larger region of free bits.
*/
if (gap_len <= best_len)
goto next_resv;
clen = ocfs2_resmap_find_free_bits(resmap, wanted, gap_start,
gap_len, &cstart, &clen);
if (clen == wanted) {
best_len = clen;
best_start = cstart;
goto out_insert;
} else if (clen > best_len) {
best_len = clen;
best_start = cstart;
}
next_resv:
if (!next)
break;
prev = next;
prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,
r_node);
}
out_insert:
if (best_len) {
resv->r_start = best_start;
resv->r_len = best_len;
ocfs2_resv_insert(resmap, resv);
}
}
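/*
 * No free window could be found. Steal space from the least recently
 * used reservation: discard it entirely if it is at or below a
 * reasonable size threshold, otherwise shrink it and hand the
 * reclaimed tail to "resv".
 */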
static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
unsigned int wanted)
{
struct ocfs2_alloc_reservation *lru_resv;
int tmpwindow = !!(resv->r_flags & OCFS2_RESV_FLAG_TMP);
unsigned int min_bits;
if (!tmpwindow)
min_bits = ocfs2_resv_window_bits(resmap, resv) >> 1;
else
min_bits = wanted; /* We know the temp window will use all
* of these bits */
/*
* Take the first reservation off the LRU as our 'target'. We
* don't try to be smart about it. There might be a case for
* searching based on size but I don't have enough data to be
* sure. --Mark (3/16/2010)
*/
lru_resv = list_first_entry(&resmap->m_lru,
struct ocfs2_alloc_reservation, r_lru);
trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
lru_resv->r_len,
ocfs2_resv_end(lru_resv));
/*
* Cannibalize (some or all) of the target reservation and
* feed it to the current window.
*/
if (lru_resv->r_len <= min_bits) {
/*
* Discard completely if size is less than or equal to a
* reasonable threshold - 50% of window bits for non temporary
* windows.
*/
resv->r_start = lru_resv->r_start;
resv->r_len = lru_resv->r_len;
__ocfs2_resv_discard(resmap, lru_resv);
} else {
unsigned int shrink;
if (tmpwindow)
shrink = min_bits;
else
shrink = lru_resv->r_len / 2;
lru_resv->r_len -= shrink;
resv->r_start = ocfs2_resv_end(lru_resv) + 1;
resv->r_len = shrink;
}
trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
resv->r_len, resv->r_last_start,
resv->r_last_len);
ocfs2_resv_insert(resmap, resv);
}
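/*
 * Find a window for "resv". Try near the last allocation first, then
 * retry from the beginning of the bitmap, and finally cannibalize an
 * existing reservation so that we never come back empty-handed.
 */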
static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
unsigned int wanted)
{
unsigned int goal = 0;
BUG_ON(!ocfs2_resv_empty(resv));
/*
* Begin by trying to get a window as close to the previous
* one as possible. Using the most recent allocation as a
* start goal makes sense.
*/
if (resv->r_last_len) {
goal = resv->r_last_start + resv->r_last_len;
if (goal >= resmap->m_bitmap_len)
goal = 0;
}
__ocfs2_resv_find_window(resmap, resv, goal, wanted);
/* Search from last alloc didn't work, try once more from beginning. */
if (ocfs2_resv_empty(resv) && goal != 0)
__ocfs2_resv_find_window(resmap, resv, 0, wanted);
if (ocfs2_resv_empty(resv)) {
/*
* Still empty? Pull oldest one off the LRU, remove it from
* the tree, and put this one in its place.
*/
ocfs2_cannibalize_resv(resmap, resv, wanted);
}
BUG_ON(ocfs2_resv_empty(resv));
}
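/*
 * Reserve bits for the caller. *clen is used on input as a hint of how
 * much is wanted; on success, *cstart and *clen describe the reserved
 * window. Returns -ENOSPC when reservations are disabled.
 */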
int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
int *cstart, int *clen)
{
if (resv == NULL || ocfs2_resmap_disabled(resmap))
return -ENOSPC;
spin_lock(&resv_lock);
if (ocfs2_resv_empty(resv)) {
/*
* We don't want to over-allocate for temporary
* windows. Otherwise, we run the risk of fragmenting the
* allocation space.
*/
unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
wanted = *clen;
/*
* Try to get a window here. If it works, we must fall
* through and test the bitmap. This avoids some
* ping-ponging of windows due to non-reserved space
* being allocated before we initialize a window for
* that inode.
*/
ocfs2_resv_find_window(resmap, resv, wanted);
trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
}
BUG_ON(ocfs2_resv_empty(resv));
*cstart = resv->r_start;
*clen = resv->r_len;
spin_unlock(&resv_lock);
return 0;
}
static void
ocfs2_adjust_resv_from_alloc(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
unsigned int start, unsigned int end)
{
unsigned int rhs = 0;
unsigned int old_end = ocfs2_resv_end(resv);
BUG_ON(start != resv->r_start || old_end < end);
/*
* Completely used? We can remove it then.
*/
if (old_end == end) {
__ocfs2_resv_discard(resmap, resv);
return;
}
rhs = old_end - end;
/*
* This should have been trapped above.
*/
BUG_ON(rhs == 0);
resv->r_start = end + 1;
resv->r_len = old_end - resv->r_start + 1;
}
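/*
 * Tell the reservation code that bits were used. The claimed bits must
 * start at the front of the reservation; the window is trimmed (or
 * discarded when fully consumed), the last allocation is recorded, and
 * the reservation is refreshed on the LRU.
 */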
void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
u32 cstart, u32 clen)
{
unsigned int cend = cstart + clen - 1;
if (resmap == NULL || ocfs2_resmap_disabled(resmap))
return;
if (resv == NULL)
return;
BUG_ON(cstart != resv->r_start);
spin_lock(&resv_lock);
trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
ocfs2_resv_end(resv), resv->r_len,
resv->r_last_start,
resv->r_last_len);
BUG_ON(cstart < resv->r_start);
BUG_ON(cstart > ocfs2_resv_end(resv));
BUG_ON(cend > ocfs2_resv_end(resv));
ocfs2_adjust_resv_from_alloc(resmap, resv, cstart, cend);
resv->r_last_start = cstart;
resv->r_last_len = clen;
/*
* May have been discarded above from
* ocfs2_adjust_resv_from_alloc().
*/
if (!ocfs2_resv_empty(resv))
ocfs2_resv_mark_lru(resmap, resv);
trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
resv->r_len, resv->r_last_start,
resv->r_last_len);
ocfs2_check_resmap(resmap);
spin_unlock(&resv_lock);
}
| linux-master | fs/ocfs2/reservations.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* file.c
*
* File open, close, extend, truncate
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
struct ocfs2_file_private *fp;
fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
if (!fp)
return -ENOMEM;
fp->fp_file = file;
mutex_init(&fp->fp_mutex);
ocfs2_file_lock_res_init(&fp->fp_flock, fp);
file->private_data = fp;
return 0;
}
static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (fp) {
ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
ocfs2_lock_res_free(&fp->fp_flock);
kfree(fp);
file->private_data = NULL;
}
}
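/*
 * Open: bump the open count under ip_lock (unless the inode has been
 * wiped from disk by another node) and set up the per-descriptor
 * private data.
 */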
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
int status;
int mode = file->f_flags;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
trace_ocfs2_file_open(inode, file, file->f_path.dentry,
(unsigned long long)oi->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name, mode);
if (file->f_mode & FMODE_WRITE) {
status = dquot_initialize(inode);
if (status)
goto leave;
}
spin_lock(&oi->ip_lock);
/* Check that the inode hasn't been wiped from disk by another
* node. If it hasn't then we're safe as long as we hold the
* spin lock until our increment of open count. */
if (oi->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&oi->ip_lock);
status = -ENOENT;
goto leave;
}
if (mode & O_DIRECT)
oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
oi->ip_open_count++;
spin_unlock(&oi->ip_lock);
status = ocfs2_init_file_private(inode, file);
if (status) {
/*
* We want to set open count back if we're failing the
* open.
*/
spin_lock(&oi->ip_lock);
oi->ip_open_count--;
spin_unlock(&oi->ip_lock);
}
file->f_mode |= FMODE_NOWAIT;
leave:
return status;
}
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
spin_lock(&oi->ip_lock);
if (!--oi->ip_open_count)
oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
trace_ocfs2_file_release(inode, file, file->f_path.dentry,
oi->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
oi->ip_open_count);
spin_unlock(&oi->ip_lock);
ocfs2_free_file_private(inode, file);
return 0;
}
static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
return ocfs2_init_file_private(inode, file);
}
static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
ocfs2_free_file_private(inode, file);
return 0;
}
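/*
 * fsync/fdatasync: after flushing dirty pages, wait for the journal
 * transaction covering this inode to commit, and issue a block device
 * flush ourselves when jbd2 will not send one for us.
 */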
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
int datasync)
{
int err = 0;
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
journal_t *journal = osb->journal->j_journal;
int ret;
tid_t commit_tid;
bool needs_barrier = false;
trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
oi->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
err = file_write_and_wait_range(file, start, end);
if (err)
return err;
commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_trans_will_send_data_barrier(journal, commit_tid))
needs_barrier = true;
err = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!err)
err = ret;
}
if (err)
mlog_errno(err);
return (err < 0) ? -EIO : 0;
}
int ocfs2_should_update_atime(struct inode *inode,
struct vfsmount *vfsmnt)
{
struct timespec64 now;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return 0;
if ((inode->i_flags & S_NOATIME) ||
((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
return 0;
/*
* We can be called with no vfsmnt structure - NFSD will
* sometimes do this.
*
* Note that our action here is different from touch_atime() -
* if we can't tell whether this is a noatime mount, then we
* don't know whether to trust the value of s_atime_quantum.
*/
if (vfsmnt == NULL)
return 0;
if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
return 0;
if (vfsmnt->mnt_flags & MNT_RELATIME) {
struct timespec64 ctime = inode_get_ctime(inode);
if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
(timespec64_compare(&inode->i_atime, &ctime) <= 0))
return 1;
return 0;
}
now = current_time(inode);
if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
return 0;
else
return 1;
}
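/*
 * Update the in-core and on-disk atime in a small standalone
 * transaction.
 */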
int ocfs2_update_inode_atime(struct inode *inode,
struct buffer_head *bh)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/*
* Don't use ocfs2_mark_inode_dirty() here as we don't always
* have i_rwsem to guard against concurrent changes to other
* inode fields.
*/
inode->i_atime = current_time(inode);
di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
int ocfs2_set_inode_size(handle_t *handle,
struct inode *inode,
struct buffer_head *fe_bh,
u64 new_i_size)
{
int status;
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_mtime = inode_set_ctime_current(inode);
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bail:
return status;
}
int ocfs2_simple_size_update(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_set_inode_size(handle, inode, di_bh,
new_i_size);
if (ret < 0)
mlog_errno(ret);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
static int ocfs2_cow_file_pos(struct inode *inode,
struct buffer_head *fe_bh,
u64 offset)
{
int status;
u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
unsigned int num_clusters = 0;
unsigned int ext_flags = 0;
/*
* If the new offset is aligned to the range of the cluster, there is
* no space for ocfs2_zero_range_for_truncate to fill, so no need to
* CoW either.
*/
if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
return 0;
status = ocfs2_get_clusters(inode, cpos, &phys,
&num_clusters, &ext_flags);
if (status) {
mlog_errno(status);
goto out;
}
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
out:
return status;
}
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh,
u64 new_i_size)
{
int status;
handle_t *handle;
struct ocfs2_dinode *di;
u64 cluster_bytes;
/*
* We need to CoW the cluster contains the offset if it is reflinked
* since we will call ocfs2_zero_range_for_truncate later which will
* write "0" from offset to the end of the cluster.
*/
status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
if (status) {
mlog_errno(status);
return status;
}
/* TODO: This needs to actually orphan the inode in this
* transaction. */
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
/*
* Do this before setting i_size.
*/
cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
cluster_bytes);
if (status) {
mlog_errno(status);
goto out_commit;
}
i_size_write(inode, new_i_size);
inode->i_mtime = inode_set_ctime_current(inode);
di = (struct ocfs2_dinode *) fe_bh->b_data;
di->i_size = cpu_to_le64(new_i_size);
di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
return status;
}
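/*
 * Truncate the file to new_i_size. Inline-data inodes are truncated in
 * place; otherwise we update i_size and zero the tail of the last
 * cluster via ocfs2_orphan_for_truncate(), then release the allocation
 * beyond the new size with ocfs2_commit_truncate().
 */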
int ocfs2_truncate_file(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size)
{
int status = 0;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
/* We trust di_bh because it comes from ocfs2_inode_lock(), which
* already validated it */
fe = (struct ocfs2_dinode *) di_bh->b_data;
trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)le64_to_cpu(fe->i_size),
(unsigned long long)new_i_size);
mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
"Inode %llu, inode i_size = %lld != di "
"i_size = %llu, i_flags = 0x%x\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
i_size_read(inode),
(unsigned long long)le64_to_cpu(fe->i_size),
le32_to_cpu(fe->i_flags));
if (new_i_size > le64_to_cpu(fe->i_size)) {
trace_ocfs2_truncate_file_error(
(unsigned long long)le64_to_cpu(fe->i_size),
(unsigned long long)new_i_size);
status = -EINVAL;
mlog_errno(status);
goto bail;
}
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_resv_discard(&osb->osb_la_resmap,
&OCFS2_I(inode)->ip_la_data_resv);
/*
* The inode lock forced other nodes to sync and drop their
* pages, which (correctly) happens even if we have a truncate
* without allocation change - ocfs2 cluster sizes can be much
* greater than page size, so we have to truncate them
* anyway.
*/
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
unmap_mapping_range(inode->i_mapping,
new_i_size + PAGE_SIZE - 1, 0, 1);
truncate_inode_pages(inode->i_mapping, new_i_size);
status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
i_size_read(inode), 1);
if (status)
mlog_errno(status);
goto bail_unlock_sem;
}
/* alright, we're going to need to do a full blown alloc size
* change. Orphan the inode so that recovery can complete the
* truncate if necessary. This does the task of marking
* i_size. */
status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_sem;
}
unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
truncate_inode_pages(inode->i_mapping, new_i_size);
status = ocfs2_commit_truncate(osb, inode, di_bh);
if (status < 0) {
mlog_errno(status);
goto bail_unlock_sem;
}
/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail:
if (!status && OCFS2_I(inode)->ip_clusters == 0)
status = ocfs2_try_remove_refcount_tree(inode, di_bh);
return status;
}
/*
* Extend the file's allocation only here. We'll update all the disk
* stuff, and oip->alloc_size.
*
* Callers are expected to hold the proper locks, to have started a
* transaction, and to have enough data / metadata reservations in the
* contexts.
*
* Will return -EAGAIN, and a reason, if a restart is needed.
* If passed in, *reason will always be set, even on error.
*/
int ocfs2_add_inode_data(struct ocfs2_super *osb,
struct inode *inode,
u32 *logical_offset,
u32 clusters_to_add,
int mark_unwritten,
struct buffer_head *fe_bh,
handle_t *handle,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret)
{
struct ocfs2_extent_tree et;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
return ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
clusters_to_add, mark_unwritten,
data_ac, meta_ac, reason_ret);
}
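/*
 * Allocate clusters_to_add clusters for the file, restarting the
 * transaction (or the whole operation) whenever the allocator asks
 * for it.
 */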
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
u32 clusters_to_add, int mark_unwritten)
{
int status = 0;
int restart_func = 0;
int credits;
u32 prev_clusters;
struct buffer_head *bh = NULL;
struct ocfs2_dinode *fe = NULL;
handle_t *handle = NULL;
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
enum ocfs2_alloc_restarted why = RESTART_NONE;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
int did_quota = 0;
/*
* Unwritten extent only exists for file systems which
* support holes.
*/
BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
status = ocfs2_read_inode_block(inode, &bh);
if (status < 0) {
mlog_errno(status);
goto leave;
}
fe = (struct ocfs2_dinode *) bh->b_data;
restart_all:
BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
&data_ac, &meta_ac);
if (status) {
mlog_errno(status);
goto leave;
}
credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto leave;
}
restarted_transaction:
trace_ocfs2_extend_allocation(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)i_size_read(inode),
le32_to_cpu(fe->i_clusters), clusters_to_add,
why, restart_func);
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (status)
goto leave;
did_quota = 1;
/* reserve a write to the file entry early on - that way if we
* run out of credits in the allocation path, we can still
* update i_size. */
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto leave;
}
prev_clusters = OCFS2_I(inode)->ip_clusters;
status = ocfs2_add_inode_data(osb,
inode,
&logical_start,
clusters_to_add,
mark_unwritten,
bh,
handle,
data_ac,
meta_ac,
&why);
if ((status < 0) && (status != -EAGAIN)) {
if (status != -ENOSPC)
mlog_errno(status);
goto leave;
}
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(inode)->ip_lock);
clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
spin_unlock(&OCFS2_I(inode)->ip_lock);
/* Release unused quota reservation */
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
did_quota = 0;
if (why != RESTART_NONE && clusters_to_add) {
if (why == RESTART_META) {
restart_func = 1;
status = 0;
} else {
BUG_ON(why != RESTART_TRANS);
status = ocfs2_allocate_extend_trans(handle, 1);
if (status < 0) {
/* handle still has to be committed at
* this point. */
status = -ENOMEM;
mlog_errno(status);
goto leave;
}
goto restarted_transaction;
}
}
trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
le32_to_cpu(fe->i_clusters),
(unsigned long long)le64_to_cpu(fe->i_size),
OCFS2_I(inode)->ip_clusters,
(unsigned long long)i_size_read(inode));
leave:
if (status < 0 && did_quota)
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (handle) {
ocfs2_commit_trans(osb, handle);
handle = NULL;
}
if (data_ac) {
ocfs2_free_alloc_context(data_ac);
data_ac = NULL;
}
if (meta_ac) {
ocfs2_free_alloc_context(meta_ac);
meta_ac = NULL;
}
if ((!status) && restart_func) {
restart_func = 0;
goto restart_all;
}
brelse(bh);
bh = NULL;
return status;
}
/*
* While a write will already be ordering the data, a truncate will not.
* Thus, we need to explicitly order the zeroed pages.
*/
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
struct buffer_head *di_bh,
loff_t start_byte,
loff_t length)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
int ret = 0;
if (!ocfs2_should_order_data(inode))
goto out;
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret)
mlog_errno(ret);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
out:
if (ret) {
if (!IS_ERR(handle))
ocfs2_commit_trans(osb, handle);
handle = ERR_PTR(ret);
}
return handle;
}
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
u64 abs_to, struct buffer_head *di_bh)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
unsigned long index = abs_from >> PAGE_SHIFT;
handle_t *handle;
int ret = 0;
unsigned zero_from, zero_to, block_start, block_end;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
BUG_ON(abs_from >= abs_to);
BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
abs_from,
abs_to - abs_from);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit_trans;
}
/* Get the offsets within the page that we want to zero */
zero_from = abs_from & (PAGE_SIZE - 1);
zero_to = abs_to & (PAGE_SIZE - 1);
if (!zero_to)
zero_to = PAGE_SIZE;
trace_ocfs2_write_zero_page(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)abs_from,
(unsigned long long)abs_to,
index, zero_from, zero_to);
/* We know that zero_from is block aligned */
for (block_start = zero_from; block_start < zero_to;
block_start = block_end) {
block_end = block_start + i_blocksize(inode);
/*
* block_start is block-aligned. Bump it by one to force
* __block_write_begin and block_commit_write to zero the
* whole block.
*/
ret = __block_write_begin(page, block_start + 1, 0,
ocfs2_get_block);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
/* must not update i_size! */
block_commit_write(page, block_start + 1, block_start + 1);
}
/*
* fs-writeback will release dirty pages whose offsets are beyond
* the inode size without taking the page lock; the release happens
* in block_write_full_page().
*/
i_size_write(inode, abs_to);
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
inode->i_mtime = inode_set_ctime_current(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
di->i_mtime_nsec = di->i_ctime_nsec;
if (handle) {
ocfs2_journal_dirty(handle, di_bh);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
out_unlock:
unlock_page(page);
put_page(page);
out_commit_trans:
if (handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
return ret;
}
/*
* Find the next range to zero. We do this in terms of bytes because
* that's what ocfs2_zero_extend() wants, and it is dealing with the
* pagecache. We may return multiple extents.
*
* zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
* needs to be zeroed. range_start and range_end return the next zeroing
* range. A subsequent call should pass the previous range_end as its
* zero_start. If range_end is 0, there's nothing to do.
*
* Unwritten extents are skipped over. Refcounted extents are CoW'd.
*/
static int ocfs2_zero_extend_get_range(struct inode *inode,
struct buffer_head *di_bh,
u64 zero_start, u64 zero_end,
u64 *range_start, u64 *range_end)
{
int rc = 0, needs_cow = 0;
u32 p_cpos, zero_clusters = 0;
u32 zero_cpos =
zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
unsigned int num_clusters = 0;
unsigned int ext_flags = 0;
while (zero_cpos < last_cpos) {
rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
&num_clusters, &ext_flags);
if (rc) {
mlog_errno(rc);
goto out;
}
if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
zero_clusters = num_clusters;
if (ext_flags & OCFS2_EXT_REFCOUNTED)
needs_cow = 1;
break;
}
zero_cpos += num_clusters;
}
if (!zero_clusters) {
*range_end = 0;
goto out;
}
while ((zero_cpos + zero_clusters) < last_cpos) {
rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
&p_cpos, &num_clusters,
&ext_flags);
if (rc) {
mlog_errno(rc);
goto out;
}
if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
break;
if (ext_flags & OCFS2_EXT_REFCOUNTED)
needs_cow = 1;
zero_clusters += num_clusters;
}
if ((zero_cpos + zero_clusters) > last_cpos)
zero_clusters = last_cpos - zero_cpos;
if (needs_cow) {
rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
zero_clusters, UINT_MAX);
if (rc) {
mlog_errno(rc);
goto out;
}
}
*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
zero_cpos + zero_clusters);
out:
return rc;
}
/*
* Zero one range returned from ocfs2_zero_extend_get_range(). The caller
* has made sure that the entire range needs zeroing.
*/
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
u64 range_end, struct buffer_head *di_bh)
{
int rc = 0;
u64 next_pos;
u64 zero_pos = range_start;
trace_ocfs2_zero_extend_range(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)range_start,
(unsigned long long)range_end);
BUG_ON(range_start >= range_end);
while (zero_pos < range_end) {
next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
if (next_pos > range_end)
next_pos = range_end;
rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
if (rc < 0) {
mlog_errno(rc);
break;
}
zero_pos = next_pos;
/*
* Very large extends have the potential to lock up
* the cpu for extended periods of time.
*/
cond_resched();
}
return rc;
}
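/*
 * Zero the region between the current i_size and zero_to_size, one
 * range of written extents at a time.
 */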
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
loff_t zero_to_size)
{
int ret = 0;
u64 zero_start, range_start = 0, range_end = 0;
struct super_block *sb = inode->i_sb;
zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)zero_start,
(unsigned long long)i_size_read(inode));
while (zero_start < zero_to_size) {
ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
zero_to_size,
&range_start,
&range_end);
if (ret) {
mlog_errno(ret);
break;
}
if (!range_end)
break;
/* Trim the ends */
if (range_start < zero_start)
range_start = zero_start;
if (range_end > zero_to_size)
range_end = zero_to_size;
ret = ocfs2_zero_extend_range(inode, range_start,
range_end, di_bh);
if (ret) {
mlog_errno(ret);
break;
}
zero_start = range_end;
}
return ret;
}
int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
u64 new_i_size, u64 zero_to)
{
int ret;
u32 clusters_to_add;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
/*
* Only quota files call this without a bh, and they can't be
* refcounted.
*/
BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
if (clusters_to_add < oi->ip_clusters)
clusters_to_add = 0;
else
clusters_to_add -= oi->ip_clusters;
if (clusters_to_add) {
ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
clusters_to_add, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* Call this even if we don't add any clusters to the tree. We
* still need to zero the area between the old i_size and the
* new i_size.
*/
ret = ocfs2_zero_extend(inode, di_bh, zero_to);
if (ret < 0)
mlog_errno(ret);
out:
return ret;
}
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size)
{
int ret = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
BUG_ON(!di_bh);
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
goto out;
if (i_size_read(inode) == new_i_size)
goto out;
BUG_ON(new_i_size < i_size_read(inode));
/*
* The alloc sem blocks people in read/write from reading our
* allocation until we're done changing it. We depend on
* i_rwsem to block other extend/truncate calls while we're
* here. We even have to hold it for sparse files because there
* might be some tail zeroing.
*/
down_write(&oi->ip_alloc_sem);
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
/*
* We can optimize small extends by keeping the inode's
* inline data.
*/
if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
up_write(&oi->ip_alloc_sem);
goto out_update_size;
}
ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
if (ret) {
up_write(&oi->ip_alloc_sem);
mlog_errno(ret);
goto out;
}
}
if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
else
ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
new_i_size);
up_write(&oi->ip_alloc_sem);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
out_update_size:
ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
if (ret < 0)
mlog_errno(ret);
out:
return ret;
}
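/*
 * setattr entry point. Size changes take the rw cluster lock and
 * either extend or truncate the file; ownership changes transfer
 * quota under a transaction before the common attribute copy.
 */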
int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
int status = 0, size_change;
int inode_locked = 0;
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
struct buffer_head *bh = NULL;
handle_t *handle = NULL;
struct dquot *transfer_to[MAXQUOTAS] = { };
int qtype;
int had_lock;
struct ocfs2_lock_holder oh;
trace_ocfs2_setattr(inode, dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
dentry->d_name.len, dentry->d_name.name,
attr->ia_valid, attr->ia_mode,
from_kuid(&init_user_ns, attr->ia_uid),
from_kgid(&init_user_ns, attr->ia_gid));
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE;
#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
| ATTR_GID | ATTR_UID | ATTR_MODE)
if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
return 0;
status = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (status)
return status;
if (is_quota_modification(&nop_mnt_idmap, inode, attr)) {
status = dquot_initialize(inode);
if (status)
return status;
}
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
if (size_change) {
/*
* Here we should wait dio to finish before inode lock
* to avoid a deadlock between ocfs2_setattr() and
* ocfs2_dio_end_io_write()
*/
inode_dio_wait(inode);
status = ocfs2_rw_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
if (had_lock < 0) {
status = had_lock;
goto bail_unlock_rw;
} else if (had_lock) {
/*
* As far as we know, ocfs2_setattr() could only be the first
* VFS entry point in the call chain of recursive cluster
* locking issue.
*
* For instance:
* chmod_common()
* notify_change()
* ocfs2_setattr()
* posix_acl_chmod()
* ocfs2_iop_get_acl()
*
* But, we're not 100% sure if it's always true, because the
* ordering of the VFS entry points in the call chain is out
* of our control. So, we'd better dump the stack here to
* catch the other cases of recursive locking.
*/
mlog(ML_ERROR, "Another case of recursive locking:\n");
dump_stack();
}
inode_locked = 1;
if (size_change) {
status = inode_newsize_ok(inode, attr->ia_size);
if (status)
goto bail_unlock;
if (i_size_read(inode) >= attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
status = ocfs2_begin_ordered_truncate(inode,
attr->ia_size);
if (status)
goto bail_unlock;
}
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
} else
status = ocfs2_extend_file(inode, bh, attr->ia_size);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
status = -ENOSPC;
goto bail_unlock;
}
}
if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
(attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
/*
* Gather pointers to quota structures so that allocation /
* freeing of quota structures happens here and not inside
* dquot_transfer() where we have problems with lock ordering
*/
if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
if (IS_ERR(transfer_to[USRQUOTA])) {
status = PTR_ERR(transfer_to[USRQUOTA]);
transfer_to[USRQUOTA] = NULL;
goto bail_unlock;
}
}
if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
if (IS_ERR(transfer_to[GRPQUOTA])) {
status = PTR_ERR(transfer_to[GRPQUOTA]);
transfer_to[GRPQUOTA] = NULL;
goto bail_unlock;
}
}
down_write(&OCFS2_I(inode)->ip_alloc_sem);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
2 * ocfs2_quota_trans_credits(sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail_unlock_alloc;
}
status = __dquot_transfer(inode, transfer_to);
if (status < 0)
goto bail_commit;
} else {
down_write(&OCFS2_I(inode)->ip_alloc_sem);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto bail_unlock_alloc;
}
}
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
status = ocfs2_mark_inode_dirty(handle, inode, bh);
if (status < 0)
mlog_errno(status);
bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
if (status && inode_locked) {
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
inode_locked = 0;
}
bail_unlock_rw:
if (size_change)
ocfs2_rw_unlock(inode, 1);
bail:
/* Release quota pointers in case we acquired them */
for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
dqput(transfer_to[qtype]);
if (!status && attr->ia_valid & ATTR_MODE) {
status = ocfs2_acl_chmod(inode, bh);
if (status < 0)
mlog_errno(status);
}
if (inode_locked)
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
brelse(bh);
return status;
}
int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
struct super_block *sb = path->dentry->d_sb;
struct ocfs2_super *osb = sb->s_fs_info;
int err;
err = ocfs2_inode_revalidate(path->dentry);
if (err) {
if (err != -ENOENT)
mlog_errno(err);
goto bail;
}
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
/*
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
* Report at least one sector for such files, so tools like tar, rsync,
* others don't incorrectly think the file is completely sparse.
*/
if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
stat->blocks += (stat->size + 511)>>9;
/* We set the blksize from the cluster size for performance */
stat->blksize = osb->s_clustersize;
bail:
return err;
}
int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
int ret, had_lock;
struct ocfs2_lock_holder oh;
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
if (had_lock < 0) {
ret = had_lock;
goto out;
} else if (had_lock) {
/* See comments in ocfs2_setattr() for details.
* The call chain of this case could be:
* do_sys_open()
* may_open()
* inode_permission()
* ocfs2_permission()
* ocfs2_iop_get_acl()
*/
mlog(ML_ERROR, "Another case of recursive locking:\n");
dump_stack();
}
ret = generic_permission(&nop_mnt_idmap, inode, mask);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
return ret;
}
static int __ocfs2_write_remove_suid(struct inode *inode,
struct buffer_head *bh)
{
int ret;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di;
trace_ocfs2_write_remove_suid(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
inode->i_mode);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out_trans;
}
inode->i_mode &= ~S_ISUID;
if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
inode->i_mode &= ~S_ISGID;
di = (struct ocfs2_dinode *) bh->b_data;
di->i_mode = cpu_to_le16(inode->i_mode);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
static int ocfs2_write_remove_suid(struct inode *inode)
{
int ret;
struct buffer_head *bh = NULL;
ret = ocfs2_read_inode_block(inode, &bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = __ocfs2_write_remove_suid(inode, bh);
out:
brelse(bh);
return ret;
}
/*
* Allocate enough extents to cover the region starting at byte offset
* start for len bytes. Existing extents are skipped, any extents
* added are marked as "unwritten".
*/
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
u64 start, u64 len)
{
int ret;
u32 cpos, phys_cpos, clusters, alloc_size;
u64 end = start + len;
struct buffer_head *di_bh = NULL;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = ocfs2_read_inode_block(inode, &di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Nothing to do if the requested reservation range
* fits within the inode.
*/
if (ocfs2_size_fits_inline_data(di_bh, end))
goto out;
ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
}
/*
* We consider both start and len to be inclusive.
*/
cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
clusters -= cpos;
while (clusters) {
ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
&alloc_size, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* Hole or existing extent len can be arbitrary, so
* cap it to our own allocation request.
*/
if (alloc_size > clusters)
alloc_size = clusters;
if (phys_cpos) {
/*
* We already have an allocation at this
* region so we can safely skip it.
*/
goto next;
}
ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
next:
cpos += alloc_size;
clusters -= alloc_size;
}
ret = 0;
out:
brelse(di_bh);
return ret;
}
/*
* Truncate a byte range, avoiding pages within partial clusters. This
* preserves those pages for the zeroing code to write to.
*/
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
u64 byte_len)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
loff_t start, end;
struct address_space *mapping = inode->i_mapping;
start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
end = byte_start + byte_len;
end = end & ~(osb->s_clustersize - 1);
if (start < end) {
unmap_mapping_range(mapping, start, end - start, 0);
truncate_inode_pages_range(mapping, start, end - 1);
}
}
/*
* zero out partial blocks of one cluster.
*
* start: file offset where zero starts, will be made upper block aligned.
* len: it will be trimmed to the end of current cluster if "start + len"
* is bigger than it.
*/
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
u64 start, u64 len)
{
int ret;
u64 start_block, end_block, nr_blocks;
u64 p_block, offset;
u32 cluster, p_cluster, nr_clusters;
struct super_block *sb = inode->i_sb;
u64 end = ocfs2_align_bytes_to_clusters(sb, start);
if (start + len < end)
end = start + len;
start_block = ocfs2_blocks_for_bytes(sb, start);
end_block = ocfs2_blocks_for_bytes(sb, end);
nr_blocks = end_block - start_block;
if (!nr_blocks)
return 0;
cluster = ocfs2_bytes_to_clusters(sb, start);
ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
&nr_clusters, NULL);
if (ret)
return ret;
if (!p_cluster)
return 0;
offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}
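/*
 * Zero the partial clusters at the edges of the range being removed so
 * that stale data is not exposed. Blocks beyond i_size have no page
 * cache and are zeroed directly on disk.
 */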
static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
int ret = 0;
u64 tmpend = 0;
u64 end = start + len;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
loff_t isize = i_size_read(inode);
/*
* The "start" and "end" values are NOT necessarily part of
* the range whose allocation is being deleted. Rather, this
* is what the user passed in with the request. We must zero
* partial clusters here. There's no need to worry about
* physical allocation - the zeroing code knows to skip holes.
*/
trace_ocfs2_zero_partial_clusters(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)start, (unsigned long long)end);
/*
* If both edges are on a cluster boundary then there's no
* zeroing required as the region is part of the allocation to
* be truncated.
*/
if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
goto out;
/* No page cache for EOF blocks, issue zero out to disk. */
if (end > isize) {
/*
* Zero out the EOF blocks in the last cluster starting
* from "isize", even when "start" > "isize", because it
* is complicated to zero out starting exactly at "start":
* "start" may not be block aligned, which would require a
* buffer write, and buffered writes beyond EOF are not
* supported.
*/
ret = ocfs2_zeroout_partial_cluster(inode, isize,
end - isize);
if (ret) {
mlog_errno(ret);
goto out;
}
if (start >= isize)
goto out;
end = isize;
}
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
/*
* If start is on a cluster boundary and end is somewhere in another
* cluster, we have not COWed the cluster starting at start, unless
* end is also within the same cluster. So, in this case, we skip this
* first call to ocfs2_zero_range_for_truncate() and move on
* to the next one.
*/
if ((start & (csize - 1)) != 0) {
/*
* We want to get the byte offset of the end of the 1st
* cluster.
*/
tmpend = (u64)osb->s_clustersize +
(start & ~(osb->s_clustersize - 1));
if (tmpend > end)
tmpend = end;
trace_ocfs2_zero_partial_clusters_range1(
(unsigned long long)start,
(unsigned long long)tmpend);
ret = ocfs2_zero_range_for_truncate(inode, handle, start,
tmpend);
if (ret)
mlog_errno(ret);
}
if (tmpend < end) {
/*
* This may make start and end equal, but the zeroing
* code will skip any work in that case so there's no
* need to catch it up here.
*/
start = end & ~(osb->s_clustersize - 1);
trace_ocfs2_zero_partial_clusters_range2(
(unsigned long long)start, (unsigned long long)end);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
if (ret)
mlog_errno(ret);
}
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
int i;
struct ocfs2_extent_rec *rec = NULL;
for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
rec = &el->l_recs[i];
if (le32_to_cpu(rec->e_cpos) < pos)
break;
}
return i;
}
/*
* Helper to calculate the punching pos and length in one run, we handle the
* following three cases in order:
*
* - remove the entire record
* - remove a partial record
* - no record needs to be removed (hole-punching completed)
*/
static void ocfs2_calc_trunc_pos(struct inode *inode,
struct ocfs2_extent_list *el,
struct ocfs2_extent_rec *rec,
u32 trunc_start, u32 *trunc_cpos,
u32 *trunc_len, u32 *trunc_end,
u64 *blkno, int *done)
{
int ret = 0;
u32 coff, range;
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
/*
* remove an entire extent record.
*/
*trunc_cpos = le32_to_cpu(rec->e_cpos);
/*
* Skip holes if any.
*/
if (range < *trunc_end)
*trunc_end = range;
*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
*blkno = le64_to_cpu(rec->e_blkno);
*trunc_end = le32_to_cpu(rec->e_cpos);
} else if (range > trunc_start) {
/*
* remove a partial extent record, which means we're
* removing the last extent record.
*/
*trunc_cpos = trunc_start;
/*
* skip hole if any.
*/
if (range < *trunc_end)
*trunc_end = range;
*trunc_len = *trunc_end - trunc_start;
coff = trunc_start - le32_to_cpu(rec->e_cpos);
*blkno = le64_to_cpu(rec->e_blkno) +
ocfs2_clusters_to_blocks(inode->i_sb, coff);
*trunc_end = trunc_start;
} else {
/*
* There are two possibilities:
*
* - the last record has been removed
* - trunc_start was within a hole
*
* Either case means that hole punching is complete.
*/
ret = 1;
}
*done = ret;
}
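/*
 * Punch a hole: zero the partial clusters at the edges of the range,
 * then walk the extent tree right to left, removing the whole clusters
 * in between and truncating the corresponding page cache pages.
 */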
int ocfs2_remove_inode_range(struct inode *inode,
struct buffer_head *di_bh, u64 byte_start,
u64 byte_len)
{
int ret = 0, flags = 0, done = 0, i;
u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
u32 cluster_in_el;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_cached_dealloc_ctxt dealloc;
struct address_space *mapping = inode->i_mapping;
struct ocfs2_extent_tree et;
struct ocfs2_path *path = NULL;
struct ocfs2_extent_list *el = NULL;
struct ocfs2_extent_rec *rec = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
trace_ocfs2_remove_inode_range(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)byte_start,
(unsigned long long)byte_len);
if (byte_len == 0)
return 0;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
byte_start + byte_len, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* There's no need to get fancy with the page cache
* truncate of an inline-data inode. We're talking
* about less than a page here, which will be cached
* in the dinode buffer anyway.
*/
unmap_mapping_range(mapping, 0, 0, 0);
truncate_inode_pages(mapping, 0);
goto out;
}
/*
* For reflinks, we may need to CoW the two clusters that might be
* partially zeroed later, if the hole's start and end offsets fall
* within one cluster (i.e., are not aligned to the cluster size).
*/
if (ocfs2_is_refcount_inode(inode)) {
ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
if (ret) {
mlog_errno(ret);
goto out;
}
}
trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
cluster_in_el = trunc_end;
ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
if (ret) {
mlog_errno(ret);
goto out;
}
path = ocfs2_new_path_from_et(&et);
if (!path) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
while (trunc_end > trunc_start) {
ret = ocfs2_find_path(INODE_CACHE(inode), path,
cluster_in_el);
if (ret) {
mlog_errno(ret);
goto out;
}
el = path_leaf_el(path);
i = ocfs2_find_rec(el, trunc_end);
/*
* Need to go to previous extent block.
*/
if (i < 0) {
if (path->p_tree_depth == 0)
break;
ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
path,
&cluster_in_el);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We've reached the leftmost extent block,
* it's safe to leave.
*/
if (cluster_in_el == 0)
break;
/*
* The 'pos' searched for in the previous extent block is
* always one cluster less than the actual trunc_end.
*/
trunc_end = cluster_in_el + 1;
ocfs2_reinit_path(path, 1);
continue;
} else
rec = &el->l_recs[i];
ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
&trunc_len, &trunc_end, &blkno, &done);
if (done)
break;
flags = rec->e_flags;
phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags,
&dealloc, refcount_loc, false);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
cluster_in_el = trunc_end;
ocfs2_reinit_path(path, 1);
}
ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
out:
ocfs2_free_path(path);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
return ret;
}
/*
* Parts of this function taken from xfs_change_file_space()
*/
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
loff_t f_pos, unsigned int cmd,
struct ocfs2_space_resv *sr,
int change_size)
{
int ret;
s64 llen;
loff_t size, orig_isize;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *di_bh = NULL;
handle_t *handle;
unsigned long long max_off = inode->i_sb->s_maxbytes;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
inode_lock(inode);
/*
* This prevents concurrent writes on other nodes
*/
ret = ocfs2_rw_lock(inode, 1);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
goto out_rw_unlock;
}
if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
ret = -EPERM;
goto out_inode_unlock;
}
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
case 1: /*SEEK_CUR*/
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
sr->l_start += i_size_read(inode);
break;
default:
ret = -EINVAL;
goto out_inode_unlock;
}
sr->l_whence = 0;
llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
if (sr->l_start < 0
|| sr->l_start > max_off
|| (sr->l_start + llen) < 0
|| (sr->l_start + llen) > max_off) {
ret = -EINVAL;
goto out_inode_unlock;
}
size = sr->l_start + sr->l_len;
if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
if (sr->l_len <= 0) {
ret = -EINVAL;
goto out_inode_unlock;
}
}
if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) {
ret = __ocfs2_write_remove_suid(inode, di_bh);
if (ret) {
mlog_errno(ret);
goto out_inode_unlock;
}
}
down_write(&OCFS2_I(inode)->ip_alloc_sem);
switch (cmd) {
case OCFS2_IOC_RESVSP:
case OCFS2_IOC_RESVSP64:
/*
* This takes unsigned offsets, but the signed ones we
* pass have been checked against overflow above.
*/
ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
sr->l_len);
break;
case OCFS2_IOC_UNRESVSP:
case OCFS2_IOC_UNRESVSP64:
ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
sr->l_len);
break;
default:
ret = -EINVAL;
}
orig_isize = i_size_read(inode);
/* zeroout eof blocks in the cluster. */
if (!ret && change_size && orig_isize < size) {
ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
size - orig_isize);
if (!ret)
i_size_write(inode, size);
}
up_write(&OCFS2_I(inode)->ip_alloc_sem);
if (ret) {
mlog_errno(ret);
goto out_inode_unlock;
}
/*
* We update c/mtime for these changes
*/
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out_inode_unlock;
}
inode->i_mtime = inode_set_ctime_current(inode);
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
mlog_errno(ret);
if (file && (file->f_flags & O_SYNC))
handle->h_sync = 1;
ocfs2_commit_trans(osb, handle);
out_inode_unlock:
brelse(di_bh);
ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
ocfs2_rw_unlock(inode, 1);
out:
inode_unlock(inode);
return ret;
}
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
struct ocfs2_space_resv *sr)
{
struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int ret;
if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
!ocfs2_writes_unwritten_extents(osb))
return -ENOTTY;
else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
!ocfs2_sparse_alloc(osb))
return -ENOTTY;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
ret = mnt_want_write_file(file);
if (ret)
return ret;
ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
mnt_drop_write_file(file);
return ret;
}
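/*
 * fallocate() support: map KEEP_SIZE / PUNCH_HOLE requests onto the
 * RESVSP64 / UNRESVSP64 space reservation machinery.
 */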
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_space_resv sr;
int change_size = 1;
int cmd = OCFS2_IOC_RESVSP64;
int ret = 0;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!ocfs2_writes_unwritten_extents(osb))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_KEEP_SIZE) {
change_size = 0;
} else {
ret = inode_newsize_ok(inode, offset + len);
if (ret)
return ret;
}
if (mode & FALLOC_FL_PUNCH_HOLE)
cmd = OCFS2_IOC_UNRESVSP64;
sr.l_whence = 0;
sr.l_start = (s64)offset;
sr.l_len = (s64)len;
return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
change_size);
}
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
size_t count)
{
int ret = 0;
unsigned int extent_flags;
u32 cpos, clusters, extent_len, phys_cpos;
struct super_block *sb = inode->i_sb;
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
!ocfs2_is_refcount_inode(inode) ||
OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
while (clusters) {
ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
&extent_flags);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
ret = 1;
break;
}
if (extent_len > clusters)
extent_len = clusters;
clusters -= extent_len;
cpos += extent_len;
}
out:
return ret;
}
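/*
 * An I/O is "unaligned" when either end of it does not fall on a block
 * boundary.
 */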
static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
int blockmask = inode->i_sb->s_blocksize - 1;
loff_t final_size = pos + count;
if ((pos & blockmask) || (final_size & blockmask))
return 1;
return 0;
}
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
struct buffer_head **di_bh,
int meta_level,
int write_sem,
int wait)
{
int ret = 0;
if (wait)
ret = ocfs2_inode_lock(inode, di_bh, meta_level);
else
ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
if (ret < 0)
goto out;
if (wait) {
if (write_sem)
down_write(&OCFS2_I(inode)->ip_alloc_sem);
else
down_read(&OCFS2_I(inode)->ip_alloc_sem);
} else {
if (write_sem)
ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
else
ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
if (!ret) {
ret = -EAGAIN;
goto out_unlock;
}
}
return ret;
out_unlock:
brelse(*di_bh);
*di_bh = NULL;
ocfs2_inode_unlock(inode, meta_level);
out:
return ret;
}
static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
struct buffer_head **di_bh,
int meta_level,
int write_sem)
{
if (write_sem)
up_write(&OCFS2_I(inode)->ip_alloc_sem);
else
up_read(&OCFS2_I(inode)->ip_alloc_sem);
brelse(*di_bh);
*di_bh = NULL;
if (meta_level >= 0)
ocfs2_inode_unlock(inode, meta_level);
}
static int ocfs2_prepare_inode_for_write(struct file *file,
loff_t pos, size_t count, int wait)
{
int ret = 0, meta_level = 0, overwrite_io = 0;
int write_sem = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
struct buffer_head *di_bh = NULL;
u32 cpos;
u32 clusters;
	/*
	 * We start with a read level meta lock and only jump to an
	 * exclusive (ex) lock if we need to make modifications here.
	 */
	for (;;) {
ret = ocfs2_inode_lock_for_extent_tree(inode,
&di_bh,
meta_level,
write_sem,
wait);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
}
/*
* Check if IO will overwrite allocated blocks in case
* IOCB_NOWAIT flag is set.
*/
if (!wait && !overwrite_io) {
overwrite_io = 1;
ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out_unlock;
}
}
		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write.) */
if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) {
if (meta_level == 0) {
ocfs2_inode_unlock_for_extent_tree(inode,
&di_bh,
meta_level,
write_sem);
meta_level = 1;
continue;
}
ret = ocfs2_write_remove_suid(inode);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
}
ret = ocfs2_check_range_for_refcount(inode, pos, count);
if (ret == 1) {
ocfs2_inode_unlock_for_extent_tree(inode,
&di_bh,
meta_level,
write_sem);
meta_level = 1;
write_sem = 1;
ret = ocfs2_inode_lock_for_extent_tree(inode,
&di_bh,
meta_level,
write_sem,
wait);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
}
cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
clusters =
ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
}
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out_unlock;
}
break;
}
out_unlock:
trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
pos, count, wait);
ocfs2_inode_unlock_for_extent_tree(inode,
&di_bh,
meta_level,
write_sem);
out:
return ret;
}
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
int rw_level;
ssize_t written = 0;
ssize_t ret;
size_t count = iov_iter_count(from);
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
void *saved_ki_complete = NULL;
int append_write = ((iocb->ki_pos + count) >=
i_size_read(inode) ? 1 : 0);
int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
(unsigned int)from->nr_segs); /* GRRRRR */
if (!direct_io && nowait)
return -EOPNOTSUPP;
if (count == 0)
return 0;
if (nowait) {
if (!inode_trylock(inode))
return -EAGAIN;
} else
inode_lock(inode);
	/*
	 * Concurrent O_DIRECT writes are allowed with
	 * the mount option "coherency=buffered".
	 * For an append write, we must take rw EX.
	 */
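	/*
	 * Summarizing the expression below (derived directly from it):
	 *
	 *	write type				rw_level
	 *	buffered				1 (EX)
	 *	O_DIRECT, coherency=full		1 (EX)
	 *	O_DIRECT append				1 (EX)
	 *	O_DIRECT, coherency=buffered, in-file	0 (shared)
	 */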
rw_level = (!direct_io || full_coherency || append_write);
if (nowait)
ret = ocfs2_try_rw_lock(inode, rw_level);
else
ret = ocfs2_rw_lock(inode, rw_level);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out_mutex;
}
/*
* O_DIRECT writes with "coherency=full" need to take EX cluster
* inode_lock to guarantee coherency.
*/
if (direct_io && full_coherency) {
/*
* We need to take and drop the inode lock to force
* other nodes to drop their caches. Buffered I/O
* already does this in write_begin().
*/
if (nowait)
ret = ocfs2_try_inode_lock(inode, NULL, 1);
else
ret = ocfs2_inode_lock(inode, NULL, 1);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
}
ocfs2_inode_unlock(inode, 1);
}
ret = generic_write_checks(iocb, from);
if (ret <= 0) {
if (ret)
mlog_errno(ret);
goto out;
}
count = ret;
ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
}
if (direct_io && !is_sync_kiocb(iocb) &&
ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
/*
* Make it a sync io if it's an unaligned aio.
*/
saved_ki_complete = xchg(&iocb->ki_complete, NULL);
}
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_rw_locked(iocb, rw_level);
written = __generic_file_write_iter(iocb, from);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(written == -EIOCBQUEUED && !direct_io);
	/*
	 * Deep in __generic_file_write_iter()->ocfs2_direct_IO() we pass in
	 * an ocfs2_dio_end_io function pointer which is called when O_DIRECT
	 * io completes so that it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and others
	 * that don't, so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
}
if (unlikely(written <= 0))
goto out;
if (((file->f_flags & O_DSYNC) && !direct_io) ||
IS_SYNC(inode)) {
ret = filemap_fdatawrite_range(file->f_mapping,
iocb->ki_pos - written,
iocb->ki_pos - 1);
if (ret < 0)
written = ret;
if (!ret) {
ret = jbd2_journal_force_commit(osb->journal->j_journal);
if (ret < 0)
written = ret;
}
if (!ret)
ret = filemap_fdatawait_range(file->f_mapping,
iocb->ki_pos - written,
iocb->ki_pos - 1);
}
out:
if (saved_ki_complete)
xchg(&iocb->ki_complete, saved_ki_complete);
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
out_mutex:
inode_unlock(inode);
if (written)
ret = written;
return ret;
}
static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
struct iov_iter *to)
{
int ret = 0, rw_level = -1, lock_level = 0;
struct file *filp = iocb->ki_filp;
struct inode *inode = file_inode(filp);
int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
filp->f_path.dentry->d_name.len,
filp->f_path.dentry->d_name.name,
to->nr_segs); /* GRRRRR */
if (!inode) {
ret = -EINVAL;
mlog_errno(ret);
goto bail;
}
if (!direct_io && nowait)
return -EOPNOTSUPP;
/*
* buffered reads protect themselves in ->read_folio(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
if (direct_io) {
if (nowait)
ret = ocfs2_try_rw_lock(inode, 0);
else
ret = ocfs2_rw_lock(inode, 0);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto bail;
}
rw_level = 0;
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_rw_locked(iocb, rw_level);
}
	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This gives the checks down below in
	 * copy_splice_read() a chance of actually working.
	 */
ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
!nowait);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto bail;
}
ocfs2_inode_unlock(inode, lock_level);
ret = generic_file_read_iter(iocb, to);
trace_generic_file_read_iter_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !direct_io);
/* see ocfs2_file_write_iter */
if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
rw_level = -1;
}
bail:
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
return ret;
}
static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
struct inode *inode = file_inode(in);
ssize_t ret = 0;
int lock_level = 0;
trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
in->f_path.dentry->d_name.len,
in->f_path.dentry->d_name.name,
flags);
	/*
	 * We're fine letting folks race truncates and extending writes with
	 * read across the cluster, just like they can locally. Hence no
	 * rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields like i_size.
	 * This gives the checks down below in filemap_splice_read() a chance
	 * of actually working.
	 */
ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
goto bail;
}
ocfs2_inode_unlock(inode, lock_level);
ret = filemap_splice_read(in, ppos, pipe, len, flags);
trace_filemap_splice_read_ret(ret);
bail:
return ret;
}
/* See generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret = 0;
inode_lock(inode);
switch (whence) {
case SEEK_SET:
break;
case SEEK_END:
/* SEEK_END requires the OCFS2 inode lock for the file
* because it references the file's size.
*/
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
offset += i_size_read(inode);
ocfs2_inode_unlock(inode, 0);
break;
case SEEK_CUR:
if (offset == 0) {
offset = file->f_pos;
goto out;
}
offset += file->f_pos;
break;
case SEEK_DATA:
case SEEK_HOLE:
ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
if (ret)
goto out;
break;
default:
ret = -EINVAL;
goto out;
}
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
inode_unlock(inode);
if (ret)
return ret;
return offset;
}
static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags)
{
struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
struct buffer_head *in_bh = NULL, *out_bh = NULL;
bool same_inode = (inode_in == inode_out);
loff_t remapped = 0;
ssize_t ret;
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
if (!ocfs2_refcount_tree(osb))
return -EOPNOTSUPP;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
/* Lock both files against IO */
ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
if (ret)
return ret;
/* Check file eligibility and prepare for block sharing. */
ret = -EINVAL;
if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
(OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
goto out_unlock;
ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
&len, remap_flags);
if (ret < 0 || len == 0)
goto out_unlock;
/* Lock out changes to the allocation maps and remap. */
down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
if (!same_inode)
down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
SINGLE_DEPTH_NESTING);
/* Zap any page cache for the destination file's range. */
truncate_inode_pages_range(&inode_out->i_data,
round_down(pos_out, PAGE_SIZE),
round_up(pos_out + len, PAGE_SIZE) - 1);
remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
inode_out, out_bh, pos_out, len);
up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
if (!same_inode)
up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
if (remapped < 0) {
ret = remapped;
mlog_errno(ret);
goto out_unlock;
}
/*
* Empty the extent map so that we may get the right extent
* record from the disk.
*/
ocfs2_extent_map_trunc(inode_in, 0);
ocfs2_extent_map_trunc(inode_out, 0);
ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
if (ret) {
mlog_errno(ret);
goto out_unlock;
}
out_unlock:
ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
return remapped > 0 ? remapped : ret;
}
const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
.permission = ocfs2_permission,
.listxattr = ocfs2_listxattr,
.fiemap = ocfs2_fiemap,
.get_inode_acl = ocfs2_iop_get_acl,
.set_acl = ocfs2_iop_set_acl,
.fileattr_get = ocfs2_fileattr_get,
.fileattr_set = ocfs2_fileattr_set,
};
const struct inode_operations ocfs2_special_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
.permission = ocfs2_permission,
.get_inode_acl = ocfs2_iop_get_acl,
.set_acl = ocfs2_iop_set_acl,
};
/*
* Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
* ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
*/
const struct file_operations ocfs2_fops = {
.llseek = ocfs2_file_llseek,
.mmap = ocfs2_mmap,
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
.open = ocfs2_file_open,
.read_iter = ocfs2_file_read_iter,
.write_iter = ocfs2_file_write_iter,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
#endif
.lock = ocfs2_lock,
.flock = ocfs2_flock,
.splice_read = ocfs2_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
.remap_file_range = ocfs2_remap_file_range,
};
WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
const struct file_operations ocfs2_dops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = shared_ocfs2_readdir,
.fsync = ocfs2_sync_file,
.release = ocfs2_dir_release,
.open = ocfs2_dir_open,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
#endif
.lock = ocfs2_lock,
.flock = ocfs2_flock,
};
/*
* POSIX-lockless variants of our file_operations.
*
* These will be used if the underlying cluster stack does not support
* posix file locking, if the user passes the "localflocks" mount
* option, or if we have a local-only fs.
*
* ocfs2_flock is in here because all stacks handle UNIX file locks,
* so we still want it in the case of no stack support for
* plocks. Internally, it will do the right thing when asked to ignore
* the cluster.
*/
const struct file_operations ocfs2_fops_no_plocks = {
.llseek = ocfs2_file_llseek,
.mmap = ocfs2_mmap,
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
.open = ocfs2_file_open,
.read_iter = ocfs2_file_read_iter,
.write_iter = ocfs2_file_write_iter,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
#endif
.flock = ocfs2_flock,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
.remap_file_range = ocfs2_remap_file_range,
};
const struct file_operations ocfs2_dops_no_plocks = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = shared_ocfs2_readdir,
.fsync = ocfs2_sync_file,
.release = ocfs2_dir_release,
.open = ocfs2_dir_open,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
#endif
.flock = ocfs2_flock,
};
| linux-master | fs/ocfs2/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* heartbeat.c
*
* Register ourselves with the heartbeat service, keep our node maps
* up to date, and fire off recovery when needed.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/bitmap.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
/* special case -1 for now
* TODO: should *really* make sure the calling func never passes -1!! */
static void ocfs2_node_map_init(struct ocfs2_node_map *map)
{
map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
bitmap_zero(map->map, OCFS2_NODE_MAP_MAX_NODES);
}
void ocfs2_init_node_maps(struct ocfs2_super *osb)
{
spin_lock_init(&osb->node_map_lock);
ocfs2_node_map_init(&osb->osb_recovering_orphan_dirs);
}
void ocfs2_do_node_down(int node_num, void *data)
{
struct ocfs2_super *osb = data;
BUG_ON(osb->node_num == node_num);
trace_ocfs2_do_node_down(node_num);
if (!osb->cconn) {
/*
* No cluster connection means we're not even ready to
* participate yet. We check the slots after the cluster
* comes up, so we will notice the node death then. We
* can safely ignore it here.
*/
return;
}
ocfs2_recovery_thread(osb, node_num);
}
void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
struct ocfs2_node_map *map,
int bit)
{
	if (bit == -1)
return;
BUG_ON(bit >= map->num_nodes);
spin_lock(&osb->node_map_lock);
set_bit(bit, map->map);
spin_unlock(&osb->node_map_lock);
}
void ocfs2_node_map_clear_bit(struct ocfs2_super *osb,
struct ocfs2_node_map *map,
int bit)
{
	if (bit == -1)
return;
BUG_ON(bit >= map->num_nodes);
spin_lock(&osb->node_map_lock);
clear_bit(bit, map->map);
spin_unlock(&osb->node_map_lock);
}
int ocfs2_node_map_test_bit(struct ocfs2_super *osb,
struct ocfs2_node_map *map,
int bit)
{
int ret;
if (bit >= map->num_nodes) {
mlog(ML_ERROR, "bit=%d map->num_nodes=%d\n", bit, map->num_nodes);
BUG();
}
spin_lock(&osb->node_map_lock);
ret = test_bit(bit, map->map);
spin_unlock(&osb->node_map_lock);
return ret;
}
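/*
 * A minimal usage sketch (hypothetical call site, shown only to
 * illustrate the API; the real callers live in the recovery code):
 *
 *	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, node);
 *	...
 *	if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, node))
 *		... that node's orphan dir is still being recovered ...
 *	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, node);
 */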
| linux-master | fs/ocfs2/heartbeat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of operations over local quota file
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "file.h"
#include "buffer_head_io.h"
#include "journal.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "quota.h"
#include "uptodate.h"
#include "super.h"
#include "ocfs2_trace.h"
/* Number of local quota structures per block */
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
{
return ((sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) /
sizeof(struct ocfs2_local_disk_dqblk));
}
/* Number of blocks with entries in one chunk */
static inline unsigned int ol_chunk_blocks(struct super_block *sb)
{
return ((sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
OCFS2_QBLK_RESERVED_SPACE) << 3) /
ol_quota_entries_per_block(sb);
}
/* Number of entries in a chunk bitmap */
static unsigned int ol_chunk_entries(struct super_block *sb)
{
return ol_chunk_blocks(sb) * ol_quota_entries_per_block(sb);
}
/* Offset of the chunk in quota file */
static unsigned int ol_quota_chunk_block(struct super_block *sb, int c)
{
/* 1 block for local quota file info, 1 block per chunk for chunk info */
return 1 + (ol_chunk_blocks(sb) + 1) * c;
}
static unsigned int ol_dqblk_block(struct super_block *sb, int c, int off)
{
int epb = ol_quota_entries_per_block(sb);
return ol_quota_chunk_block(sb, c) + 1 + off / epb;
}
static unsigned int ol_dqblk_block_off(struct super_block *sb, int c, int off)
{
int epb = ol_quota_entries_per_block(sb);
return (off % epb) * sizeof(struct ocfs2_local_disk_dqblk);
}
/* Offset of the dquot structure in the quota file */
static loff_t ol_dqblk_off(struct super_block *sb, int c, int off)
{
return (ol_dqblk_block(sb, c, off) << sb->s_blocksize_bits) +
ol_dqblk_block_off(sb, c, off);
}
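/* Offset of a structure within its block, given its offset in the file */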
static inline unsigned int ol_dqblk_block_offset(struct super_block *sb, loff_t off)
{
return off & ((1 << sb->s_blocksize_bits) - 1);
}
/* Compute offset in the chunk of a structure with the given offset */
static int ol_dqblk_chunk_off(struct super_block *sb, int c, loff_t off)
{
int epb = ol_quota_entries_per_block(sb);
return ((off >> sb->s_blocksize_bits) -
ol_quota_chunk_block(sb, c) - 1) * epb
+ ((unsigned int)(off & ((1 << sb->s_blocksize_bits) - 1))) /
sizeof(struct ocfs2_local_disk_dqblk);
}
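/*
 * The helpers above encode the local quota file layout: block 0 holds
 * the ocfs2_local_disk_dqinfo header, and each chunk that follows is
 * one chunk-header block (an ocfs2_local_disk_chunk plus its
 * allocation bitmap) followed by ol_chunk_blocks(sb) blocks of dqblk
 * entries.  Writing N for ol_quota_entries_per_block(sb), entry off of
 * chunk c therefore lives at
 *
 *	block  = 1 + (ol_chunk_blocks(sb) + 1) * c + 1 + off / N
 *	offset = (off % N) * sizeof(struct ocfs2_local_disk_dqblk)
 *
 * which is exactly what ol_dqblk_block() and ol_dqblk_block_off()
 * compute, and what ol_dqblk_chunk_off() inverts.
 */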
/* Write bufferhead into the fs */
static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
void (*modify)(struct buffer_head *, void *), void *private)
{
struct super_block *sb = inode->i_sb;
handle_t *handle;
int status;
handle = ocfs2_start_trans(OCFS2_SB(sb),
OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
return status;
}
status = ocfs2_journal_access_dq(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
return status;
}
lock_buffer(bh);
modify(bh, private);
unlock_buffer(bh);
ocfs2_journal_dirty(handle, bh);
status = ocfs2_commit_trans(OCFS2_SB(sb), handle);
if (status < 0) {
mlog_errno(status);
return status;
}
return 0;
}
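/*
 * ocfs2_modify_bh() wraps one journaled buffer update in its own
 * transaction; callers just supply a callback.  For example, marking
 * the local quota file dirty in ocfs2_local_read_info() below boils
 * down to:
 *
 *	oinfo->dqi_flags &= ~OLQF_CLEAN;
 *	status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
 */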
/*
* Read quota block from a given logical offset.
*
* This function acquires ip_alloc_sem and thus it must not be called with a
* transaction started.
*/
static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
struct buffer_head **bh)
{
int rc = 0;
struct buffer_head *tmp = *bh;
if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block)
return ocfs2_error(inode->i_sb,
"Quota file %llu is probably corrupted! Requested to read block %Lu but file has size only %Lu\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)v_block,
(unsigned long long)i_size_read(inode));
rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
ocfs2_validate_quota_block);
if (rc)
mlog_errno(rc);
/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
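/*
 * As with the other ocfs2 read helpers, callers pass *bh == NULL to
 * have a buffer head allocated, and must brelse() it when done, e.g.:
 *
 *	struct buffer_head *bh = NULL;
 *
 *	status = ocfs2_read_quota_block(inode, 0, &bh);
 *	if (!status) {
 *		...
 *		brelse(bh);
 *	}
 */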
/* Check whether we understand format of quota files */
static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
{
unsigned int lmagics[OCFS2_MAXQUOTAS] = OCFS2_LOCAL_QMAGICS;
unsigned int lversions[OCFS2_MAXQUOTAS] = OCFS2_LOCAL_QVERSIONS;
unsigned int gmagics[OCFS2_MAXQUOTAS] = OCFS2_GLOBAL_QMAGICS;
unsigned int gversions[OCFS2_MAXQUOTAS] = OCFS2_GLOBAL_QVERSIONS;
unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
GROUP_QUOTA_SYSTEM_INODE };
struct buffer_head *bh = NULL;
struct inode *linode = sb_dqopt(sb)->files[type];
struct inode *ginode = NULL;
struct ocfs2_disk_dqheader *dqhead;
int status, ret = 0;
/* First check whether we understand local quota file */
status = ocfs2_read_quota_block(linode, 0, &bh);
if (status) {
mlog_errno(status);
mlog(ML_ERROR, "failed to read quota file header (type=%d)\n",
type);
goto out_err;
}
dqhead = (struct ocfs2_disk_dqheader *)(bh->b_data);
if (le32_to_cpu(dqhead->dqh_magic) != lmagics[type]) {
mlog(ML_ERROR, "quota file magic does not match (%u != %u),"
" type=%d\n", le32_to_cpu(dqhead->dqh_magic),
lmagics[type], type);
goto out_err;
}
if (le32_to_cpu(dqhead->dqh_version) != lversions[type]) {
mlog(ML_ERROR, "quota file version does not match (%u != %u),"
" type=%d\n", le32_to_cpu(dqhead->dqh_version),
lversions[type], type);
goto out_err;
}
brelse(bh);
bh = NULL;
/* Next check whether we understand global quota file */
ginode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
OCFS2_INVALID_SLOT);
if (!ginode) {
mlog(ML_ERROR, "cannot get global quota file inode "
"(type=%d)\n", type);
goto out_err;
}
/* Since the header is read only, we don't care about locking */
status = ocfs2_read_quota_block(ginode, 0, &bh);
if (status) {
mlog_errno(status);
mlog(ML_ERROR, "failed to read global quota file header "
"(type=%d)\n", type);
goto out_err;
}
dqhead = (struct ocfs2_disk_dqheader *)(bh->b_data);
if (le32_to_cpu(dqhead->dqh_magic) != gmagics[type]) {
mlog(ML_ERROR, "global quota file magic does not match "
"(%u != %u), type=%d\n",
le32_to_cpu(dqhead->dqh_magic), gmagics[type], type);
goto out_err;
}
if (le32_to_cpu(dqhead->dqh_version) != gversions[type]) {
mlog(ML_ERROR, "global quota file version does not match "
"(%u != %u), type=%d\n",
le32_to_cpu(dqhead->dqh_version), gversions[type],
type);
goto out_err;
}
ret = 1;
out_err:
brelse(bh);
iput(ginode);
return ret;
}
/* Release given list of quota file chunks */
static void ocfs2_release_local_quota_bitmaps(struct list_head *head)
{
struct ocfs2_quota_chunk *pos, *next;
list_for_each_entry_safe(pos, next, head, qc_chunk) {
list_del(&pos->qc_chunk);
brelse(pos->qc_headerbh);
kmem_cache_free(ocfs2_qf_chunk_cachep, pos);
}
}
/* Load quota bitmaps into memory */
static int ocfs2_load_local_quota_bitmaps(struct inode *inode,
struct ocfs2_local_disk_dqinfo *ldinfo,
struct list_head *head)
{
struct ocfs2_quota_chunk *newchunk;
int i, status;
INIT_LIST_HEAD(head);
for (i = 0; i < le32_to_cpu(ldinfo->dqi_chunks); i++) {
newchunk = kmem_cache_alloc(ocfs2_qf_chunk_cachep, GFP_NOFS);
if (!newchunk) {
ocfs2_release_local_quota_bitmaps(head);
return -ENOMEM;
}
newchunk->qc_num = i;
newchunk->qc_headerbh = NULL;
status = ocfs2_read_quota_block(inode,
ol_quota_chunk_block(inode->i_sb, i),
&newchunk->qc_headerbh);
if (status) {
mlog_errno(status);
kmem_cache_free(ocfs2_qf_chunk_cachep, newchunk);
ocfs2_release_local_quota_bitmaps(head);
return status;
}
list_add_tail(&newchunk->qc_chunk, head);
}
return 0;
}
static void olq_update_info(struct buffer_head *bh, void *private)
{
struct mem_dqinfo *info = private;
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_local_disk_dqinfo *ldinfo;
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
spin_lock(&dq_data_lock);
ldinfo->dqi_flags = cpu_to_le32(oinfo->dqi_flags);
ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
spin_unlock(&dq_data_lock);
}
static int ocfs2_add_recovery_chunk(struct super_block *sb,
struct ocfs2_local_disk_chunk *dchunk,
int chunk,
struct list_head *head)
{
struct ocfs2_recovery_chunk *rc;
rc = kmalloc(sizeof(struct ocfs2_recovery_chunk), GFP_NOFS);
if (!rc)
return -ENOMEM;
rc->rc_chunk = chunk;
rc->rc_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
if (!rc->rc_bitmap) {
kfree(rc);
return -ENOMEM;
}
memcpy(rc->rc_bitmap, dchunk->dqc_bitmap,
(ol_chunk_entries(sb) + 7) >> 3);
list_add_tail(&rc->rc_list, head);
return 0;
}
static void free_recovery_list(struct list_head *head)
{
struct ocfs2_recovery_chunk *next;
struct ocfs2_recovery_chunk *rchunk;
list_for_each_entry_safe(rchunk, next, head, rc_list) {
list_del(&rchunk->rc_list);
kfree(rchunk->rc_bitmap);
kfree(rchunk);
}
}
void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec)
{
int type;
for (type = 0; type < OCFS2_MAXQUOTAS; type++)
free_recovery_list(&(rec->r_list[type]));
kfree(rec);
}
/* Load the entries in our quota file that we have to recover */
static int ocfs2_recovery_load_quota(struct inode *lqinode,
struct ocfs2_local_disk_dqinfo *ldinfo,
int type,
struct list_head *head)
{
struct super_block *sb = lqinode->i_sb;
struct buffer_head *hbh;
struct ocfs2_local_disk_chunk *dchunk;
int i, chunks = le32_to_cpu(ldinfo->dqi_chunks);
int status = 0;
for (i = 0; i < chunks; i++) {
hbh = NULL;
status = ocfs2_read_quota_block(lqinode,
ol_quota_chunk_block(sb, i),
&hbh);
if (status) {
mlog_errno(status);
break;
}
dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
if (le32_to_cpu(dchunk->dqc_free) < ol_chunk_entries(sb))
status = ocfs2_add_recovery_chunk(sb, dchunk, i, head);
brelse(hbh);
if (status < 0)
break;
}
if (status < 0)
free_recovery_list(head);
return status;
}
static struct ocfs2_quota_recovery *ocfs2_alloc_quota_recovery(void)
{
int type;
struct ocfs2_quota_recovery *rec;
rec = kmalloc(sizeof(struct ocfs2_quota_recovery), GFP_NOFS);
if (!rec)
return NULL;
for (type = 0; type < OCFS2_MAXQUOTAS; type++)
INIT_LIST_HEAD(&(rec->r_list[type]));
return rec;
}
/* Load information we need for quota recovery into memory */
struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
struct ocfs2_super *osb,
int slot_num)
{
unsigned int feature[OCFS2_MAXQUOTAS] = {
OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct inode *lqinode;
struct buffer_head *bh;
int type;
int status = 0;
struct ocfs2_quota_recovery *rec;
printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
"slot %u\n", osb->dev_str, slot_num);
rec = ocfs2_alloc_quota_recovery();
if (!rec)
return ERR_PTR(-ENOMEM);
/* First init... */
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
/* At this point, journal of the slot is already replayed so
* we can trust metadata and data of the quota file */
lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
if (!lqinode) {
status = -ENOENT;
goto out;
}
status = ocfs2_inode_lock_full(lqinode, NULL, 1,
OCFS2_META_LOCK_RECOVERY);
if (status < 0) {
mlog_errno(status);
goto out_put;
}
/* Now read local header */
bh = NULL;
status = ocfs2_read_quota_block(lqinode, 0, &bh);
if (status) {
mlog_errno(status);
mlog(ML_ERROR, "failed to read quota file info header "
"(slot=%d type=%d)\n", slot_num, type);
goto out_lock;
}
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
status = ocfs2_recovery_load_quota(lqinode, ldinfo, type,
&rec->r_list[type]);
brelse(bh);
out_lock:
ocfs2_inode_unlock(lqinode, 1);
out_put:
iput(lqinode);
if (status < 0)
break;
}
out:
if (status < 0) {
ocfs2_free_quota_recovery(rec);
rec = ERR_PTR(status);
}
return rec;
}
/* Sync changes in the local quota file into the global quota file and
 * reinitialize the local quota file.
 * The function expects the local quota file to already be locked and
 * s_umount to be held in shared mode. */
static int ocfs2_recover_local_quota_file(struct inode *lqinode,
int type,
struct ocfs2_quota_recovery *rec)
{
struct super_block *sb = lqinode->i_sb;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
struct ocfs2_local_disk_chunk *dchunk;
struct ocfs2_local_disk_dqblk *dqblk;
struct dquot *dquot;
handle_t *handle;
struct buffer_head *hbh = NULL, *qbh = NULL;
int status = 0;
int bit, chunk;
struct ocfs2_recovery_chunk *rchunk, *next;
qsize_t spacechange, inodechange;
trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type);
list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
chunk = rchunk->rc_chunk;
hbh = NULL;
status = ocfs2_read_quota_block(lqinode,
ol_quota_chunk_block(sb, chunk),
&hbh);
if (status) {
mlog_errno(status);
break;
}
dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
for_each_set_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
qbh = NULL;
status = ocfs2_read_quota_block(lqinode,
ol_dqblk_block(sb, chunk, bit),
&qbh);
if (status) {
mlog_errno(status);
break;
}
dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data +
ol_dqblk_block_off(sb, chunk, bit));
dquot = dqget(sb,
make_kqid(&init_user_ns, type,
le64_to_cpu(dqblk->dqb_id)));
if (IS_ERR(dquot)) {
status = PTR_ERR(dquot);
mlog(ML_ERROR, "Failed to get quota structure "
"for id %u, type %d. Cannot finish quota "
"file recovery.\n",
(unsigned)le64_to_cpu(dqblk->dqb_id),
type);
goto out_put_bh;
}
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0) {
mlog_errno(status);
goto out_put_dquot;
}
handle = ocfs2_start_trans(OCFS2_SB(sb),
OCFS2_QSYNC_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_drop_lock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
spin_lock(&dquot->dq_dqb_lock);
/* Add usage from quota entry into quota changes
* of our node. Auxiliary variables are important
* due to signedness */
spacechange = le64_to_cpu(dqblk->dqb_spacemod);
inodechange = le64_to_cpu(dqblk->dqb_inodemod);
dquot->dq_dqb.dqb_curspace += spacechange;
dquot->dq_dqb.dqb_curinodes += inodechange;
spin_unlock(&dquot->dq_dqb_lock);
			/* We want to drop the reference held by the crashed
			 * node. Since we have our own reference we know the
			 * global structure won't actually be freed. */
status = ocfs2_global_release_dquot(dquot);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
/* Release local quota file entry */
status = ocfs2_journal_access_dq(handle,
INODE_CACHE(lqinode),
qbh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_commit;
}
lock_buffer(qbh);
WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
out_commit:
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_drop_lock:
ocfs2_unlock_global_qf(oinfo, 1);
out_put_dquot:
dqput(dquot);
out_put_bh:
brelse(qbh);
if (status < 0)
break;
}
brelse(hbh);
list_del(&rchunk->rc_list);
kfree(rchunk->rc_bitmap);
kfree(rchunk);
if (status < 0)
break;
}
if (status < 0)
free_recovery_list(&(rec->r_list[type]));
if (status)
mlog_errno(status);
return status;
}
/* Recover local quota files for given node different from us */
int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
struct ocfs2_quota_recovery *rec,
int slot_num)
{
unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct buffer_head *bh;
handle_t *handle;
int type;
int status = 0;
struct inode *lqinode;
unsigned int flags;
printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
"slot %u\n", osb->dev_str, slot_num);
down_read(&sb->s_umount);
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
trace_ocfs2_finish_quota_recovery(slot_num);
lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
if (!lqinode) {
status = -ENOENT;
goto out;
}
status = ocfs2_inode_lock_full(lqinode, NULL, 1,
OCFS2_META_LOCK_NOQUEUE);
		/* Someone else is holding the lock? Then they must be
		 * doing the recovery. Just skip the file... */
if (status == -EAGAIN) {
printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
"device (%s) for slot %d because quota file is "
"locked.\n", osb->dev_str, slot_num);
status = 0;
goto out_put;
} else if (status < 0) {
mlog_errno(status);
goto out_put;
}
/* Now read local header */
bh = NULL;
status = ocfs2_read_quota_block(lqinode, 0, &bh);
if (status) {
mlog_errno(status);
mlog(ML_ERROR, "failed to read quota file info header "
"(slot=%d type=%d)\n", slot_num, type);
goto out_lock;
}
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
/* Is recovery still needed? */
flags = le32_to_cpu(ldinfo->dqi_flags);
if (!(flags & OLQF_CLEAN))
status = ocfs2_recover_local_quota_file(lqinode,
type,
rec);
/* We don't want to mark file as clean when it is actually
* active */
if (slot_num == osb->slot_num)
goto out_bh;
/* Mark quota file as clean if we are recovering quota file of
* some other node. */
handle = ocfs2_start_trans(osb,
OCFS2_LOCAL_QINFO_WRITE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out_bh;
}
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
lock_buffer(bh);
ldinfo->dqi_flags = cpu_to_le32(flags | OLQF_CLEAN);
unlock_buffer(bh);
ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
out_bh:
brelse(bh);
out_lock:
ocfs2_inode_unlock(lqinode, 1);
out_put:
iput(lqinode);
if (status < 0)
break;
}
out:
up_read(&sb->s_umount);
kfree(rec);
return status;
}
/* Read information header from quota file */
static int ocfs2_local_read_info(struct super_block *sb, int type)
{
struct ocfs2_local_disk_dqinfo *ldinfo;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo;
struct inode *lqinode = sb_dqopt(sb)->files[type];
int status;
struct buffer_head *bh = NULL;
struct ocfs2_quota_recovery *rec;
int locked = 0;
info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
" info.");
goto out_err;
}
info->dqi_priv = oinfo;
oinfo->dqi_type = type;
INIT_LIST_HEAD(&oinfo->dqi_chunk);
oinfo->dqi_rec = NULL;
oinfo->dqi_lqi_bh = NULL;
oinfo->dqi_libh = NULL;
status = ocfs2_global_read_info(sb, type);
if (status < 0)
goto out_err;
status = ocfs2_inode_lock(lqinode, &oinfo->dqi_lqi_bh, 1);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
locked = 1;
/* Now read local header */
status = ocfs2_read_quota_block(lqinode, 0, &bh);
if (status) {
mlog_errno(status);
mlog(ML_ERROR, "failed to read quota file info header "
"(type=%d)\n", type);
goto out_err;
}
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
oinfo->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
oinfo->dqi_libh = bh;
	/* Did we crash while the local quota file was in use? */
if (!(oinfo->dqi_flags & OLQF_CLEAN)) {
rec = OCFS2_SB(sb)->quota_rec;
if (!rec) {
rec = ocfs2_alloc_quota_recovery();
if (!rec) {
status = -ENOMEM;
mlog_errno(status);
goto out_err;
}
OCFS2_SB(sb)->quota_rec = rec;
}
status = ocfs2_recovery_load_quota(lqinode, ldinfo, type,
&rec->r_list[type]);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
}
status = ocfs2_load_local_quota_bitmaps(lqinode,
ldinfo,
&oinfo->dqi_chunk);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
/* Now mark quota file as used */
oinfo->dqi_flags &= ~OLQF_CLEAN;
status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
return 0;
out_err:
if (oinfo) {
iput(oinfo->dqi_gqinode);
ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
ocfs2_lock_res_free(&oinfo->dqi_gqlock);
brelse(oinfo->dqi_lqi_bh);
if (locked)
ocfs2_inode_unlock(lqinode, 1);
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
kfree(oinfo);
}
brelse(bh);
return -1;
}
/* Write local info to quota file */
static int ocfs2_local_write_info(struct super_block *sb, int type)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv)
->dqi_libh;
int status;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
info);
if (status < 0) {
mlog_errno(status);
return -1;
}
return 0;
}
/* Release info from memory */
static int ocfs2_local_free_info(struct super_block *sb, int type)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_quota_chunk *chunk;
struct ocfs2_local_disk_chunk *dchunk;
int mark_clean = 1, len;
int status = 0;
iput(oinfo->dqi_gqinode);
ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
ocfs2_lock_res_free(&oinfo->dqi_gqlock);
list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) {
dchunk = (struct ocfs2_local_disk_chunk *)
(chunk->qc_headerbh->b_data);
if (chunk->qc_num < oinfo->dqi_chunks - 1) {
len = ol_chunk_entries(sb);
} else {
len = (oinfo->dqi_blocks -
ol_quota_chunk_block(sb, chunk->qc_num) - 1)
* ol_quota_entries_per_block(sb);
}
/* Not all entries free? Bug! */
if (le32_to_cpu(dchunk->dqc_free) != len) {
mlog(ML_ERROR, "releasing quota file with used "
"entries (type=%d)\n", type);
mark_clean = 0;
}
}
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
/*
* s_umount held in exclusive mode protects us against racing with
* recovery thread...
*/
if (oinfo->dqi_rec) {
ocfs2_free_quota_recovery(oinfo->dqi_rec);
mark_clean = 0;
}
if (!mark_clean)
goto out;
/* Mark local file as clean */
oinfo->dqi_flags |= OLQF_CLEAN;
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
oinfo->dqi_libh,
olq_update_info,
info);
if (status < 0)
mlog_errno(status);
out:
ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
return status;
}
static void olq_set_dquot(struct buffer_head *bh, void *private)
{
struct ocfs2_dquot *od = private;
struct ocfs2_local_disk_dqblk *dqblk;
struct super_block *sb = od->dq_dquot.dq_sb;
dqblk = (struct ocfs2_local_disk_dqblk *)(bh->b_data
+ ol_dqblk_block_offset(sb, od->dq_local_off));
dqblk->dqb_id = cpu_to_le64(from_kqid(&init_user_ns,
od->dq_dquot.dq_id));
spin_lock(&od->dq_dquot.dq_dqb_lock);
dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace -
od->dq_origspace);
dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
od->dq_originodes);
spin_unlock(&od->dq_dquot.dq_dqb_lock);
trace_olq_set_dquot(
(unsigned long long)le64_to_cpu(dqblk->dqb_spacemod),
(unsigned long long)le64_to_cpu(dqblk->dqb_inodemod),
from_kqid(&init_user_ns, od->dq_dquot.dq_id));
}
/* Write dquot to local quota file */
int ocfs2_local_write_dquot(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
struct buffer_head *bh;
struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_id.type];
int status;
status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
&bh);
if (status) {
mlog_errno(status);
goto out;
}
status = ocfs2_modify_bh(lqinode, bh, olq_set_dquot, od);
if (status < 0) {
mlog_errno(status);
goto out;
}
out:
brelse(bh);
return status;
}
/* Find free entry in local quota file */
static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
int type,
int *offset)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_quota_chunk *chunk = NULL, *iter;
struct ocfs2_local_disk_chunk *dchunk;
int found = 0, len;
list_for_each_entry(iter, &oinfo->dqi_chunk, qc_chunk) {
dchunk = (struct ocfs2_local_disk_chunk *)
iter->qc_headerbh->b_data;
if (le32_to_cpu(dchunk->dqc_free) > 0) {
chunk = iter;
break;
}
}
if (!chunk)
return NULL;
if (chunk->qc_num < oinfo->dqi_chunks - 1) {
len = ol_chunk_entries(sb);
} else {
len = (oinfo->dqi_blocks -
ol_quota_chunk_block(sb, chunk->qc_num) - 1)
* ol_quota_entries_per_block(sb);
}
found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
/* We failed? */
if (found == len) {
mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
" entries free (type=%d)\n", chunk->qc_num,
le32_to_cpu(dchunk->dqc_free), type);
return ERR_PTR(-EIO);
}
*offset = found;
return chunk;
}
/* Add new chunk to the local quota file */
static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
struct super_block *sb,
int type,
int *offset)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct inode *lqinode = sb_dqopt(sb)->files[type];
struct ocfs2_quota_chunk *chunk = NULL;
struct ocfs2_local_disk_chunk *dchunk;
int status;
handle_t *handle;
struct buffer_head *bh = NULL, *dbh = NULL;
u64 p_blkno;
/* We are protected by dqio_sem so no locking needed */
status = ocfs2_extend_no_holes(lqinode, NULL,
i_size_read(lqinode) + 2 * sb->s_blocksize,
i_size_read(lqinode));
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
i_size_read(lqinode) + 2 * sb->s_blocksize);
if (status < 0) {
mlog_errno(status);
goto out;
}
chunk = kmem_cache_alloc(ocfs2_qf_chunk_cachep, GFP_NOFS);
if (!chunk) {
status = -ENOMEM;
mlog_errno(status);
goto out;
}
/* Local quota info and two new blocks we initialize */
handle = ocfs2_start_trans(OCFS2_SB(sb),
OCFS2_LOCAL_QINFO_WRITE_CREDITS +
2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out;
}
/* Initialize chunk header */
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
&p_blkno, NULL, NULL);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
bh = sb_getblk(sb, p_blkno);
if (!bh) {
status = -ENOMEM;
mlog_errno(status);
goto out_trans;
}
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh);
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
lock_buffer(bh);
dchunk->dqc_free = cpu_to_le32(ol_quota_entries_per_block(sb));
memset(dchunk->dqc_bitmap, 0,
sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(bh);
ocfs2_journal_dirty(handle, bh);
/* Initialize new block with structures */
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
&p_blkno, NULL, NULL);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
dbh = sb_getblk(sb, p_blkno);
if (!dbh) {
status = -ENOMEM;
mlog_errno(status);
goto out_trans;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), dbh);
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), dbh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
lock_buffer(dbh);
memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(dbh);
ocfs2_journal_dirty(handle, dbh);
/* Update local quotafile info */
oinfo->dqi_blocks += 2;
oinfo->dqi_chunks++;
status = ocfs2_local_write_info(sb, type);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
status = ocfs2_commit_trans(OCFS2_SB(sb), handle);
if (status < 0) {
mlog_errno(status);
goto out;
}
list_add_tail(&chunk->qc_chunk, &oinfo->dqi_chunk);
chunk->qc_num = list_entry(chunk->qc_chunk.prev,
struct ocfs2_quota_chunk,
qc_chunk)->qc_num + 1;
chunk->qc_headerbh = bh;
*offset = 0;
return chunk;
out_trans:
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
brelse(bh);
brelse(dbh);
kmem_cache_free(ocfs2_qf_chunk_cachep, chunk);
return ERR_PTR(status);
}
/* Extend the local quota file so that a free entry is available */
static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
struct super_block *sb,
int type,
int *offset)
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_quota_chunk *chunk;
struct inode *lqinode = sb_dqopt(sb)->files[type];
struct ocfs2_local_disk_chunk *dchunk;
int epb = ol_quota_entries_per_block(sb);
unsigned int chunk_blocks;
struct buffer_head *bh;
u64 p_blkno;
int status;
handle_t *handle;
if (list_empty(&oinfo->dqi_chunk))
return ocfs2_local_quota_add_chunk(sb, type, offset);
/* Is the last chunk full? */
chunk = list_entry(oinfo->dqi_chunk.prev,
struct ocfs2_quota_chunk, qc_chunk);
chunk_blocks = oinfo->dqi_blocks -
ol_quota_chunk_block(sb, chunk->qc_num) - 1;
if (ol_chunk_blocks(sb) == chunk_blocks)
return ocfs2_local_quota_add_chunk(sb, type, offset);
/* We are protected by dqio_sem so no locking needed */
status = ocfs2_extend_no_holes(lqinode, NULL,
i_size_read(lqinode) + sb->s_blocksize,
i_size_read(lqinode));
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
i_size_read(lqinode) + sb->s_blocksize);
if (status < 0) {
mlog_errno(status);
goto out;
}
/* Get buffer from the just added block */
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
&p_blkno, NULL, NULL);
if (status < 0) {
mlog_errno(status);
goto out;
}
bh = sb_getblk(sb, p_blkno);
if (!bh) {
status = -ENOMEM;
mlog_errno(status);
goto out;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh);
/* Local quota info, chunk header and the new block we initialize */
handle = ocfs2_start_trans(OCFS2_SB(sb),
OCFS2_LOCAL_QINFO_WRITE_CREDITS +
2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
goto out;
}
/* Zero created block */
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
unlock_buffer(bh);
ocfs2_journal_dirty(handle, bh);
/* Update chunk header */
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
chunk->qc_headerbh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
dchunk = (struct ocfs2_local_disk_chunk *)chunk->qc_headerbh->b_data;
lock_buffer(chunk->qc_headerbh);
le32_add_cpu(&dchunk->dqc_free, ol_quota_entries_per_block(sb));
unlock_buffer(chunk->qc_headerbh);
ocfs2_journal_dirty(handle, chunk->qc_headerbh);
/* Update file header */
oinfo->dqi_blocks++;
status = ocfs2_local_write_info(sb, type);
if (status < 0) {
mlog_errno(status);
goto out_trans;
}
status = ocfs2_commit_trans(OCFS2_SB(sb), handle);
if (status < 0) {
mlog_errno(status);
goto out;
}
*offset = chunk_blocks * epb;
return chunk;
out_trans:
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
return ERR_PTR(status);
}
static void olq_alloc_dquot(struct buffer_head *bh, void *private)
{
int *offset = private;
struct ocfs2_local_disk_chunk *dchunk;
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, -1);
}
/* Create dquot in the local file for given id */
int ocfs2_create_local_dquot(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
int type = dquot->dq_id.type;
struct inode *lqinode = sb_dqopt(sb)->files[type];
struct ocfs2_quota_chunk *chunk;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
int offset;
int status;
u64 pcount;
down_write(&OCFS2_I(lqinode)->ip_alloc_sem);
chunk = ocfs2_find_free_entry(sb, type, &offset);
if (!chunk) {
chunk = ocfs2_extend_local_quota_file(sb, type, &offset);
if (IS_ERR(chunk)) {
status = PTR_ERR(chunk);
goto out;
}
} else if (IS_ERR(chunk)) {
status = PTR_ERR(chunk);
goto out;
}
od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset);
od->dq_chunk = chunk;
	status = ocfs2_extent_map_get_blocks(lqinode,
				     ol_dqblk_block(sb, chunk->qc_num, offset),
				     &od->dq_local_phys_blk,
				     &pcount,
				     NULL);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}
/* Initialize dquot structure on disk */
status = ocfs2_local_write_dquot(dquot);
if (status < 0) {
mlog_errno(status);
goto out;
}
/* Mark structure as allocated */
status = ocfs2_modify_bh(lqinode, chunk->qc_headerbh, olq_alloc_dquot,
&offset);
if (status < 0) {
mlog_errno(status);
goto out;
}
out:
up_write(&OCFS2_I(lqinode)->ip_alloc_sem);
return status;
}
/*
* Release dquot structure from local quota file. ocfs2_release_dquot() has
* already started a transaction and written all changes to global quota file
*/
int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
{
int status;
int type = dquot->dq_id.type;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
struct super_block *sb = dquot->dq_sb;
struct ocfs2_local_disk_chunk *dchunk;
int offset;
status = ocfs2_journal_access_dq(handle,
INODE_CACHE(sb_dqopt(sb)->files[type]),
od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out;
}
offset = ol_dqblk_chunk_off(sb, od->dq_chunk->qc_num,
od->dq_local_off);
dchunk = (struct ocfs2_local_disk_chunk *)
(od->dq_chunk->qc_headerbh->b_data);
/* Mark structure as freed */
lock_buffer(od->dq_chunk->qc_headerbh);
ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
out:
return status;
}
static const struct quota_format_ops ocfs2_format_ops = {
.check_quota_file = ocfs2_local_check_quota_file,
.read_file_info = ocfs2_local_read_info,
.write_file_info = ocfs2_global_write_info,
.free_file_info = ocfs2_local_free_info,
};
struct quota_format_type ocfs2_quota_format = {
.qf_fmt_id = QFMT_OCFS2,
.qf_ops = &ocfs2_format_ops,
.qf_owner = THIS_MODULE
};
| linux-master | fs/ocfs2/quota_local.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmglue.c
*
* Code which implements an OCFS2 specific interface to our DLM.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>
#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_lockingver.h"
#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "acl.h"
#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
struct list_head mw_item;
int mw_status;
struct completion mw_complete;
unsigned long mw_mask;
unsigned long mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
ktime_t mw_lock_start;
#endif
};
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock().
 */
enum ocfs2_unblock_action {
UNBLOCK_CONTINUE = 0, /* Continue downconvert */
UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
* ->post_unlock callback */
UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
* ->post_unlock() callback. */
};
struct ocfs2_unblock_ctl {
int requeue;
enum ocfs2_unblock_action unblock_action;
};
/* Lockdep class keys */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
#endif
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
const char *function,
unsigned int line,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
mlog(level, "LVB information for %s (called from %s:%u):\n",
lockres->l_name, function, line);
mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
be32_to_cpu(lvb->lvb_igeneration));
mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
(unsigned long long)be64_to_cpu(lvb->lvb_isize),
be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
be16_to_cpu(lvb->lvb_imode));
mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
"mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
(long long)be64_to_cpu(lvb->lvb_iatime_packed),
(long long)be64_to_cpu(lvb->lvb_ictime_packed),
(long long)be64_to_cpu(lvb->lvb_imtime_packed),
be32_to_cpu(lvb->lvb_iattr));
}
/*
* OCFS2 Lock Resource Operations
*
* These fine tune the behavior of the generic dlmglue locking infrastructure.
*
* The most basic of lock types can point ->l_priv to their respective
* struct ocfs2_super and allow the default actions to manage things.
*
* Right now, each lock type also needs to implement an init function,
* and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
* should be called when the lock is no longer needed (i.e., object
* destruction time).
*/
struct ocfs2_lock_res_ops {
/*
* Translate an ocfs2_lock_res * into an ocfs2_super *. Define
* this callback if ->l_priv is not an ocfs2_super pointer
*/
struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
/*
* Optionally called in the downconvert thread after a
* successful downconvert. The lockres will not be referenced
* after this callback is called, so it is safe to free
* memory, etc.
*
* The exact semantics of when this is called are controlled
* by ->downconvert_worker()
*/
void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
/*
* Allow a lock type to add checks to determine whether it is
* safe to downconvert a lock. Return 0 to re-queue the
* downconvert at a later time, nonzero to continue.
*
* For most locks, the default checks that there are no
* incompatible holders are sufficient.
*
* Called with the lockres spinlock held.
*/
int (*check_downconvert)(struct ocfs2_lock_res *, int);
/*
* Allows a lock type to populate the lock value block. This
* is called on downconvert, and when we drop a lock.
*
* Locks that want to use this should set LOCK_TYPE_USES_LVB
* in the flags field.
*
* Called with the lockres spinlock held.
*/
void (*set_lvb)(struct ocfs2_lock_res *);
/*
* Called from the downconvert thread when it is determined
* that a lock will be downconverted. This is called without
* any locks held so the function can do work that might
* schedule (syncing out data, etc).
*
* This should return any one of the ocfs2_unblock_action
* values, depending on what it wants the thread to do.
*/
int (*downconvert_worker)(struct ocfs2_lock_res *, int);
/*
* LOCK_TYPE_* flags which describe the specific requirements
* of a lock type. Descriptions of each individual flag follow.
*/
int flags;
};
/*
* Some locks want to "refresh" potentially stale data when a
* meaningful (PRMODE or EXMODE) lock level is first obtained. If this
* flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
* individual lockres l_flags member from the ast function. It is
* expected that the locking wrapper will clear the
* OCFS2_LOCK_NEEDS_REFRESH flag when done.
*/
#define LOCK_TYPE_REQUIRES_REFRESH 0x1
/*
* Indicate that a lock type makes use of the lock value block. The
* ->set_lvb lock type callback must be defined.
*/
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
.get_osb = ocfs2_get_inode_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
.get_osb = ocfs2_get_inode_osb,
.check_downconvert = ocfs2_check_meta_downconvert,
.set_lvb = ocfs2_set_meta_lvb,
.downconvert_worker = ocfs2_data_convert_worker,
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_super_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH,
};
static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
.get_osb = ocfs2_get_dentry_osb,
.post_unlock = ocfs2_dentry_post_unlock,
.downconvert_worker = ocfs2_dentry_convert_worker,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
.get_osb = ocfs2_get_inode_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
.get_osb = ocfs2_get_file_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
.set_lvb = ocfs2_set_qinfo_lvb,
.get_osb = ocfs2_get_qinfo_osb,
.flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
.check_downconvert = ocfs2_check_refcount_downconvert,
.downconvert_worker = ocfs2_refcount_convert_worker,
.flags = 0,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
return lockres->l_type == OCFS2_LOCK_TYPE_META ||
lockres->l_type == OCFS2_LOCK_TYPE_RW ||
lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
return container_of(lksb, struct ocfs2_lock_res, l_lksb);
}
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
BUG_ON(!ocfs2_is_inode_lock(lockres));
return (struct inode *) lockres->l_priv;
}
static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
return (struct ocfs2_dentry_lock *)lockres->l_priv;
}
static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}
static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{
return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
}
static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
if (lockres->l_ops->get_osb)
return lockres->l_ops->get_osb(lockres);
return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level)
{
__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
_err, _func, _lockres->l_name); \
else \
mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
_err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
(unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int new_level,
int lvb,
unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
u64 blkno,
u32 generation,
char *name)
{
int len;
BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
(long long)blkno, generation);
BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
mlog(0, "built lock resource with name: %s\n", name);
}
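/*
 * Illustrative example (not compiled): a meta lock on inode block
 * 0x1234 with generation 0xabcd would be named roughly
 *
 *	"M" OCFS2_LOCK_ID_PAD "0000000000001234" "0000abcd"
 *
 * one type character, the fixed pad string, 16 hex digits of block
 * number and 8 hex digits of generation, always filling exactly
 * OCFS2_LOCK_ID_MAX_LEN - 1 characters, as the BUG_ON above checks.
 */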
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
struct ocfs2_dlm_debug *dlm_debug)
{
mlog(0, "Add tracking for lockres %s\n", res->l_name);
spin_lock(&ocfs2_dlm_tracking_lock);
list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
spin_unlock(&ocfs2_dlm_tracking_lock);
}
static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
spin_lock(&ocfs2_dlm_tracking_lock);
if (!list_empty(&res->l_debug_list))
list_del_init(&res->l_debug_list);
spin_unlock(&ocfs2_dlm_tracking_lock);
}
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
res->l_lock_refresh = 0;
res->l_lock_wait = 0;
memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
struct ocfs2_mask_waiter *mw, int ret)
{
u32 usec;
ktime_t kt;
struct ocfs2_lock_stats *stats;
if (level == LKM_PRMODE)
stats = &res->l_lock_prmode;
else if (level == LKM_EXMODE)
stats = &res->l_lock_exmode;
else
return;
kt = ktime_sub(ktime_get(), mw->mw_lock_start);
usec = ktime_to_us(kt);
stats->ls_gets++;
stats->ls_total += ktime_to_ns(kt);
/* ls_gets wrapped around to zero; restart the running totals */
if (unlikely(stats->ls_gets == 0)) {
stats->ls_gets++;
stats->ls_total = ktime_to_ns(kt);
}
if (stats->ls_max < usec)
stats->ls_max = usec;
if (ret)
stats->ls_fail++;
stats->ls_last = ktime_to_us(ktime_get_real());
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
lockres->l_lock_refresh++;
}
static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{
struct ocfs2_mask_waiter *mw;
if (list_empty(&lockres->l_mask_waiters)) {
lockres->l_lock_wait = 0;
return;
}
mw = list_first_entry(&lockres->l_mask_waiters,
struct ocfs2_mask_waiter, mw_item);
lockres->l_lock_wait =
ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
struct ocfs2_lock_res *res,
enum ocfs2_lock_type type,
struct ocfs2_lock_res_ops *ops,
void *priv)
{
res->l_type = type;
res->l_ops = ops;
res->l_priv = priv;
res->l_level = DLM_LOCK_IV;
res->l_requested = DLM_LOCK_IV;
res->l_blocking = DLM_LOCK_IV;
res->l_action = OCFS2_AST_INVALID;
res->l_unlock_action = OCFS2_UNLOCK_INVALID;
res->l_flags = OCFS2_LOCK_INITIALIZED;
ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
ocfs2_init_lock_stats(res);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (type != OCFS2_LOCK_TYPE_OPEN)
lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
&lockdep_keys[type], 0);
else
res->l_lockdep_map.key = NULL;
#endif
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
/* This also clears out the lock status block */
memset(res, 0, sizeof(struct ocfs2_lock_res));
spin_lock_init(&res->l_lock);
init_waitqueue_head(&res->l_event);
INIT_LIST_HEAD(&res->l_blocked_list);
INIT_LIST_HEAD(&res->l_mask_waiters);
INIT_LIST_HEAD(&res->l_holders);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
enum ocfs2_lock_type type,
unsigned int generation,
struct inode *inode)
{
struct ocfs2_lock_res_ops *ops;
switch(type) {
case OCFS2_LOCK_TYPE_RW:
ops = &ocfs2_inode_rw_lops;
break;
case OCFS2_LOCK_TYPE_META:
ops = &ocfs2_inode_inode_lops;
break;
case OCFS2_LOCK_TYPE_OPEN:
ops = &ocfs2_inode_open_lops;
break;
default:
mlog_bug_on_msg(1, "type: %d\n", type);
ops = NULL; /* thanks, gcc */
break;
}
ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
generation, res->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
return OCFS2_SB(inode->i_sb);
}
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_mem_dqinfo *info = lockres->l_priv;
return OCFS2_SB(info->dqi_gi.dqi_sb);
}
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_file_private *fp = lockres->l_priv;
return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
__be64 inode_blkno_be;
memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
sizeof(__be64));
return be64_to_cpu(inode_blkno_be);
}
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_dentry_lock *dl = lockres->l_priv;
return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
u64 parent, struct inode *inode)
{
int len;
u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
struct ocfs2_lock_res *lockres = &dl->dl_lockres;
ocfs2_lock_res_init_once(lockres);
/*
* Unfortunately, the standard lock naming scheme won't work
* here because we have two 16 byte values to use. Instead,
* we'll stuff the inode number as a binary value. We still
* want error prints to show something without garbling the
* display, so drop a null byte in there before the inode
* number. A future version of OCFS2 will likely use all
* binary lock names. The stringified names have been a
* tremendous aid in debugging, but now that the debugfs
* interface exists, we can mangle things there if need be.
*
* NOTE: We also drop the standard "pad" value (the total lock
* name size stays the same though - the last part is all
* zeros due to the memset in ocfs2_lock_res_init_once()).
*/
len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
"%c%016llx",
ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
(long long)parent);
BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
sizeof(__be64));
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
dl);
}
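/*
 * Resulting dentry lock name layout (sketch; the exact offsets depend
 * on OCFS2_DENTRY_LOCK_INO_START in the header):
 *
 *	byte 0              : lock type character for OCFS2_LOCK_TYPE_DENTRY
 *	next 16 bytes       : parent block number as hex digits
 *	one byte            : the '\0' written by snprintf
 *	next sizeof(__be64) : inode block number as a raw big-endian value
 *	remainder           : zeros from ocfs2_lock_res_init_once()
 */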
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* Superblock lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
&ocfs2_super_lops, osb);
}
static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* Rename lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
&ocfs2_rename_lops, osb);
}
static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* nfs_sync lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
&ocfs2_nfs_sync_lops, osb);
}
static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
{
ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
init_rwsem(&osb->nfs_sync_rwlock);
}
void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
/* Only one trimfs thread is allowed to work at a time. */
mutex_lock(&osb->obs_trim_fs_mutex);
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
&ocfs2_trim_fs_lops, osb);
}
void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
ocfs2_simple_drop_lockres(osb, lockres);
ocfs2_lock_res_free(lockres);
mutex_unlock(&osb->obs_trim_fs_mutex);
}
static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
&ocfs2_orphan_scan_lops, osb);
}
void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_file_private *fp)
{
struct inode *inode = fp->fp_file->f_mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
inode->i_generation, lockres->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
fp);
lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}
void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_mem_dqinfo *info)
{
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
0, lockres->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
info);
}
void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_super *osb, u64 ref_blkno,
unsigned int generation)
{
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
generation, lockres->l_name);
ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
&ocfs2_refcount_block_lops, osb);
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
return;
ocfs2_remove_lockres_tracking(res);
mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
"Lockres %s is on the blocked list\n",
res->l_name);
mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
"Lockres %s has mask waiters pending\n",
res->l_name);
mlog_bug_on_msg(spin_is_locked(&res->l_lock),
"Lockres %s is locked\n",
res->l_name);
mlog_bug_on_msg(res->l_ro_holders,
"Lockres %s has %u ro holders\n",
res->l_name, res->l_ro_holders);
mlog_bug_on_msg(res->l_ex_holders,
"Lockres %s has %u ex holders\n",
res->l_name, res->l_ex_holders);
/* Need to clear out the lock status block for the dlm */
memset(&res->l_lksb, 0, sizeof(res->l_lksb));
res->l_flags = 0UL;
}
/*
* Keep a list of processes who have an interest in a lockres.
* Note: this is now only used to check for recursive cluster locking.
*/
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
struct ocfs2_lock_holder *oh)
{
INIT_LIST_HEAD(&oh->oh_list);
oh->oh_owner_pid = get_pid(task_pid(current));
spin_lock(&lockres->l_lock);
list_add_tail(&oh->oh_list, &lockres->l_holders);
spin_unlock(&lockres->l_lock);
}
static struct ocfs2_lock_holder *
ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
struct pid *pid)
{
struct ocfs2_lock_holder *oh;
spin_lock(&lockres->l_lock);
list_for_each_entry(oh, &lockres->l_holders, oh_list) {
if (oh->oh_owner_pid == pid) {
spin_unlock(&lockres->l_lock);
return oh;
}
}
spin_unlock(&lockres->l_lock);
return NULL;
}
static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
struct ocfs2_lock_holder *oh)
{
spin_lock(&lockres->l_lock);
list_del(&oh->oh_list);
spin_unlock(&lockres->l_lock);
put_pid(oh->oh_owner_pid);
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
BUG_ON(!lockres);
switch(level) {
case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
BUG();
}
}
static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
int level)
{
BUG_ON(!lockres);
switch(level) {
case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
default:
BUG();
}
}
/* WARNING: This function lives in a world where the only three lock
* levels are EX, PR, and NL. It *will* have to be adjusted when more
* lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
int new_level = DLM_LOCK_EX;
if (level == DLM_LOCK_EX)
new_level = DLM_LOCK_NL;
else if (level == DLM_LOCK_PR)
new_level = DLM_LOCK_PR;
return new_level;
}
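/*
 * Worked example: if another node is blocking at DLM_LOCK_EX, only
 * DLM_LOCK_NL is compatible, so we must drop to no-lock. If it is
 * blocking at DLM_LOCK_PR, we may keep PR but not EX.
 */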
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
unsigned long newflags)
{
struct ocfs2_mask_waiter *mw, *tmp;
assert_spin_locked(&lockres->l_lock);
lockres->l_flags = newflags;
list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
continue;
list_del_init(&mw->mw_item);
mw->mw_status = 0;
complete(&mw->mw_complete);
ocfs2_track_lock_wait(lockres);
}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
unsigned long clear)
{
lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
lockres->l_level = lockres->l_requested;
if (lockres->l_level <=
ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
/* A convert from RO to EX doesn't really need anything as our
* information is already up to date. A convert from NL to
* *anything*, however, should mark ourselves as needing an
* update. */
if (lockres->l_level == DLM_LOCK_NL &&
lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
/*
* We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
* the OCFS2_LOCK_BUSY flag to prevent the dc thread from
* downconverting the lock before the upconvert has fully completed.
* Do not prevent the dc thread from downconverting if NONBLOCK lock
* had already returned.
*/
if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
else
lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
if (lockres->l_requested > DLM_LOCK_NL &&
!(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
int level)
{
int needs_downconvert = 0;
assert_spin_locked(&lockres->l_lock);
if (level > lockres->l_blocking) {
/* only schedule a downconvert if we haven't already scheduled
* one that goes low enough to satisfy the level we're
* blocking. This also catches the case where we get
* duplicate BASTs */
if (ocfs2_highest_compat_lock_level(level) <
ocfs2_highest_compat_lock_level(lockres->l_blocking))
needs_downconvert = 1;
lockres->l_blocking = level;
}
mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
lockres->l_name, level, lockres->l_level, lockres->l_blocking,
needs_downconvert);
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
mlog(0, "needs_downconvert = %d\n", needs_downconvert);
return needs_downconvert;
}
/*
* OCFS2_LOCK_PENDING and l_pending_gen.
*
* Why does OCFS2_LOCK_PENDING exist? To close a race between setting
* OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
* for more details on the race.
*
* OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
* a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
* returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
* OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
* the caller is going to try to clear PENDING again. If nothing else is
* happening, __lockres_clear_pending() sees PENDING is unset and does
* nothing.
*
* But what if another path (eg downconvert thread) has just started a
* new locking action? The other path has re-set PENDING. Our path
* cannot clear PENDING, because that will re-open the original race
* window.
*
* [Example]
*
* ocfs2_meta_lock()
* ocfs2_cluster_lock()
* set BUSY
* set PENDING
* drop l_lock
* ocfs2_dlm_lock()
* ocfs2_locking_ast() ocfs2_downconvert_thread()
* clear PENDING ocfs2_unblock_lock()
* take_l_lock
* !BUSY
* ocfs2_prepare_downconvert()
* set BUSY
* set PENDING
* drop l_lock
* take l_lock
* clear PENDING
* drop l_lock
* <window>
* ocfs2_dlm_lock()
*
* So as you can see, we now have a window where l_lock is not held,
* PENDING is not set, and ocfs2_dlm_lock() has not been called.
*
* The core problem is that ocfs2_cluster_lock() has cleared the PENDING
* set by ocfs2_prepare_downconvert(). That wasn't nice.
*
* To solve this we introduce l_pending_gen. A call to
* lockres_clear_pending() will only do so when it is passed a generation
* number that matches the lockres. lockres_set_pending() will return the
* current generation number. When ocfs2_cluster_lock() goes to clear
* PENDING, it passes the generation it got from set_pending(). In our
* example above, the generation numbers will *not* match. Thus,
* ocfs2_cluster_lock() will not clear the PENDING set by
* ocfs2_prepare_downconvert().
*/
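/*
 * Typical caller pattern (sketch, mirroring ocfs2_lock_create() and
 * __ocfs2_cluster_lock() below):
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, ...);
 *
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * If the ast already cleared PENDING (and bumped l_pending_gen), the
 * stale generation makes the final clear a harmless no-op.
 */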
/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
unsigned int generation,
struct ocfs2_super *osb)
{
assert_spin_locked(&lockres->l_lock);
/*
* The ast and locking functions can race us here. The winner
* will clear pending, the loser will not.
*/
if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
(lockres->l_pending_gen != generation))
return;
lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
lockres->l_pending_gen++;
/*
* The downconvert thread may have skipped us because we
* were PENDING. Wake it up.
*/
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
ocfs2_wake_downconvert_thread(osb);
}
/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
unsigned int generation,
struct ocfs2_super *osb)
{
unsigned long flags;
spin_lock_irqsave(&lockres->l_lock, flags);
__lockres_clear_pending(lockres, generation, osb);
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
assert_spin_locked(&lockres->l_lock);
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
return lockres->l_pending_gen;
}
static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
int needs_downconvert;
unsigned long flags;
BUG_ON(level <= DLM_LOCK_NL);
mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
"type %s\n", lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
/*
* We can skip the bast for locks which don't enable caching -
* they'll be dropped at the earliest possible time anyway.
*/
if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
return;
spin_lock_irqsave(&lockres->l_lock, flags);
needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
if (needs_downconvert)
ocfs2_schedule_blocked_lock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
int status;
spin_lock_irqsave(&lockres->l_lock, flags);
status = ocfs2_dlm_lock_status(&lockres->l_lksb);
if (status == -EAGAIN) {
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
goto out;
}
if (status) {
mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
lockres->l_name, status);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return;
}
mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
"level %d => %d\n", lockres->l_name, lockres->l_action,
lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
switch(lockres->l_action) {
case OCFS2_AST_ATTACH:
ocfs2_generic_handle_attach_action(lockres);
lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
break;
case OCFS2_AST_CONVERT:
ocfs2_generic_handle_convert_action(lockres);
break;
case OCFS2_AST_DOWNCONVERT:
ocfs2_generic_handle_downconvert_action(lockres);
break;
default:
mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
"flags 0x%lx, unlock: %u\n",
lockres->l_name, lockres->l_action, lockres->l_flags,
lockres->l_unlock_action);
BUG();
}
out:
/* set it to something invalid so if we get called again we
* can catch it. */
lockres->l_action = OCFS2_AST_INVALID;
/* Did we try to cancel this lock? Clear that state */
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
/*
* We may have beaten the locking functions here. We certainly
* know that dlm_lock() has been called :-)
* Because we can't have two lock calls in flight at once, we
* can use lockres->l_pending_gen.
*/
__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
unsigned long flags;
mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
lockres->l_name, lockres->l_unlock_action);
spin_lock_irqsave(&lockres->l_lock, flags);
if (error) {
mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
"unlock_action %d\n", error, lockres->l_name,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return;
}
switch(lockres->l_unlock_action) {
case OCFS2_UNLOCK_CANCEL_CONVERT:
mlog(0, "Cancel convert success for %s\n", lockres->l_name);
lockres->l_action = OCFS2_AST_INVALID;
/* The downconvert thread may have requeued this lock; we
* need to wake it. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
break;
case OCFS2_UNLOCK_DROP_LOCK:
lockres->l_level = DLM_LOCK_IV;
break;
default:
BUG();
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
/*
* This is the filesystem locking protocol. It provides the lock handling
* hooks for the underlying DLM. It has a maximum version number.
* The version number allows interoperability with systems running at
* the same major number and an equal or smaller minor number.
*
* Whenever the filesystem does new things with locks (adds or removes a
* lock, orders them differently, does different things underneath a lock),
* the version must be changed. The protocol is negotiated when joining
* the dlm domain. A node may join the domain if its major version is
* identical to all other nodes and its minor version is greater than
* or equal to all other nodes. When its minor version is greater than
* the other nodes, it will run at the minor version specified by the
* other nodes.
*
* If a locking change is made that will not be compatible with older
* versions, the major number must be increased and the minor version set
* to zero. If a change merely adds a behavior that can be disabled when
* speaking to older versions, the minor version must be increased. If a
* change adds a fully backwards compatible change (eg, LVB changes that
* are just ignored by older versions), the version does not need to be
* updated.
*/
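/*
 * Example of the rules above (version numbers illustrative only): a
 * node at 1.2 may join a domain of 1.1 nodes and will then run at
 * 1.1; a node at 2.0 cannot join a 1.x domain at all.
 */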
static struct ocfs2_locking_protocol lproto = {
.lp_max_version = {
.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
},
.lp_lock_ast = ocfs2_locking_ast,
.lp_blocking_ast = ocfs2_blocking_ast,
.lp_unlock_ast = ocfs2_unlock_ast,
};
void ocfs2_set_locking_protocol(void)
{
ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
}
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert)
{
unsigned long flags;
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
if (convert)
lockres->l_action = OCFS2_AST_INVALID;
else
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
}
/* Note: If we detect another process working on the lock (i.e.,
* OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
* to do the right thing in that case.
*/
static int ocfs2_lock_create(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 dlm_flags)
{
int ret = 0;
unsigned long flags;
unsigned int gen;
mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
dlm_flags);
spin_lock_irqsave(&lockres->l_lock, flags);
if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
(lockres->l_flags & OCFS2_LOCK_BUSY)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto bail;
}
lockres->l_action = OCFS2_AST_ATTACH;
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
gen = lockres_set_pending(lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn,
level,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 1);
}
mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
bail:
return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
int flag)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&lockres->l_lock, flags);
ret = lockres->l_flags & flag;
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ret;
}
static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
wait_event(lockres->l_event,
!ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}
static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
wait_event(lockres->l_event,
!ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
* of another node, and return true if the currently wanted
* level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
int wanted)
{
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
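/*
 * Example: with l_blocking == DLM_LOCK_PR the highest compatible
 * level is PR, so a wanted PR may proceed while a wanted EX must
 * wait out the downconvert.
 */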
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
INIT_LIST_HEAD(&mw->mw_item);
init_completion(&mw->mw_complete);
ocfs2_init_start_time(mw);
}
static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
wait_for_completion(&mw->mw_complete);
/* Re-arm the completion in case we want to wait on it again */
reinit_completion(&mw->mw_complete);
return mw->mw_status;
}
static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
struct ocfs2_mask_waiter *mw,
unsigned long mask,
unsigned long goal)
{
BUG_ON(!list_empty(&mw->mw_item));
assert_spin_locked(&lockres->l_lock);
list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
mw->mw_mask = mask;
mw->mw_goal = goal;
ocfs2_track_lock_wait(lockres);
}
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
* if the mask still hadn't reached its goal */
static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
struct ocfs2_mask_waiter *mw)
{
int ret = 0;
assert_spin_locked(&lockres->l_lock);
if (!list_empty(&mw->mw_item)) {
if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
ret = -EBUSY;
list_del_init(&mw->mw_item);
init_completion(&mw->mw_complete);
ocfs2_track_lock_wait(lockres);
}
return ret;
}
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
struct ocfs2_mask_waiter *mw)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&lockres->l_lock, flags);
ret = __lockres_remove_mask_waiter(lockres, mw);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ret;
}
static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
struct ocfs2_lock_res *lockres)
{
int ret;
ret = wait_for_completion_interruptible(&mw->mw_complete);
if (ret)
lockres_remove_mask_waiter(lockres, mw);
else
ret = mw->mw_status;
/* Re-arm the completion in case we want to wait on it again */
reinit_completion(&mw->mw_complete);
return ret;
}
static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 lkm_flags,
int arg_flags,
int l_subclass,
unsigned long caller_ip)
{
struct ocfs2_mask_waiter mw;
int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
unsigned long flags;
unsigned int gen;
int noqueue_attempted = 0;
int dlm_locked = 0;
int kick_dc = 0;
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
mlog_errno(-EINVAL);
return -EINVAL;
}
ocfs2_init_mask_waiter(&mw);
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lkm_flags |= DLM_LKF_VALBLK;
again:
wait = 0;
spin_lock_irqsave(&lockres->l_lock, flags);
if (catch_signals && signal_pending(current)) {
ret = -ERESTARTSYS;
goto unlock;
}
mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
"Cluster lock called on freeing lockres %s! flags "
"0x%lx\n", lockres->l_name, lockres->l_flags);
/* We only compare against the currently granted level
* here. If the lock is blocked waiting on a downconvert,
* we'll get caught below. */
if (lockres->l_flags & OCFS2_LOCK_BUSY &&
level > lockres->l_level) {
/* is someone sitting in dlm_lock? If so, wait on
* them. */
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
wait = 1;
goto unlock;
}
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
/*
* We've upconverted. If the lock now has a level we can
* work with, we take it. If, however, the lock is not at the
* required level, we go thru the full cycle. One way this could
* happen is if a process requesting an upconvert to PR is
* closely followed by another requesting upconvert to an EX.
* If the process requesting EX lands here, we want it to
* continue attempting to upconvert and let the process
* requesting PR take the lock.
* If multiple processes request upconvert to PR, the first one
* here will take the lock. The others will have to go thru the
* OCFS2_LOCK_BLOCKED check to ensure that there is no pending
* downconvert request.
*/
if (level <= lockres->l_level)
goto update_holders;
}
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* The lock is currently blocked on behalf of
* another node. */
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
wait = 1;
goto unlock;
}
if (level > lockres->l_level) {
if (noqueue_attempted > 0) {
ret = -EAGAIN;
goto unlock;
}
if (lkm_flags & DLM_LKF_NOQUEUE)
noqueue_attempted = 1;
if (lockres->l_action != OCFS2_AST_INVALID)
mlog(ML_ERROR, "lockres %s has action %u pending\n",
lockres->l_name, lockres->l_action);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
lockres->l_action = OCFS2_AST_ATTACH;
lkm_flags &= ~DLM_LKF_CONVERT;
} else {
lockres->l_action = OCFS2_AST_CONVERT;
lkm_flags |= DLM_LKF_CONVERT;
}
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
gen = lockres_set_pending(lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
lockres->l_name, lockres->l_level, level);
/* call dlm_lock to upgrade lock now */
ret = ocfs2_dlm_lock(osb->cconn,
level,
&lockres->l_lksb,
lkm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
(ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock",
ret, lockres);
}
ocfs2_recover_from_dlm_error(lockres, 1);
goto out;
}
dlm_locked = 1;
mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
lockres->l_name);
/* At this point we've gone inside the dlm and need to
* complete our work regardless. */
catch_signals = 0;
/* wait for busy to clear and carry on */
goto again;
}
update_holders:
/* Ok, if we get here then we're good to go. */
ocfs2_inc_holders(lockres, level);
ret = 0;
unlock:
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
/* ocfs2_unblock_lock() requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (kick_dc)
ocfs2_wake_downconvert_thread(osb);
out:
/*
* This is helping work around a lock inversion between the page lock
* and dlm locks. One path holds the page lock while calling aops
* which block acquiring dlm locks. The voting thread holds dlm
* locks while acquiring page locks when downconverting data locks.
* This block is helping an aop path notice the inversion and back
* off to unlock its page lock before trying the dlm lock again.
*/
if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
wait = 0;
spin_lock_irqsave(&lockres->l_lock, flags);
if (__lockres_remove_mask_waiter(lockres, &mw)) {
if (dlm_locked)
lockres_or_flags(lockres,
OCFS2_LOCK_NONBLOCK_FINISHED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = -EAGAIN;
} else {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto again;
}
}
if (wait) {
ret = ocfs2_wait_for_mask(&mw);
if (ret == 0)
goto again;
mlog_errno(ret);
}
ocfs2_update_lock_stats(lockres, level, &mw, ret);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (!ret && lockres->l_lockdep_map.key != NULL) {
if (level == DLM_LOCK_PR)
rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
caller_ip);
else
rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
caller_ip);
}
#endif
return ret;
}
static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 lkm_flags,
int arg_flags)
{
return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
0, _RET_IP_);
}
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
unsigned long caller_ip)
{
unsigned long flags;
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
ocfs2_downconvert_on_unlock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (lockres->l_lockdep_map.key != NULL)
rwsem_release(&lockres->l_lockdep_map, caller_ip);
#endif
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int ex,
int local)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
unsigned long flags;
u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
spin_lock_irqsave(&lockres->l_lock, flags);
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
* the normal cluster directory lookup. Use this ONLY on newly created
* inodes which other nodes can't possibly see, and which haven't been
* hashed in the inode hash yet. This can give us a good performance
* increase as it'll skip the network broadcast normally associated
* with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!ocfs2_inode_is_new(inode));
mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
/* NOTE: We don't increment any of the holder counts, nor
* do we add anything to a journal handle. Since this is
* supposed to be a new inode which the cluster doesn't know
* about yet, there is no need to. As far as the LVB handling
* is concerned, this is basically like acquiring an EX lock
* on a resource which has an invalid one -- we'll set it
* valid when we release the EX. */
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
if (ret) {
mlog_errno(ret);
goto bail;
}
/*
* We don't want to use DLM_LKF_LOCAL on a metadata lock as they
* don't use a generation in their lock names.
*/
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
if (ret) {
mlog_errno(ret);
goto bail;
}
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
if (ret)
mlog_errno(ret);
bail:
return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
int status, level;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (ocfs2_mount_local(osb))
return 0;
lockres = &OCFS2_I(inode)->ip_rw_lockres;
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0)
mlog_errno(status);
return status;
}
int ocfs2_try_rw_lock(struct inode *inode, int write)
{
int status, level;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu try to take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (ocfs2_mount_local(osb))
return 0;
lockres = &OCFS2_I(inode)->ip_rw_lockres;
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
return status;
}
void ocfs2_rw_unlock(struct inode *inode, int write)
{
int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu drop %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
/*
* ocfs2_open_lock always takes a PR mode lock.
*/
int ocfs2_open_lock(struct inode *inode)
{
int status = 0;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
if (status < 0)
mlog_errno(status);
out:
return status;
}
int ocfs2_try_open_lock(struct inode *inode, int write)
{
int status = 0, level;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (ocfs2_is_hard_readonly(osb)) {
if (write)
status = -EROFS;
goto out;
}
if (ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
/*
* The file system may already be holding a PRMODE/EXMODE open lock.
* Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
* other nodes and the -EAGAIN will indicate to the caller that
* this inode is still in use.
*/
status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
out:
return status;
}
/*
* ocfs2_open_unlock unlocks PR and EX mode open locks.
*/
void ocfs2_open_unlock(struct inode *inode)
{
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu drop open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
if (ocfs2_mount_local(osb))
goto out;
if (lockres->l_ro_holders)
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
if (lockres->l_ex_holders)
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
out:
return;
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
int level)
{
int ret;
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
retry_cancel:
spin_lock_irqsave(&lockres->l_lock, flags);
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
ret = ocfs2_prepare_cancel_convert(osb, lockres);
if (ret) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_cancel_convert(osb, lockres);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
goto retry_cancel;
}
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ocfs2_wait_for_mask(&mw);
goto retry_cancel;
}
ret = -ERESTARTSYS;
/*
* We may still have gotten the lock, in which case there's no
* point to restarting the syscall.
*/
if (lockres->l_level == level)
ret = 0;
mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
lockres->l_flags, lockres->l_level, lockres->l_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
return ret;
}
/*
* ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
* flock() calls. The locking approach this requires is sufficiently
* different from all other cluster lock types that we implement a
* separate path to the "low-level" dlm calls. In particular:
*
* - No optimization of lock levels is done - we take exactly
* what's been requested.
*
* - No lock caching is employed. We immediately downconvert to
* no-lock at unlock time. (This also means flock locks never go on
* the blocking list.)
*
* - Since userspace can trivially deadlock itself with flock, we make
* sure to allow cancellation of a misbehaving application's flock()
* request.
*
* - Access to any flock lockres doesn't require concurrency, so we
* can simplify the code by requiring the caller to guarantee
* serialization of dlmglue flock calls.
*/
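/*
 * Call-path sketch (the VFS glue lives outside this file, so take
 * this as orientation only): a userspace flock() lands in ocfs2's
 * flock handler, which serializes and then calls ocfs2_file_lock()
 * or ocfs2_file_unlock() below.
 */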
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
unsigned long flags;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
(lockres->l_level > DLM_LOCK_NL)) {
mlog(ML_ERROR,
"File lock \"%s\" has busy or locked state: flags: 0x%lx, "
"level: %u\n", lockres->l_name, lockres->l_flags,
lockres->l_level);
return -EINVAL;
}
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
/*
* Get the lock at NLMODE to start - that way we
* can cancel the upconvert request if need be.
*/
ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_wait_for_mask(&mw);
if (ret) {
mlog_errno(ret);
goto out;
}
spin_lock_irqsave(&lockres->l_lock, flags);
}
lockres->l_action = OCFS2_AST_CONVERT;
lkm_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
if (ret) {
if (!trylock || (ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ret = -EINVAL;
}
ocfs2_recover_from_dlm_error(lockres, 1);
lockres_remove_mask_waiter(lockres, &mw);
goto out;
}
ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
if (ret == -ERESTARTSYS) {
/*
* Userspace can deadlock itself with
* flock(). Current behavior locally is to allow the
* deadlock, but abort the system call if a signal is
* received. We follow this example, otherwise a
* poorly written program could sit in kernel until
* reboot.
*
* Handling this is a bit more complicated for Ocfs2
* though. We can't exit this function with an
* outstanding lock request, so a cancel convert is
* required. We intentionally overwrite 'ret' - if the
* cancel fails and the lock was granted, it's easier
* to just bubble success back up to the user.
*/
ret = ocfs2_flock_handle_signal(lockres, level);
} else if (!ret && (level > lockres->l_level)) {
/* Trylock failed asynchronously */
BUG_ON(!trylock);
ret = -EAGAIN;
}
out:
mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
lockres->l_name, ex, trylock, ret);
return ret;
}
void ocfs2_file_unlock(struct file *file)
{
int ret;
unsigned int gen;
unsigned long flags;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
return;
if (lockres->l_level == DLM_LOCK_NL)
return;
mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
lockres->l_name, lockres->l_flags, lockres->l_level,
lockres->l_action);
spin_lock_irqsave(&lockres->l_lock, flags);
/*
* Fake a blocking ast for the downconvert code.
*/
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
lockres->l_blocking = DLM_LOCK_EX;
gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
if (ret) {
mlog_errno(ret);
return;
}
ret = ocfs2_wait_for_mask(&mw);
if (ret)
mlog_errno(ret);
}
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int kick = 0;
/* If we know that another node is waiting on our lock, kick
* the downconvert thread pre-emptively when we reach a release
* condition. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
switch(lockres->l_blocking) {
case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
kick = 1;
break;
case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
kick = 1;
break;
default:
BUG();
}
}
if (kick)
ocfs2_wake_downconvert_thread(osb);
}
#define OCFS2_SEC_BITS 34
#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
/* LVB only has room for 64 bits of time here so we pack it for
* now. */
static u64 ocfs2_pack_timespec(struct timespec64 *spec)
{
u64 res;
u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
u32 nsec = spec->tv_nsec;
res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
return res;
}
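/*
 * Worked example: tv_sec = 3, tv_nsec = 1 packs to
 * (3 << 30) | 1 == 0xC0000001, since OCFS2_SEC_SHIFT is 64 - 34 = 30
 * and the low 30 bits carry the nanoseconds.
 */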
/* Call this with the lockres locked. I am reasonably sure we don't
* need ip_lock in this function as anyone who would be changing those
* values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
struct timespec64 ctime = inode_get_ctime(inode);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
/*
* Invalidate the LVB of a deleted inode - this way other
* nodes are forced to go to disk and discover the new inode
* status.
*/
if (oi->ip_flags & OCFS2_INODE_DELETED) {
lvb->lvb_version = 0;
goto out;
}
lvb->lvb_version = OCFS2_LVB_VERSION;
lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
lvb->lvb_iatime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
lvb->lvb_ictime_packed =
cpu_to_be64(ocfs2_pack_timespec(&ctime));
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
out:
mlog_meta_lvb(0, lockres);
}
static void ocfs2_unpack_timespec(struct timespec64 *spec,
u64 packed_time)
{
spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}
static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
struct timespec64 ctime;
mlog_meta_lvb(0, lockres);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (inode_wrong_type(inode, be16_to_cpu(lvb->lvb_imode)))
return -ESTALE;
/* We're safe here without the lockres lock... */
spin_lock(&oi->ip_lock);
oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
ocfs2_set_inode_flags(inode);
/* fast-symlinks are a special case */
if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
ocfs2_unpack_timespec(&inode->i_atime,
be64_to_cpu(lvb->lvb_iatime_packed));
ocfs2_unpack_timespec(&inode->i_mtime,
be64_to_cpu(lvb->lvb_imtime_packed));
ocfs2_unpack_timespec(&ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
inode_set_ctime_to_ts(inode, ctime);
spin_unlock(&oi->ip_lock);
return 0;
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
&& lvb->lvb_version == OCFS2_LVB_VERSION
&& be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
return 1;
return 0;
}
/* Determine whether a lock resource needs to be refreshed, and
* arbitrate who gets to refresh it.
*
* 0 means no refresh needed.
*
* > 0 means you need to refresh this and you MUST call
* ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
unsigned long flags;
int status = 0;
refresh_check:
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto bail;
}
if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
ocfs2_wait_on_refreshing_lock(lockres);
goto refresh_check;
}
/* Ok, I'll be the one to refresh this lock. */
lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
spin_unlock_irqrestore(&lockres->l_lock, flags);
status = 1;
bail:
mlog(0, "status %d\n", status);
return status;
}
/* If status is non-zero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
int status)
{
unsigned long flags;
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
if (!status)
lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
}
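/*
 * The two helpers above pair up as follows (a sketch; the real callers
 * are ocfs2_inode_lock_update(), ocfs2_super_lock() and
 * ocfs2_qinfo_lock() below):
 *
 *	status = ocfs2_should_refresh_lock_res(lockres);
 *	if (status) {
 *		status = refresh_from_lvb_or_disk(...);	// hypothetical step
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 *
 * Exactly one caller wins OCFS2_LOCK_REFRESHING; everyone else waits in
 * ocfs2_wait_on_refreshing_lock() and re-checks once the winner calls
 * ocfs2_complete_lock_res_refresh().
 */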
/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh)
{
int status = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_dinode *fe;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (ocfs2_mount_local(osb))
goto bail;
spin_lock(&oi->ip_lock);
if (oi->ip_flags & OCFS2_INODE_DELETED) {
mlog(0, "Orphaned inode %llu was deleted while we "
"were waiting on a lock. ip_flags = 0x%x\n",
(unsigned long long)oi->ip_blkno, oi->ip_flags);
spin_unlock(&oi->ip_lock);
status = -ENOENT;
goto bail;
}
spin_unlock(&oi->ip_lock);
if (!ocfs2_should_refresh_lock_res(lockres))
goto bail;
/* This will discard any caching information we might have had
* for the inode metadata. */
ocfs2_metadata_cache_purge(INODE_CACHE(inode));
ocfs2_extent_map_trunc(inode, 0);
if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
mlog(0, "Trusting LVB on inode %llu\n",
(unsigned long long)oi->ip_blkno);
status = ocfs2_refresh_inode_from_lvb(inode);
goto bail_refresh;
} else {
/* Boo, we have to go to disk. */
/* read bh, cast, ocfs2_refresh_inode */
status = ocfs2_read_inode_block(inode, bh);
if (status < 0) {
mlog_errno(status);
goto bail_refresh;
}
fe = (struct ocfs2_dinode *) (*bh)->b_data;
if (inode_wrong_type(inode, le16_to_cpu(fe->i_mode))) {
status = -ESTALE;
goto bail_refresh;
}
/* This is a good chance to make sure we're not
* locking an invalid object. ocfs2_read_inode_block()
* already checked that the inode block is sane.
*
* We bug on a stale inode here because we checked
* above whether it was wiped from disk. The wiping
* node provides a guarantee that we receive that
* message and can mark the inode before dropping any
* locks associated with it. */
mlog_bug_on_msg(inode->i_generation !=
le32_to_cpu(fe->i_generation),
"Invalid dinode %llu disk generation: %u "
"inode->i_generation: %u\n",
(unsigned long long)oi->ip_blkno,
le32_to_cpu(fe->i_generation),
inode->i_generation);
mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
"Stale dinode %llu dtime: %llu flags: 0x%x\n",
(unsigned long long)oi->ip_blkno,
(unsigned long long)le64_to_cpu(fe->i_dtime),
le32_to_cpu(fe->i_flags));
ocfs2_refresh_inode(inode, fe);
ocfs2_track_lock_refresh(lockres);
}
status = 0;
bail_refresh:
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
return status;
}
static int ocfs2_assign_bh(struct inode *inode,
struct buffer_head **ret_bh,
struct buffer_head *passed_bh)
{
int status;
if (passed_bh) {
/* Ok, the update went to disk for us, use the
* returned bh. */
*ret_bh = passed_bh;
get_bh(*ret_bh);
return 0;
}
status = ocfs2_read_inode_block(inode, ret_bh);
if (status < 0)
mlog_errno(status);
return status;
}
/*
* returns < 0 error if the callback will never be called, otherwise
* the result of the lock will be communicated via the callback.
*/
int ocfs2_inode_lock_full_nested(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
int arg_flags,
int subclass)
{
int status, level, acquired;
u32 dlm_flags;
struct ocfs2_lock_res *lockres = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *local_bh = NULL;
mlog(0, "inode %llu, take %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
status = 0;
acquired = 0;
/* We'll allow faking a readonly metadata lock for
 * read-only devices. */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
goto getbh;
}
if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
ocfs2_mount_local(osb))
goto update;
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
lockres = &OCFS2_I(inode)->ip_inode_lockres;
level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
dlm_flags = 0;
if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
dlm_flags |= DLM_LKF_NOQUEUE;
status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
arg_flags, subclass, _RET_IP_);
if (status < 0) {
if (status != -EAGAIN)
mlog_errno(status);
goto bail;
}
/* Notify the error cleanup path to drop the cluster lock. */
acquired = 1;
/* We wait twice because a node may have died while we were in
* the lower dlm layers. The second time though, we've
* committed to owning this lock so we don't allow signals to
* abort the operation. */
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
update:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
* which hasn't been populated yet, so clear the refresh flag
* and let the caller handle it.
*/
if (inode->i_state & I_NEW) {
status = 0;
if (lockres)
ocfs2_complete_lock_res_refresh(lockres, 0);
goto bail;
}
/* This is fun. The caller may want a bh back, or it may
* not. ocfs2_inode_lock_update definitely wants one in, but
* may or may not read one, depending on what's in the
* LVB. The result of all of this is that we've *only* gone to
* disk if we have to, so the complexity is worthwhile. */
status = ocfs2_inode_lock_update(inode, &local_bh);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
getbh:
if (ret_bh) {
status = ocfs2_assign_bh(inode, ret_bh, local_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
bail:
if (status < 0) {
if (ret_bh && (*ret_bh)) {
brelse(*ret_bh);
*ret_bh = NULL;
}
if (acquired)
ocfs2_inode_unlock(inode, ex);
}
brelse(local_bh);
return status;
}
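/*
 * Sketch of the common call pattern (ocfs2_inode_lock() is the plain
 * wrapper around this function used elsewhere in this file):
 *
 *	struct buffer_head *bh = NULL;
 *	status = ocfs2_inode_lock(inode, &bh, 1);	// EX lock + dinode bh
 *	if (status < 0)
 *		goto out;
 *	... use the locked inode and bh ...
 *	ocfs2_inode_unlock(inode, 1);
 *	brelse(bh);
 */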
/*
* This is working around a lock inversion between tasks acquiring DLM
* locks while holding a page lock and the downconvert thread which
* blocks dlm lock acquiry while acquiring page locks.
*
 * ** These _with_page variants are only intended to be called from aop
* methods that hold page locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
* The DLM is called such that it returns -EAGAIN if it would have
* blocked waiting for the downconvert thread. In that case we unlock
* our page so the downconvert thread can make progress. Once we've
* done this we have to return AOP_TRUNCATED_PAGE so the aop method
* that called us can bubble that back up into the VFS who will then
* immediately retry the aop call.
*/
int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page)
{
int ret;
ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
unlock_page(page);
/*
* If we can't get inode lock immediately, we should not return
* directly here, since this will lead to a softlockup problem.
* The method is to get a blocking lock and immediately unlock
* before returning, this can avoid CPU resource waste due to
* lots of retries, and benefits fairness in getting lock.
*/
if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
ocfs2_inode_unlock(inode, ex);
ret = AOP_TRUNCATED_PAGE;
}
return ret;
}
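/*
 * Hypothetical caller shape in an aop method, matching the contract
 * described above (test the return with != 0, not < 0):
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret) {
 *		if (ret == AOP_TRUNCATED_PAGE)
 *			return ret;	// VFS relocks the page and retries
 *		mlog_errno(ret);
 *		return ret;
 *	}
 */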
int ocfs2_inode_lock_atime(struct inode *inode,
struct vfsmount *vfsmnt,
int *level, int wait)
{
int ret;
if (wait)
ret = ocfs2_inode_lock(inode, NULL, 0);
else
ret = ocfs2_try_inode_lock(inode, NULL, 0);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
return ret;
}
/*
* If we should update atime, we will get EX lock,
* otherwise we just get PR lock.
*/
if (ocfs2_should_update_atime(inode, vfsmnt)) {
struct buffer_head *bh = NULL;
ocfs2_inode_unlock(inode, 0);
if (wait)
ret = ocfs2_inode_lock(inode, &bh, 1);
else
ret = ocfs2_try_inode_lock(inode, &bh, 1);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
return ret;
}
*level = 1;
if (ocfs2_should_update_atime(inode, vfsmnt))
ocfs2_update_inode_atime(inode, bh);
brelse(bh);
} else
*level = 0;
return ret;
}
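/*
 * On success *level records which lock mode was left held (1 == EX when
 * atime was updated, 0 == PR otherwise), so a caller looks roughly like
 * this sketch:
 *
 *	int level;
 *	if (ocfs2_inode_lock_atime(inode, vfsmnt, &level, 1) < 0)
 *		return;
 *	... read from the file ...
 *	ocfs2_inode_unlock(inode, level);
 */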
void ocfs2_inode_unlock(struct inode *inode,
int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog(0, "inode %llu drop %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
if (!ocfs2_is_hard_readonly(osb) &&
!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
/*
 * These _tracker variants are introduced to deal with the recursive cluster
* locking issue. The idea is to keep track of a lock holder on the stack of
* the current process. If there's a lock holder on the stack, we know the
* task context is already protected by cluster locking. Currently, they're
* used in some VFS entry routines.
*
 * return < 0 on error, return == 0 if there was no lock holder on the
 * stack before this call, return == 1 if this call is recursive locking.
 * return == -EINVAL if this lock attempt would cause an upgrade, which
 * is forbidden.
*
 * When taking lock levels into account, we face some different situations.
*
* 1. no lock is held
* In this case, just lock the inode as requested and return 0
*
* 2. We are holding a lock
 * For this situation, things diverge into several cases
*
* wanted holding what to do
* ex ex see 2.1 below
* ex pr see 2.2 below
* pr ex see 2.1 below
* pr pr see 2.1 below
*
 * 2.1 The lock level being held is compatible
 * with the wanted level, so no lock action will be taken.
*
* 2.2 Otherwise, an upgrade is needed, but it is forbidden.
*
 * The reason an upgrade within a process is forbidden is that
 * a lock upgrade may cause deadlock. The following illustrates
* how it happens.
*
* thread on node1 thread on node2
* ocfs2_inode_lock_tracker(ex=0)
*
* <====== ocfs2_inode_lock_tracker(ex=1)
*
* ocfs2_inode_lock_tracker(ex=1)
*/
int ocfs2_inode_lock_tracker(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct ocfs2_lock_holder *oh)
{
int status = 0;
struct ocfs2_lock_res *lockres;
struct ocfs2_lock_holder *tmp_oh;
struct pid *pid = task_pid(current);
lockres = &OCFS2_I(inode)->ip_inode_lockres;
tmp_oh = ocfs2_pid_holder(lockres, pid);
if (!tmp_oh) {
/*
* This corresponds to the case 1.
* We haven't got any lock before.
*/
status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
oh->oh_ex = ex;
ocfs2_add_holder(lockres, oh);
return 0;
}
if (unlikely(ex && !tmp_oh->oh_ex)) {
/*
 * case 2.2 upgrade may cause deadlock, forbid it.
*/
mlog(ML_ERROR, "Recursive locking is not permitted to "
"upgrade to EX level from PR level.\n");
dump_stack();
return -EINVAL;
}
/*
 * case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full()
 * ignore the lock level and just update the buffer head.
*/
if (ret_bh) {
status = ocfs2_inode_lock_full(inode, ret_bh, ex,
OCFS2_META_LOCK_GETBH);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
}
return 1;
}
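/*
 * Typical tracker usage, sketched from the contract documented above:
 *
 *	struct ocfs2_lock_holder oh;
 *	int had_lock = ocfs2_inode_lock_tracker(inode, &bh, ex, &oh);
 *	if (had_lock < 0)
 *		return had_lock;
 *	... had_lock == 1 means this call was recursive ...
 *	ocfs2_inode_unlock_tracker(inode, ex, &oh, had_lock);
 */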
void ocfs2_inode_unlock_tracker(struct inode *inode,
int ex,
struct ocfs2_lock_holder *oh,
int had_lock)
{
struct ocfs2_lock_res *lockres;
lockres = &OCFS2_I(inode)->ip_inode_lockres;
/* had_lock means that the current process already took the cluster
* lock previously.
* If had_lock is 1, we have nothing to do here.
* If had_lock is 0, we will release the lock.
*/
if (!had_lock) {
ocfs2_inode_unlock(inode, oh->oh_ex);
ocfs2_remove_holder(lockres, oh);
}
}
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
int status = 0;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
lockres = &osb->osb_orphan_scan.os_lockres;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
if (status < 0)
return status;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
*seqno = be32_to_cpu(lvb->lvb_os_seqno);
else
*seqno = osb->osb_orphan_scan.os_seqno + 1;
return status;
}
void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
lockres = &osb->osb_orphan_scan.os_lockres;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
lvb->lvb_os_seqno = cpu_to_be32(seqno);
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
}
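/*
 * The orphan scan sequence number thus travels between nodes in the
 * LVB: the lock side reads the last published seqno (or falls back to
 * the local seqno + 1 when the LVB is stale), and the unlock side
 * publishes the seqno it finished with for the next scanner.
 */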
int ocfs2_super_lock(struct ocfs2_super *osb,
int ex)
{
int status = 0;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
goto bail;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* The super block lock path is really in the best position to
* know when resources covered by the lock need to be
* refreshed, so we do it here. Of course, making sense of
* everything is up to the caller :) */
status = ocfs2_should_refresh_lock_res(lockres);
if (status) {
status = ocfs2_refresh_slot_info(osb);
ocfs2_complete_lock_res_refresh(lockres, status);
if (status < 0) {
ocfs2_cluster_unlock(osb, lockres, level);
mlog_errno(status);
}
ocfs2_track_lock_refresh(lockres);
}
bail:
return status;
}
void ocfs2_super_unlock(struct ocfs2_super *osb,
int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
int status;
struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
if (status < 0)
mlog_errno(status);
return status;
}
void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
int status;
struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ex)
down_write(&osb->nfs_sync_rwlock);
else
down_read(&osb->nfs_sync_rwlock);
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
0, 0);
if (status < 0) {
mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
if (ex)
up_write(&osb->nfs_sync_rwlock);
else
up_read(&osb->nfs_sync_rwlock);
}
return status;
}
void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres,
ex ? LKM_EXMODE : LKM_PRMODE);
if (ex)
up_write(&osb->nfs_sync_rwlock);
else
up_read(&osb->nfs_sync_rwlock);
}
int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
struct ocfs2_trim_fs_info *info, int trylock)
{
int status;
struct ocfs2_trim_fs_lvb *lvb;
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
if (info)
info->tf_valid = 0;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
trylock ? DLM_LKF_NOQUEUE : 0, 0);
if (status < 0) {
if (status != -EAGAIN)
mlog_errno(status);
return status;
}
if (info) {
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
info->tf_valid = 1;
info->tf_success = lvb->lvb_success;
info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
info->tf_start = be64_to_cpu(lvb->lvb_start);
info->tf_len = be64_to_cpu(lvb->lvb_len);
info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
}
}
return status;
}
void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
struct ocfs2_trim_fs_info *info)
{
struct ocfs2_trim_fs_lvb *lvb;
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
if (ocfs2_mount_local(osb))
return;
if (info) {
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
lvb->lvb_success = info->tf_success;
lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
lvb->lvb_start = cpu_to_be64(info->tf_start);
lvb->lvb_len = cpu_to_be64(info->tf_len);
lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
}
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
int ret;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
BUG_ON(!dl);
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
return -EROFS;
return 0;
}
if (ocfs2_mount_local(osb))
return 0;
ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
if (ret < 0)
mlog_errno(ret);
return ret;
}
void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
* open references on the debug inodes can live on after a mount, so
* we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
struct ocfs2_dlm_debug *dlm_debug;
dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
kfree(dlm_debug);
}
void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
if (dlm_debug)
kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}
static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
kref_get(&debug->d_refcnt);
}
struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
struct ocfs2_dlm_debug *dlm_debug;
dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
if (!dlm_debug) {
mlog_errno(-ENOMEM);
goto out;
}
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
dlm_debug->d_filter_secs = 0;
out:
return dlm_debug;
}
/* Access to this is arbitrated for us via the seq_file mutex. */
struct ocfs2_dlm_seq_priv {
struct ocfs2_dlm_debug *p_dlm_debug;
struct ocfs2_lock_res p_iter_res;
struct ocfs2_lock_res p_tmp_res;
};
static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
struct ocfs2_dlm_seq_priv *priv)
{
struct ocfs2_lock_res *iter, *ret = NULL;
struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
assert_spin_locked(&ocfs2_dlm_tracking_lock);
list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
/* discover the head of the list */
if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
mlog(0, "End of list found, %p\n", ret);
break;
}
/* We track our "dummy" iteration lockres' by a NULL
* l_ops field. */
if (iter->l_ops != NULL) {
ret = iter;
break;
}
}
return ret;
}
static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
struct ocfs2_dlm_seq_priv *priv = m->private;
struct ocfs2_lock_res *iter;
spin_lock(&ocfs2_dlm_tracking_lock);
iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
if (iter) {
/* Since lockres' have the lifetime of their container
* (which can be inodes, ocfs2_supers, etc) we want to
* copy this out to a temporary lockres while still
* under the spinlock. Obviously after this we can't
* trust any pointers on the copy returned, but that's
* ok as the information we want isn't typically held
* in them. */
priv->p_tmp_res = *iter;
iter = &priv->p_tmp_res;
}
spin_unlock(&ocfs2_dlm_tracking_lock);
return iter;
}
static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}
static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ocfs2_dlm_seq_priv *priv = m->private;
struct ocfs2_lock_res *iter = v;
struct ocfs2_lock_res *dummy = &priv->p_iter_res;
spin_lock(&ocfs2_dlm_tracking_lock);
iter = ocfs2_dlm_next_res(iter, priv);
list_del_init(&dummy->l_debug_list);
if (iter) {
list_add(&dummy->l_debug_list, &iter->l_debug_list);
priv->p_tmp_res = *iter;
iter = &priv->p_tmp_res;
}
spin_unlock(&ocfs2_dlm_tracking_lock);
return iter;
}
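/*
 * Iteration trick used above: p_iter_res is a "dummy" lockres (marked
 * by a NULL l_ops) that sits on d_lockres_tracking as a position
 * marker. Each _next step re-inserts the dummy just behind the lockres
 * about to be shown, so the walk stays valid even as real lockres'
 * come and go between seq_file reads.
 */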
/*
* Version is used by debugfs.ocfs2 to determine the format being used
*
* New in version 2
* - Lock stats printed
* New in version 3
* - Max time in lock stats is in usecs (instead of nsecs)
* New in version 4
* - Add last pr/ex unlock times and first lock wait time in usecs
*/
#define OCFS2_DLM_DEBUG_STR_VERSION 4
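/*
 * As emitted by ocfs2_dlm_seq_show() below, each line is tab separated:
 * version, lock name, level, flags, action, unlock_action, ro_holders,
 * ex_holders, requested, blocking, the raw LVB bytes, and (since
 * version 2) the lock statistics.
 */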
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
char *lvb;
struct ocfs2_lock_res *lockres = v;
#ifdef CONFIG_OCFS2_FS_STATS
u64 now, last;
struct ocfs2_dlm_debug *dlm_debug =
((struct ocfs2_dlm_seq_priv *)m->private)->p_dlm_debug;
#endif
if (!lockres)
return -EINVAL;
#ifdef CONFIG_OCFS2_FS_STATS
if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
now = ktime_to_us(ktime_get_real());
if (lockres->l_lock_prmode.ls_last >
lockres->l_lock_exmode.ls_last)
last = lockres->l_lock_prmode.ls_last;
else
last = lockres->l_lock_exmode.ls_last;
/*
* Use d_filter_secs field to filter lock resources dump,
* the default d_filter_secs(0) value filters nothing,
* otherwise, only dump the last N seconds active lock
* resources.
*/
if (div_u64(now - last, 1000000) > dlm_debug->d_filter_secs)
return 0;
}
#endif
seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
lockres->l_name,
(unsigned int)ocfs2_get_dentry_lock_ino(lockres));
else
seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
seq_printf(m, "%d\t"
"0x%lx\t"
"0x%x\t"
"0x%x\t"
"%u\t"
"%u\t"
"%d\t"
"%d\t",
lockres->l_level,
lockres->l_flags,
lockres->l_action,
lockres->l_unlock_action,
lockres->l_ro_holders,
lockres->l_ex_holders,
lockres->l_requested,
lockres->l_blocking);
/* Dump the raw LVB */
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	for (i = 0; i < DLM_LVB_LEN; i++)
seq_printf(m, "0x%x\t", lvb[i]);
#ifdef CONFIG_OCFS2_FS_STATS
# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
# define lock_refresh(_l) ((_l)->l_lock_refresh)
# define lock_last_prmode(_l) ((_l)->l_lock_prmode.ls_last)
# define lock_last_exmode(_l) ((_l)->l_lock_exmode.ls_last)
# define lock_wait(_l) ((_l)->l_lock_wait)
#else
# define lock_num_prmode(_l) (0)
# define lock_num_exmode(_l) (0)
# define lock_num_prmode_failed(_l) (0)
# define lock_num_exmode_failed(_l) (0)
# define lock_total_prmode(_l) (0ULL)
# define lock_total_exmode(_l) (0ULL)
# define lock_max_prmode(_l) (0)
# define lock_max_exmode(_l) (0)
# define lock_refresh(_l) (0)
# define lock_last_prmode(_l) (0ULL)
# define lock_last_exmode(_l) (0ULL)
# define lock_wait(_l) (0ULL)
#endif
/* The following seq_print was added in version 2 of this output */
seq_printf(m, "%u\t"
"%u\t"
"%u\t"
"%u\t"
"%llu\t"
"%llu\t"
"%u\t"
"%u\t"
"%u\t"
"%llu\t"
"%llu\t"
"%llu\t",
lock_num_prmode(lockres),
lock_num_exmode(lockres),
lock_num_prmode_failed(lockres),
lock_num_exmode_failed(lockres),
lock_total_prmode(lockres),
lock_total_exmode(lockres),
lock_max_prmode(lockres),
lock_max_exmode(lockres),
lock_refresh(lockres),
lock_last_prmode(lockres),
lock_last_exmode(lockres),
lock_wait(lockres));
/* End the line */
seq_printf(m, "\n");
return 0;
}
static const struct seq_operations ocfs2_dlm_seq_ops = {
.start = ocfs2_dlm_seq_start,
.stop = ocfs2_dlm_seq_stop,
.next = ocfs2_dlm_seq_next,
.show = ocfs2_dlm_seq_show,
};
static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct ocfs2_dlm_seq_priv *priv = seq->private;
struct ocfs2_lock_res *res = &priv->p_iter_res;
ocfs2_remove_lockres_tracking(res);
ocfs2_put_dlm_debug(priv->p_dlm_debug);
return seq_release_private(inode, file);
}
static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
struct ocfs2_dlm_seq_priv *priv;
struct ocfs2_super *osb;
priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
if (!priv) {
mlog_errno(-ENOMEM);
return -ENOMEM;
}
osb = inode->i_private;
ocfs2_get_dlm_debug(osb->osb_dlm_debug);
priv->p_dlm_debug = osb->osb_dlm_debug;
INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
ocfs2_add_lockres_tracking(&priv->p_iter_res,
priv->p_dlm_debug);
return 0;
}
static const struct file_operations ocfs2_dlm_debug_fops = {
.open = ocfs2_dlm_debug_open,
.release = ocfs2_dlm_debug_release,
.read = seq_read,
.llseek = seq_lseek,
};
static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
&dlm_debug->d_filter_secs);
ocfs2_get_dlm_debug(dlm_debug);
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
if (dlm_debug)
ocfs2_put_dlm_debug(dlm_debug);
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
int status = 0;
struct ocfs2_cluster_connection *conn = NULL;
if (ocfs2_mount_local(osb)) {
osb->node_num = 0;
goto local;
}
ocfs2_dlm_init_debug(osb);
/* launch downconvert thread */
osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
osb->uuid_str);
if (IS_ERR(osb->dc_task)) {
status = PTR_ERR(osb->dc_task);
osb->dc_task = NULL;
mlog_errno(status);
goto bail;
}
/* for now, uuid == domain */
status = ocfs2_cluster_connect(osb->osb_cluster_stack,
osb->osb_cluster_name,
strlen(osb->osb_cluster_name),
osb->uuid_str,
strlen(osb->uuid_str),
&lproto, ocfs2_do_node_down, osb,
&conn);
if (status) {
mlog_errno(status);
goto bail;
}
status = ocfs2_cluster_this_node(conn, &osb->node_num);
if (status < 0) {
mlog_errno(status);
mlog(ML_ERROR,
"could not find this host's node number\n");
ocfs2_cluster_disconnect(conn, 0);
goto bail;
}
local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
ocfs2_nfs_sync_lock_init(osb);
ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
osb->cconn = conn;
bail:
if (status < 0) {
ocfs2_dlm_shutdown_debug(osb);
if (osb->dc_task)
kthread_stop(osb->dc_task);
}
return status;
}
void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
int hangup_pending)
{
ocfs2_drop_osb_locks(osb);
/*
* Now that we have dropped all locks and ocfs2_dismount_volume()
* has disabled recovery, the DLM won't be talking to us. It's
* safe to tear things down before disconnecting the cluster.
*/
if (osb->dc_task) {
kthread_stop(osb->dc_task);
osb->dc_task = NULL;
}
ocfs2_lock_res_free(&osb->osb_super_lockres);
ocfs2_lock_res_free(&osb->osb_rename_lockres);
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
if (osb->cconn) {
ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;
ocfs2_dlm_shutdown_debug(osb);
}
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
unsigned long flags;
u32 lkm_flags = 0;
/* We didn't get anywhere near actually using this lockres. */
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
goto out;
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lkm_flags |= DLM_LKF_VALBLK;
spin_lock_irqsave(&lockres->l_lock, flags);
mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
"lockres %s, flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
while (lockres->l_flags & OCFS2_LOCK_BUSY) {
mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
"%u, unlock_action = %u\n",
lockres->l_name, lockres->l_flags, lockres->l_action,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
/* XXX: Today we just wait on any busy
* locks... Perhaps we need to cancel converts in the
* future? */
ocfs2_wait_on_busy_lock(lockres);
spin_lock_irqsave(&lockres->l_lock, flags);
}
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
lockres->l_level == DLM_LOCK_EX &&
!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
lockres->l_ops->set_lvb(lockres);
}
if (lockres->l_flags & OCFS2_LOCK_BUSY)
mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
lockres->l_name);
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto out;
}
lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
/* make sure we never get here while waiting for an ast to
* fire. */
BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
/* is this necessary? */
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "lock %s\n", lockres->l_name);
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
ocfs2_dlm_dump_lksb(&lockres->l_lksb);
BUG();
}
mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
lockres->l_name);
ocfs2_wait_on_busy_lock(lockres);
out:
return 0;
}
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the downconvert thread before we can consider
* it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int status;
struct ocfs2_mask_waiter mw;
unsigned long flags, flags2;
ocfs2_init_mask_waiter(&mw);
spin_lock_irqsave(&lockres->l_lock, flags);
lockres->l_flags |= OCFS2_LOCK_FREEING;
if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
/*
* We know the downconvert is queued but not in progress
* because we are the downconvert thread and processing
* different lock. So we can just remove the lock from the
* queue. This is not only an optimization but also a way
* to avoid the following deadlock:
* ocfs2_dentry_post_unlock()
* ocfs2_dentry_lock_put()
* ocfs2_drop_dentry_lock()
* iput()
* ocfs2_evict_inode()
* ocfs2_clear_inode()
* ocfs2_mark_lockres_freeing()
* ... blocks waiting for OCFS2_LOCK_QUEUED
* since we are the downconvert thread which
* should clear the flag.
*/
spin_unlock_irqrestore(&lockres->l_lock, flags);
spin_lock_irqsave(&osb->dc_task_lock, flags2);
list_del_init(&lockres->l_blocked_list);
osb->blocked_lock_count--;
spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
/*
* Warn if we recurse into another post_unlock call. Strictly
* speaking it isn't a problem but we need to be careful if
* that happens (stack overflow, deadlocks, ...) so warn if
* ocfs2 grows a path for which this can happen.
*/
WARN_ON_ONCE(lockres->l_ops->post_unlock);
/* Since the lock is freeing we don't do much in the fn below */
ocfs2_process_blocked_lock(osb, lockres);
return;
}
while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "Waiting on lockres %s\n", lockres->l_name);
status = ocfs2_wait_for_mask(&mw);
if (status)
mlog_errno(status);
spin_lock_irqsave(&lockres->l_lock, flags);
}
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
ocfs2_mark_lockres_freeing(osb, lockres);
ret = ocfs2_drop_lock(osb, lockres);
if (ret)
mlog_errno(ret);
}
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
}
int ocfs2_drop_inode_locks(struct inode *inode)
{
int status, err;
/* No need to call ocfs2_mark_lockres_freeing here -
* ocfs2_clear_inode has done it for us. */
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_open_lockres);
if (err < 0)
mlog_errno(err);
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_inode_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_rw_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
status = err;
return status;
}
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
assert_spin_locked(&lockres->l_lock);
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
if (lockres->l_level <= new_level) {
mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
"type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
"block %d, pgen %d\n", lockres->l_name, lockres->l_level,
new_level, list_empty(&lockres->l_blocked_list),
list_empty(&lockres->l_mask_waiters), lockres->l_type,
lockres->l_flags, lockres->l_ro_holders,
lockres->l_ex_holders, lockres->l_action,
lockres->l_unlock_action, lockres->l_requested,
lockres->l_blocking, lockres->l_pending_gen);
BUG();
}
mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
lockres->l_action = OCFS2_AST_DOWNCONVERT;
lockres->l_requested = new_level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
return lockres_set_pending(lockres);
}
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int new_level,
int lvb,
unsigned int generation)
{
int ret;
u32 dlm_flags = DLM_LKF_CONVERT;
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
/*
* On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
* expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
* we can recover correctly from node failure. Otherwise, we may get
* invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
*/
if (ocfs2_userspace_stack(osb) &&
lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lvb = 1;
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
ret = ocfs2_dlm_lock(osb->cconn,
new_level,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, generation, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 1);
goto bail;
}
ret = 0;
bail:
return ret;
}
/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
assert_spin_locked(&lockres->l_lock);
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
* requeue this lock. */
mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
return 0;
}
/* were we in a convert when we got the bast fire? */
BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
lockres->l_action != OCFS2_AST_DOWNCONVERT);
/* set things up for the unlockast to know to just
* clear out the ast_action and unset busy, etc. */
lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
"lock %s, invalid flags: 0x%lx\n",
lockres->l_name, lockres->l_flags);
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
return 1;
}
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
DLM_LKF_CANCEL);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 0);
}
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
return ret;
}
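/*
 * ocfs2_unblock_lock() below is the heart of the downconvert state
 * machine: decide whether a blocked lockres can be downconverted now,
 * must be requeued (holders still active, a convert in flight, a
 * refresh in progress, ...), or first needs an in-flight convert
 * cancelled.
 */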
static int ocfs2_unblock_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
struct ocfs2_unblock_ctl *ctl)
{
unsigned long flags;
int blocking;
int new_level;
int level;
int ret = 0;
int set_lvb = 0;
unsigned int gen;
spin_lock_irqsave(&lockres->l_lock, flags);
recheck:
/*
* Is it still blocking? If not, we have no more work to do.
*/
if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = 0;
goto leave;
}
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
/* XXX
* This is a *big* race. The OCFS2_LOCK_PENDING flag
* exists entirely for one reason - another thread has set
* OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
*
* If we do ocfs2_cancel_convert() before the other thread
* calls dlm_lock(), our cancel will do nothing. We will
* get no ast, and we will have no way of knowing the
* cancel failed. Meanwhile, the other thread will call
* into dlm_lock() and wait...forever.
*
* Why forever? Because another node has asked for the
* lock first; that's why we're here in unblock_lock().
*
* The solution is OCFS2_LOCK_PENDING. When PENDING is
* set, we just requeue the unblock. Only when the other
* thread has called dlm_lock() and cleared PENDING will
* we then cancel their request.
*
 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
 * at the same time they set OCFS2_LOCK_BUSY. They must
 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
*/
if (lockres->l_flags & OCFS2_LOCK_PENDING) {
mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
lockres->l_name);
goto leave_requeue;
}
ctl->requeue = 1;
ret = ocfs2_prepare_cancel_convert(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (ret) {
ret = ocfs2_cancel_convert(osb, lockres);
if (ret < 0)
mlog_errno(ret);
}
goto leave;
}
/*
* This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
* set when the ast is received for an upconvert just before the
* OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
* on the heels of the ast, we want to delay the downconvert just
* enough to allow the up requestor to do its task. Because this
* lock is in the blocked queue, the lock will be downconverted
* as soon as the requestor is done with the lock.
*/
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
goto leave_requeue;
/*
* How can we block and yet be at NL? We were trying to upconvert
* from NL and got canceled. The code comes back here, and now
* we notice and clear BLOCKING.
*/
if (lockres->l_level == DLM_LOCK_NL) {
BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto leave;
}
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
lockres->l_name, lockres->l_ex_holders,
lockres->l_ro_holders);
goto leave_requeue;
}
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == DLM_LOCK_PR &&
lockres->l_ex_holders) {
mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
lockres->l_name, lockres->l_ex_holders);
goto leave_requeue;
}
/*
* Can we get a lock in this state if the holder counts are
* zero? The meta data unblock code used to check this.
*/
if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
&& (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
lockres->l_name);
goto leave_requeue;
}
new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
if (lockres->l_ops->check_downconvert
&& !lockres->l_ops->check_downconvert(lockres, new_level)) {
mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
lockres->l_name);
goto leave_requeue;
}
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
* lock is blocked). We can now downconvert the lock */
if (!lockres->l_ops->downconvert_worker)
goto downconvert;
/* Some lockres types want to do a bit of work before
* downconverting a lock. Allow that here. The worker function
* may sleep, so we save off a copy of what we're blocking as
* it may change while we're not holding the spin lock. */
blocking = lockres->l_blocking;
level = lockres->l_level;
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
if (ctl->unblock_action == UNBLOCK_STOP_POST) {
mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
lockres->l_name);
goto leave;
}
spin_lock_irqsave(&lockres->l_lock, flags);
if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
"Recheck\n", lockres->l_name, blocking,
lockres->l_blocking, level, lockres->l_level);
goto recheck;
}
downconvert:
ctl->requeue = 0;
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
if (lockres->l_level == DLM_LOCK_EX)
set_lvb = 1;
/*
* We only set the lvb if the lock has been fully
* refreshed - otherwise we risk setting stale
 * data. There's no need to actually clear
 * out the lvb here as its value is still valid.
*/
if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
lockres->l_ops->set_lvb(lockres);
}
gen = ocfs2_prepare_downconvert(lockres, new_level);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
gen);
/* The dlm lock convert is being cancelled in the background;
 * ocfs2_cancel_convert() is asynchronous in fs/dlm, so
 * requeue it and try again later.
*/
if (ret == -EBUSY) {
ctl->requeue = 1;
mlog(ML_BASTS, "lockres %s, ReQ: Downconvert busy\n",
lockres->l_name);
ret = 0;
msleep(20);
}
leave:
if (ret)
mlog_errno(ret);
return ret;
leave_requeue:
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->requeue = 1;
return 0;
}
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct inode *inode;
struct address_space *mapping;
struct ocfs2_inode_info *oi;
inode = ocfs2_lock_res_inode(lockres);
mapping = inode->i_mapping;
if (S_ISDIR(inode->i_mode)) {
oi = OCFS2_I(inode);
oi->ip_dir_lock_gen++;
mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
goto out_forget;
}
if (!S_ISREG(inode->i_mode))
goto out;
/*
* We need this before the filemap_fdatawrite() so that it can
* transfer the dirty bit from the PTE to the
* page. Unfortunately this means that even for EX->PR
* downconverts, we'll lose our mappings and have to build
* them up again.
*/
unmap_mapping_range(mapping, 0, 0, 0);
if (filemap_fdatawrite(mapping)) {
mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
}
sync_mapping_buffers(mapping);
if (blocking == DLM_LOCK_EX) {
truncate_inode_pages(mapping, 0);
} else {
/* We only need to wait on the I/O if we're not also
* truncating pages because truncate_inode_pages waits
* for us above. We don't truncate pages if we're
* blocking anything < EXMODE because we want to keep
* them around in that case. */
filemap_fdatawait(mapping);
}
out_forget:
forget_all_cached_acls(inode);
out:
return UNBLOCK_CONTINUE;
}
static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
struct ocfs2_lock_res *lockres,
int new_level)
{
int checkpointed = ocfs2_ci_fully_checkpointed(ci);
BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
if (checkpointed)
return 1;
ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
return 0;
}
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
}
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
__ocfs2_stuff_meta_lvb(inode);
}
/*
* Does the final reference drop on our dentry lock. Right now this
* happens in the downconvert thread, but we could choose to simplify the
* dlmglue API and push these off to the ocfs2_wq in the future.
*/
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
ocfs2_dentry_lock_put(osb, dl);
}
/*
* d_delete() matching dentries before the lock downconvert.
*
* At this point, any process waiting to destroy the
* dentry_lock due to last ref count is stopped by the
* OCFS2_LOCK_QUEUED flag.
*
* We have two potential problems
*
* 1) If we do the last reference drop on our dentry_lock (via dput)
* we'll wind up in ocfs2_release_dentry_lock(), waiting on
* the downconvert to finish. Instead we take an elevated
* reference and push the drop until after we've completed our
* unblock processing.
*
* 2) There might be another process with a final reference,
* waiting on us to finish processing. If this is the case, we
 * detect it and exit out - there are no more dentries anyway.
*/
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
struct dentry *dentry;
unsigned long flags;
int extra_ref = 0;
/*
* This node is blocking another node from getting a read
* lock. This happens when we've renamed within a
* directory. We've forced the other nodes to d_delete(), but
* we never actually dropped our lock because it's still
* valid. The downconvert code will retain a PR for this node,
* so there's no further work to do.
*/
if (blocking == DLM_LOCK_PR)
return UNBLOCK_CONTINUE;
/*
* Mark this inode as potentially orphaned. The code in
* ocfs2_delete_inode() will figure out whether it actually
* needs to be freed or not.
*/
spin_lock(&oi->ip_lock);
oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
spin_unlock(&oi->ip_lock);
/*
* Yuck. We need to make sure however that the check of
* OCFS2_LOCK_FREEING and the extra reference are atomic with
* respect to a reference decrement or the setting of that
* flag.
*/
spin_lock_irqsave(&lockres->l_lock, flags);
spin_lock(&dentry_attach_lock);
if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
&& dl->dl_count) {
dl->dl_count++;
extra_ref = 1;
}
spin_unlock(&dentry_attach_lock);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "extra_ref = %d\n", extra_ref);
/*
* We have a process waiting on us in ocfs2_dentry_iput(),
* which means we can't have any more outstanding
* aliases. There's no need to do any more work.
*/
if (!extra_ref)
return UNBLOCK_CONTINUE;
spin_lock(&dentry_attach_lock);
while (1) {
dentry = ocfs2_find_local_alias(dl->dl_inode,
dl->dl_parent_blkno, 1);
if (!dentry)
break;
spin_unlock(&dentry_attach_lock);
if (S_ISDIR(dl->dl_inode->i_mode))
shrink_dcache_parent(dentry);
mlog(0, "d_delete(%pd);\n", dentry);
/*
* The following dcache calls may do an
* iput(). Normally we don't want that from the
* downconverting thread, but in this case it's ok
* because the requesting node already has an
* exclusive lock on the inode, so it can't be queued
* for a downconvert.
*/
d_delete(dentry);
dput(dentry);
spin_lock(&dentry_attach_lock);
}
spin_unlock(&dentry_attach_lock);
/*
* If we are the last holder of this dentry lock, there is no
* reason to downconvert so skip straight to the unlock.
*/
if (dl->dl_count == 1)
return UNBLOCK_STOP_POST;
return UNBLOCK_CONTINUE_POST;
}
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
struct ocfs2_refcount_tree *tree =
ocfs2_lock_res_refcount_tree(lockres);
return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
}
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct ocfs2_refcount_tree *tree =
ocfs2_lock_res_refcount_tree(lockres);
ocfs2_metadata_cache_purge(&tree->rf_ci);
return UNBLOCK_CONTINUE;
}
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_qinfo_lvb *lvb;
struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
}
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
struct buffer_head *bh = NULL;
struct ocfs2_global_disk_dqinfo *gdinfo;
int status = 0;
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
oinfo->dqi_gi.dqi_free_entry =
be32_to_cpu(lvb->lvb_free_entry);
} else {
status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
oinfo->dqi_giblk, &bh);
if (status) {
mlog_errno(status);
goto bail;
}
gdinfo = (struct ocfs2_global_disk_dqinfo *)
(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
oinfo->dqi_gi.dqi_free_entry =
le32_to_cpu(gdinfo->dqi_free_entry);
brelse(bh);
ocfs2_track_lock_refresh(lockres);
}
bail:
return status;
}
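/*
 * Note the same LVB-first pattern as the inode metadata lock above:
 * trust the LVB when it is valid and of the expected version, otherwise
 * fall back to reading the global quota info block from disk (counting
 * that as a lock refresh for the statistics).
 */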
/* Lock quota info; this function expects at least a shared lock on the
 * quota file so that we can safely refresh quota info from disk. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
int status = 0;
/* On RO devices, locking really isn't needed... */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
goto bail;
}
if (ocfs2_mount_local(osb))
goto bail;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (!ocfs2_should_refresh_lock_res(lockres))
goto bail;
/* OK, we have the lock but we need to refresh the quota info */
status = ocfs2_refresh_qinfo(oinfo);
if (status)
ocfs2_qinfo_unlock(oinfo, ex);
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
return status;
}
int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
int status;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
struct ocfs2_super *osb = lockres->l_priv;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0)
mlog_errno(status);
return status;
}
void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
struct ocfs2_super *osb = lockres->l_priv;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int status;
struct ocfs2_unblock_ctl ctl = {0, 0,};
unsigned long flags;
/* Our reference to the lockres in this function can be
* considered valid until we remove the OCFS2_LOCK_QUEUED
* flag. */
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the downconvert thread was processing other things. A lock can
* still be marked with OCFS2_LOCK_FREEING after this check,
* but short circuiting here will still save us some
* performance. */
spin_lock_irqsave(&lockres->l_lock, flags);
if (lockres->l_flags & OCFS2_LOCK_FREEING)
goto unqueue;
spin_unlock_irqrestore(&lockres->l_lock, flags);
status = ocfs2_unblock_lock(osb, lockres, &ctl);
if (status < 0)
mlog_errno(status);
spin_lock_irqsave(&lockres->l_lock, flags);
unqueue:
if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
} else
ocfs2_schedule_blocked_lock(osb, lockres);
mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
ctl.requeue ? "yes" : "no");
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (ctl.unblock_action != UNBLOCK_CONTINUE
&& lockres->l_ops->post_unlock)
lockres->l_ops->post_unlock(osb, lockres);
}
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
unsigned long flags;
assert_spin_locked(&lockres->l_lock);
if (lockres->l_flags & OCFS2_LOCK_FREEING) {
/* Do not schedule a lock for downconvert when it's on
* the way to destruction - any nodes wanting access
* to the resource will get it soon. */
mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
return;
}
lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
spin_lock_irqsave(&osb->dc_task_lock, flags);
if (list_empty(&lockres->l_blocked_list)) {
list_add_tail(&lockres->l_blocked_list,
&osb->blocked_lock_list);
osb->blocked_lock_count++;
}
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
unsigned long processed;
unsigned long flags;
struct ocfs2_lock_res *lockres;
spin_lock_irqsave(&osb->dc_task_lock, flags);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
osb->dc_work_sequence = osb->dc_wake_sequence;
processed = osb->blocked_lock_count;
/*
* blocked lock processing in this loop might call iput which can
* remove items off osb->blocked_lock_list. Downconvert up to
* 'processed' number of locks, but stop short if we had some
* removed in ocfs2_mark_lockres_freeing when downconverting.
*/
while (processed && !list_empty(&osb->blocked_lock_list)) {
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
osb->blocked_lock_count--;
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
BUG_ON(!processed);
processed--;
ocfs2_process_blocked_lock(osb, lockres);
spin_lock_irqsave(&osb->dc_task_lock, flags);
}
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
int empty = 0;
unsigned long flags;
spin_lock_irqsave(&osb->dc_task_lock, flags);
if (list_empty(&osb->blocked_lock_list))
empty = 1;
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
return empty;
}
static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
int should_wake = 0;
unsigned long flags;
spin_lock_irqsave(&osb->dc_task_lock, flags);
if (osb->dc_work_sequence != osb->dc_wake_sequence)
should_wake = 1;
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
return should_wake;
}
static int ocfs2_downconvert_thread(void *arg)
{
struct ocfs2_super *osb = arg;
/* only quit once we've been asked to stop and there is no more
* work available */
while (!(kthread_should_stop() &&
ocfs2_downconvert_thread_lists_empty(osb))) {
wait_event_interruptible(osb->dc_event,
ocfs2_downconvert_thread_should_wake(osb) ||
kthread_should_stop());
mlog(0, "downconvert_thread: awoken\n");
ocfs2_downconvert_thread_do_work(osb);
}
osb->dc_task = NULL;
return 0;
}
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
unsigned long flags;
spin_lock_irqsave(&osb->dc_task_lock, flags);
/* make sure the downconvert thread gets a swipe at whatever changes
 * the caller may have made to the lock state */
osb->dc_wake_sequence++;
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
wake_up(&osb->dc_event);
}
| linux-master | fs/ocfs2/dlmglue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dcache.c
*
* dentry cache handling code
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "ocfs2_trace.h"
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
unsigned long gen =
OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
BUG_ON(d_inode(dentry));
dentry->d_fsdata = (void *)gen;
}
static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int ret = 0; /* if all else fails, just return false */
struct ocfs2_super *osb;
if (flags & LOOKUP_RCU)
return -ECHILD;
inode = d_inode(dentry);
osb = OCFS2_SB(dentry->d_sb);
trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
dentry->d_name.name);
/* For a negative dentry -
* check the generation number of the parent and compare with the
* one stored in the inode.
*/
if (inode == NULL) {
unsigned long gen = (unsigned long) dentry->d_fsdata;
unsigned long pgen;
spin_lock(&dentry->d_lock);
pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
spin_unlock(&dentry->d_lock);
trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
dentry->d_name.name,
pgen, gen);
if (gen != pgen)
goto bail;
goto valid;
}
BUG_ON(!osb);
if (inode == osb->root_inode || is_bad_inode(inode))
goto bail;
spin_lock(&OCFS2_I(inode)->ip_lock);
/* did we or someone else delete this inode? */
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
trace_ocfs2_dentry_revalidate_delete(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
spin_unlock(&OCFS2_I(inode)->ip_lock);
/*
* We don't need a cluster lock to test this because once an
* inode nlink hits zero, it never goes back.
*/
if (inode->i_nlink == 0) {
trace_ocfs2_dentry_revalidate_orphaned(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
S_ISDIR(inode->i_mode));
goto bail;
}
/*
* If the last lookup failed to create dentry lock, let us
* redo it.
*/
if (!dentry->d_fsdata) {
trace_ocfs2_dentry_revalidate_nofsdata(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
valid:
ret = 1;
bail:
trace_ocfs2_dentry_revalidate_ret(ret);
return ret;
}
static int ocfs2_match_dentry(struct dentry *dentry,
u64 parent_blkno,
int skip_unhashed)
{
struct inode *parent;
/*
* ocfs2_lookup() does a d_splice_alias() _before_ attaching
* to the lock data, so we skip those here, otherwise
* ocfs2_dentry_attach_lock() will get its original dentry
* back.
*/
if (!dentry->d_fsdata)
return 0;
if (!dentry->d_parent)
return 0;
if (skip_unhashed && d_unhashed(dentry))
return 0;
parent = d_inode(dentry->d_parent);
/* Negative parent dentry? */
if (!parent)
return 0;
/* Name is in a different directory. */
if (OCFS2_I(parent)->ip_blkno != parent_blkno)
return 0;
return 1;
}
/*
* Walk the inode alias list, and find a dentry which has a given
* parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
* is looking for a dentry_lock reference. The downconvert thread is
* looking to unhash aliases, so we allow it to skip any that already
* have that property.
*/
struct dentry *ocfs2_find_local_alias(struct inode *inode,
u64 parent_blkno,
int skip_unhashed)
{
struct dentry *dentry;
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
trace_ocfs2_find_local_alias(dentry->d_name.len,
dentry->d_name.name);
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
return dentry;
}
spin_unlock(&dentry->d_lock);
}
spin_unlock(&inode->i_lock);
return NULL;
}
DEFINE_SPINLOCK(dentry_attach_lock);
/*
* Attach this dentry to a cluster lock.
*
* Dentry locks cover all links in a given directory to a particular
* inode. We do this so that ocfs2 can build a lock name which all
* nodes in the cluster can agree on at all times. Shoving full names
* in the cluster lock won't work due to size restrictions. Covering
* links inside of a directory is a good compromise because it still
* allows us to use the parent directory lock to synchronize
* operations.
*
* Call this function with the parent dir semaphore and the parent dir
* cluster lock held.
*
* The dir semaphore will protect us from having to worry about
* concurrent processes on our node trying to attach a lock at the
* same time.
*
* The dir cluster lock (held at either PR or EX mode) protects us
* from unlink and rename on other nodes.
*
* A dput() can happen asynchronously due to pruning, so we cover
* attaching and detaching the dentry lock with a
* dentry_attach_lock.
*
* A node which has done lookup on a name retains a protected read
* lock until final dput. If the user requests an unlink or rename,
* the protected read is upgraded to an exclusive lock. Other nodes
* who have seen the dentry will then be informed that they need to
* downgrade their lock, which will involve d_delete on the
* dentry. This happens in ocfs2_dentry_convert_worker().
*/
int ocfs2_dentry_attach_lock(struct dentry *dentry,
struct inode *inode,
u64 parent_blkno)
{
int ret;
struct dentry *alias;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
(unsigned long long)parent_blkno, dl);
/*
* Negative dentry. We ignore these for now.
*
* XXX: Could we improve ocfs2_dentry_revalidate() by
* tracking these?
*/
if (!inode)
return 0;
if (d_really_is_negative(dentry) && dentry->d_fsdata) {
/* Converting a negative dentry to positive.
 * Clear dentry->d_fsdata. */
dentry->d_fsdata = dl = NULL;
}
if (dl) {
mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
" \"%pd\": old parent: %llu, new: %llu\n",
dentry,
(unsigned long long)parent_blkno,
(unsigned long long)dl->dl_parent_blkno);
return 0;
}
alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
if (alias) {
/*
* Great, an alias exists, which means we must have a
* dentry lock already. We can just grab the lock off
* the alias and add it to the list.
*
* We're depending here on the fact that this dentry
* was found and exists in the dcache and so must have
* a reference to the dentry_lock because we can't
* race creates. Final dput() cannot happen on it
* since we have it pinned, so our reference is safe.
*/
dl = alias->d_fsdata;
mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
(unsigned long long)parent_blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
" \"%pd\": old parent: %llu, new: %llu\n",
dentry,
(unsigned long long)parent_blkno,
(unsigned long long)dl->dl_parent_blkno);
trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
(unsigned long long)parent_blkno,
(unsigned long long)OCFS2_I(inode)->ip_blkno);
goto out_attach;
}
/*
* There are no other aliases
*/
dl = kmalloc(sizeof(*dl), GFP_NOFS);
if (!dl) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
dl->dl_count = 0;
/*
* Does this have to happen below, for all attaches, in case
* the struct inode gets blown away by the downconvert thread?
*/
dl->dl_inode = igrab(inode);
dl->dl_parent_blkno = parent_blkno;
ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);
out_attach:
spin_lock(&dentry_attach_lock);
if (unlikely(dentry->d_fsdata && !alias)) {
/* d_fsdata is set by a racing thread which is doing
* the same thing as this thread is doing. Let the racing
* thread go ahead and simply return here.
*/
spin_unlock(&dentry_attach_lock);
iput(dl->dl_inode);
ocfs2_lock_res_free(&dl->dl_lockres);
kfree(dl);
return 0;
}
dentry->d_fsdata = dl;
dl->dl_count++;
spin_unlock(&dentry_attach_lock);
/*
* This actually gets us our PRMODE level lock. From now on,
* we'll have a notification if one of these names is
* destroyed on another node.
*/
ret = ocfs2_dentry_lock(dentry, 0);
if (!ret)
ocfs2_dentry_unlock(dentry, 0);
else
mlog_errno(ret);
/*
* In case of error, manually free the allocation and do the iput().
* We need to do this because error here means no d_instantiate(),
* which means iput() will not be called during dput(dentry).
*/
if (ret < 0 && !alias) {
ocfs2_lock_res_free(&dl->dl_lockres);
BUG_ON(dl->dl_count != 1);
spin_lock(&dentry_attach_lock);
dentry->d_fsdata = NULL;
spin_unlock(&dentry_attach_lock);
kfree(dl);
iput(inode);
}
dput(alias);
return ret;
}
/*
* ocfs2_dentry_iput() and friends.
*
* At this point, our particular dentry is detached from the inodes
* alias list, so there's no way that the locking code can find it.
*
* The interesting stuff happens when we determine that our lock needs
* to go away because this is the last subdir alias in the
* system. This function needs to handle a couple things:
*
* 1) Synchronizing lock shutdown with the downconvert threads. This
* is already handled for us via the lockres release drop function
* called in ocfs2_release_dentry_lock()
*
* 2) A race may occur when we're doing our lock shutdown and
* another process wants to create a new dentry lock. Right now we
* let them race, which means that for a very short while, this
* node might have two locks on a lock resource. This shouldn't be a
* problem though because one of them is in the process of being
* thrown out.
*/
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
iput(dl->dl_inode);
ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
ocfs2_lock_res_free(&dl->dl_lockres);
kfree(dl);
}
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
int unlock = 0;
BUG_ON(dl->dl_count == 0);
spin_lock(&dentry_attach_lock);
dl->dl_count--;
unlock = !dl->dl_count;
spin_unlock(&dentry_attach_lock);
if (unlock)
ocfs2_drop_dentry_lock(osb, dl);
}
static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
if (!dl) {
/*
* No dentry lock is ok if we're disconnected or
* unhashed.
*/
if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
!d_unhashed(dentry)) {
unsigned long long ino = 0ULL;
if (inode)
ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
mlog(ML_ERROR, "Dentry is missing cluster lock. "
"inode: %llu, d_flags: 0x%x, d_name: %pd\n",
ino, dentry->d_flags, dentry);
}
goto out;
}
mlog_bug_on_msg(dl->dl_count == 0, "dentry: %pd, count: %u\n",
dentry, dl->dl_count);
ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);
out:
iput(inode);
}
/*
* d_move(), but keep the locks in sync.
*
* When we are done, "dentry" will have the parent dir and name of
* "target", which will be thrown away.
*
* We manually update the lock of "dentry" if need be.
*
* "target" doesn't have its dentry lock touched - we allow the later
* dput() to handle this for us.
*
* This is called during ocfs2_rename(), while holding parent
* directory locks. The dentries have already been deleted on other
* nodes via ocfs2_remote_dentry_delete().
*
* Normally, the VFS handles the d_move() for the file system, after
* the ->rename() callback. OCFS2 wants to handle this internally, so
* the new lock can be created atomically with respect to the cluster.
*/
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
struct inode *old_dir, struct inode *new_dir)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
struct inode *inode = d_inode(dentry);
/*
* Move within the same directory, so the actual lock info won't
* change.
*
* XXX: Is there any advantage to dropping the lock here?
*/
if (old_dir == new_dir)
goto out_move;
ocfs2_dentry_lock_put(osb, dentry->d_fsdata);
dentry->d_fsdata = NULL;
ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
if (ret)
mlog_errno(ret);
out_move:
d_move(dentry, target);
}
const struct dentry_operations ocfs2_dentry_ops = {
.d_revalidate = ocfs2_dentry_revalidate,
.d_iput = ocfs2_dentry_iput,
};
| linux-master | fs/ocfs2/dcache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
/* for now we operate under the assertion that there can be only one
* cluster active at a time. Changing this will require trickling
* cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;
static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
"reset", /* O2NM_FENCE_RESET */
"panic", /* O2NM_FENCE_PANIC */
};
static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
struct o2nm_node *node = NULL;
if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
goto out;
read_lock(&o2nm_single_cluster->cl_nodes_lock);
node = o2nm_single_cluster->cl_nodes[node_num];
if (node)
config_item_get(&node->nd_item);
read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
struct o2nm_cluster *cluster = o2nm_single_cluster;
BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
if (cluster == NULL)
return -EINVAL;
read_lock(&cluster->cl_nodes_lock);
bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES);
read_unlock(&cluster->cl_nodes_lock);
return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
__be32 ip_needle,
struct rb_node ***ret_p,
struct rb_node **ret_parent)
{
struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
struct rb_node *parent = NULL;
struct o2nm_node *node, *ret = NULL;
while (*p) {
int cmp;
parent = *p;
node = rb_entry(parent, struct o2nm_node, nd_ip_node);
cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
sizeof(ip_needle));
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
p = &(*p)->rb_right;
else {
ret = node;
break;
}
}
if (ret_p != NULL)
*ret_p = p;
if (ret_parent != NULL)
*ret_parent = parent;
return ret;
}
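/*
 * On a miss, *ret_p and *ret_parent describe the rb-tree link where a
 * new node would be attached; o2nm_node_ipv4_address_store() feeds them
 * straight into rb_link_node()/rb_insert_color() under the same
 * cl_nodes_lock, making its lookup-then-insert atomic.
 */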
struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
struct o2nm_node *node = NULL;
struct o2nm_cluster *cluster = o2nm_single_cluster;
if (cluster == NULL)
goto out;
read_lock(&cluster->cl_nodes_lock);
node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
if (node)
config_item_get(&node->nd_item);
read_unlock(&cluster->cl_nodes_lock);
out:
return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);
void o2nm_node_put(struct o2nm_node *node)
{
config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);
void o2nm_node_get(struct o2nm_node *node)
{
config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);
u8 o2nm_this_node(void)
{
u8 node_num = O2NM_MAX_NODES;
if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
node_num = o2nm_single_cluster->cl_local_node;
return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);
/* node configfs bits */
static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
return item ?
container_of(to_config_group(item), struct o2nm_cluster,
cl_group)
: NULL;
}
static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}
static void o2nm_node_release(struct config_item *item)
{
struct o2nm_node *node = to_o2nm_node(item);
kfree(node);
}
static ssize_t o2nm_node_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num);
}
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
* mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
if (node->nd_item.ci_parent)
return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
else
return NULL;
}
enum {
O2NM_NODE_ATTR_NUM = 0,
O2NM_NODE_ATTR_PORT,
O2NM_NODE_ATTR_ADDRESS,
};
static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
int ret = 0;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
if (tmp >= O2NM_MAX_NODES)
return -ERANGE;
/* once we're in the cl_nodes tree networking can look us up by
* node number and try to use our address and port attributes
* to connect to this node.. make sure that they've been set
* before writing the node attribute? */
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
o2nm_unlock_subsystem();
return -EINVAL;
}
write_lock(&cluster->cl_nodes_lock);
if (cluster->cl_nodes[tmp])
ret = -EEXIST;
else if (test_and_set_bit(O2NM_NODE_ATTR_NUM,
&node->nd_set_attributes))
ret = -EBUSY;
else {
cluster->cl_nodes[tmp] = node;
node->nd_num = tmp;
set_bit(tmp, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
o2nm_unlock_subsystem();
if (ret)
return ret;
return count;
}
static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port));
}
static ssize_t o2nm_node_ipv4_port_store(struct config_item *item,
const char *page, size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
unsigned long tmp;
char *p = (char *)page;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
if (tmp == 0)
return -EINVAL;
if (tmp >= (u16)-1)
return -ERANGE;
if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EBUSY;
node->nd_ipv4_port = htons(tmp);
return count;
}
static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page)
{
return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address);
}
static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster;
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
__be32 ipv4_addr = 0;
ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
&octets[1], &octets[0]);
if (ret != 4)
return -EINVAL;
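/*
 * sscanf above fills octets[] most-significant-first (octets[3] holds
 * the leading octet), so the loop below assembles the address in
 * network byte order: for "192.168.1.10" it accumulates
 * 10 << 0 | 1 << 8 | 168 << 16 | 192 << 24 into ipv4_addr via
 * be32_add_cpu().
 */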
for (i = 0; i < ARRAY_SIZE(octets); i++) {
if (octets[i] > 255)
return -ERANGE;
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}
o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
o2nm_unlock_subsystem();
return -EINVAL;
}
ret = 0;
write_lock(&cluster->cl_nodes_lock);
if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
ret = -EEXIST;
else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS,
&node->nd_set_attributes))
ret = -EBUSY;
else {
rb_link_node(&node->nd_ip_node, parent, p);
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
o2nm_unlock_subsystem();
if (ret)
return ret;
memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
return count;
}
static ssize_t o2nm_node_local_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local);
}
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
tmp = !!tmp; /* boolean of whether this node wants to be local */
/* setting local turns on networking rx for now so we require having
* set everything else first */
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
ret = -EINVAL;
goto out;
}
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
cluster->cl_local_node != node->nd_num) {
ret = -EBUSY;
goto out;
}
/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
goto out;
}
if (!tmp && cluster->cl_has_local &&
cluster->cl_local_node == node->nd_num) {
o2net_stop_listening(node);
cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
}
node->nd_local = tmp;
if (node->nd_local) {
cluster->cl_has_local = tmp;
cluster->cl_local_node = node->nd_num;
}
ret = count;
out:
o2nm_unlock_subsystem();
return ret;
}
CONFIGFS_ATTR(o2nm_node_, num);
CONFIGFS_ATTR(o2nm_node_, ipv4_port);
CONFIGFS_ATTR(o2nm_node_, ipv4_address);
CONFIGFS_ATTR(o2nm_node_, local);
static struct configfs_attribute *o2nm_node_attrs[] = {
&o2nm_node_attr_num,
&o2nm_node_attr_ipv4_port,
&o2nm_node_attr_ipv4_address,
&o2nm_node_attr_local,
NULL,
};
static struct configfs_item_operations o2nm_node_item_ops = {
.release = o2nm_node_release,
};
static const struct config_item_type o2nm_node_type = {
.ct_item_ops = &o2nm_node_item_ops,
.ct_attrs = o2nm_node_attrs,
.ct_owner = THIS_MODULE,
};
/* node set */
struct o2nm_node_group {
struct config_group ns_group;
/* some stuff? */
};
#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
return group ?
container_of(group, struct o2nm_node_group, ns_group)
: NULL;
}
#endif
static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
unsigned int *val)
{
unsigned long tmp;
char *p = (char *)page;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
if (tmp == 0)
return -EINVAL;
if (tmp >= (u32)-1)
return -ERANGE;
*val = tmp;
return count;
}
static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms);
}
static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item,
const char *page, size_t count)
{
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
ssize_t ret;
unsigned int val;
ret = o2nm_cluster_attr_write(page, count, &val);
if (ret > 0) {
if (cluster->cl_idle_timeout_ms != val
&& o2net_num_connected_peers()) {
mlog(ML_NOTICE,
"o2net: cannot change idle timeout after "
"the first peer has agreed to it."
" %d connected peers\n",
o2net_num_connected_peers());
ret = -EINVAL;
} else if (val <= cluster->cl_keepalive_delay_ms) {
mlog(ML_NOTICE, "o2net: idle timeout must be larger "
"than keepalive delay\n");
ret = -EINVAL;
} else {
cluster->cl_idle_timeout_ms = val;
}
}
return ret;
}
static ssize_t o2nm_cluster_keepalive_delay_ms_show(
struct config_item *item, char *page)
{
return sprintf(page, "%u\n",
to_o2nm_cluster(item)->cl_keepalive_delay_ms);
}
static ssize_t o2nm_cluster_keepalive_delay_ms_store(
struct config_item *item, const char *page, size_t count)
{
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
ssize_t ret;
unsigned int val;
ret = o2nm_cluster_attr_write(page, count, &val);
if (ret > 0) {
if (cluster->cl_keepalive_delay_ms != val
&& o2net_num_connected_peers()) {
mlog(ML_NOTICE,
"o2net: cannot change keepalive delay after"
" the first peer has agreed to it."
" %d connected peers\n",
o2net_num_connected_peers());
ret = -EINVAL;
} else if (val >= cluster->cl_idle_timeout_ms) {
mlog(ML_NOTICE, "o2net: keepalive delay must be "
"smaller than idle timeout\n");
ret = -EINVAL;
} else {
cluster->cl_keepalive_delay_ms = val;
}
}
return ret;
}
static ssize_t o2nm_cluster_reconnect_delay_ms_show(
struct config_item *item, char *page)
{
return sprintf(page, "%u\n",
to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}
static ssize_t o2nm_cluster_reconnect_delay_ms_store(
struct config_item *item, const char *page, size_t count)
{
return o2nm_cluster_attr_write(page, count,
&to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}
static ssize_t o2nm_cluster_fence_method_show(
struct config_item *item, char *page)
{
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
ssize_t ret = 0;
if (cluster)
ret = sprintf(page, "%s\n",
o2nm_fence_method_desc[cluster->cl_fence_method]);
return ret;
}
static ssize_t o2nm_cluster_fence_method_store(
struct config_item *item, const char *page, size_t count)
{
unsigned int i;
if (page[count - 1] != '\n')
goto bail;
for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
if (count != strlen(o2nm_fence_method_desc[i]) + 1)
continue;
if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
continue;
if (to_o2nm_cluster(item)->cl_fence_method != i) {
printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
o2nm_fence_method_desc[i]);
to_o2nm_cluster(item)->cl_fence_method = i;
}
return count;
}
bail:
return -EINVAL;
}
CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms);
CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, fence_method);
static struct configfs_attribute *o2nm_cluster_attrs[] = {
&o2nm_cluster_attr_idle_timeout_ms,
&o2nm_cluster_attr_keepalive_delay_ms,
&o2nm_cluster_attr_reconnect_delay_ms,
&o2nm_cluster_attr_fence_method,
NULL,
};
static struct config_item *o2nm_node_group_make_item(struct config_group *group,
const char *name)
{
struct o2nm_node *node = NULL;
if (strlen(name) > O2NM_MAX_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
if (node == NULL)
return ERR_PTR(-ENOMEM);
strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
spin_lock_init(&node->nd_lock);
mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);
return &node->nd_item;
}
static void o2nm_node_group_drop_item(struct config_group *group,
struct config_item *item)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
if (cluster->cl_nodes[node->nd_num] == node) {
o2net_disconnect_node(node);
if (cluster->cl_has_local &&
(cluster->cl_local_node == node->nd_num)) {
cluster->cl_has_local = 0;
cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
o2net_stop_listening(node);
}
}
/* XXX call into net to stop this node from trading messages */
write_lock(&cluster->cl_nodes_lock);
/* XXX sloppy */
if (node->nd_ipv4_address)
rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
/* nd_num might be 0 if the node number hasn't been set.. */
if (cluster->cl_nodes[node->nd_num] == node) {
cluster->cl_nodes[node->nd_num] = NULL;
clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
config_item_name(&node->nd_item));
config_item_put(item);
}
static struct configfs_group_operations o2nm_node_group_group_ops = {
.make_item = o2nm_node_group_make_item,
.drop_item = o2nm_node_group_drop_item,
};
static const struct config_item_type o2nm_node_group_type = {
.ct_group_ops = &o2nm_node_group_group_ops,
.ct_owner = THIS_MODULE,
};
/* cluster */
static void o2nm_cluster_release(struct config_item *item)
{
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
kfree(cluster);
}
static struct configfs_item_operations o2nm_cluster_item_ops = {
.release = o2nm_cluster_release,
};
static const struct config_item_type o2nm_cluster_type = {
.ct_item_ops = &o2nm_cluster_item_ops,
.ct_attrs = o2nm_cluster_attrs,
.ct_owner = THIS_MODULE,
};
/* cluster set */
struct o2nm_cluster_group {
struct configfs_subsystem cs_subsys;
/* some stuff? */
};
#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
return group ?
container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
: NULL;
}
#endif
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
const char *name)
{
struct o2nm_cluster *cluster = NULL;
struct o2nm_node_group *ns = NULL;
struct config_group *o2hb_group = NULL, *ret = NULL;
/* this runs under the parent dir's i_rwsem; there can be only
* one caller in here at a time */
if (o2nm_single_cluster)
return ERR_PTR(-ENOSPC);
cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
o2hb_group = o2hb_alloc_hb_set();
if (cluster == NULL || ns == NULL || o2hb_group == NULL)
goto out;
config_group_init_type_name(&cluster->cl_group, name,
&o2nm_cluster_type);
configfs_add_default_group(&ns->ns_group, &cluster->cl_group);
config_group_init_type_name(&ns->ns_group, "node",
&o2nm_node_group_type);
configfs_add_default_group(o2hb_group, &cluster->cl_group);
rwlock_init(&cluster->cl_nodes_lock);
cluster->cl_node_ip_tree = RB_ROOT;
cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
cluster->cl_fence_method = O2NM_FENCE_RESET;
ret = &cluster->cl_group;
o2nm_single_cluster = cluster;
out:
if (ret == NULL) {
kfree(cluster);
kfree(ns);
o2hb_free_hb_set(o2hb_group);
ret = ERR_PTR(-ENOMEM);
}
return ret;
}
static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
struct o2nm_cluster *cluster = to_o2nm_cluster(item);
BUG_ON(o2nm_single_cluster != cluster);
o2nm_single_cluster = NULL;
configfs_remove_default_groups(&cluster->cl_group);
config_item_put(item);
}
static struct configfs_group_operations o2nm_cluster_group_group_ops = {
.make_group = o2nm_cluster_group_make_group,
.drop_item = o2nm_cluster_group_drop_item,
};
static const struct config_item_type o2nm_cluster_group_type = {
.ct_group_ops = &o2nm_cluster_group_group_ops,
.ct_owner = THIS_MODULE,
};
static struct o2nm_cluster_group o2nm_cluster_group = {
.cs_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "cluster",
.ci_type = &o2nm_cluster_group_type,
},
},
},
};
static inline void o2nm_lock_subsystem(void)
{
mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}
static inline void o2nm_unlock_subsystem(void)
{
mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}
int o2nm_depend_item(struct config_item *item)
{
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}
void o2nm_undepend_item(struct config_item *item)
{
configfs_undepend_item(item);
}
int o2nm_depend_this_node(void)
{
int ret = 0;
struct o2nm_node *local_node;
local_node = o2nm_get_node_by_num(o2nm_this_node());
if (!local_node) {
ret = -EINVAL;
goto out;
}
ret = o2nm_depend_item(&local_node->nd_item);
o2nm_node_put(local_node);
out:
return ret;
}
void o2nm_undepend_this_node(void)
{
struct o2nm_node *local_node;
local_node = o2nm_get_node_by_num(o2nm_this_node());
BUG_ON(!local_node);
o2nm_undepend_item(&local_node->nd_item);
o2nm_node_put(local_node);
}
static void __exit exit_o2nm(void)
{
/* XXX sync with hb callbacks and shut down hb? */
o2net_unregister_hb_callbacks();
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
o2cb_sys_shutdown();
o2net_exit();
o2hb_exit();
}
static int __init init_o2nm(void)
{
int ret;
o2hb_init();
ret = o2net_init();
if (ret)
goto out_o2hb;
ret = o2net_register_hb_callbacks();
if (ret)
goto out_o2net;
config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
if (ret) {
printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
goto out_callbacks;
}
ret = o2cb_sys_init();
if (!ret)
goto out;
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
o2net_unregister_hb_callbacks();
out_o2net:
o2net_exit();
out_o2hb:
o2hb_exit();
out:
return ret;
}
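/*
 * Usage sketch (illustrative, not part of the original source): once the
 * module is loaded, userspace - normally the o2cb init scripts - builds
 * the cluster through the configfs tree registered above. Assuming
 * configfs is mounted at /sys/kernel/config and using made-up cluster
 * and node names, the flow is roughly:
 *
 *	mkdir /sys/kernel/config/cluster/mycluster
 *	mkdir /sys/kernel/config/cluster/mycluster/node/node1
 *	echo 192.168.1.10 > .../mycluster/node/node1/ipv4_address
 *	echo 7777 > .../mycluster/node/node1/ipv4_port
 *	echo 0 > .../mycluster/node/node1/num
 *	echo 1 > .../mycluster/node/node1/local
 *
 * The store functions above enforce the ordering: ipv4_address and
 * ipv4_port must be set before num, and all three before local.
 */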
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 cluster management");
module_init(init_o2nm)
module_exit(exit_o2nm)
| linux-master | fs/ocfs2/cluster/nodemanager.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sys.c
*
* OCFS2 cluster sysfs interface
*
* Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/fs.h>
#include "ocfs2_nodemanager.h"
#include "masklog.h"
#include "sys.h"
static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", O2NM_API_VERSION);
}
static struct kobj_attribute attr_version =
__ATTR(interface_revision, S_IRUGO, version_show, NULL);
static struct attribute *o2cb_attrs[] = {
&attr_version.attr,
NULL,
};
static struct attribute_group o2cb_attr_group = {
.attrs = o2cb_attrs,
};
static struct kset *o2cb_kset;
void o2cb_sys_shutdown(void)
{
mlog_sys_shutdown();
kset_unregister(o2cb_kset);
}
int o2cb_sys_init(void)
{
int ret;
o2cb_kset = kset_create_and_add("o2cb", NULL, fs_kobj);
if (!o2cb_kset)
return -ENOMEM;
ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group);
if (ret)
goto error;
ret = mlog_sys_init(o2cb_kset);
if (ret)
goto error;
return 0;
error:
kset_unregister(o2cb_kset);
return ret;
}
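/*
 * Illustrative note (assumed layout): since the kset is added under
 * fs_kobj, the attribute above shows up as
 * /sys/fs/o2cb/interface_revision, with mlog_sys_init() adding the
 * logmask/ directory next to it.
 */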
| linux-master | fs/ocfs2/cluster/sys.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include "masklog.h"
struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);
static ssize_t mlog_mask_show(u64 mask, char *buf)
{
char *state;
if (__mlog_test_u64(mask, mlog_and_bits))
state = "allow";
else if (__mlog_test_u64(mask, mlog_not_bits))
state = "deny";
else
state = "off";
return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
{
if (!strncasecmp(buf, "allow", 5)) {
__mlog_set_u64(mask, mlog_and_bits);
__mlog_clear_u64(mask, mlog_not_bits);
} else if (!strncasecmp(buf, "deny", 4)) {
__mlog_set_u64(mask, mlog_not_bits);
__mlog_clear_u64(mask, mlog_and_bits);
} else if (!strncasecmp(buf, "off", 3)) {
__mlog_clear_u64(mask, mlog_not_bits);
__mlog_clear_u64(mask, mlog_and_bits);
} else
return -EINVAL;
return count;
}
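/*
 * Usage sketch (illustrative; it assumes the /sys/fs/o2cb placement set
 * up by o2cb_sys_init() and mlog_sys_init()): each mask becomes a sysfs
 * file that accepts the strings parsed above, e.g.
 *
 *	echo allow > /sys/fs/o2cb/logmask/TCP
 *	echo off > /sys/fs/o2cb/logmask/TCP
 */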
void __mlog_printk(const u64 *mask, const char *func, int line,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
const char *level;
const char *prefix = "";
if (!__mlog_test_u64(*mask, mlog_and_bits) ||
__mlog_test_u64(*mask, mlog_not_bits))
return;
if (*mask & ML_ERROR) {
level = KERN_ERR;
prefix = "ERROR: ";
} else if (*mask & ML_NOTICE) {
level = KERN_NOTICE;
} else {
level = KERN_INFO;
}
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk("%s(%s,%u,%u):%s:%d %s%pV",
level, current->comm, task_pid_nr(current),
raw_smp_processor_id(), func, line, prefix, &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(__mlog_printk);
struct mlog_attribute {
struct attribute attr;
u64 mask;
};
#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
#define define_mask(_name) { \
.attr = { \
.name = #_name, \
.mode = S_IRUGO | S_IWUSR, \
}, \
.mask = ML_##_name, \
}
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(TCP),
define_mask(MSG),
define_mask(SOCKET),
define_mask(HEARTBEAT),
define_mask(HB_BIO),
define_mask(DLMFS),
define_mask(DLM),
define_mask(DLM_DOMAIN),
define_mask(DLM_THREAD),
define_mask(DLM_MASTER),
define_mask(DLM_RECOVERY),
define_mask(DLM_GLUE),
define_mask(VOTE),
define_mask(CONN),
define_mask(QUORUM),
define_mask(BASTS),
define_mask(CLUSTER),
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
};
static struct attribute *mlog_default_attrs[MLOG_MAX_BITS] = {NULL, };
ATTRIBUTE_GROUPS(mlog_default);
static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
char *buf)
{
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
return mlog_mask_show(mlog_attr->mask, buf);
}
static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
const char *buf, size_t count)
{
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
return mlog_mask_store(mlog_attr->mask, buf, count);
}
static const struct sysfs_ops mlog_attr_ops = {
.show = mlog_show,
.store = mlog_store,
};
static struct kobj_type mlog_ktype = {
.default_groups = mlog_default_groups,
.sysfs_ops = &mlog_attr_ops,
};
static struct kset mlog_kset = {
.kobj = {.ktype = &mlog_ktype},
};
int mlog_sys_init(struct kset *o2cb_kset)
{
int i = 0;
while (mlog_attrs[i].attr.mode) {
mlog_default_attrs[i] = &mlog_attrs[i].attr;
i++;
}
mlog_default_attrs[i] = NULL;
kobject_set_name(&mlog_kset.kobj, "logmask");
mlog_kset.kobj.kset = o2cb_kset;
return kset_register(&mlog_kset);
}
void mlog_sys_shutdown(void)
{
kset_unregister(&mlog_kset);
}
| linux-master | fs/ocfs2/cluster/masklog.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2004 Oracle. All rights reserved.
*
* ----
*
* Callers for this were originally written against a very simple synchronous
* API. This implementation reflects those simple callers. Some day I'm sure
* we'll need to move to a more robust posting/callback mechanism.
*
* Transmit calls pass in kernel virtual addresses and block copying this into
* the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
* for a failed socket to time out. TX callers can also pass in a pointer to an
* 'int' which gets filled with an errno off the wire in response to the
* message they send.
*
* Handlers for unsolicited messages are registered. Each socket has a page
* that incoming data is copied into. First the header, then the data.
* Handlers are called from only one thread with a reference to this per-socket
* page. This page is destroyed after the handler call, so it can't be
* referenced beyond the call. Handlers may block but are discouraged from
* doing so.
*
* Any framing errors (bad magic, large payload lengths) close a connection.
*
* Our sock_container holds the state we associate with a socket. Its current
* framing state is held there as well as the refcounting we do around when it
* is safe to tear down the socket. The socket is only finally torn down from
* the container when the container loses all of its references -- so as long
* as you hold a ref on the container you can trust that the socket is valid
* for use with kernel socket APIs.
*
* Connections are initiated between a pair of nodes when the node with the
* higher node number gets a heartbeat callback which indicates that the lower
* numbered node has started heartbeating. The lower numbered node is passive
* and only accepts the connection if the higher numbered node is heartbeating.
*/
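/*
 * Minimal usage sketch (illustrative only; MY_MSG_TYPE, MY_KEY and the
 * functions below are made up, but the o2net_register_handler() and
 * o2net_send_message() prototypes are assumed to match tcp.h):
 */
#if 0
static int my_handler(struct o2net_msg *msg, u32 len, void *data,
		      void **ret_data)
{
	/* called from the receive path with the per-socket page;
	 * msg/payload must not be referenced after returning */
	return 0;	/* this status travels back to the sender */
}

static int my_send_example(u8 target_node)
{
	LIST_HEAD(unreg_list);
	__be32 payload = cpu_to_be32(42);
	int status = 0, ret;

	ret = o2net_register_handler(MY_MSG_TYPE, MY_KEY, sizeof(payload),
				     my_handler, NULL, NULL, &unreg_list);
	if (ret)
		return ret;

	/* blocks until the payload is sent and a status is returned */
	ret = o2net_send_message(MY_MSG_TYPE, MY_KEY, &payload,
				 sizeof(payload), target_node, &status);
	return ret ? ret : status;
}
#endif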
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/net.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <trace/events/sock.h>
#include <linux/uaccess.h>
#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_TCP
#include "masklog.h"
#include "quorum.h"
#include "tcp_internal.h"
#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
&sc->sc_node->nd_ipv4_address, \
ntohs(sc->sc_node->nd_ipv4_port)
/*
* In the following two log macros, the whitespace after the ',' just
* before ##args is intentional. Otherwise, gcc 2.95 will eat the
* previous token if args expands to nothing.
*/
#define msglog(hdr, fmt, args...) do { \
typeof(hdr) __hdr = (hdr); \
mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
"key %08x num %u] " fmt, \
be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
be32_to_cpu(__hdr->msg_num) , ##args); \
} while (0)
#define sclog(sc, fmt, args...) do { \
typeof(sc) __sc = (sc); \
mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
"pg_off %zu] " fmt, __sc, \
kref_read(&__sc->sc_kref), __sc->sc_sock, \
__sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
##args); \
} while (0)
static DEFINE_RWLOCK(o2net_handler_lock);
static struct rb_root o2net_handler_tree = RB_ROOT;
static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
/* XXX someday we'll need better accounting */
static struct socket *o2net_listen_sock;
/*
* listen work is only queued by the listening socket callbacks on the
* o2net_wq. teardown detaches the callbacks before destroying the workqueue.
* quorum work is queued as sock containers are shutdown.. stop_listening
* tears down all the node's sock containers, preventing future shutdowns
* and queued quorum work, before canceling delayed quorum work and
* destroying the work queue.
*/
static struct workqueue_struct *o2net_wq;
static struct work_struct o2net_listen_work;
static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
#define O2NET_HB_PRI 0x1
static struct o2net_handshake *o2net_hand;
static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
static int o2net_sys_err_translations[O2NET_ERR_MAX] =
{[O2NET_ERR_NONE] = 0,
[O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
[O2NET_ERR_OVERFLOW] = -EOVERFLOW,
[O2NET_ERR_DIED] = -EHOSTDOWN,};
/* can't quite avoid *all* internal declarations :/ */
static void o2net_sc_connect_completed(struct work_struct *work);
static void o2net_rx_until_empty(struct work_struct *work);
static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk);
static void o2net_sc_send_keep_req(struct work_struct *work);
static void o2net_idle_timer(struct timer_list *t);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
#ifdef CONFIG_DEBUG_FS
static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
u32 msgkey, struct task_struct *task, u8 node)
{
INIT_LIST_HEAD(&nst->st_net_debug_item);
nst->st_task = task;
nst->st_msg_type = msgtype;
nst->st_msg_key = msgkey;
nst->st_node = node;
}
static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
{
nst->st_sock_time = ktime_get();
}
static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
{
nst->st_send_time = ktime_get();
}
static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
{
nst->st_status_time = ktime_get();
}
static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
nst->st_sc = sc;
}
static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
u32 msg_id)
{
nst->st_id = msg_id;
}
static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
{
sc->sc_tv_timer = ktime_get();
}
static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
{
sc->sc_tv_data_ready = ktime_get();
}
static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
{
sc->sc_tv_advance_start = ktime_get();
}
static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
{
sc->sc_tv_advance_stop = ktime_get();
}
static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
{
sc->sc_tv_func_start = ktime_get();
}
static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
{
sc->sc_tv_func_stop = ktime_get();
}
#else /* CONFIG_DEBUG_FS */
# define o2net_init_nst(a, b, c, d, e)
# define o2net_set_nst_sock_time(a)
# define o2net_set_nst_send_time(a)
# define o2net_set_nst_status_time(a)
# define o2net_set_nst_sock_container(a, b)
# define o2net_set_nst_msg_id(a, b)
# define o2net_set_sock_timer(a)
# define o2net_set_data_ready_time(a)
# define o2net_set_advance_start_time(a)
# define o2net_set_advance_stop_time(a)
# define o2net_set_func_start_time(a)
# define o2net_set_func_stop_time(a)
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_OCFS2_FS_STATS
static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
{
return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
}
static void o2net_update_send_stats(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
ktime_sub(ktime_get(),
nst->st_status_time));
sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
ktime_sub(nst->st_status_time,
nst->st_send_time));
sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
ktime_sub(nst->st_send_time,
nst->st_sock_time));
sc->sc_send_count++;
}
static void o2net_update_recv_stats(struct o2net_sock_container *sc)
{
sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
o2net_get_func_run_time(sc));
sc->sc_recv_count++;
}
#else
# define o2net_update_send_stats(a, b)
# define o2net_update_recv_stats(sc)
#endif /* CONFIG_OCFS2_FS_STATS */
static inline unsigned int o2net_reconnect_delay(void)
{
return o2nm_single_cluster->cl_reconnect_delay_ms;
}
static inline unsigned int o2net_keepalive_delay(void)
{
return o2nm_single_cluster->cl_keepalive_delay_ms;
}
static inline unsigned int o2net_idle_timeout(void)
{
return o2nm_single_cluster->cl_idle_timeout_ms;
}
static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
{
int trans;
BUG_ON(err >= O2NET_ERR_MAX);
trans = o2net_sys_err_translations[err];
/* Just in case we mess up the translation table above */
BUG_ON(err != O2NET_ERR_NONE && trans == 0);
return trans;
}
static struct o2net_node * o2net_nn_from_num(u8 node_num)
{
BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes));
return &o2net_nodes[node_num];
}
static u8 o2net_num_from_nn(struct o2net_node *nn)
{
BUG_ON(nn == NULL);
return nn - o2net_nodes;
}
/* ------------------------------------------------------------ */
static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
{
int ret;
spin_lock(&nn->nn_lock);
ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
if (ret >= 0) {
nsw->ns_id = ret;
list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
}
spin_unlock(&nn->nn_lock);
if (ret < 0)
return ret;
init_waitqueue_head(&nsw->ns_wq);
nsw->ns_sys_status = O2NET_ERR_NONE;
nsw->ns_status = 0;
return 0;
}
static void o2net_complete_nsw_locked(struct o2net_node *nn,
struct o2net_status_wait *nsw,
enum o2net_system_error sys_status,
s32 status)
{
assert_spin_locked(&nn->nn_lock);
if (!list_empty(&nsw->ns_node_item)) {
list_del_init(&nsw->ns_node_item);
nsw->ns_sys_status = sys_status;
nsw->ns_status = status;
idr_remove(&nn->nn_status_idr, nsw->ns_id);
wake_up(&nsw->ns_wq);
}
}
static void o2net_complete_nsw(struct o2net_node *nn,
struct o2net_status_wait *nsw,
u64 id, enum o2net_system_error sys_status,
s32 status)
{
spin_lock(&nn->nn_lock);
if (nsw == NULL) {
if (id > INT_MAX)
goto out;
nsw = idr_find(&nn->nn_status_idr, id);
if (nsw == NULL)
goto out;
}
o2net_complete_nsw_locked(nn, nsw, sys_status, status);
out:
spin_unlock(&nn->nn_lock);
return;
}
static void o2net_complete_nodes_nsw(struct o2net_node *nn)
{
struct o2net_status_wait *nsw, *tmp;
unsigned int num_kills = 0;
assert_spin_locked(&nn->nn_lock);
list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
num_kills++;
}
mlog(0, "completed %d messages for node %u\n", num_kills,
o2net_num_from_nn(nn));
}
static int o2net_nsw_completed(struct o2net_node *nn,
struct o2net_status_wait *nsw)
{
int completed;
spin_lock(&nn->nn_lock);
completed = list_empty(&nsw->ns_node_item);
spin_unlock(&nn->nn_lock);
return completed;
}
/* ------------------------------------------------------------ */
static void sc_kref_release(struct kref *kref)
{
struct o2net_sock_container *sc = container_of(kref,
struct o2net_sock_container, sc_kref);
BUG_ON(timer_pending(&sc->sc_idle_timeout));
sclog(sc, "releasing\n");
if (sc->sc_sock) {
sock_release(sc->sc_sock);
sc->sc_sock = NULL;
}
o2nm_undepend_item(&sc->sc_node->nd_item);
o2nm_node_put(sc->sc_node);
sc->sc_node = NULL;
o2net_debug_del_sc(sc);
if (sc->sc_page)
__free_page(sc->sc_page);
kfree(sc);
}
static void sc_put(struct o2net_sock_container *sc)
{
sclog(sc, "put\n");
kref_put(&sc->sc_kref, sc_kref_release);
}
static void sc_get(struct o2net_sock_container *sc)
{
sclog(sc, "get\n");
kref_get(&sc->sc_kref);
}
static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
{
struct o2net_sock_container *sc, *ret = NULL;
struct page *page = NULL;
int status = 0;
page = alloc_page(GFP_NOFS);
sc = kzalloc(sizeof(*sc), GFP_NOFS);
if (sc == NULL || page == NULL)
goto out;
kref_init(&sc->sc_kref);
o2nm_node_get(node);
sc->sc_node = node;
/* pin the node item of the remote node */
status = o2nm_depend_item(&node->nd_item);
if (status) {
mlog_errno(status);
o2nm_node_put(node);
goto out;
}
INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0);
sclog(sc, "alloced\n");
ret = sc;
sc->sc_page = page;
o2net_debug_add_sc(sc);
sc = NULL;
page = NULL;
out:
if (page)
__free_page(page);
kfree(sc);
return ret;
}
/* ------------------------------------------------------------ */
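/*
 * These helpers pin the sc across queued work: a reference is taken on
 * behalf of the work item, and dropped again right away if queue_work()
 * reports the item was already pending (the earlier queuer's reference
 * is then released by the work function's final sc_put()).
 */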
static void o2net_sc_queue_work(struct o2net_sock_container *sc,
struct work_struct *work)
{
sc_get(sc);
if (!queue_work(o2net_wq, work))
sc_put(sc);
}
static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
struct delayed_work *work,
int delay)
{
sc_get(sc);
if (!queue_delayed_work(o2net_wq, work, delay))
sc_put(sc);
}
static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
struct delayed_work *work)
{
if (cancel_delayed_work(work))
sc_put(sc);
}
static atomic_t o2net_connected_peers = ATOMIC_INIT(0);
int o2net_num_connected_peers(void)
{
return atomic_read(&o2net_connected_peers);
}
static void o2net_set_nn_state(struct o2net_node *nn,
struct o2net_sock_container *sc,
unsigned valid, int err)
{
int was_valid = nn->nn_sc_valid;
int was_err = nn->nn_persistent_error;
struct o2net_sock_container *old_sc = nn->nn_sc;
assert_spin_locked(&nn->nn_lock);
if (old_sc && !sc)
atomic_dec(&o2net_connected_peers);
else if (!old_sc && sc)
atomic_inc(&o2net_connected_peers);
/* the node num comparison and single connect/accept path should stop
* a non-null sc from being overwritten with another */
BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
if (was_valid && !valid && err == 0)
err = -ENOTCONN;
mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
nn->nn_persistent_error, err);
nn->nn_sc = sc;
nn->nn_sc_valid = valid ? 1 : 0;
nn->nn_persistent_error = err;
/* mirrors o2net_tx_can_proceed() */
if (nn->nn_persistent_error || nn->nn_sc_valid)
wake_up(&nn->nn_sc_wq);
if (was_valid && !was_err && nn->nn_persistent_error) {
o2quo_conn_err(o2net_num_from_nn(nn));
queue_delayed_work(o2net_wq, &nn->nn_still_up,
msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
}
if (was_valid && !valid) {
if (old_sc)
printk(KERN_NOTICE "o2net: No longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
if (!was_valid && valid) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_connect_expired);
printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
"Connected to" : "Accepted connection from",
SC_NODEF_ARGS(sc));
}
/* trigger the connecting worker func as long as we're not valid,
* it will back off if it shouldn't connect. This can be called
* from node config teardown and so needs to be careful about
* the work queue actually being up. */
if (!valid && o2net_wq) {
unsigned long delay;
/* delay if we're within a RECONNECT_DELAY of the
* last attempt */
delay = (nn->nn_last_connect_attempt +
msecs_to_jiffies(o2net_reconnect_delay()))
- jiffies;
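/* if the next attempt is already due, the unsigned
 * subtraction above wraps to a huge value, which the
 * comparison below clamps to an immediate attempt */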
if (delay > msecs_to_jiffies(o2net_reconnect_delay()))
delay = 0;
mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
/*
* Delay the expired work after idle timeout.
*
* We might have lots of failed connection attempts that run
* through here but we only cancel the connect_expired work when
* a connection attempt succeeds. So only the first enqueue of
* the connect_expired work will do anything. The rest will see
* that it's already queued and do nothing.
*/
delay += msecs_to_jiffies(o2net_idle_timeout());
queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay);
}
/* keep track of the nn's sc ref for the caller */
if ((old_sc == NULL) && sc)
sc_get(sc);
if (old_sc && (old_sc != sc)) {
o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
sc_put(old_sc);
}
}
/* see o2net_register_callbacks() */
static void o2net_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
struct o2net_sock_container *sc;
trace_sk_data_ready(sk);
read_lock_bh(&sk->sk_callback_lock);
sc = sk->sk_user_data;
if (sc) {
sclog(sc, "data_ready hit\n");
o2net_set_data_ready_time(sc);
o2net_sc_queue_work(sc, &sc->sc_rx_work);
ready = sc->sc_data_ready;
} else {
ready = sk->sk_data_ready;
}
read_unlock_bh(&sk->sk_callback_lock);
ready(sk);
}
/* see o2net_register_callbacks() */
static void o2net_state_change(struct sock *sk)
{
void (*state_change)(struct sock *sk);
struct o2net_sock_container *sc;
read_lock_bh(&sk->sk_callback_lock);
sc = sk->sk_user_data;
if (sc == NULL) {
state_change = sk->sk_state_change;
goto out;
}
sclog(sc, "state_change to %d\n", sk->sk_state);
state_change = sc->sc_state_change;
switch(sk->sk_state) {
/* ignore connecting sockets as they make progress */
case TCP_SYN_SENT:
case TCP_SYN_RECV:
break;
case TCP_ESTABLISHED:
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
" shutdown, state %d\n",
SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
break;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
state_change(sk);
}
/*
* we register callbacks so we can queue work on events before calling
* the original callbacks. our callbacks are careful to test user_data
* to discover when they've raced with o2net_unregister_callbacks().
*/
static void o2net_register_callbacks(struct sock *sk,
struct o2net_sock_container *sc)
{
write_lock_bh(&sk->sk_callback_lock);
/* accepted sockets inherit the old listen socket data ready */
if (sk->sk_data_ready == o2net_listen_data_ready) {
sk->sk_data_ready = sk->sk_user_data;
sk->sk_user_data = NULL;
}
BUG_ON(sk->sk_user_data != NULL);
sk->sk_user_data = sc;
sc_get(sc);
sc->sc_data_ready = sk->sk_data_ready;
sc->sc_state_change = sk->sk_state_change;
sk->sk_data_ready = o2net_data_ready;
sk->sk_state_change = o2net_state_change;
mutex_init(&sc->sc_send_lock);
write_unlock_bh(&sk->sk_callback_lock);
}
static int o2net_unregister_callbacks(struct sock *sk,
struct o2net_sock_container *sc)
{
int ret = 0;
write_lock_bh(&sk->sk_callback_lock);
if (sk->sk_user_data == sc) {
ret = 1;
sk->sk_user_data = NULL;
sk->sk_data_ready = sc->sc_data_ready;
sk->sk_state_change = sc->sc_state_change;
}
write_unlock_bh(&sk->sk_callback_lock);
return ret;
}
/*
* this is a little helper that is called by callers who have seen a problem
* with an sc and want to detach it from the nn if someone hasn't already beaten
* them to it. if an error is given then the shutdown will be persistent
* and pending transmits will be canceled.
*/
static void o2net_ensure_shutdown(struct o2net_node *nn,
struct o2net_sock_container *sc,
int err)
{
spin_lock(&nn->nn_lock);
if (nn->nn_sc == sc)
o2net_set_nn_state(nn, NULL, 0, err);
spin_unlock(&nn->nn_lock);
}
/*
* This work queue function performs the blocking parts of socket shutdown. A
* few paths lead here. set_nn_state will trigger this callback if it sees an
* sc detached from the nn. state_change will also trigger this callback
* directly when it sees errors. In that case we need to call set_nn_state
* ourselves as state_change couldn't get the nn_lock and call set_nn_state
* itself.
*/
static void o2net_shutdown_sc(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_shutdown_work);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "shutting down\n");
/* drop the callbacks ref and call shutdown only once */
if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
/* we shouldn't flush as we're in the thread, the
* races with pending sc work structs are harmless */
del_timer_sync(&sc->sc_idle_timeout);
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
sc_put(sc);
kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
}
/* not fatal so failed connects before the other guy has our
* heartbeat can be retried */
o2net_ensure_shutdown(nn, sc, 0);
sc_put(sc);
}
/* ------------------------------------------------------------ */
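/*
 * Handlers live in an rbtree ordered by (key, msg_type). The fields are
 * compared as raw bytes with memcmp(), which is not numeric order but is
 * a total order, which is all the rbtree needs.
 */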
static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type,
u32 key)
{
int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
if (ret == 0)
ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
return ret;
}
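/*
 * Find the handler registered for (msg_type, key). Returns the handler, or
 * NULL if none is registered. When ret_p/ret_parent are non-NULL they are
 * filled with the link point so a caller holding o2net_handler_lock can
 * pass them straight to rb_link_node() to insert a new handler.
 */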
static struct o2net_msg_handler *
o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
struct rb_node **ret_parent)
{
struct rb_node **p = &o2net_handler_tree.rb_node;
struct rb_node *parent = NULL;
struct o2net_msg_handler *nmh, *ret = NULL;
int cmp;
while (*p) {
parent = *p;
nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
cmp = o2net_handler_cmp(nmh, msg_type, key);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
p = &(*p)->rb_right;
else {
ret = nmh;
break;
}
}
if (ret_p != NULL)
*ret_p = p;
if (ret_parent != NULL)
*ret_parent = parent;
return ret;
}
static void o2net_handler_kref_release(struct kref *kref)
{
struct o2net_msg_handler *nmh;
nmh = container_of(kref, struct o2net_msg_handler, nh_kref);
kfree(nmh);
}
static void o2net_handler_put(struct o2net_msg_handler *nmh)
{
kref_put(&nmh->nh_kref, o2net_handler_kref_release);
}
/* max_len is protection for the handler func. incoming messages won't
* be given to the handler if their payload is longer than the max. */
int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
o2net_msg_handler_func *func, void *data,
o2net_post_msg_handler_func *post_func,
struct list_head *unreg_list)
{
struct o2net_msg_handler *nmh = NULL;
struct rb_node **p, *parent;
int ret = 0;
if (max_len > O2NET_MAX_PAYLOAD_BYTES) {
mlog(0, "max_len for message handler out of range: %u\n",
max_len);
ret = -EINVAL;
goto out;
}
if (!msg_type) {
mlog(0, "no message type provided: %u, %p\n", msg_type, func);
ret = -EINVAL;
goto out;
}
if (!func) {
mlog(0, "no message handler provided: %u, %p\n",
msg_type, func);
ret = -EINVAL;
goto out;
}
nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
if (nmh == NULL) {
ret = -ENOMEM;
goto out;
}
nmh->nh_func = func;
nmh->nh_func_data = data;
nmh->nh_post_func = post_func;
nmh->nh_msg_type = msg_type;
nmh->nh_max_len = max_len;
nmh->nh_key = key;
/* the tree and list get this ref.. they're both removed in
* unregister when this ref is dropped */
kref_init(&nmh->nh_kref);
INIT_LIST_HEAD(&nmh->nh_unregister_item);
write_lock(&o2net_handler_lock);
if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
ret = -EEXIST;
else {
rb_link_node(&nmh->nh_node, parent, p);
rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
list_add_tail(&nmh->nh_unregister_item, unreg_list);
mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
func, msg_type, key);
/* we've had some trouble with handlers seemingly vanishing. */
mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
&parent) == NULL,
"couldn't find handler we *just* registered "
"for type %u key %08x\n", msg_type, key);
}
write_unlock(&o2net_handler_lock);
out:
if (ret)
kfree(nmh);
return ret;
}
EXPORT_SYMBOL_GPL(o2net_register_handler);
void o2net_unregister_handler_list(struct list_head *list)
{
struct o2net_msg_handler *nmh, *n;
write_lock(&o2net_handler_lock);
list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
rb_erase(&nmh->nh_node, &o2net_handler_tree);
list_del_init(&nmh->nh_unregister_item);
kref_put(&nmh->nh_kref, o2net_handler_kref_release);
}
write_unlock(&o2net_handler_lock);
}
EXPORT_SYMBOL_GPL(o2net_unregister_handler_list);
static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
{
struct o2net_msg_handler *nmh;
read_lock(&o2net_handler_lock);
nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL);
if (nmh)
kref_get(&nmh->nh_kref);
read_unlock(&o2net_handler_lock);
return nmh;
}
/* ------------------------------------------------------------ */
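/*
 * Non-blocking receive of up to len bytes into a kernel buffer. Returns
 * the number of bytes read, 0 on EOF, or -errno (-EAGAIN when nothing is
 * queued on the socket).
 */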
static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
{
struct kvec vec = { .iov_len = len, .iov_base = data, };
struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, len);
return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}
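/*
 * Send a kvec array over the socket. Anything short of a full send of
 * 'total' bytes is treated as failure; a short positive return is mapped
 * to -EPIPE so callers only ever see 0 or -errno.
 */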
static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
size_t veclen, size_t total)
{
int ret;
struct msghdr msg = {.msg_flags = 0,};
if (sock == NULL) {
ret = -EINVAL;
goto out;
}
ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
if (likely(ret == total))
return 0;
mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
if (ret >= 0)
ret = -EPIPE; /* should be smarter, I bet */
out:
mlog(0, "returning error: %d\n", ret);
return ret;
}
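/*
 * Synchronously push 'size' bytes at 'virt' (the handshake and keepalive
 * messages are sent this way) using MSG_SPLICE_PAGES, retrying on -EAGAIN
 * and triggering a retryable shutdown of the connection on any other error.
 */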
static void o2net_sendpage(struct o2net_sock_container *sc,
void *virt, size_t size)
{
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
struct msghdr msg = {};
struct bio_vec bv;
ssize_t ret;
bvec_set_virt(&bv, virt, size);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, size);
while (1) {
msg.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES;
mutex_lock(&sc->sc_send_lock);
ret = sock_sendmsg(sc->sc_sock, &msg);
mutex_unlock(&sc->sc_send_lock);
if (ret == size)
break;
if (ret == (ssize_t)-EAGAIN) {
mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
" returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
cond_resched();
continue;
}
mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
break;
}
}
static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
{
memset(msg, 0, sizeof(struct o2net_msg));
msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
msg->data_len = cpu_to_be16(data_len);
msg->msg_type = cpu_to_be16(msg_type);
msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
msg->status = 0;
msg->key = cpu_to_be32(key);
}
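/*
 * Used as the wait_event() condition in o2net_send_message_vec(). Returns
 * nonzero once a sender can stop waiting: either the connection is valid,
 * in which case *sc_ret holds a new ref on the sc and *error is 0, or a
 * persistent error has been recorded and is returned in *error.
 */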
static int o2net_tx_can_proceed(struct o2net_node *nn,
struct o2net_sock_container **sc_ret,
int *error)
{
int ret = 0;
spin_lock(&nn->nn_lock);
if (nn->nn_persistent_error) {
ret = 1;
*sc_ret = NULL;
*error = nn->nn_persistent_error;
} else if (nn->nn_sc_valid) {
kref_get(&nn->nn_sc->sc_kref);
ret = 1;
*sc_ret = nn->nn_sc;
*error = 0;
}
spin_unlock(&nn->nn_lock);
return ret;
}
/* Get a map of all nodes to which this node is currently connected */
void o2net_fill_node_map(unsigned long *map, unsigned int bits)
{
struct o2net_sock_container *sc;
int node, ret;
bitmap_zero(map, bits);
for (node = 0; node < O2NM_MAX_NODES; ++node) {
if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
continue;
if (!ret) {
set_bit(node, map);
sc_put(sc);
}
}
}
EXPORT_SYMBOL_GPL(o2net_fill_node_map);
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
int ret = 0;
struct o2net_msg *msg = NULL;
size_t veclen, caller_bytes = 0;
struct kvec *vec = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn = o2net_nn_from_num(target_node);
struct o2net_status_wait nsw = {
.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
};
struct o2net_send_tracking nst;
o2net_init_nst(&nst, msg_type, key, current, target_node);
if (o2net_wq == NULL) {
mlog(0, "attempt to tx without o2netd running\n");
ret = -ESRCH;
goto out;
}
if (caller_veclen == 0) {
mlog(0, "bad kvec array length\n");
ret = -EINVAL;
goto out;
}
caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
mlog(0, "total payload len %zu too large\n", caller_bytes);
ret = -EINVAL;
goto out;
}
if (target_node == o2nm_this_node()) {
ret = -ELOOP;
goto out;
}
o2net_debug_add_nst(&nst);
o2net_set_nst_sock_time(&nst);
wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret));
if (ret)
goto out;
o2net_set_nst_sock_container(&nst, sc);
veclen = caller_veclen + 1;
vec = kmalloc_array(veclen, sizeof(struct kvec), GFP_ATOMIC);
if (vec == NULL) {
mlog(0, "failed to %zu element kvec!\n", veclen);
ret = -ENOMEM;
goto out;
}
msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
if (!msg) {
mlog(0, "failed to allocate a o2net_msg!\n");
ret = -ENOMEM;
goto out;
}
o2net_init_msg(msg, caller_bytes, msg_type, key);
vec[0].iov_len = sizeof(struct o2net_msg);
vec[0].iov_base = msg;
memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
ret = o2net_prep_nsw(nn, &nsw);
if (ret)
goto out;
msg->msg_num = cpu_to_be32(nsw.ns_id);
o2net_set_nst_msg_id(&nst, nsw.ns_id);
o2net_set_nst_send_time(&nst);
/* finally, convert the message header to network byte-order
* and send */
mutex_lock(&sc->sc_send_lock);
ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
sizeof(struct o2net_msg) + caller_bytes);
mutex_unlock(&sc->sc_send_lock);
msglog(msg, "sending returned %d\n", ret);
if (ret < 0) {
mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
goto out;
}
/* wait on other node's handler */
o2net_set_nst_status_time(&nst);
wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
o2net_update_send_stats(&nst, sc);
	/* Note that we avoid overwriting the caller's status return
* variable if a system error was reported on the other
* side. Callers beware. */
ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
if (status && !ret)
*status = nsw.ns_status;
mlog(0, "woken, returning system status %d, user status %d\n",
ret, nsw.ns_status);
out:
o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
if (sc)
sc_put(sc);
kfree(vec);
kfree(msg);
o2net_complete_nsw(nn, &nsw, 0, 0, 0);
return ret;
}
EXPORT_SYMBOL_GPL(o2net_send_message_vec);
int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
u8 target_node, int *status)
{
struct kvec vec = {
.iov_base = data,
.iov_len = len,
};
return o2net_send_message_vec(msg_type, key, &vec, 1,
target_node, status);
}
EXPORT_SYMBOL_GPL(o2net_send_message);
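/*
 * Usage sketch (illustrative only): a subsystem registers a handler for a
 * message type at init time and peers then target it with
 * o2net_send_message(). EXAMPLE_MSG_TYPE, EXAMPLE_KEY, struct
 * example_payload and the handler below are hypothetical names, not part
 * of o2net.
 *
 *	static int example_handler(struct o2net_msg *msg, u32 len,
 *				   void *data, void **ret_data)
 *	{
 *		// payload follows the header; the return value travels
 *		// back to the sender as its user status
 *		return 0;
 *	}
 *
 *	LIST_HEAD(example_unreg_list);
 *	ret = o2net_register_handler(EXAMPLE_MSG_TYPE, EXAMPLE_KEY,
 *				     sizeof(struct example_payload),
 *				     example_handler, NULL, NULL,
 *				     &example_unreg_list);
 *
 *	// on a peer node, with 'status' receiving the handler's return:
 *	ret = o2net_send_message(EXAMPLE_MSG_TYPE, EXAMPLE_KEY, &payload,
 *				 sizeof(payload), target_node, &status);
 *
 *	// teardown:
 *	o2net_unregister_handler_list(&example_unreg_list);
 */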
static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr,
enum o2net_system_error syserr, int err)
{
struct kvec vec = {
.iov_base = hdr,
.iov_len = sizeof(struct o2net_msg),
};
BUG_ON(syserr >= O2NET_ERR_MAX);
/* leave other fields intact from the incoming message, msg_num
* in particular */
hdr->sys_status = cpu_to_be32(syserr);
hdr->status = cpu_to_be32(err);
hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic
hdr->data_len = 0;
msglog(hdr, "about to send status magic %d\n", err);
/* hdr has been in host byteorder this whole time */
return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
}
/* this returns -errno if the header was unknown or too large, etc.
 * after this is called the buffer is reused for the next message */
static int o2net_process_message(struct o2net_sock_container *sc,
struct o2net_msg *hdr)
{
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
int ret = 0, handler_status;
enum o2net_system_error syserr;
struct o2net_msg_handler *nmh = NULL;
void *ret_data = NULL;
msglog(hdr, "processing message\n");
o2net_sc_postpone_idle(sc);
	switch (be16_to_cpu(hdr->magic)) {
case O2NET_MSG_STATUS_MAGIC:
/* special type for returning message status */
o2net_complete_nsw(nn, NULL,
be32_to_cpu(hdr->msg_num),
be32_to_cpu(hdr->sys_status),
be32_to_cpu(hdr->status));
goto out;
case O2NET_MSG_KEEP_REQ_MAGIC:
o2net_sendpage(sc, o2net_keep_resp,
sizeof(*o2net_keep_resp));
goto out;
case O2NET_MSG_KEEP_RESP_MAGIC:
goto out;
case O2NET_MSG_MAGIC:
break;
default:
msglog(hdr, "bad magic\n");
ret = -EINVAL;
goto out;
}
/* find a handler for it */
handler_status = 0;
nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
be32_to_cpu(hdr->key));
if (!nmh) {
mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
syserr = O2NET_ERR_NO_HNDLR;
goto out_respond;
}
syserr = O2NET_ERR_NONE;
if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
syserr = O2NET_ERR_OVERFLOW;
if (syserr != O2NET_ERR_NONE)
goto out_respond;
o2net_set_func_start_time(sc);
sc->sc_msg_key = be32_to_cpu(hdr->key);
sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
be16_to_cpu(hdr->data_len),
nmh->nh_func_data, &ret_data);
o2net_set_func_stop_time(sc);
o2net_update_recv_stats(sc);
out_respond:
/* this destroys the hdr, so don't use it after this */
mutex_lock(&sc->sc_send_lock);
ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
handler_status);
mutex_unlock(&sc->sc_send_lock);
hdr = NULL;
mlog(0, "sending handler status %d, syserr %d returned %d\n",
handler_status, syserr, ret);
if (nmh) {
BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
if (nmh->nh_post_func)
(nmh->nh_post_func)(handler_status, nmh->nh_func_data,
ret_data);
}
out:
if (nmh)
o2net_handler_put(nmh);
return ret;
}
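/*
 * Validate the handshake a peer sent at connect time. The protocol version
 * and all of the configured timeouts must match ours exactly; any mismatch
 * logs the difference and tears the connection down with a persistent
 * error. On success the handshake bytes are stripped from the receive page
 * so message parsing can continue behind them.
 */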
static int o2net_check_handshake(struct o2net_sock_container *sc)
{
struct o2net_handshake *hand = page_address(sc->sc_page);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
"protocol version %llu but %llu is required. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
(unsigned long long)be64_to_cpu(hand->protocol_version),
O2NET_PROTOCOL_VERSION);
		/* don't bother reconnecting if it's the wrong version. */
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
/*
* Ensure timeouts are consistent with other nodes, otherwise
	 * we can end up with one node thinking that the other is down
	 * when it isn't. This can ultimately cause corruption.
*/
if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
o2net_idle_timeout()) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
"idle timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2net_idle_timeout_ms),
o2net_idle_timeout());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
o2net_keepalive_delay()) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
"delay of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2net_keepalive_delay_ms),
o2net_keepalive_delay());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
O2HB_MAX_WRITE_TIMEOUT_MS) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
"timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
sc->sc_handshake_ok = 1;
spin_lock(&nn->nn_lock);
/* set valid and queue the idle timers only if it hasn't been
* shut down already */
if (nn->nn_sc == sc) {
o2net_sc_reset_idle_timer(sc);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, sc, 1, 0);
}
spin_unlock(&nn->nn_lock);
/* shift everything up as though it wasn't there */
sc->sc_page_off -= sizeof(struct o2net_handshake);
if (sc->sc_page_off)
memmove(hand, hand + 1, sc->sc_page_off);
return 0;
}
/* this demuxes the queued rx bytes into header or payload bits and calls
* handlers as each full message is read off the socket. it returns -error,
 * == 0 for eof, or > 0 for progress made. */
static int o2net_advance_rx(struct o2net_sock_container *sc)
{
struct o2net_msg *hdr;
int ret = 0;
void *data;
size_t datalen;
sclog(sc, "receiving\n");
o2net_set_advance_start_time(sc);
if (unlikely(sc->sc_handshake_ok == 0)) {
		if (sc->sc_page_off < sizeof(struct o2net_handshake)) {
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = sizeof(struct o2net_handshake) - sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0)
sc->sc_page_off += ret;
}
if (sc->sc_page_off == sizeof(struct o2net_handshake)) {
o2net_check_handshake(sc);
if (unlikely(sc->sc_handshake_ok == 0))
ret = -EPROTO;
}
goto out;
}
/* do we need more header? */
if (sc->sc_page_off < sizeof(struct o2net_msg)) {
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = sizeof(struct o2net_msg) - sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0) {
sc->sc_page_off += ret;
/* only swab incoming here.. we can
* only get here once as we cross from
* being under to over */
if (sc->sc_page_off == sizeof(struct o2net_msg)) {
hdr = page_address(sc->sc_page);
if (be16_to_cpu(hdr->data_len) >
O2NET_MAX_PAYLOAD_BYTES)
ret = -EOVERFLOW;
}
}
if (ret <= 0)
goto out;
}
if (sc->sc_page_off < sizeof(struct o2net_msg)) {
/* oof, still don't have a header */
goto out;
}
/* this was swabbed above when we first read it */
hdr = page_address(sc->sc_page);
msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
/* do we need more payload? */
if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) {
/* need more payload */
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) -
sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0)
sc->sc_page_off += ret;
if (ret <= 0)
goto out;
}
if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) {
/* we can only get here once, the first time we read
* the payload.. so set ret to progress if the handler
* works out. after calling this the message is toast */
ret = o2net_process_message(sc, hdr);
if (ret == 0)
ret = 1;
sc->sc_page_off = 0;
}
out:
sclog(sc, "ret = %d\n", ret);
o2net_set_advance_stop_time(sc);
return ret;
}
/* this work func is triggered by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
static void o2net_rx_until_empty(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container, sc_rx_work);
int ret;
do {
ret = o2net_advance_rx(sc);
} while (ret > 0);
if (ret <= 0 && ret != -EAGAIN) {
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "saw error %d, closing\n", ret);
/* not permanent so read failed handshake can retry */
o2net_ensure_shutdown(nn, sc, 0);
}
sc_put(sc);
}
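/*
 * Stamp the shared handshake message with our current timeout values so
 * the peer can verify in o2net_check_handshake() that both sides agree.
 */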
static void o2net_initialize_handshake(void)
{
o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout());
o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32(
o2net_keepalive_delay());
o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32(
o2net_reconnect_delay());
}
/* ------------------------------------------------------------ */
/* called when a connect completes and after a sock is accepted. the
* rx path will see the response and mark the sc valid */
static void o2net_sc_connect_completed(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_connect_work);
mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
(unsigned long long)O2NET_PROTOCOL_VERSION,
(unsigned long long)be64_to_cpu(o2net_hand->connector_id));
o2net_initialize_handshake();
o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
sc_put(sc);
}
/* this is called as a work_struct func. */
static void o2net_sc_send_keep_req(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_keepalive_work.work);
o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
sc_put(sc);
}
/* socket shutdown does a del_timer_sync against this as it tears down.
* we can't start this timer until we've got to the point in sc buildup
* where shutdown is going to be involved */
static void o2net_idle_timer(struct timer_list *t)
{
struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
ktime_to_ms(sc->sc_tv_timer);
#else
unsigned long msecs = o2net_idle_timeout();
#endif
printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
"idle for %lu.%lu secs.\n",
SC_NODEF_ARGS(sc), msecs / 1000, msecs % 1000);
	/* an idle timeout happened; don't shut down the connection, but
	 * start the fencing decision. Maybe the connection can recover before
* the decision is made.
*/
atomic_set(&nn->nn_timeout, 1);
o2quo_conn_err(o2net_num_from_nn(nn));
queue_delayed_work(o2net_wq, &nn->nn_still_up,
msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
o2net_sc_reset_idle_timer(sc);
}
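/* re-arm the keepalive work and push the idle timer out by a full timeout */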
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
{
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
msecs_to_jiffies(o2net_keepalive_delay()));
o2net_set_sock_timer(sc);
mod_timer(&sc->sc_idle_timeout,
jiffies + msecs_to_jiffies(o2net_idle_timeout()));
}
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
{
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
	/* clear the fence decision since the connection recovered from the timeout */
if (atomic_read(&nn->nn_timeout)) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_still_up);
atomic_set(&nn->nn_timeout, 0);
}
/* Only push out an existing timer */
if (timer_pending(&sc->sc_idle_timeout))
o2net_sc_reset_idle_timer(sc);
}
/* this work func is kicked whenever a path sets the nn state which doesn't
* have valid set. This includes seeing hb come up, losing a connection,
* having a connect attempt fail, etc. This centralizes the logic which decides
* if a connect attempt should be made or if we should give up and all future
* transmit attempts should fail */
static void o2net_start_connect(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_connect_work.work);
struct o2net_sock_container *sc = NULL;
struct o2nm_node *node = NULL, *mynode = NULL;
struct socket *sock = NULL;
struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
int ret = 0, stop;
unsigned int timeout;
unsigned int nofs_flag;
/*
* sock_create allocates the sock with GFP_KERNEL. We must
* prevent the filesystem from being reentered by memory reclaim.
*/
nofs_flag = memalloc_nofs_save();
/* if we're greater we initiate tx, otherwise we accept */
if (o2nm_this_node() <= o2net_num_from_nn(nn))
goto out;
/* watch for racing with tearing a node down */
node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
if (node == NULL)
goto out;
mynode = o2nm_get_node_by_num(o2nm_this_node());
if (mynode == NULL)
goto out;
spin_lock(&nn->nn_lock);
/*
* see if we already have one pending or have given up.
* For nn_timeout, it is set when we close the connection
	 * because of the idle timeout. So it means that we have
* at least connected to that node successfully once,
* now try to connect to it again.
*/
timeout = atomic_read(&nn->nn_timeout);
stop = (nn->nn_sc ||
(nn->nn_persistent_error &&
(nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
spin_unlock(&nn->nn_lock);
if (stop)
goto out;
nn->nn_last_connect_attempt = jiffies;
sc = sc_alloc(node);
if (sc == NULL) {
mlog(0, "couldn't allocate sc\n");
ret = -ENOMEM;
goto out;
}
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
mlog(0, "can't create socket: %d\n", ret);
goto out;
}
sc->sc_sock = sock; /* freed by sc_kref_release */
sock->sk->sk_allocation = GFP_ATOMIC;
sock->sk->sk_use_task_frag = false;
myaddr.sin_family = AF_INET;
myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
myaddr.sin_port = htons(0); /* any port */
ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
sizeof(myaddr));
if (ret) {
mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
ret, &mynode->nd_ipv4_address);
goto out;
}
tcp_sock_set_nodelay(sc->sc_sock->sk);
tcp_sock_set_user_timeout(sock->sk, O2NET_TCP_USER_TIMEOUT);
o2net_register_callbacks(sc->sc_sock->sk, sc);
spin_lock(&nn->nn_lock);
/* handshake completion will set nn->nn_sc_valid */
o2net_set_nn_state(nn, sc, 0, 0);
spin_unlock(&nn->nn_lock);
remoteaddr.sin_family = AF_INET;
remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
remoteaddr.sin_port = node->nd_ipv4_port;
ret = sc->sc_sock->ops->connect(sc->sc_sock,
(struct sockaddr *)&remoteaddr,
sizeof(remoteaddr),
O_NONBLOCK);
if (ret == -EINPROGRESS)
ret = 0;
out:
if (ret && sc) {
printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
" failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
o2net_ensure_shutdown(nn, sc, 0);
}
if (sc)
sc_put(sc);
if (node)
o2nm_node_put(node);
if (mynode)
o2nm_node_put(mynode);
memalloc_nofs_restore(nofs_flag);
return;
}
static void o2net_connect_expired(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_connect_expired.work);
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
printk(KERN_NOTICE "o2net: No connection established with "
"node %u after %u.%u seconds, check network and"
" cluster configuration.\n",
o2net_num_from_nn(nn),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
o2net_set_nn_state(nn, NULL, 0, 0);
}
spin_unlock(&nn->nn_lock);
}
static void o2net_still_up(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_still_up.work);
o2quo_hb_still_up(o2net_num_from_nn(nn));
}
/* ------------------------------------------------------------ */
void o2net_disconnect_node(struct o2nm_node *node)
{
struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
/* don't reconnect until it's heartbeating again */
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
spin_unlock(&nn->nn_lock);
if (o2net_wq) {
cancel_delayed_work(&nn->nn_connect_expired);
cancel_delayed_work(&nn->nn_connect_work);
cancel_delayed_work(&nn->nn_still_up);
flush_workqueue(o2net_wq);
}
}
static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
void *data)
{
o2quo_hb_down(node_num);
if (!node)
return;
if (node_num != o2nm_this_node())
o2net_disconnect_node(node);
BUG_ON(atomic_read(&o2net_connected_peers) < 0);
}
static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
void *data)
{
struct o2net_node *nn = o2net_nn_from_num(node_num);
o2quo_hb_up(node_num);
BUG_ON(!node);
/* ensure an immediate connect attempt */
nn->nn_last_connect_attempt = jiffies -
(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
if (node_num != o2nm_this_node()) {
		/* believe it or not, accept and node heartbeat testing
		 * can succeed for this node before we get here.. so
* only use set_nn_state to clear the persistent error
* if that hasn't already happened */
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
if (nn->nn_persistent_error)
o2net_set_nn_state(nn, NULL, 0, 0);
spin_unlock(&nn->nn_lock);
}
}
void o2net_unregister_hb_callbacks(void)
{
o2hb_unregister_callback(NULL, &o2net_hb_up);
o2hb_unregister_callback(NULL, &o2net_hb_down);
}
int o2net_register_hb_callbacks(void)
{
int ret;
o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB,
o2net_hb_node_down_cb, NULL, O2NET_HB_PRI);
o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB,
o2net_hb_node_up_cb, NULL, O2NET_HB_PRI);
ret = o2hb_register_callback(NULL, &o2net_hb_up);
if (ret == 0)
ret = o2hb_register_callback(NULL, &o2net_hb_down);
if (ret)
o2net_unregister_hb_callbacks();
return ret;
}
/* ------------------------------------------------------------ */
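/*
 * Accept a single pending connection on the listen socket. The peer must
 * resolve to a configured node, must have a higher node number than us
 * (the higher-numbered node always initiates), must be heartbeating, and
 * must not already have a connection; otherwise the new socket is dropped.
 * *more tells the caller whether another accept may be pending.
 */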
static int o2net_accept_one(struct socket *sock, int *more)
{
int ret;
struct sockaddr_in sin;
struct socket *new_sock = NULL;
struct o2nm_node *node = NULL;
struct o2nm_node *local_node = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn;
unsigned int nofs_flag;
/*
* sock_create_lite allocates the sock with GFP_KERNEL. We must
* prevent the filesystem from being reentered by memory reclaim.
*/
nofs_flag = memalloc_nofs_save();
BUG_ON(sock == NULL);
*more = 0;
ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sock->sk->sk_protocol, &new_sock);
if (ret)
goto out;
new_sock->type = sock->type;
new_sock->ops = sock->ops;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
if (ret < 0)
goto out;
*more = 1;
new_sock->sk->sk_allocation = GFP_ATOMIC;
tcp_sock_set_nodelay(new_sock->sk);
tcp_sock_set_user_timeout(new_sock->sk, O2NET_TCP_USER_TIMEOUT);
ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, 1);
if (ret < 0)
goto out;
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
"node at %pI4:%d\n", &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
if (local_node)
printk(KERN_NOTICE "o2net: Unexpected connect attempt "
"seen at node '%s' (%u, %pI4:%d) from "
"node '%s' (%u, %pI4:%d)\n",
local_node->nd_name, local_node->nd_num,
&(local_node->nd_ipv4_address),
ntohs(local_node->nd_ipv4_port),
node->nd_name,
node->nd_num, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
/* this happens all the time when the other node sees our heartbeat
* and tries to connect before we see their heartbeat */
if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
mlog(ML_CONN, "attempt to connect from node '%s' at "
"%pI4:%d but it isn't heartbeating\n",
node->nd_name, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
nn = o2net_nn_from_num(node->nd_num);
spin_lock(&nn->nn_lock);
if (nn->nn_sc)
ret = -EBUSY;
else
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
"at %pI4:%d but it already has an open connection\n",
node->nd_name, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
goto out;
}
sc = sc_alloc(node);
if (sc == NULL) {
ret = -ENOMEM;
goto out;
}
sc->sc_sock = new_sock;
new_sock = NULL;
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, sc, 0, 0);
spin_unlock(&nn->nn_lock);
o2net_register_callbacks(sc->sc_sock->sk, sc);
o2net_sc_queue_work(sc, &sc->sc_rx_work);
o2net_initialize_handshake();
o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
out:
if (new_sock)
sock_release(new_sock);
if (node)
o2nm_node_put(node);
if (local_node)
o2nm_node_put(local_node);
if (sc)
sc_put(sc);
memalloc_nofs_restore(nofs_flag);
return ret;
}
/*
* This function is invoked in response to one or more
* pending accepts at softIRQ level. We must drain the
 * entire queue before returning.
*/
static void o2net_accept_many(struct work_struct *work)
{
struct socket *sock = o2net_listen_sock;
int more;
/*
* It is critical to note that due to interrupt moderation
* at the network driver level, we can't assume to get a
* softIRQ for every single conn since tcp SYN packets
* can arrive back-to-back, and therefore many pending
* accepts may result in just 1 softIRQ. If we terminate
* the o2net_accept_one() loop upon seeing an err, what happens
* to the rest of the conns in the queue? If no new SYN
* arrives for hours, no softIRQ will be delivered,
* and the connections will just sit in the queue.
*/
for (;;) {
o2net_accept_one(sock, &more);
if (!more)
break;
cond_resched();
}
}
static void o2net_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
trace_sk_data_ready(sk);
read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (ready == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
	/* This callback may be called twice when a new connection
* is being established as a child socket inherits everything
* from a parent LISTEN socket, including the data_ready cb of
* the parent. This leads to a hazard. In o2net_accept_one()
* we are still initializing the child socket but have not
* changed the inherited data_ready callback yet when
* data starts arriving.
* We avoid this hazard by checking the state.
* For the listening socket, the state will be TCP_LISTEN; for the new
	 * socket, it will be TCP_ESTABLISHED. Also, in this case,
* sk->sk_user_data is not a valid function pointer.
*/
if (sk->sk_state == TCP_LISTEN) {
queue_work(o2net_wq, &o2net_listen_work);
} else {
ready = NULL;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
if (ready != NULL)
ready(sk);
}
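/*
 * Create and bind the cluster listen socket. The original data_ready
 * callback is stashed in sk_user_data before being replaced with
 * o2net_listen_data_ready(); accepted children inherit that replacement,
 * which is how o2net_register_callbacks() recognizes and restores it.
 */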
static int o2net_open_listening_sock(__be32 addr, __be16 port)
{
struct socket *sock = NULL;
int ret;
struct sockaddr_in sin = {
.sin_family = PF_INET,
.sin_addr = { .s_addr = addr },
.sin_port = port,
};
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
goto out;
}
sock->sk->sk_allocation = GFP_ATOMIC;
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = sock->sk->sk_data_ready;
sock->sk->sk_data_ready = o2net_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
o2net_listen_sock = sock;
INIT_WORK(&o2net_listen_work, o2net_accept_many);
sock->sk->sk_reuse = SK_CAN_REUSE;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
printk(KERN_ERR "o2net: Error %d while binding socket at "
"%pI4:%u\n", ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
if (ret < 0)
printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
ret, &addr, ntohs(port));
out:
if (ret) {
o2net_listen_sock = NULL;
if (sock)
sock_release(sock);
}
return ret;
}
/*
* called from node manager when we should bring up our network listening
* socket. node manager handles all the serialization to only call this
* once and to match it with o2net_stop_listening(). note,
* o2nm_this_node() doesn't work yet as we're being called while it
* is being set up.
*/
int o2net_start_listening(struct o2nm_node *node)
{
int ret = 0;
BUG_ON(o2net_wq != NULL);
BUG_ON(o2net_listen_sock != NULL);
mlog(ML_KTHREAD, "starting o2net thread...\n");
o2net_wq = alloc_ordered_workqueue("o2net", WQ_MEM_RECLAIM);
if (o2net_wq == NULL) {
mlog(ML_ERROR, "unable to launch o2net thread\n");
return -ENOMEM; /* ? */
}
ret = o2net_open_listening_sock(node->nd_ipv4_address,
node->nd_ipv4_port);
if (ret) {
destroy_workqueue(o2net_wq);
o2net_wq = NULL;
} else
o2quo_conn_up(node->nd_num);
return ret;
}
/* again, o2nm_this_node() doesn't work here as we're involved in
* tearing it down */
void o2net_stop_listening(struct o2nm_node *node)
{
struct socket *sock = o2net_listen_sock;
size_t i;
BUG_ON(o2net_wq == NULL);
BUG_ON(o2net_listen_sock == NULL);
/* stop the listening socket from generating work */
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_data_ready = sock->sk->sk_user_data;
sock->sk->sk_user_data = NULL;
write_unlock_bh(&sock->sk->sk_callback_lock);
for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
struct o2nm_node *node = o2nm_get_node_by_num(i);
if (node) {
o2net_disconnect_node(node);
o2nm_node_put(node);
}
}
/* finish all work and tear down the work queue */
mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
destroy_workqueue(o2net_wq);
o2net_wq = NULL;
sock_release(o2net_listen_sock);
o2net_listen_sock = NULL;
o2quo_conn_err(node->nd_num);
}
/* ------------------------------------------------------------ */
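/*
 * Module init: the handshake, keepalive request and keepalive response
 * messages are carved out of a single zeroed folio (freed again in
 * o2net_exit()), and every per-node o2net_node is given its locks, work
 * items and an initial -ENOTCONN persistent error until heartbeat is seen.
 */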
int o2net_init(void)
{
struct folio *folio;
void *p;
unsigned long i;
o2quo_init();
o2net_debugfs_init();
folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
if (!folio)
goto out;
p = folio_address(folio);
o2net_hand = p;
p += sizeof(struct o2net_handshake);
o2net_keep_req = p;
p += sizeof(struct o2net_msg);
o2net_keep_resp = p;
o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
o2net_hand->connector_id = cpu_to_be64(1);
o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
struct o2net_node *nn = o2net_nn_from_num(i);
atomic_set(&nn->nn_timeout, 0);
spin_lock_init(&nn->nn_lock);
INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
INIT_DELAYED_WORK(&nn->nn_connect_expired,
o2net_connect_expired);
INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
/* until we see hb from a node we'll return einval */
nn->nn_persistent_error = -ENOTCONN;
init_waitqueue_head(&nn->nn_sc_wq);
idr_init(&nn->nn_status_idr);
INIT_LIST_HEAD(&nn->nn_status_list);
}
return 0;
out:
o2net_debugfs_exit();
o2quo_exit();
return -ENOMEM;
}
void o2net_exit(void)
{
o2quo_exit();
o2net_debugfs_exit();
folio_put(virt_to_folio(o2net_hand));
}
| linux-master | fs/ocfs2/cluster/tcp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2005 Oracle. All rights reserved.
*/
/* This quorum hack is only here until we transition to some more rational
* approach that is driven from userspace. Honest. No foolin'.
*
* Imagine two nodes lose network connectivity to each other but they're still
* up and operating in every other way. Presumably a network timeout indicates
* that a node is broken and should be recovered. They can't both recover each
* other and both carry on without serialising their access to the file system.
* They need to decide who is authoritative. Now extend that problem to
* arbitrary groups of nodes losing connectivity between each other.
*
* So we declare that a node which has given up on connecting to a majority
* of nodes who are still heartbeating will fence itself.
*
* There are huge opportunities for races here. After we give up on a node's
* connection we need to wait long enough to give heartbeat an opportunity
* to declare the node as truly dead. We also need to be careful with the
* race between when we see a node start heartbeating and when we connect
* to it.
*
 * So nodes that are in this transition put a hold on the quorum decision
* with a counter. As they fall out of this transition they drop the count
* and if they're the last, they fire off the decision.
*/
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
#include "quorum.h"
static struct o2quo_state {
spinlock_t qs_lock;
struct work_struct qs_work;
int qs_pending;
int qs_heartbeating;
unsigned long qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
int qs_connected;
unsigned long qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
int qs_holds;
unsigned long qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
} o2quo_state;
/* this is horribly heavy-handed. It should instead flip the file
* system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
/* panic spins with interrupts enabled. with preempt
* threads can still schedule, etc, etc */
o2hb_stop_all_regions();
switch (o2nm_single_cluster->cl_fence_method) {
case O2NM_FENCE_PANIC:
panic("*** ocfs2 is very sorry to be fencing this system by "
"panicing ***\n");
break;
default:
WARN_ON(o2nm_single_cluster->cl_fence_method >=
O2NM_FENCE_METHODS);
fallthrough;
case O2NM_FENCE_RESET:
printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this "
"system by restarting ***\n");
emergency_restart();
break;
}
}
/* Indicate that a timeout occurred on a heartbeat region write. The
* other nodes in the cluster may consider us dead at that time so we
* want to "fence" ourselves so that we don't scribble on the disk
* after they think they've recovered us. This can't solve all
* problems related to writeout after recovery but this hack can at
* least close some of those gaps. When we have real fencing, this can
* go away as our node would be fenced externally before other nodes
* begin recovery. */
void o2quo_disk_timeout(void)
{
o2quo_fence_self();
}
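/*
 * The decision rule: with an odd number N of heartbeating nodes we must be
 * connected to a strict majority, (N + 1) / 2. With an even N we need N / 2
 * connections, and in the exact-half split the tie goes to the half that
 * contains the lowest-numbered live node. For example, a four node cluster
 * split 2/2 has only the pair that can reach the lowest heartbeating node
 * survive; the other pair fences.
 */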
static void o2quo_make_decision(struct work_struct *work)
{
int quorum;
int lowest_hb, lowest_reachable = 0, fence = 0;
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
if (lowest_hb != O2NM_MAX_NODES)
lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
mlog(0, "heartbeating: %d, connected: %d, "
"lowest: %d (%sreachable)\n", qs->qs_heartbeating,
qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
qs->qs_heartbeating == 1)
goto out;
if (qs->qs_heartbeating & 1) {
		/* the odd numbered cluster case is straightforward --
* if we can't talk to the majority we're hosed */
quorum = (qs->qs_heartbeating + 1)/2;
if (qs->qs_connected < quorum) {
mlog(ML_ERROR, "fencing this node because it is "
"only connected to %u nodes and %u is needed "
"to make a quorum out of %u heartbeating nodes\n",
qs->qs_connected, quorum,
qs->qs_heartbeating);
fence = 1;
}
} else {
/* the even numbered cluster adds the possibility of each half
* of the cluster being able to talk amongst themselves.. in
* that case we're hosed if we can't talk to the group that has
* the lowest numbered node */
quorum = qs->qs_heartbeating / 2;
if (qs->qs_connected < quorum) {
mlog(ML_ERROR, "fencing this node because it is "
"only connected to %u nodes and %u is needed "
"to make a quorum out of %u heartbeating nodes\n",
qs->qs_connected, quorum,
qs->qs_heartbeating);
fence = 1;
}
else if ((qs->qs_connected == quorum) &&
!lowest_reachable) {
mlog(ML_ERROR, "fencing this node because it is "
"connected to a half-quorum of %u out of %u "
"nodes which doesn't include the lowest active "
"node %u\n", quorum, qs->qs_heartbeating,
lowest_hb);
fence = 1;
}
}
out:
if (fence) {
spin_unlock_bh(&qs->qs_lock);
o2quo_fence_self();
} else {
mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, "
"connected: %d, lowest: %d (%sreachable)\n",
qs->qs_heartbeating, qs->qs_connected, lowest_hb,
lowest_reachable ? "" : "un");
spin_unlock_bh(&qs->qs_lock);
}
}
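/*
 * Hold accounting: each node can contribute at most one hold (tracked in
 * qs_hold_bm). While any holds are outstanding the quorum decision is
 * deferred; when the count drains to zero a pending decision is finally
 * scheduled on the workqueue.
 */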
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
assert_spin_locked(&qs->qs_lock);
if (!test_and_set_bit(node, qs->qs_hold_bm)) {
qs->qs_holds++;
mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
"node %u\n", node);
mlog(0, "node %u, %d total\n", node, qs->qs_holds);
}
}
static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
assert_spin_locked(&qs->qs_lock);
if (test_and_clear_bit(node, qs->qs_hold_bm)) {
mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
if (--qs->qs_holds == 0) {
if (qs->qs_pending) {
qs->qs_pending = 0;
schedule_work(&qs->qs_work);
}
}
mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
node, qs->qs_holds);
}
}
/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection. the hold will be dropped in conn_up or hb_down. it might be
 * perpetuated by conn_err until hb_down. if we already have a conn, we might
* be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_heartbeating++;
mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
"node %u\n", node);
mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
set_bit(node, qs->qs_hb_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
if (!test_bit(node, qs->qs_conn_bm))
o2quo_set_hold(qs, node);
else
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* hb going down releases any holds we might have had due to this node from
* conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_heartbeating--;
mlog_bug_on_msg(qs->qs_heartbeating < 0,
"node %u, %d heartbeating\n",
node, qs->qs_heartbeating);
mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
clear_bit(node, qs->qs_hb_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn. it must only be called after conn_err
* and indicates that we must now make a quorum decision in the future,
* though we might be doing so after waiting for holds to drain. Here
* we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
mlog(0, "node %u\n", node);
qs->qs_pending = 1;
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* This is analogous to hb_up. as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating. the hold will be dropped in
 * hb_up or hb_down. it might be perpetuated by conn_err until hb_down. if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 */
void o2quo_conn_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_connected++;
mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
"node %u\n", node);
mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
set_bit(node, qs->qs_conn_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
if (!test_bit(node, qs->qs_hb_bm))
o2quo_set_hold(qs, node);
else
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* we've decided that we won't ever be connecting to the node again. if it's
* still heartbeating we grab a hold that will delay decisions until either the
* node stops heartbeating from hb_down or the caller decides that the node is
* still up and calls still_up */
void o2quo_conn_err(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
if (test_bit(node, qs->qs_conn_bm)) {
qs->qs_connected--;
mlog_bug_on_msg(qs->qs_connected < 0,
"node %u, connected %d\n",
node, qs->qs_connected);
clear_bit(node, qs->qs_conn_bm);
if (test_bit(node, qs->qs_hb_bm))
o2quo_set_hold(qs, node);
}
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
spin_unlock_bh(&qs->qs_lock);
}
void o2quo_init(void)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_init(&qs->qs_lock);
INIT_WORK(&qs->qs_work, o2quo_make_decision);
}
void o2quo_exit(void)
{
struct o2quo_state *qs = &o2quo_state;
flush_work(&qs->qs_work);
}
| linux-master | fs/ocfs2/cluster/quorum.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* netdebug.c
*
* debug functionality for o2net
*
* Copyright (C) 2005, 2008 Oracle. All rights reserved.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include "tcp.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_TCP
#include "masklog.h"
#include "tcp_internal.h"
#define O2NET_DEBUG_DIR "o2net"
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
#define STATS_DEBUG_NAME "stats"
#define NODES_DEBUG_NAME "connected_nodes"
#define SHOW_SOCK_CONTAINERS 0
#define SHOW_SOCK_STATS 1
static struct dentry *o2net_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
static LIST_HEAD(sock_containers);
static LIST_HEAD(send_tracking);
void o2net_debug_add_nst(struct o2net_send_tracking *nst)
{
spin_lock_bh(&o2net_debug_lock);
list_add(&nst->st_net_debug_item, &send_tracking);
spin_unlock_bh(&o2net_debug_lock);
}
void o2net_debug_del_nst(struct o2net_send_tracking *nst)
{
spin_lock_bh(&o2net_debug_lock);
if (!list_empty(&nst->st_net_debug_item))
list_del_init(&nst->st_net_debug_item);
spin_unlock_bh(&o2net_debug_lock);
}
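/*
 * The seq_file iterators use a dummy list element as a cursor: the dummy
 * is threaded into the global list and advanced in ->next, and real
 * entries are told apart from cursors by having a non-NULL st_task.
 */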
static struct o2net_send_tracking
*next_nst(struct o2net_send_tracking *nst_start)
{
struct o2net_send_tracking *nst, *ret = NULL;
assert_spin_locked(&o2net_debug_lock);
list_for_each_entry(nst, &nst_start->st_net_debug_item,
st_net_debug_item) {
/* discover the head of the list */
if (&nst->st_net_debug_item == &send_tracking)
break;
/* use st_task to detect real nsts in the list */
if (nst->st_task != NULL) {
ret = nst;
break;
}
}
return ret;
}
static void *nst_seq_start(struct seq_file *seq, loff_t *pos)
{
struct o2net_send_tracking *nst, *dummy_nst = seq->private;
spin_lock_bh(&o2net_debug_lock);
nst = next_nst(dummy_nst);
spin_unlock_bh(&o2net_debug_lock);
return nst;
}
static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct o2net_send_tracking *nst, *dummy_nst = seq->private;
spin_lock_bh(&o2net_debug_lock);
nst = next_nst(dummy_nst);
list_del_init(&dummy_nst->st_net_debug_item);
if (nst)
list_add(&dummy_nst->st_net_debug_item,
&nst->st_net_debug_item);
spin_unlock_bh(&o2net_debug_lock);
return nst; /* unused, just needs to be null when done */
}
static int nst_seq_show(struct seq_file *seq, void *v)
{
struct o2net_send_tracking *nst, *dummy_nst = seq->private;
ktime_t now;
s64 sock, send, status;
spin_lock_bh(&o2net_debug_lock);
nst = next_nst(dummy_nst);
if (!nst)
goto out;
now = ktime_get();
sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
send = ktime_to_us(ktime_sub(now, nst->st_send_time));
status = ktime_to_us(ktime_sub(now, nst->st_status_time));
/* get_task_comm isn't exported. oh well. */
seq_printf(seq, "%p:\n"
" pid: %lu\n"
" tgid: %lu\n"
" process name: %s\n"
" node: %u\n"
" sc: %p\n"
" message id: %d\n"
" message type: %u\n"
" message key: 0x%08x\n"
" sock acquiry: %lld usecs ago\n"
" send start: %lld usecs ago\n"
" wait start: %lld usecs ago\n",
nst, (unsigned long)task_pid_nr(nst->st_task),
(unsigned long)nst->st_task->tgid,
nst->st_task->comm, nst->st_node,
nst->st_sc, nst->st_id, nst->st_msg_type,
nst->st_msg_key,
(long long)sock,
(long long)send,
(long long)status);
out:
spin_unlock_bh(&o2net_debug_lock);
return 0;
}
static void nst_seq_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations nst_seq_ops = {
.start = nst_seq_start,
.next = nst_seq_next,
.stop = nst_seq_stop,
.show = nst_seq_show,
};
static int nst_fop_open(struct inode *inode, struct file *file)
{
struct o2net_send_tracking *dummy_nst;
dummy_nst = __seq_open_private(file, &nst_seq_ops, sizeof(*dummy_nst));
if (!dummy_nst)
return -ENOMEM;
o2net_debug_add_nst(dummy_nst);
return 0;
}
static int nst_fop_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct o2net_send_tracking *dummy_nst = seq->private;
o2net_debug_del_nst(dummy_nst);
return seq_release_private(inode, file);
}
static const struct file_operations nst_seq_fops = {
.open = nst_fop_open,
.read = seq_read,
.llseek = seq_lseek,
.release = nst_fop_release,
};
void o2net_debug_add_sc(struct o2net_sock_container *sc)
{
spin_lock_bh(&o2net_debug_lock);
list_add(&sc->sc_net_debug_item, &sock_containers);
spin_unlock_bh(&o2net_debug_lock);
}
void o2net_debug_del_sc(struct o2net_sock_container *sc)
{
spin_lock_bh(&o2net_debug_lock);
list_del_init(&sc->sc_net_debug_item);
spin_unlock_bh(&o2net_debug_lock);
}
struct o2net_sock_debug {
int dbg_ctxt;
struct o2net_sock_container *dbg_sock;
};
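/* same dummy-element cursor trick as next_nst(); real scs have sc_page set */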
static struct o2net_sock_container
*next_sc(struct o2net_sock_container *sc_start)
{
struct o2net_sock_container *sc, *ret = NULL;
assert_spin_locked(&o2net_debug_lock);
list_for_each_entry(sc, &sc_start->sc_net_debug_item,
sc_net_debug_item) {
/* discover the head of the list miscast as a sc */
if (&sc->sc_net_debug_item == &sock_containers)
break;
/* use sc_page to detect real scs in the list */
if (sc->sc_page != NULL) {
ret = sc;
break;
}
}
return ret;
}
static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct o2net_sock_debug *sd = seq->private;
struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock_bh(&o2net_debug_lock);
sc = next_sc(dummy_sc);
spin_unlock_bh(&o2net_debug_lock);
return sc;
}
static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct o2net_sock_debug *sd = seq->private;
struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock_bh(&o2net_debug_lock);
sc = next_sc(dummy_sc);
list_del_init(&dummy_sc->sc_net_debug_item);
if (sc)
list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item);
spin_unlock_bh(&o2net_debug_lock);
return sc; /* unused, just needs to be null when done */
}
#ifdef CONFIG_OCFS2_FS_STATS
# define sc_send_count(_s) ((_s)->sc_send_count)
# define sc_recv_count(_s) ((_s)->sc_recv_count)
# define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total))
# define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total))
# define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total))
# define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total))
#else
# define sc_send_count(_s) (0U)
# define sc_recv_count(_s) (0U)
# define sc_tv_acquiry_total_ns(_s) (0LL)
# define sc_tv_send_total_ns(_s) (0LL)
# define sc_tv_status_total_ns(_s) (0LL)
# define sc_tv_process_total_ns(_s) (0LL)
#endif
/* So that debugfs.ocfs2 can determine which format is being used */
#define O2NET_STATS_STR_VERSION 1
static void sc_show_sock_stats(struct seq_file *seq,
struct o2net_sock_container *sc)
{
if (!sc)
return;
seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
(long long)sc_tv_acquiry_total_ns(sc),
(long long)sc_tv_send_total_ns(sc),
(long long)sc_tv_status_total_ns(sc),
(unsigned long)sc_recv_count(sc),
(long long)sc_tv_process_total_ns(sc));
}
static void sc_show_sock_container(struct seq_file *seq,
struct o2net_sock_container *sc)
{
struct inet_sock *inet = NULL;
__be32 saddr = 0, daddr = 0;
__be16 sport = 0, dport = 0;
if (!sc)
return;
if (sc->sc_sock) {
inet = inet_sk(sc->sc_sock->sk);
/* the stack's structs aren't sparse endian clean */
saddr = (__force __be32)inet->inet_saddr;
daddr = (__force __be32)inet->inet_daddr;
sport = (__force __be16)inet->inet_sport;
dport = (__force __be16)inet->inet_dport;
}
/* XXX sigh, inet-> doesn't have sparse annotation so any
* use of it here generates a warning with -Wbitwise */
seq_printf(seq, "%p:\n"
" krefs: %d\n"
" sock: %pI4:%u -> "
"%pI4:%u\n"
" remote node: %s\n"
" page off: %zu\n"
" handshake ok: %u\n"
" timer: %lld usecs\n"
" data ready: %lld usecs\n"
" advance start: %lld usecs\n"
" advance stop: %lld usecs\n"
" func start: %lld usecs\n"
" func stop: %lld usecs\n"
" func key: 0x%08x\n"
" func type: %u\n",
sc,
kref_read(&sc->sc_kref),
&saddr, inet ? ntohs(sport) : 0,
&daddr, inet ? ntohs(dport) : 0,
sc->sc_node->nd_name,
sc->sc_page_off,
sc->sc_handshake_ok,
(long long)ktime_to_us(sc->sc_tv_timer),
(long long)ktime_to_us(sc->sc_tv_data_ready),
(long long)ktime_to_us(sc->sc_tv_advance_start),
(long long)ktime_to_us(sc->sc_tv_advance_stop),
(long long)ktime_to_us(sc->sc_tv_func_start),
(long long)ktime_to_us(sc->sc_tv_func_stop),
sc->sc_msg_key,
sc->sc_msg_type);
}
static int sc_seq_show(struct seq_file *seq, void *v)
{
struct o2net_sock_debug *sd = seq->private;
struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock_bh(&o2net_debug_lock);
sc = next_sc(dummy_sc);
if (sc) {
if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
sc_show_sock_container(seq, sc);
else
sc_show_sock_stats(seq, sc);
}
spin_unlock_bh(&o2net_debug_lock);
return 0;
}
static void sc_seq_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations sc_seq_ops = {
.start = sc_seq_start,
.next = sc_seq_next,
.stop = sc_seq_stop,
.show = sc_seq_show,
};
static int sc_common_open(struct file *file, int ctxt)
{
struct o2net_sock_debug *sd;
struct o2net_sock_container *dummy_sc;
dummy_sc = kzalloc(sizeof(*dummy_sc), GFP_KERNEL);
if (!dummy_sc)
return -ENOMEM;
sd = __seq_open_private(file, &sc_seq_ops, sizeof(*sd));
if (!sd) {
kfree(dummy_sc);
return -ENOMEM;
}
sd->dbg_ctxt = ctxt;
sd->dbg_sock = dummy_sc;
o2net_debug_add_sc(dummy_sc);
return 0;
}
static int sc_fop_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct o2net_sock_debug *sd = seq->private;
struct o2net_sock_container *dummy_sc = sd->dbg_sock;
o2net_debug_del_sc(dummy_sc);
kfree(dummy_sc);
return seq_release_private(inode, file);
}
static int stats_fop_open(struct inode *inode, struct file *file)
{
return sc_common_open(file, SHOW_SOCK_STATS);
}
static const struct file_operations stats_seq_fops = {
.open = stats_fop_open,
.read = seq_read,
.llseek = seq_lseek,
.release = sc_fop_release,
};
static int sc_fop_open(struct inode *inode, struct file *file)
{
return sc_common_open(file, SHOW_SOCK_CONTAINERS);
}
static const struct file_operations sc_seq_fops = {
.open = sc_fop_open,
.read = seq_read,
.llseek = seq_lseek,
.release = sc_fop_release,
};
static int o2net_fill_bitmap(char *buf, int len)
{
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
int i = -1, out = 0;
o2net_fill_node_map(map, O2NM_MAX_NODES);
while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
return out;
}
static int nodes_fop_open(struct inode *inode, struct file *file)
{
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
file->private_data = buf;
return 0;
}
static int o2net_debug_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static ssize_t o2net_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
i_size_read(file->f_mapping->host));
}
static const struct file_operations nodes_fops = {
.open = nodes_fop_open,
.release = o2net_debug_release,
.read = o2net_debug_read,
.llseek = generic_file_llseek,
};
void o2net_debugfs_exit(void)
{
debugfs_remove_recursive(o2net_dentry);
}
void o2net_debugfs_init(void)
{
umode_t mode = S_IFREG|S_IRUSR;
o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
debugfs_create_file(NST_DEBUG_NAME, mode, o2net_dentry, NULL,
&nst_seq_fops);
debugfs_create_file(SC_DEBUG_NAME, mode, o2net_dentry, NULL,
&sc_seq_fops);
debugfs_create_file(STATS_DEBUG_NAME, mode, o2net_dentry, NULL,
&stats_seq_fops);
debugfs_create_file(NODES_DEBUG_NAME, mode, o2net_dentry, NULL,
&nodes_fops);
}
#endif /* CONFIG_DEBUG_FS */
| linux-master | fs/ocfs2/cluster/netdebug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/ktime.h>
#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"
#include "masklog.h"
/*
* The first heartbeat pass had one global thread that would serialize all hb
* callback calls. This global serializing sem should only be removed once
* we've made sure that all callees can deal with being called concurrently
* from multiple hb region threads.
*/
static DECLARE_RWSEM(o2hb_callback_sem);
/*
* multiple hb threads are watching multiple regions. A node is live
* whenever any of the threads sees activity from the node in its region.
*/
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);
/*
* In global heartbeat, we maintain a series of region bitmaps.
* - o2hb_region_bitmap allows us to limit the region number to max region.
* - o2hb_live_region_bitmap tracks live regions (seen steady iterations).
* - o2hb_quorum_region_bitmap tracks live regions that have seen all nodes
* heartbeat on it.
* - o2hb_failed_region_bitmap tracks the regions that have seen io timeouts.
*/
static unsigned long o2hb_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_live_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_quorum_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
#define O2HB_DB_TYPE_LIVENODES 0
#define O2HB_DB_TYPE_LIVEREGIONS 1
#define O2HB_DB_TYPE_QUORUMREGIONS 2
#define O2HB_DB_TYPE_FAILEDREGIONS 3
#define O2HB_DB_TYPE_REGION_LIVENODES 4
#define O2HB_DB_TYPE_REGION_NUMBER 5
#define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6
#define O2HB_DB_TYPE_REGION_PINNED 7
struct o2hb_debug_buf {
int db_type;
int db_size;
int db_len;
void *db_data;
};
static struct o2hb_debug_buf *o2hb_db_livenodes;
static struct o2hb_debug_buf *o2hb_db_liveregions;
static struct o2hb_debug_buf *o2hb_db_quorumregions;
static struct o2hb_debug_buf *o2hb_db_failedregions;
#define O2HB_DEBUG_DIR "o2hb"
#define O2HB_DEBUG_LIVENODES "livenodes"
#define O2HB_DEBUG_LIVEREGIONS "live_regions"
#define O2HB_DEBUG_QUORUMREGIONS "quorum_regions"
#define O2HB_DEBUG_FAILEDREGIONS "failed_regions"
#define O2HB_DEBUG_REGION_NUMBER "num"
#define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms"
#define O2HB_DEBUG_REGION_PINNED "pinned"
static struct dentry *o2hb_debug_dir;
static LIST_HEAD(o2hb_all_regions);
static struct o2hb_callback {
struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
enum o2hb_heartbeat_modes {
O2HB_HEARTBEAT_LOCAL = 0,
O2HB_HEARTBEAT_GLOBAL,
O2HB_HEARTBEAT_NUM_MODES,
};
static const char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
"local", /* O2HB_HEARTBEAT_LOCAL */
"global", /* O2HB_HEARTBEAT_GLOBAL */
};
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
static unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
/*
* o2hb_dependent_users tracks the number of registered callbacks that depend
* on heartbeat. o2net and o2dlm are two entities that register such callbacks.
* However, only o2dlm truly depends on the heartbeat: it does not want the
* heartbeat to stop while a dlm domain is still active.
*/
static unsigned int o2hb_dependent_users;
/*
* In global heartbeat mode, all regions are pinned if there are one or more
* dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
* regions are unpinned if the region count exceeds the cut off or the number
* of dependent users falls to zero.
*/
#define O2HB_PIN_CUT_OFF 3
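/*
* Illustrative example of the rule above: with O2HB_PIN_CUT_OFF at 3, a
* cluster heartbeating on three quorum regions keeps all regions pinned
* while dependent users (o2dlm) exist; adding a fourth quorum region
* unpins them all, and dropping back to three with dependent users still
* present re-pins them.
*/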
/*
* In local heartbeat mode, we assume the dlm domain name to be the same as
* region uuid. This is true for domains created for the file system but not
* necessarily true for userdlm domains. This is a known limitation.
*
* In global heartbeat mode, we pin/unpin all o2hb regions. This solution
* works for both file system and userdlm domains.
*/
static int o2hb_region_pin(const char *region_uuid);
static void o2hb_region_unpin(const char *region_uuid);
/* Only sets a new threshold if there are no active regions.
*
* No locking or otherwise interesting code is required for reading
* o2hb_dead_threshold as it can't change once regions are active and
* it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
spin_lock(&o2hb_live_lock);
if (list_empty(&o2hb_all_regions))
o2hb_dead_threshold = threshold;
spin_unlock(&o2hb_live_lock);
}
}
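/*
* The dead window is o2hb_dead_threshold consecutive equal samples, i.e.
* roughly threshold * O2HB_REGION_TIMEOUT_MS of silence. For illustration
* only, assuming a 2000 ms region timeout and a default threshold of 31
* (the values used by heartbeat.h in mainline), a node would be declared
* dead after roughly 62 seconds without an observed heartbeat write.
*/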
static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode)
{
int ret = -1;
if (hb_mode < O2HB_HEARTBEAT_NUM_MODES) {
spin_lock(&o2hb_live_lock);
if (list_empty(&o2hb_all_regions)) {
o2hb_heartbeat_mode = hb_mode;
ret = 0;
}
spin_unlock(&o2hb_live_lock);
}
return ret;
}
struct o2hb_node_event {
struct list_head hn_item;
enum o2hb_callback_type hn_event_type;
struct o2nm_node *hn_node;
int hn_node_num;
};
struct o2hb_disk_slot {
struct o2hb_disk_heartbeat_block *ds_raw_block;
u8 ds_node_num;
u64 ds_last_time;
u64 ds_last_generation;
u16 ds_equal_samples;
u16 ds_changed_samples;
struct list_head ds_live_item;
};
/* each thread owns a region.. when we're asked to tear down the region
* we ask the thread to stop, which then cleans up the region */
struct o2hb_region {
struct config_item hr_item;
struct list_head hr_all_item;
unsigned hr_unclean_stop:1,
hr_aborted_start:1,
hr_item_pinned:1,
hr_item_dropped:1,
hr_node_deleted:1;
/* protected by the hr_callback_sem */
struct task_struct *hr_task;
unsigned int hr_blocks;
unsigned long long hr_start_block;
unsigned int hr_block_bits;
unsigned int hr_block_bytes;
unsigned int hr_slots_per_page;
unsigned int hr_num_pages;
struct page **hr_slot_data;
struct block_device *hr_bdev;
struct o2hb_disk_slot *hr_slots;
/* live node map of this region */
unsigned long hr_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned int hr_region_num;
struct dentry *hr_debug_dir;
struct o2hb_debug_buf *hr_db_livenodes;
struct o2hb_debug_buf *hr_db_regnum;
struct o2hb_debug_buf *hr_db_elapsed_time;
struct o2hb_debug_buf *hr_db_pinned;
/* lets the person setting up hb wait until the region has reached a
* 'steady' state before returning. This will be fixed when we have
* a more complete api that doesn't lead to this sort of fragility. */
atomic_t hr_steady_iterations;
/* terminate o2hb thread if it does not reach steady state
* (hr_steady_iterations == 0) within hr_unsteady_iterations */
atomic_t hr_unsteady_iterations;
unsigned int hr_timeout_ms;
/* randomized as the region goes up and down so that a node
* recognizes a node going up and down in one iteration */
u64 hr_generation;
struct delayed_work hr_write_timeout_work;
unsigned long hr_last_timeout_start;
/* negotiate timer, used to negotiate extending hb timeout. */
struct delayed_work hr_nego_timeout_work;
unsigned long hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
/* Used during o2hb_check_slot to hold a copy of the block
* being checked because we temporarily have to zero out the
* crc field. */
struct o2hb_disk_heartbeat_block *hr_tmp_block;
/* Message key for negotiate timeout message. */
unsigned int hr_key;
struct list_head hr_handler_list;
/* last hb status, 0 for success, other value for error. */
int hr_last_hb_status;
};
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
int wc_error;
};
#define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2)
enum {
O2HB_NEGO_TIMEOUT_MSG = 1,
O2HB_NEGO_APPROVE_MSG = 2,
};
struct o2hb_nego_msg {
u8 node_num;
};
static void o2hb_write_timeout(struct work_struct *work)
{
int failed, quorum;
struct o2hb_region *reg =
container_of(work, struct o2hb_region,
hr_write_timeout_work.work);
mlog(ML_ERROR, "Heartbeat write timeout to device %pg after %u "
"milliseconds\n", reg->hr_bdev,
jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
if (o2hb_global_heartbeat_active()) {
spin_lock(&o2hb_live_lock);
if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
failed = bitmap_weight(o2hb_failed_region_bitmap,
O2NM_MAX_REGIONS);
quorum = bitmap_weight(o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS);
spin_unlock(&o2hb_live_lock);
mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n",
quorum, failed);
/*
* Fence if the number of failed regions >= half the number
* of quorum regions
*/
if ((failed << 1) < quorum)
return;
}
o2quo_disk_timeout();
}
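/*
* Worked example of the fencing test above: with 5 quorum regions,
* (failed << 1) < quorum keeps the node alive for failed = 1 or 2
* (2 < 5, 4 < 5) but fences it once failed reaches 3 (6 >= 5). In other
* words, the node self-fences as soon as at least half of the quorum
* regions have seen write timeouts.
*/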
static void o2hb_arm_timeout(struct o2hb_region *reg)
{
/* Arm writeout only after thread reaches steady state */
if (atomic_read(&reg->hr_steady_iterations) != 0)
return;
mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
O2HB_MAX_WRITE_TIMEOUT_MS);
if (o2hb_global_heartbeat_active()) {
spin_lock(&o2hb_live_lock);
clear_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
spin_unlock(&o2hb_live_lock);
}
cancel_delayed_work(&reg->hr_write_timeout_work);
schedule_delayed_work(&reg->hr_write_timeout_work,
msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
cancel_delayed_work(&reg->hr_nego_timeout_work);
/* negotiate timeout must be less than write timeout. */
schedule_delayed_work(&reg->hr_nego_timeout_work,
msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS));
bitmap_zero(reg->hr_nego_node_bitmap, O2NM_MAX_NODES);
}
static void o2hb_disarm_timeout(struct o2hb_region *reg)
{
cancel_delayed_work_sync(&reg->hr_write_timeout_work);
cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
}
static int o2hb_send_nego_msg(int key, int type, u8 target)
{
struct o2hb_nego_msg msg;
int status, ret;
msg.node_num = o2nm_this_node();
again:
ret = o2net_send_message(type, key, &msg, sizeof(msg),
target, &status);
if (ret == -EAGAIN || ret == -ENOMEM) {
msleep(100);
goto again;
}
return ret;
}
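/*
* Timeout negotiation protocol, as implemented below: the lowest-numbered
* live node acts as master. A node whose heartbeat write has been hung
* for O2HB_NEGO_TIMEOUT_MS sends O2HB_NEGO_TIMEOUT_MSG to the master,
* which records requesters in hr_nego_node_bitmap. Only when every live
* node (the master included) reports a hang does the master conclude the
* shared device itself is down; it then re-arms its own write timeout and
* sends O2HB_NEGO_APPROVE_MSG so the other nodes extend theirs too. If
* any node can still write, the bitmaps never match and the hung nodes
* fence themselves via the normal write timeout.
*/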
static void o2hb_nego_timeout(struct work_struct *work)
{
unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
int master_node, i, ret;
struct o2hb_region *reg;
reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
/* don't negotiate the timeout if the last hb failed, since it is very
* possible the io failed. In that case, let the write timeout fence
* this node itself.
*/
if (reg->hr_last_hb_status)
return;
o2hb_fill_node_map(live_node_bitmap, O2NM_MAX_NODES);
/* the lowest live node acts as master and makes the negotiation decision. */
master_node = find_first_bit(live_node_bitmap, O2NM_MAX_NODES);
if (master_node == o2nm_this_node()) {
if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg).\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
config_item_name(&reg->hr_item), reg->hr_bdev);
set_bit(master_node, reg->hr_nego_node_bitmap);
}
if (!bitmap_equal(reg->hr_nego_node_bitmap, live_node_bitmap,
O2NM_MAX_NODES)) {
/* check the negotiate bitmap every second until we can
* make the timeout approval decision.
*/
schedule_delayed_work(®->hr_nego_timeout_work,
msecs_to_jiffies(1000));
return;
}
printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%pg) is down.\n",
config_item_name(&reg->hr_item), reg->hr_bdev);
/* approve negotiate timeout request. */
o2hb_arm_timeout(reg);
i = -1;
while ((i = find_next_bit(live_node_bitmap,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
if (i == master_node)
continue;
mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);
ret = o2hb_send_nego_msg(reg->hr_key,
O2HB_NEGO_APPROVE_MSG, i);
if (ret)
mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",
i, ret);
}
} else {
/* negotiate timeout with master node. */
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg), negotiate timeout with node %d.\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
reg->hr_bdev, master_node);
ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
master_node);
if (ret)
mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",
master_node, ret);
}
}
static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct o2hb_region *reg = data;
struct o2hb_nego_msg *nego_msg;
nego_msg = (struct o2hb_nego_msg *)msg->buf;
printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%pg).\n",
nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_bdev);
if (nego_msg->node_num < O2NM_MAX_NODES)
set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
else
mlog(ML_ERROR, "got nego timeout message from bad node.\n");
return 0;
}
static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct o2hb_region *reg = data;
printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%pg).\n",
config_item_name(&reg->hr_item), reg->hr_bdev);
o2hb_arm_timeout(reg);
return 0;
}
static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
{
atomic_set(&wc->wc_num_reqs, 1);
init_completion(&wc->wc_io_complete);
wc->wc_error = 0;
}
/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
unsigned int num)
{
/* sadly atomic_sub_and_test() isn't available on all platforms. The
* good news is that the fast path only completes one at a time */
while(num--) {
if (atomic_dec_and_test(&wc->wc_num_reqs)) {
BUG_ON(num > 0);
complete(&wc->wc_io_complete);
}
}
}
static void o2hb_wait_on_io(struct o2hb_bio_wait_ctxt *wc)
{
o2hb_bio_wait_dec(wc, 1);
wait_for_completion(&wc->wc_io_complete);
}
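/*
* Note the bias-by-one pattern above: o2hb_bio_wait_init() starts
* wc_num_reqs at 1, each submitted bio adds one and each completion
* drops one. The submitter's own o2hb_wait_on_io() drops the initial
* reference, so the completion can only fire once all bios have been
* both submitted and completed - the count never transiently hits zero
* while submission is still in progress.
*/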
static void o2hb_bio_end_io(struct bio *bio)
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
if (bio->bi_status) {
mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
wc->wc_error = blk_status_to_errno(bio->bi_status);
}
o2hb_bio_wait_dec(wc, 1);
bio_put(bio);
}
/* Set up a bio to cover I/O against the slots from *current_slot up to,
* but not including, max_slots. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc,
unsigned int *current_slot,
unsigned int max_slots, blk_opf_t opf)
{
int len, current_page;
unsigned int vec_len, vec_start;
unsigned int bits = reg->hr_block_bits;
unsigned int spp = reg->hr_slots_per_page;
unsigned int cs = *current_slot;
struct bio *bio;
struct page *page;
/* Testing has shown this allocation to take long enough under
* GFP_KERNEL that the local node can get fenced. It would be
* nicer if we could pre-allocate these bios and avoid this
* altogether. */
bio = bio_alloc(reg->hr_bdev, 16, opf, GFP_ATOMIC);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
goto bail;
}
/* Must put everything in 512 byte sectors for the bio... */
bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
vec_start = (cs << bits) % PAGE_SIZE;
while(cs < max_slots) {
current_page = cs / spp;
page = reg->hr_slot_data[current_page];
vec_len = min(PAGE_SIZE - vec_start,
(max_slots-cs) * (PAGE_SIZE/spp) );
mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
current_page, vec_len, vec_start);
len = bio_add_page(bio, page, vec_len, vec_start);
if (len != vec_len) break;
cs += vec_len / (PAGE_SIZE/spp);
vec_start = 0;
}
bail:
*current_slot = cs;
return bio;
}
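/*
* Illustrative sector math for the setup above: with 512-byte heartbeat
* blocks (bits = 9) the shift is zero and slot N maps directly to sector
* hr_start_block + N; with 4096-byte blocks (bits = 12) slot N starts at
* sector (hr_start_block + N) << 3, i.e. eight 512-byte sectors per slot.
*/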
static int o2hb_read_slots(struct o2hb_region *reg,
unsigned int begin_slot,
unsigned int max_slots)
{
unsigned int current_slot = begin_slot;
int status;
struct o2hb_bio_wait_ctxt wc;
struct bio *bio;
o2hb_bio_wait_init(&wc);
while(current_slot < max_slots) {
bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
REQ_OP_READ);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
goto bail_and_wait;
}
atomic_inc(&wc.wc_num_reqs);
submit_bio(bio);
}
status = 0;
bail_and_wait:
o2hb_wait_on_io(&wc);
if (wc.wc_error && !status)
status = wc.wc_error;
return status;
}
static int o2hb_issue_node_write(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *write_wc)
{
int status;
unsigned int slot;
struct bio *bio;
o2hb_bio_wait_init(write_wc);
slot = o2nm_this_node();
bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1,
REQ_OP_WRITE | REQ_SYNC);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
goto bail;
}
atomic_inc(&write_wc->wc_num_reqs);
submit_bio(bio);
status = 0;
bail:
return status;
}
static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
struct o2hb_disk_heartbeat_block *hb_block)
{
__le32 old_cksum;
u32 ret;
/* We want to compute the block crc with a 0 value in the
* hb_cksum field. Save it off here and replace after the
* crc. */
old_cksum = hb_block->hb_cksum;
hb_block->hb_cksum = 0;
ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);
hb_block->hb_cksum = old_cksum;
return ret;
}
static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
"cksum = 0x%x, generation 0x%llx\n",
(long long)le64_to_cpu(hb_block->hb_seq),
hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
(long long)le64_to_cpu(hb_block->hb_generation));
}
static int o2hb_verify_crc(struct o2hb_region *reg,
struct o2hb_disk_heartbeat_block *hb_block)
{
u32 read, computed;
read = le32_to_cpu(hb_block->hb_cksum);
computed = o2hb_compute_block_crc_le(reg, hb_block);
return read == computed;
}
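/*
* Since the crc covers the full hr_block_bytes with hb_cksum zeroed,
* all nodes heartbeating on a region must agree on the block size: a
* reader using a different hr_block_bytes than the writer would checksum
* a different span and reject otherwise valid blocks.
*/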
/*
* Compare the slot data with what we wrote in the last iteration.
* If the match fails, print an appropriate error message. This is to
* detect errors like... another node heartbeating on the same slot,
* flaky device that is losing writes, etc.
* Returns 1 if check succeeds, 0 otherwise.
*/
static int o2hb_check_own_slot(struct o2hb_region *reg)
{
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
char *errstr;
slot = &reg->hr_slots[o2nm_this_node()];
/* Don't check on our 1st timestamp */
if (!slot->ds_last_time)
return 0;
hb_block = slot->ds_raw_block;
if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
hb_block->hb_node == slot->ds_node_num)
return 1;
#define ERRSTR1 "Another node is heartbeating on device"
#define ERRSTR2 "Heartbeat generation mismatch on device"
#define ERRSTR3 "Heartbeat sequence mismatch on device"
if (hb_block->hb_node != slot->ds_node_num)
errstr = ERRSTR1;
else if (le64_to_cpu(hb_block->hb_generation) !=
slot->ds_last_generation)
errstr = ERRSTR2;
else
errstr = ERRSTR3;
mlog(ML_ERROR, "%s (%pg): expected(%u:0x%llx, 0x%llx), "
"ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_bdev,
slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
(unsigned long long)le64_to_cpu(hb_block->hb_seq));
return 0;
}
static inline void o2hb_prepare_block(struct o2hb_region *reg,
u64 generation)
{
int node_num;
u64 cputime;
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
node_num = o2nm_this_node();
slot = &reg->hr_slots[node_num];
hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
memset(hb_block, 0, reg->hr_block_bytes);
/* TODO: time stuff */
cputime = ktime_get_real_seconds();
if (!cputime)
cputime = 1;
hb_block->hb_seq = cpu_to_le64(cputime);
hb_block->hb_node = node_num;
hb_block->hb_generation = cpu_to_le64(generation);
hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);
/* This step must always happen last! */
hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
hb_block));
mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
(long long)generation,
le32_to_cpu(hb_block->hb_cksum));
}
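/*
* hb_seq is wall-clock seconds from ktime_get_real_seconds(), clamped to
* a minimum of 1 so a zeroed block is never mistaken for a live write.
* Remote nodes never interpret the absolute value; o2hb_check_slot()
* only compares successive reads, counting equal samples toward "dead"
* and changed samples toward "live".
*/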
static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
struct o2nm_node *node,
int idx)
{
struct o2hb_callback_func *f;
list_for_each_entry(f, &hbcall->list, hc_item) {
mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
(f->hc_func)(node, idx, f->hc_data);
}
}
/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
struct o2hb_callback *hbcall;
struct o2hb_node_event *event;
/* Holding callback sem assures we don't alter the callback
* lists when doing this, and serializes ourselves with other
* processes wanting callbacks. */
down_write(&o2hb_callback_sem);
spin_lock(&o2hb_live_lock);
while (!list_empty(&o2hb_node_events)
&& !list_empty(&queued_event->hn_item)) {
event = list_entry(o2hb_node_events.next,
struct o2hb_node_event,
hn_item);
list_del_init(&event->hn_item);
spin_unlock(&o2hb_live_lock);
mlog(ML_HEARTBEAT, "Node %s event for %d\n",
event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
event->hn_node_num);
hbcall = hbcall_from_type(event->hn_event_type);
/* We should *never* have gotten on to the list with a
* bad type... This isn't something that we should try
* to recover from. */
BUG_ON(IS_ERR(hbcall));
o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);
spin_lock(&o2hb_live_lock);
}
spin_unlock(&o2hb_live_lock);
up_write(&o2hb_callback_sem);
}
static void o2hb_queue_node_event(struct o2hb_node_event *event,
enum o2hb_callback_type type,
struct o2nm_node *node,
int node_num)
{
assert_spin_locked(&o2hb_live_lock);
BUG_ON((!node) && (type != O2HB_NODE_DOWN_CB));
event->hn_event_type = type;
event->hn_node = node;
event->hn_node_num = node_num;
mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);
list_add_tail(&event->hn_item, &o2hb_node_events);
}
static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
struct o2nm_node *node;
int queued = 0;
node = o2nm_get_node_by_num(slot->ds_node_num);
if (!node)
return;
spin_lock(&o2hb_live_lock);
if (!list_empty(&slot->ds_live_item)) {
mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
slot->ds_node_num);
list_del_init(&slot->ds_live_item);
if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
slot->ds_node_num);
queued = 1;
}
}
spin_unlock(&o2hb_live_lock);
if (queued)
o2hb_run_event_list(&event);
o2nm_node_put(node);
}
static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
if (!o2hb_global_heartbeat_active())
return;
/* Prevent race with o2hb_heartbeat_group_drop_item() */
if (kthread_should_stop())
return;
/* Tag region as quorum only after thread reaches steady state */
if (atomic_read(&reg->hr_steady_iterations) != 0)
return;
spin_lock(&o2hb_live_lock);
if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
goto unlock;
/*
* A region can be added to the quorum only when it sees all
* live nodes heartbeat on it. In other words, the region has been
* added to all nodes.
*/
if (!bitmap_equal(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
O2NM_MAX_NODES))
goto unlock;
printk(KERN_NOTICE "o2hb: Region %s (%pg) is now a quorum device\n",
config_item_name(&reg->hr_item), reg->hr_bdev);
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
/*
* If global heartbeat active, unpin all regions if the
* region count > CUT_OFF
*/
if (bitmap_weight(o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
o2hb_region_unpin(NULL);
unlock:
spin_unlock(&o2hb_live_lock);
}
static int o2hb_check_slot(struct o2hb_region *reg,
struct o2hb_disk_slot *slot)
{
int changed = 0, gen_changed = 0;
struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
struct o2nm_node *node;
struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
u64 cputime;
unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
unsigned int slot_dead_ms;
int tmp;
int queued = 0;
memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
/*
* If a node is no longer configured but is still in the livemap, we
* may need to clear that bit from the livemap.
*/
node = o2nm_get_node_by_num(slot->ds_node_num);
if (!node) {
spin_lock(&o2hb_live_lock);
tmp = test_bit(slot->ds_node_num, o2hb_live_node_bitmap);
spin_unlock(&o2hb_live_lock);
if (!tmp)
return 0;
}
if (!o2hb_verify_crc(reg, hb_block)) {
/* all paths from here will drop o2hb_live_lock for
* us. */
spin_lock(&o2hb_live_lock);
/* Don't print an error on the console in this case -
* a freshly formatted heartbeat area will not have a
* crc set on it. */
if (list_empty(&slot->ds_live_item))
goto out;
/* The node is live but pushed out a bad crc. We
* consider it a transient miss but don't populate any
* other values as they may be junk. */
mlog(ML_ERROR, "Node %d has written a bad crc to %pg\n",
slot->ds_node_num, reg->hr_bdev);
o2hb_dump_slot(hb_block);
slot->ds_equal_samples++;
goto fire_callbacks;
}
/* we don't care if these wrap.. the state transitions below
* clear at the right places */
cputime = le64_to_cpu(hb_block->hb_seq);
if (slot->ds_last_time != cputime)
slot->ds_changed_samples++;
else
slot->ds_equal_samples++;
slot->ds_last_time = cputime;
/* The node changed heartbeat generations. We assume this to
* mean it dropped off but came back before we timed out. We
* want to consider it down for the time being but don't want
* to lose any changed_samples state we might build up to
* considering it live again. */
if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
gen_changed = 1;
slot->ds_equal_samples = 0;
mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
"to 0x%llx)\n", slot->ds_node_num,
(long long)slot->ds_last_generation,
(long long)le64_to_cpu(hb_block->hb_generation));
}
slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
"seq %llu last %llu changed %u equal %u\n",
slot->ds_node_num, (long long)slot->ds_last_generation,
le32_to_cpu(hb_block->hb_cksum),
(unsigned long long)le64_to_cpu(hb_block->hb_seq),
(unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
slot->ds_equal_samples);
spin_lock(&o2hb_live_lock);
fire_callbacks:
/* dead nodes only come back to life after a number of changed
* samples, which may accumulate at any time during their dead time */
if (list_empty(&slot->ds_live_item) &&
slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
slot->ds_node_num, (long long)slot->ds_last_generation);
set_bit(slot->ds_node_num, reg->hr_live_node_bitmap);
/* first on the list generates a callback */
if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
mlog(ML_HEARTBEAT, "o2hb: Add node %d to live nodes "
"bitmap\n", slot->ds_node_num);
set_bit(slot->ds_node_num, o2hb_live_node_bitmap);
o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
slot->ds_node_num);
changed = 1;
queued = 1;
}
list_add_tail(&slot->ds_live_item,
&o2hb_live_slots[slot->ds_node_num]);
slot->ds_equal_samples = 0;
/* We want to be sure that all nodes agree on the
* number of milliseconds before a node will be
* considered dead. The self-fencing timeout is
* computed from this value, and a discrepancy might
* result in heartbeat calling a node dead when it
* hasn't self-fenced yet. */
slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
if (slot_dead_ms && slot_dead_ms != dead_ms) {
/* TODO: Perhaps we can fail the region here. */
mlog(ML_ERROR, "Node %d on device %pg has a dead count "
"of %u ms, but our count is %u ms.\n"
"Please double check your configuration values "
"for 'O2CB_HEARTBEAT_THRESHOLD'\n",
slot->ds_node_num, reg->hr_bdev, slot_dead_ms,
dead_ms);
}
goto out;
}
/* if the list is dead, we're done.. */
if (list_empty(&slot->ds_live_item))
goto out;
/* live nodes only go dead after enough consecutive missed
* samples.. reset the missed counter whenever we see
* activity */
if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
mlog(ML_HEARTBEAT, "Node %d left my region\n",
slot->ds_node_num);
clear_bit(slot->ds_node_num, reg->hr_live_node_bitmap);
/* last off the live_slot generates a callback */
list_del_init(&slot->ds_live_item);
if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
mlog(ML_HEARTBEAT, "o2hb: Remove node %d from live "
"nodes bitmap\n", slot->ds_node_num);
clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
/* node can be null */
o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB,
node, slot->ds_node_num);
changed = 1;
queued = 1;
}
/* We don't clear this because the node is still
* actually writing new blocks. */
if (!gen_changed)
slot->ds_changed_samples = 0;
goto out;
}
if (slot->ds_changed_samples) {
slot->ds_changed_samples = 0;
slot->ds_equal_samples = 0;
}
out:
spin_unlock(&o2hb_live_lock);
if (queued)
o2hb_run_event_list(&event);
if (node)
o2nm_node_put(node);
return changed;
}
static int o2hb_highest_node(unsigned long *nodes, int numbits)
{
return find_last_bit(nodes, numbits);
}
static int o2hb_lowest_node(unsigned long *nodes, int numbits)
{
return find_first_bit(nodes, numbits);
}
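/*
* One heartbeat iteration, in order: (1) read the slots of all
* configured and live nodes, (2) verify nobody has clobbered our own
* slot, (3) prepare and submit the write of our own block, (4) run
* o2hb_check_slot() on every configured slot to detect membership
* changes, and (5) wait for our write to land before re-arming the
* timeouts and, during startup, counting down hr_steady_iterations.
*/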
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
int i, ret, highest_node, lowest_node;
int membership_change = 0, own_slot_ok = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct o2hb_bio_wait_ctxt write_wc;
ret = o2nm_configured_node_map(configured_nodes,
sizeof(configured_nodes));
if (ret) {
mlog_errno(ret);
goto bail;
}
/*
* If a node is not configured but is in the livemap, we still need
* to read the slot so as to be able to remove it from the livemap.
*/
o2hb_fill_node_map(live_node_bitmap, O2NM_MAX_NODES);
i = -1;
while ((i = find_next_bit(live_node_bitmap,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
set_bit(i, configured_nodes);
}
highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
lowest_node = o2hb_lowest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES || lowest_node >= O2NM_MAX_NODES) {
mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
ret = -EINVAL;
goto bail;
}
/* No sense in reading the slots of nodes that don't exist
* yet. Of course, if the node definitions have holes in them
* then we're reading an empty slot anyway... Consider this
* best-effort. */
ret = o2hb_read_slots(reg, lowest_node, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
goto bail;
}
/* With an up to date view of the slots, we can check that no
* other node has been improperly configured to heartbeat in
* our slot. */
own_slot_ok = o2hb_check_own_slot(reg);
/* fill in the proper info for our next heartbeat */
o2hb_prepare_block(reg, reg->hr_generation);
ret = o2hb_issue_node_write(reg, &write_wc);
if (ret < 0) {
mlog_errno(ret);
goto bail;
}
i = -1;
while((i = find_next_bit(configured_nodes,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
}
/*
* We have to be sure we've advertised ourselves on disk
* before we can go to steady state. This ensures that
* people we find in our steady state have seen us.
*/
o2hb_wait_on_io(&write_wc);
if (write_wc.wc_error) {
/* Do not re-arm the write timeout on I/O error - we
* can't be sure that the new block ever made it to
* disk */
mlog(ML_ERROR, "Write error %d on device \"%pg\"\n",
write_wc.wc_error, reg->hr_bdev);
ret = write_wc.wc_error;
goto bail;
}
/* Skip disarming the timeout if own slot has stale/bad data */
if (own_slot_ok) {
o2hb_set_quorum_device(reg);
o2hb_arm_timeout(reg);
reg->hr_last_timeout_start = jiffies;
}
bail:
/* let the person who launched us know when things are steady */
if (atomic_read(&reg->hr_steady_iterations) != 0) {
if (!ret && own_slot_ok && !membership_change) {
if (atomic_dec_and_test(&reg->hr_steady_iterations))
wake_up(&o2hb_steady_queue);
}
}
if (atomic_read(&reg->hr_steady_iterations) != 0) {
if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
printk(KERN_NOTICE "o2hb: Unable to stabilize "
"heartbeat on region %s (%pg)\n",
config_item_name(&reg->hr_item),
reg->hr_bdev);
atomic_set(&reg->hr_steady_iterations, 0);
reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
ret = -EIO;
}
}
return ret;
}
/*
* we ride the region ref that the region dir holds. before the region
* dir is removed and drops its ref, it will wait to tear down this
* thread.
*/
static int o2hb_thread(void *data)
{
int i, ret;
struct o2hb_region *reg = data;
struct o2hb_bio_wait_ctxt write_wc;
ktime_t before_hb, after_hb;
unsigned int elapsed_msec;
mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
set_user_nice(current, MIN_NICE);
/* Pin node */
ret = o2nm_depend_this_node();
if (ret) {
mlog(ML_ERROR, "Node has been deleted, ret = %d\n", ret);
reg->hr_node_deleted = 1;
wake_up(&o2hb_steady_queue);
return 0;
}
while (!kthread_should_stop() &&
!reg->hr_unclean_stop && !reg->hr_aborted_start) {
/* We track the time spent inside
* o2hb_do_disk_heartbeat so that we avoid more than
* hr_timeout_ms between disk writes. On busy systems
* this should result in a heartbeat which is less
* likely to time itself out. */
before_hb = ktime_get_real();
ret = o2hb_do_disk_heartbeat(reg);
reg->hr_last_hb_status = ret;
after_hb = ktime_get_real();
elapsed_msec = (unsigned int)
ktime_ms_delta(after_hb, before_hb);
mlog(ML_HEARTBEAT,
"start = %lld, end = %lld, msec = %u, ret = %d\n",
before_hb, after_hb, elapsed_msec, ret);
if (!kthread_should_stop() &&
elapsed_msec < reg->hr_timeout_ms) {
/* the kthread api has blocked signals for us so no
* need to record the return value. */
msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
}
}
o2hb_disarm_timeout(reg);
/* unclean stop is only used in very bad situation */
for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
o2hb_shutdown_slot(&reg->hr_slots[i]);
/* Explicit down notification - avoid forcing the other nodes
* to timeout on this region when we could just as easily
* write a clear generation - thus indicating to them that
* this node has left this region.
*/
if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
o2hb_prepare_block(reg, 0);
ret = o2hb_issue_node_write(reg, &write_wc);
if (ret == 0)
o2hb_wait_on_io(&write_wc);
else
mlog_errno(ret);
}
/* Unpin node */
o2nm_undepend_this_node();
mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
struct o2hb_debug_buf *db = inode->i_private;
struct o2hb_region *reg;
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long lts;
char *buf = NULL;
int i = -1;
int out = 0;
/* max_nodes should be the largest bitmap we pass here */
BUG_ON(sizeof(map) < db->db_size);
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
goto bail;
switch (db->db_type) {
case O2HB_DB_TYPE_LIVENODES:
case O2HB_DB_TYPE_LIVEREGIONS:
case O2HB_DB_TYPE_QUORUMREGIONS:
case O2HB_DB_TYPE_FAILEDREGIONS:
spin_lock(&o2hb_live_lock);
memcpy(map, db->db_data, db->db_size);
spin_unlock(&o2hb_live_lock);
break;
case O2HB_DB_TYPE_REGION_LIVENODES:
spin_lock(&o2hb_live_lock);
reg = (struct o2hb_region *)db->db_data;
memcpy(map, reg->hr_live_node_bitmap, db->db_size);
spin_unlock(&o2hb_live_lock);
break;
case O2HB_DB_TYPE_REGION_NUMBER:
reg = (struct o2hb_region *)db->db_data;
out += scnprintf(buf + out, PAGE_SIZE - out, "%d\n",
reg->hr_region_num);
goto done;
case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
reg = (struct o2hb_region *)db->db_data;
lts = reg->hr_last_timeout_start;
/* If 0, it has never been set before */
if (lts)
lts = jiffies_to_msecs(jiffies - lts);
out += scnprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
goto done;
case O2HB_DB_TYPE_REGION_PINNED:
reg = (struct o2hb_region *)db->db_data;
out += scnprintf(buf + out, PAGE_SIZE - out, "%u\n",
!!reg->hr_item_pinned);
goto done;
default:
goto done;
}
while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len)
out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
done:
i_size_write(inode, out);
file->private_data = buf;
return 0;
bail:
return -ENOMEM;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
i_size_read(file->f_mapping->host));
}
#else
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
return 0;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return 0;
}
#endif /* CONFIG_DEBUG_FS */
static const struct file_operations o2hb_debug_fops = {
.open = o2hb_debug_open,
.release = o2hb_debug_release,
.read = o2hb_debug_read,
.llseek = generic_file_llseek,
};
void o2hb_exit(void)
{
debugfs_remove_recursive(o2hb_debug_dir);
kfree(o2hb_db_livenodes);
kfree(o2hb_db_liveregions);
kfree(o2hb_db_quorumregions);
kfree(o2hb_db_failedregions);
}
static void o2hb_debug_create(const char *name, struct dentry *dir,
struct o2hb_debug_buf **db, int db_len, int type,
int size, int len, void *data)
{
*db = kmalloc(db_len, GFP_KERNEL);
if (!*db)
return;
(*db)->db_type = type;
(*db)->db_size = size;
(*db)->db_len = len;
(*db)->db_data = data;
debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, &o2hb_debug_fops);
}
static void o2hb_debug_init(void)
{
o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
o2hb_debug_create(O2HB_DEBUG_LIVENODES, o2hb_debug_dir,
&o2hb_db_livenodes, sizeof(*o2hb_db_livenodes),
O2HB_DB_TYPE_LIVENODES, sizeof(o2hb_live_node_bitmap),
O2NM_MAX_NODES, o2hb_live_node_bitmap);
o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS, o2hb_debug_dir,
&o2hb_db_liveregions, sizeof(*o2hb_db_liveregions),
O2HB_DB_TYPE_LIVEREGIONS,
sizeof(o2hb_live_region_bitmap), O2NM_MAX_REGIONS,
o2hb_live_region_bitmap);
o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS, o2hb_debug_dir,
&o2hb_db_quorumregions,
sizeof(*o2hb_db_quorumregions),
O2HB_DB_TYPE_QUORUMREGIONS,
sizeof(o2hb_quorum_region_bitmap), O2NM_MAX_REGIONS,
o2hb_quorum_region_bitmap);
o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS, o2hb_debug_dir,
&o2hb_db_failedregions,
sizeof(*o2hb_db_failedregions),
O2HB_DB_TYPE_FAILEDREGIONS,
sizeof(o2hb_failed_region_bitmap), O2NM_MAX_REGIONS,
o2hb_failed_region_bitmap);
}
void o2hb_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
INIT_LIST_HEAD(&o2hb_callbacks[i].list);
for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
INIT_LIST_HEAD(&o2hb_live_slots[i]);
bitmap_zero(o2hb_live_node_bitmap, O2NM_MAX_NODES);
bitmap_zero(o2hb_region_bitmap, O2NM_MAX_REGIONS);
bitmap_zero(o2hb_live_region_bitmap, O2NM_MAX_REGIONS);
bitmap_zero(o2hb_quorum_region_bitmap, O2NM_MAX_REGIONS);
bitmap_zero(o2hb_failed_region_bitmap, O2NM_MAX_REGIONS);
o2hb_dependent_users = 0;
o2hb_debug_init();
}
/* if we're already in a callback then we're already serialized by the sem */
static void o2hb_fill_node_map_from_callback(unsigned long *map,
unsigned int bits)
{
bitmap_copy(map, o2hb_live_node_bitmap, bits);
}
/*
* get a map of all nodes that are heartbeating in any regions
*/
void o2hb_fill_node_map(unsigned long *map, unsigned int bits)
{
/* callers want to serialize this map and callbacks so that they
* can trust that they don't miss nodes coming to the party */
down_read(&o2hb_callback_sem);
spin_lock(&o2hb_live_lock);
o2hb_fill_node_map_from_callback(map, bits);
spin_unlock(&o2hb_live_lock);
up_read(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_fill_node_map);
/*
* heartbeat configfs bits. The heartbeat set is a default set under
* the cluster set in nodemanager.c.
*/
static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}
/* drop_item only drops its ref after killing the thread, nothing should
* be using the region anymore. this has to clean up any state that
* attributes might have built up. */
static void o2hb_region_release(struct config_item *item)
{
int i;
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg->hr_bdev);
kfree(reg->hr_tmp_block);
if (reg->hr_slot_data) {
for (i = 0; i < reg->hr_num_pages; i++) {
page = reg->hr_slot_data[i];
if (page)
__free_page(page);
}
kfree(reg->hr_slot_data);
}
if (reg->hr_bdev)
blkdev_put(reg->hr_bdev, NULL);
kfree(reg->hr_slots);
debugfs_remove_recursive(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
kfree(reg->hr_db_elapsed_time);
kfree(reg->hr_db_pinned);
spin_lock(&o2hb_live_lock);
list_del(&reg->hr_all_item);
spin_unlock(&o2hb_live_lock);
o2net_unregister_handler_list(&reg->hr_handler_list);
kfree(reg);
}
static int o2hb_read_block_input(struct o2hb_region *reg,
const char *page,
unsigned long *ret_bytes,
unsigned int *ret_bits)
{
unsigned long bytes;
char *p = (char *)page;
bytes = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
/* Heartbeat and fs min / max block sizes are the same. */
if (bytes > 4096 || bytes < 512)
return -ERANGE;
if (hweight16(bytes) != 1)
return -EINVAL;
if (ret_bytes)
*ret_bytes = bytes;
if (ret_bits)
*ret_bits = ffs(bytes) - 1;
return 0;
}
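/*
* hweight16(bytes) == 1 is the power-of-two test: within the allowed
* 512-4096 range only 512, 1024, 2048 and 4096 pass, and ffs(bytes) - 1
* then yields the matching block-bits value (9, 10, 11 and 12).
*/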
static ssize_t o2hb_region_block_bytes_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", to_o2hb_region(item)->hr_block_bytes);
}
static ssize_t o2hb_region_block_bytes_store(struct config_item *item,
const char *page,
size_t count)
{
struct o2hb_region *reg = to_o2hb_region(item);
int status;
unsigned long block_bytes;
unsigned int block_bits;
if (reg->hr_bdev)
return -EINVAL;
status = o2hb_read_block_input(reg, page, &block_bytes,
&block_bits);
if (status)
return status;
reg->hr_block_bytes = (unsigned int)block_bytes;
reg->hr_block_bits = block_bits;
return count;
}
static ssize_t o2hb_region_start_block_show(struct config_item *item,
char *page)
{
return sprintf(page, "%llu\n", to_o2hb_region(item)->hr_start_block);
}
static ssize_t o2hb_region_start_block_store(struct config_item *item,
const char *page,
size_t count)
{
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long long tmp;
char *p = (char *)page;
ssize_t ret;
if (reg->hr_bdev)
return -EINVAL;
ret = kstrtoull(p, 0, &tmp);
if (ret)
return -EINVAL;
reg->hr_start_block = tmp;
return count;
}
static ssize_t o2hb_region_blocks_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_o2hb_region(item)->hr_blocks);
}
static ssize_t o2hb_region_blocks_store(struct config_item *item,
const char *page,
size_t count)
{
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long tmp;
char *p = (char *)page;
if (reg->hr_bdev)
return -EINVAL;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
if (tmp > O2NM_MAX_NODES || tmp == 0)
return -ERANGE;
reg->hr_blocks = (unsigned int)tmp;
return count;
}
static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
{
unsigned int ret = 0;
if (to_o2hb_region(item)->hr_bdev)
ret = sprintf(page, "%pg\n", to_o2hb_region(item)->hr_bdev);
return ret;
}
static void o2hb_init_region_params(struct o2hb_region *reg)
{
reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
reg->hr_start_block, reg->hr_blocks);
mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
reg->hr_block_bytes, reg->hr_block_bits);
mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
}
static int o2hb_map_slot_data(struct o2hb_region *reg)
{
int i, j;
unsigned int last_slot;
unsigned int spp = reg->hr_slots_per_page;
struct page *page;
char *raw;
struct o2hb_disk_slot *slot;
reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
if (reg->hr_tmp_block == NULL)
return -ENOMEM;
reg->hr_slots = kcalloc(reg->hr_blocks,
sizeof(struct o2hb_disk_slot), GFP_KERNEL);
if (reg->hr_slots == NULL)
return -ENOMEM;
for(i = 0; i < reg->hr_blocks; i++) {
slot = &reg->hr_slots[i];
slot->ds_node_num = i;
INIT_LIST_HEAD(&slot->ds_live_item);
slot->ds_raw_block = NULL;
}
reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
"at %u blocks per page\n",
reg->hr_num_pages, reg->hr_blocks, spp);
reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
GFP_KERNEL);
if (!reg->hr_slot_data)
return -ENOMEM;
for(i = 0; i < reg->hr_num_pages; i++) {
page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
reg->hr_slot_data[i] = page;
last_slot = i * spp;
raw = page_address(page);
for (j = 0;
(j < spp) && ((j + last_slot) < reg->hr_blocks);
j++) {
BUG_ON((j + last_slot) >= reg->hr_blocks);
slot = &reg->hr_slots[j + last_slot];
slot->ds_raw_block =
(struct o2hb_disk_heartbeat_block *) raw;
raw += reg->hr_block_bytes;
}
}
return 0;
}
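/*
* Layout example for the mapping above, assuming 4K pages: with 512-byte
* blocks, hr_slots_per_page is 8, so a 32-slot region needs
* hr_num_pages = (32 + 8 - 1) / 8 = 4 pages, and slot 11 lives in page
* 11 / 8 = 1 at byte offset (11 % 8) * 512 within that page.
*/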
/* Read in all the slots available and populate the tracking
* structures so that we can start with a baseline idea of what's
* there. */
static int o2hb_populate_slot_data(struct o2hb_region *reg)
{
int ret, i;
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
ret = o2hb_read_slots(reg, 0, reg->hr_blocks);
if (ret)
goto out;
/* We only want to get an idea of the values initially in each
* slot, so we do no verification - o2hb_check_slot will
* actually determine if each configured slot is valid and
* whether any values have changed. */
for(i = 0; i < reg->hr_blocks; i++) {
slot = &reg->hr_slots[i];
hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;
/* Only fill the values that o2hb_check_slot uses to
* determine changing slots */
slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
}
out:
return ret;
}
/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
static ssize_t o2hb_region_dev_store(struct config_item *item,
const char *page,
size_t count)
{
struct o2hb_region *reg = to_o2hb_region(item);
struct task_struct *hb_task;
long fd;
int sectsize;
char *p = (char *)page;
struct fd f;
ssize_t ret = -EINVAL;
int live_threshold;
if (reg->hr_bdev)
goto out;
/* We can't heartbeat without having had our node number
* configured yet. */
if (o2nm_this_node() == O2NM_MAX_NODES)
goto out;
fd = simple_strtol(p, &p, 0);
if (!p || (*p && (*p != '\n')))
goto out;
if (fd < 0 || fd >= INT_MAX)
goto out;
f = fdget(fd);
if (f.file == NULL)
goto out;
if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
reg->hr_block_bytes == 0)
goto out2;
if (!S_ISBLK(f.file->f_mapping->host->i_mode))
goto out2;
reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev,
BLK_OPEN_WRITE | BLK_OPEN_READ, NULL,
NULL);
if (IS_ERR(reg->hr_bdev)) {
ret = PTR_ERR(reg->hr_bdev);
reg->hr_bdev = NULL;
goto out2;
}
sectsize = bdev_logical_block_size(reg->hr_bdev);
if (sectsize != reg->hr_block_bytes) {
mlog(ML_ERROR,
"blocksize %u incorrect for device, expected %d",
reg->hr_block_bytes, sectsize);
ret = -EINVAL;
goto out3;
}
o2hb_init_region_params(reg);
/* Generation of zero is invalid */
do {
get_random_bytes(&reg->hr_generation,
sizeof(reg->hr_generation));
} while (reg->hr_generation == 0);
ret = o2hb_map_slot_data(reg);
if (ret) {
mlog_errno(ret);
goto out3;
}
ret = o2hb_populate_slot_data(reg);
if (ret) {
mlog_errno(ret);
goto out3;
}
INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
/*
* A node is considered live after it has beat LIVE_THRESHOLD
* times. We're not steady until we've given them a chance
* _after_ our first read.
* The default threshold is the bare minimum so as to limit the delay
* during mounts. For global heartbeat, the threshold is doubled for the
* first region.
*/
live_threshold = O2HB_LIVE_THRESHOLD;
if (o2hb_global_heartbeat_active()) {
spin_lock(&o2hb_live_lock);
if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
live_threshold <<= 1;
spin_unlock(&o2hb_live_lock);
}
++live_threshold;
atomic_set(&reg->hr_steady_iterations, live_threshold);
/* unsteady_iterations is triple the steady_iterations */
atomic_set(&reg->hr_unsteady_iterations, (live_threshold * 3));
hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
reg->hr_item.ci_name);
if (IS_ERR(hb_task)) {
ret = PTR_ERR(hb_task);
mlog_errno(ret);
goto out3;
}
spin_lock(&o2hb_live_lock);
reg->hr_task = hb_task;
spin_unlock(&o2hb_live_lock);
ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(&reg->hr_steady_iterations) == 0 ||
reg->hr_node_deleted);
if (ret) {
atomic_set(&reg->hr_steady_iterations, 0);
reg->hr_aborted_start = 1;
}
if (reg->hr_aborted_start) {
ret = -EIO;
goto out3;
}
if (reg->hr_node_deleted) {
ret = -EINVAL;
goto out3;
}
/* Ok, we were woken. Make sure it wasn't by drop_item() */
spin_lock(&o2hb_live_lock);
hb_task = reg->hr_task;
if (o2hb_global_heartbeat_active())
set_bit(reg->hr_region_num, o2hb_live_region_bitmap);
spin_unlock(&o2hb_live_lock);
if (hb_task)
ret = count;
else
ret = -EIO;
if (hb_task && o2hb_global_heartbeat_active())
printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%pg)\n",
config_item_name(&reg->hr_item), reg->hr_bdev);
out3:
if (ret < 0) {
blkdev_put(reg->hr_bdev, NULL);
reg->hr_bdev = NULL;
}
out2:
fdput(f);
out:
return ret;
}
static ssize_t o2hb_region_pid_show(struct config_item *item, char *page)
{
struct o2hb_region *reg = to_o2hb_region(item);
pid_t pid = 0;
spin_lock(&o2hb_live_lock);
if (reg->hr_task)
pid = task_pid_nr(reg->hr_task);
spin_unlock(&o2hb_live_lock);
if (!pid)
return 0;
return sprintf(page, "%u\n", pid);
}
CONFIGFS_ATTR(o2hb_region_, block_bytes);
CONFIGFS_ATTR(o2hb_region_, start_block);
CONFIGFS_ATTR(o2hb_region_, blocks);
CONFIGFS_ATTR(o2hb_region_, dev);
CONFIGFS_ATTR_RO(o2hb_region_, pid);
static struct configfs_attribute *o2hb_region_attrs[] = {
&o2hb_region_attr_block_bytes,
&o2hb_region_attr_start_block,
&o2hb_region_attr_blocks,
&o2hb_region_attr_dev,
&o2hb_region_attr_pid,
NULL,
};
static struct configfs_item_operations o2hb_region_item_ops = {
.release = o2hb_region_release,
};
static const struct config_item_type o2hb_region_type = {
.ct_item_ops = &o2hb_region_item_ops,
.ct_attrs = o2hb_region_attrs,
.ct_owner = THIS_MODULE,
};
/* heartbeat set */
struct o2hb_heartbeat_group {
struct config_group hs_group;
/* some stuff? */
};
static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
{
return group ?
container_of(group, struct o2hb_heartbeat_group, hs_group)
: NULL;
}
static void o2hb_debug_region_init(struct o2hb_region *reg,
struct dentry *parent)
{
struct dentry *dir;
dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent);
reg->hr_debug_dir = dir;
o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes),
sizeof(*(reg->hr_db_livenodes)),
O2HB_DB_TYPE_REGION_LIVENODES,
sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES,
reg);
o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum),
sizeof(*(reg->hr_db_regnum)),
O2HB_DB_TYPE_REGION_NUMBER, 0, O2NM_MAX_NODES, reg);
o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, dir,
&(reg->hr_db_elapsed_time),
sizeof(*(reg->hr_db_elapsed_time)),
O2HB_DB_TYPE_REGION_ELAPSED_TIME, 0, 0, reg);
o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned),
sizeof(*(reg->hr_db_pinned)),
O2HB_DB_TYPE_REGION_PINNED, 0, 0, reg);
}
static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
const char *name)
{
struct o2hb_region *reg = NULL;
int ret;
reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
if (reg == NULL)
return ERR_PTR(-ENOMEM);
if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) {
ret = -ENAMETOOLONG;
goto free;
}
spin_lock(&o2hb_live_lock);
reg->hr_region_num = 0;
if (o2hb_global_heartbeat_active()) {
reg->hr_region_num = find_first_zero_bit(o2hb_region_bitmap,
O2NM_MAX_REGIONS);
if (reg->hr_region_num >= O2NM_MAX_REGIONS) {
spin_unlock(&o2hb_live_lock);
ret = -EFBIG;
goto free;
}
set_bit(reg->hr_region_num, o2hb_region_bitmap);
}
list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
spin_unlock(&o2hb_live_lock);
config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
/* generate the msg key the same way dlm does. For local heartbeat the
* name is also the same, so use a different initial crc value to avoid
* a message key conflict.
*/
reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS,
name, strlen(name));
INIT_LIST_HEAD(&reg->hr_handler_list);
ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key,
sizeof(struct o2hb_nego_msg),
o2hb_nego_timeout_handler,
reg, NULL, &reg->hr_handler_list);
if (ret)
goto remove_item;
ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
sizeof(struct o2hb_nego_msg),
o2hb_nego_approve_handler,
reg, NULL, &reg->hr_handler_list);
if (ret)
goto unregister_handler;
o2hb_debug_region_init(reg, o2hb_debug_dir);
return ®->hr_item;
unregister_handler:
o2net_unregister_handler_list(&reg->hr_handler_list);
remove_item:
spin_lock(&o2hb_live_lock);
list_del(&reg->hr_all_item);
if (o2hb_global_heartbeat_active())
clear_bit(reg->hr_region_num, o2hb_region_bitmap);
spin_unlock(&o2hb_live_lock);
free:
kfree(reg);
return ERR_PTR(ret);
}
static void o2hb_heartbeat_group_drop_item(struct config_group *group,
struct config_item *item)
{
struct task_struct *hb_task;
struct o2hb_region *reg = to_o2hb_region(item);
int quorum_region = 0;
/* stop the thread when the user removes the region dir */
spin_lock(&o2hb_live_lock);
hb_task = reg->hr_task;
reg->hr_task = NULL;
reg->hr_item_dropped = 1;
spin_unlock(&o2hb_live_lock);
if (hb_task)
kthread_stop(hb_task);
if (o2hb_global_heartbeat_active()) {
spin_lock(&o2hb_live_lock);
clear_bit(reg->hr_region_num, o2hb_region_bitmap);
clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
quorum_region = 1;
clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
spin_unlock(&o2hb_live_lock);
printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%pg)\n",
((atomic_read(&reg->hr_steady_iterations) == 0) ?
"stopped" : "start aborted"), config_item_name(item),
reg->hr_bdev);
}
/*
* If we're racing a dev_write(), we need to wake them. They will
* check reg->hr_task
*/
if (atomic_read(&reg->hr_steady_iterations) != 0) {
reg->hr_aborted_start = 1;
atomic_set(&reg->hr_steady_iterations, 0);
wake_up(&o2hb_steady_queue);
}
config_item_put(item);
if (!o2hb_global_heartbeat_active() || !quorum_region)
return;
/*
* If global heartbeat active and there are dependent users,
* pin all regions if quorum region count <= CUT_OFF
*/
spin_lock(&o2hb_live_lock);
if (!o2hb_dependent_users)
goto unlock;
if (bitmap_weight(o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
o2hb_region_pin(NULL);
unlock:
spin_unlock(&o2hb_live_lock);
}
static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", o2hb_dead_threshold);
}
static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
const char *page, size_t count)
{
unsigned long tmp;
char *p = (char *)page;
tmp = simple_strtoul(p, &p, 10);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
/* this will validate ranges for us. */
o2hb_dead_threshold_set((unsigned int) tmp);
return count;
}
static ssize_t o2hb_heartbeat_group_mode_show(struct config_item *item,
char *page)
{
return sprintf(page, "%s\n",
o2hb_heartbeat_mode_desc[o2hb_heartbeat_mode]);
}
static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
const char *page, size_t count)
{
unsigned int i;
int ret;
size_t len;
len = (page[count - 1] == '\n') ? count - 1 : count;
if (!len)
return -EINVAL;
for (i = 0; i < O2HB_HEARTBEAT_NUM_MODES; ++i) {
if (strncasecmp(page, o2hb_heartbeat_mode_desc[i], len))
continue;
ret = o2hb_global_heartbeat_mode_set(i);
if (!ret)
printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n",
o2hb_heartbeat_mode_desc[i]);
return count;
}
return -EINVAL;
}
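/*
* Illustrative userspace sketch (hypothetical paths, assuming the usual
* o2cb configfs layout): the attributes declared below appear as text
* files in the heartbeat group directory and feed the store handlers
* above:
*
*	echo 31 > /sys/kernel/config/cluster/mycluster/heartbeat/dead_threshold
*	echo global > /sys/kernel/config/cluster/mycluster/heartbeat/mode
*
* Mode matching is case-insensitive and ignores a trailing newline.
*/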
CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
&o2hb_heartbeat_group_attr_dead_threshold,
&o2hb_heartbeat_group_attr_mode,
NULL,
};
static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
.make_item = o2hb_heartbeat_group_make_item,
.drop_item = o2hb_heartbeat_group_drop_item,
};
static const struct config_item_type o2hb_heartbeat_group_type = {
.ct_group_ops = &o2hb_heartbeat_group_group_ops,
.ct_attrs = o2hb_heartbeat_group_attrs,
.ct_owner = THIS_MODULE,
};
/* this is just here to avoid touching group in heartbeat.h which the
* entire damn world #includes */
struct config_group *o2hb_alloc_hb_set(void)
{
struct o2hb_heartbeat_group *hs = NULL;
struct config_group *ret = NULL;
hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
if (hs == NULL)
goto out;
config_group_init_type_name(&hs->hs_group, "heartbeat",
&o2hb_heartbeat_group_type);
ret = &hs->hs_group;
out:
if (ret == NULL)
kfree(hs);
return ret;
}
void o2hb_free_hb_set(struct config_group *group)
{
struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
kfree(hs);
}
/* hb callback registration and issuing */
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
if (type == O2HB_NUM_CB)
return ERR_PTR(-EINVAL);
return &o2hb_callbacks[type];
}
void o2hb_setup_callback(struct o2hb_callback_func *hc,
enum o2hb_callback_type type,
o2hb_cb_func *func,
void *data,
int priority)
{
INIT_LIST_HEAD(&hc->hc_item);
hc->hc_func = func;
hc->hc_data = data;
hc->hc_priority = priority;
hc->hc_type = type;
hc->hc_magic = O2HB_CB_MAGIC;
}
EXPORT_SYMBOL_GPL(o2hb_setup_callback);
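/*
* Illustrative in-kernel usage sketch (hypothetical caller, not part of
* this file), assuming the o2hb_cb_func signature from heartbeat.h:
*
*	static void my_node_down(struct o2nm_node *node, int idx, void *data)
*	{
*		printk(KERN_INFO "o2hb example: node %d went down\n", idx);
*	}
*
*	static struct o2hb_callback_func my_hc;
*
*	o2hb_setup_callback(&my_hc, O2HB_NODE_DOWN_CB, my_node_down, NULL, 0);
*	ret = o2hb_register_callback(region_uuid, &my_hc);
*	...
*	o2hb_unregister_callback(region_uuid, &my_hc);
*/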
/*
* In local heartbeat mode, region_uuid passed matches the dlm domain name.
* In global heartbeat mode, region_uuid passed is NULL.
*
* In local, we only pin the matching region. In global we pin all the active
* regions.
*/
static int o2hb_region_pin(const char *region_uuid)
{
int ret = 0, found = 0;
struct o2hb_region *reg;
char *uuid;
assert_spin_locked(&o2hb_live_lock);
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
if (reg->hr_item_dropped)
continue;
uuid = config_item_name(&reg->hr_item);
/* local heartbeat */
if (region_uuid) {
if (strcmp(region_uuid, uuid))
continue;
found = 1;
}
if (reg->hr_item_pinned || reg->hr_item_dropped)
goto skip_pin;
/* Ignore ENOENT only for local hb (userdlm domain) */
ret = o2nm_depend_item(&reg->hr_item);
if (!ret) {
mlog(ML_CLUSTER, "Pin region %s\n", uuid);
reg->hr_item_pinned = 1;
} else {
if (ret == -ENOENT && found)
ret = 0;
else {
mlog(ML_ERROR, "Pin region %s fails with %d\n",
uuid, ret);
break;
}
}
skip_pin:
if (found)
break;
}
return ret;
}
/*
* In local heartbeat mode, region_uuid passed matches the dlm domain name.
* In global heartbeat mode, region_uuid passed is NULL.
*
* In local, we only unpin the matching region. In global we unpin all the
* active regions.
*/
static void o2hb_region_unpin(const char *region_uuid)
{
struct o2hb_region *reg;
char *uuid;
int found = 0;
assert_spin_locked(&o2hb_live_lock);
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
if (reg->hr_item_dropped)
continue;
uuid = config_item_name(&reg->hr_item);
if (region_uuid) {
if (strcmp(region_uuid, uuid))
continue;
found = 1;
}
if (reg->hr_item_pinned) {
mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
o2nm_undepend_item(&reg->hr_item);
reg->hr_item_pinned = 0;
}
if (found)
break;
}
}
static int o2hb_region_inc_user(const char *region_uuid)
{
int ret = 0;
spin_lock(&o2hb_live_lock);
/* local heartbeat */
if (!o2hb_global_heartbeat_active()) {
ret = o2hb_region_pin(region_uuid);
goto unlock;
}
/*
* if global heartbeat active and this is the first dependent user,
* pin all regions if quorum region count <= CUT_OFF
*/
o2hb_dependent_users++;
if (o2hb_dependent_users > 1)
goto unlock;
if (bitmap_weight(o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
ret = o2hb_region_pin(NULL);
unlock:
spin_unlock(&o2hb_live_lock);
return ret;
}
static void o2hb_region_dec_user(const char *region_uuid)
{
spin_lock(&o2hb_live_lock);
/* local heartbeat */
if (!o2hb_global_heartbeat_active()) {
o2hb_region_unpin(region_uuid);
goto unlock;
}
/*
* if global heartbeat active and there are no dependent users,
* unpin all quorum regions
*/
o2hb_dependent_users--;
if (!o2hb_dependent_users)
o2hb_region_unpin(NULL);
unlock:
spin_unlock(&o2hb_live_lock);
}
int o2hb_register_callback(const char *region_uuid,
struct o2hb_callback_func *hc)
{
struct o2hb_callback_func *f;
struct o2hb_callback *hbcall;
int ret;
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
BUG_ON(!list_empty(&hc->hc_item));
hbcall = hbcall_from_type(hc->hc_type);
if (IS_ERR(hbcall)) {
ret = PTR_ERR(hbcall);
goto out;
}
if (region_uuid) {
ret = o2hb_region_inc_user(region_uuid);
if (ret) {
mlog_errno(ret);
goto out;
}
}
down_write(&o2hb_callback_sem);
list_for_each_entry(f, &hbcall->list, hc_item) {
if (hc->hc_priority < f->hc_priority) {
list_add_tail(&hc->hc_item, &f->hc_item);
break;
}
}
if (list_empty(&hc->hc_item))
list_add_tail(&hc->hc_item, &hbcall->list);
up_write(&o2hb_callback_sem);
ret = 0;
out:
mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
ret, __builtin_return_address(0), hc);
return ret;
}
EXPORT_SYMBOL_GPL(o2hb_register_callback);
void o2hb_unregister_callback(const char *region_uuid,
struct o2hb_callback_func *hc)
{
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
__builtin_return_address(0), hc);
/* XXX Can this happen _with_ a region reference? */
if (list_empty(&hc->hc_item))
return;
if (region_uuid)
o2hb_region_dec_user(region_uuid);
down_write(&o2hb_callback_sem);
list_del_init(&hc->hc_item);
up_write(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
int o2hb_check_node_heartbeating_no_sem(u8 node_num)
{
unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
spin_lock(&o2hb_live_lock);
o2hb_fill_node_map_from_callback(testing_map, O2NM_MAX_NODES);
spin_unlock(&o2hb_live_lock);
if (!test_bit(node_num, testing_map)) {
mlog(ML_HEARTBEAT,
"node (%u) does not have heartbeating enabled.\n",
node_num);
return 0;
}
return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_no_sem);
int o2hb_check_node_heartbeating_from_callback(u8 node_num)
{
unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
o2hb_fill_node_map_from_callback(testing_map, O2NM_MAX_NODES);
if (!test_bit(node_num, testing_map)) {
mlog(ML_HEARTBEAT,
"node (%u) does not have heartbeating enabled.\n",
node_num);
return 0;
}
return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);
/*
* this is just a hack until we get the plumbing which flips file systems
* read only and drops the hb ref instead of killing the node dead.
*/
void o2hb_stop_all_regions(void)
{
struct o2hb_region *reg;
mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");
spin_lock(&o2hb_live_lock);
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
reg->hr_unclean_stop = 1;
spin_unlock(&o2hb_live_lock);
}
EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);
int o2hb_get_all_regions(char *region_uuids, u8 max_regions)
{
struct o2hb_region *reg;
int numregs = 0;
char *p;
spin_lock(&o2hb_live_lock);
p = region_uuids;
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
if (reg->hr_item_dropped)
continue;
mlog(0, "Region: %s\n", config_item_name(®->hr_item));
if (numregs < max_regions) {
memcpy(p, config_item_name(®->hr_item),
O2HB_MAX_REGION_NAME_LEN);
p += O2HB_MAX_REGION_NAME_LEN;
}
numregs++;
}
spin_unlock(&o2hb_live_lock);
return numregs;
}
EXPORT_SYMBOL_GPL(o2hb_get_all_regions);
int o2hb_global_heartbeat_active(void)
{
return (o2hb_heartbeat_mode == O2HB_HEARTBEAT_GLOBAL);
}
EXPORT_SYMBOL(o2hb_global_heartbeat_active);
| linux-master | fs/ocfs2/cluster/heartbeat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmfs.c
*
* Code which implements the kernel side of a minimal userspace
* interface to our DLM. This file handles the virtual file system
* used for communication with userspace. Credit should go to ramfs,
* which was a template for the fs side of this module.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*/
/* Simple VFS hooks based on: */
/*
* Resizable simple ram filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include "../stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "../cluster/masklog.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
static const struct inode_operations dlmfs_dir_inode_operations;
static const struct inode_operations dlmfs_root_inode_operations;
static const struct inode_operations dlmfs_file_inode_operations;
static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
/*
* These are the ABI capabilities of dlmfs.
*
* Over time, dlmfs has added some features that were not part of the
* initial ABI. Unfortunately, some of these features are not detectable
* via standard usage. For example, Linux's default poll always returns
* EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs
* added poll support. Instead, we provide this list of new capabilities.
*
* Capabilities is a read-only attribute. We do it as a module parameter
* so we can discover it whether dlmfs is built in, loaded, or even not
* loaded.
*
* The ABI features are local to this machine's dlmfs mount. This is
* distinct from the locking protocol, which is concerned with inter-node
* interaction.
*
* Capabilities:
* - bast : EPOLLIN against the file descriptor of a held lock
* signifies a bast fired on the lock.
*/
#define DLMFS_CAPABILITIES "bast stackglue"
static int param_set_dlmfs_capabilities(const char *val,
const struct kernel_param *kp)
{
printk(KERN_ERR "%s: readonly parameter\n", kp->name);
return -EINVAL;
}
static int param_get_dlmfs_capabilities(char *buffer,
const struct kernel_param *kp)
{
return strlcpy(buffer, DLMFS_CAPABILITIES,
strlen(DLMFS_CAPABILITIES) + 1);
}
module_param_call(capabilities, param_set_dlmfs_capabilities,
param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
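/*
* Illustrative userspace sketch (assuming the module is named
* ocfs2_dlmfs): the capability list can be read whether or not the
* module is currently loaded:
*
*	cat /sys/module/ocfs2_dlmfs/parameters/capabilities	# loaded/built-in
*	modinfo -F parm ocfs2_dlmfs				# not loaded
*
* Writes to the parameter fail with EINVAL, per the setter above.
*/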
/*
* decodes a set of open flags into a valid lock level and a set of flags.
* returns < 0 if we have invalid flags
* flags which mean something to us:
* O_RDONLY -> PRMODE level
* O_WRONLY -> EXMODE level
*
* O_NONBLOCK -> NOQUEUE
*/
static int dlmfs_decode_open_flags(int open_flags,
int *level,
int *flags)
{
if (open_flags & (O_WRONLY|O_RDWR))
*level = DLM_LOCK_EX;
else
*level = DLM_LOCK_PR;
*flags = 0;
if (open_flags & O_NONBLOCK)
*flags |= DLM_LKF_NOQUEUE;
return 0;
}
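/*
* Illustrative userspace sketch of the mapping above (hypothetical lock
* file, assuming a dlmfs mount at /dlm):
*
*	fd = open("/dlm/mydomain/mylock", O_RDONLY);	-> PR lock
*	fd = open("/dlm/mydomain/mylock", O_WRONLY);	-> EX lock
*	fd = open("/dlm/mydomain/mylock", O_RDWR | O_NONBLOCK);
*		-> EX lock with trylock semantics; per dlmfs_file_open()
*		below, open() fails with ETXTBSY if the lock cannot be
*		granted immediately.
*/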
static int dlmfs_file_open(struct inode *inode,
struct file *file)
{
int status, level, flags;
struct dlmfs_filp_private *fp = NULL;
struct dlmfs_inode_private *ip;
if (S_ISDIR(inode->i_mode))
BUG();
mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
file->f_flags);
status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
if (status < 0)
goto bail;
/* We don't want to honor O_APPEND at read/write time as it
* doesn't make sense for LVB writes. */
file->f_flags &= ~O_APPEND;
fp = kmalloc(sizeof(*fp), GFP_NOFS);
if (!fp) {
status = -ENOMEM;
goto bail;
}
fp->fp_lock_level = level;
ip = DLMFS_I(inode);
status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
if (status < 0) {
/* this is a strange error to return here but I want
* userspace to be able to distinguish a valid lock
* request from one that simply couldn't be
* granted. */
if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
status = -ETXTBSY;
kfree(fp);
goto bail;
}
file->private_data = fp;
bail:
return status;
}
static int dlmfs_file_release(struct inode *inode,
struct file *file)
{
int level;
struct dlmfs_inode_private *ip = DLMFS_I(inode);
struct dlmfs_filp_private *fp = file->private_data;
if (S_ISDIR(inode->i_mode))
BUG();
mlog(0, "close called on inode %lu\n", inode->i_ino);
if (fp) {
level = fp->fp_lock_level;
if (level != DLM_LOCK_IV)
user_dlm_cluster_unlock(&ip->ip_lockres, level);
kfree(fp);
file->private_data = NULL;
}
return 0;
}
/*
* We do ->setattr() just to override size changes. Our size is the size
* of the LVB and nothing else.
*/
static int dlmfs_file_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
int error;
struct inode *inode = d_inode(dentry);
attr->ia_valid &= ~ATTR_SIZE;
error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
return error;
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
{
__poll_t event = 0;
struct inode *inode = file_inode(file);
struct dlmfs_inode_private *ip = DLMFS_I(inode);
poll_wait(file, &ip->ip_lockres.l_event, wait);
spin_lock(&ip->ip_lockres.l_lock);
if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
event = EPOLLIN | EPOLLRDNORM;
spin_unlock(&ip->ip_lockres.l_lock);
return event;
}
static ssize_t dlmfs_file_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
char lvb[DLM_LVB_LEN];
if (!user_dlm_read_lvb(file_inode(file), lvb))
return 0;
return simple_read_from_buffer(buf, count, ppos, lvb, sizeof(lvb));
}
static ssize_t dlmfs_file_write(struct file *filp,
const char __user *buf,
size_t count,
loff_t *ppos)
{
char lvb_buf[DLM_LVB_LEN];
int bytes_left;
struct inode *inode = file_inode(filp);
mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
inode->i_ino, count, *ppos);
if (*ppos >= DLM_LVB_LEN)
return -ENOSPC;
/* don't write past the lvb */
if (count > DLM_LVB_LEN - *ppos)
count = DLM_LVB_LEN - *ppos;
if (!count)
return 0;
bytes_left = copy_from_user(lvb_buf, buf, count);
count -= bytes_left;
if (count)
user_dlm_write_lvb(inode, lvb_buf, count);
*ppos = *ppos + count;
mlog(0, "wrote %zu bytes\n", count);
return count;
}
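/*
* Illustrative userspace sketch (hypothetical file names): with the lock
* held, read() and write() transfer the DLM_LVB_LEN-byte lock value
* block. A writer needs EX (O_WRONLY/O_RDWR); a reader needs at least PR:
*
*	char lvb[64];				// DLM_LVB_LEN, assumed 64
*	int fd = open("/dlm/mydomain/mylock", O_RDWR);
*	pread(fd, lvb, sizeof(lvb), 0);		// snapshot the current LVB
*	pwrite(fd, lvb, sizeof(lvb), 0);	// publish a new LVB
*	close(fd);				// drops the cluster lock
*
* Writes starting at or past DLM_LVB_LEN fail with ENOSPC, per the bounds
* check above.
*/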
static void dlmfs_init_once(void *foo)
{
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
ip->ip_conn = NULL;
ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode);
}
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
struct dlmfs_inode_private *ip;
ip = alloc_inode_sb(sb, dlmfs_inode_cache, GFP_NOFS);
if (!ip)
return NULL;
return &ip->ip_vfs_inode;
}
static void dlmfs_free_inode(struct inode *inode)
{
kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
static void dlmfs_evict_inode(struct inode *inode)
{
int status;
struct dlmfs_inode_private *ip;
struct user_lock_res *lockres;
int teardown;
clear_inode(inode);
mlog(0, "inode %lu\n", inode->i_ino);
ip = DLMFS_I(inode);
lockres = &ip->ip_lockres;
if (S_ISREG(inode->i_mode)) {
spin_lock(&lockres->l_lock);
teardown = !!(lockres->l_flags & USER_LOCK_IN_TEARDOWN);
spin_unlock(&lockres->l_lock);
if (!teardown) {
status = user_dlm_destroy_lock(lockres);
if (status < 0)
mlog_errno(status);
}
iput(ip->ip_parent);
goto clear_fields;
}
mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, let's unregister the
* dlm context now. */
if (ip->ip_conn)
user_dlm_unregister(ip->ip_conn);
clear_fields:
ip->ip_parent = NULL;
ip->ip_conn = NULL;
}
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
umode_t mode = S_IFDIR | 0755;
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inc_nlink(inode);
inode->i_fop = &simple_dir_operations;
inode->i_op = &dlmfs_root_inode_operations;
}
return inode;
}
static struct inode *dlmfs_get_inode(struct inode *parent,
struct dentry *dentry,
umode_t mode)
{
struct super_block *sb = parent->i_sb;
struct inode * inode = new_inode(sb);
struct dlmfs_inode_private *ip;
if (!inode)
return NULL;
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
ip = DLMFS_I(inode);
ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) {
default:
/* for now we don't support anything other than
* directories and regular files. */
BUG();
break;
case S_IFREG:
inode->i_op = &dlmfs_file_inode_operations;
inode->i_fop = &dlmfs_file_operations;
i_size_write(inode, DLM_LVB_LEN);
user_dlm_lock_res_init(&ip->ip_lockres, dentry);
/* released at clear_inode time, this ensures that we
* get to drop the dlm reference on each lock *before*
* we call the unregister code for releasing parent
* directories. */
ip->ip_parent = igrab(parent);
BUG_ON(!ip->ip_parent);
break;
case S_IFDIR:
inode->i_op = &dlmfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink ==
* 2 (for "." entry) */
inc_nlink(inode);
break;
}
return inode;
}
/*
* File creation. Allocate an inode, and we're done..
*/
/* SMP-safe */
static int dlmfs_mkdir(struct mnt_idmap * idmap,
struct inode * dir,
struct dentry * dentry,
umode_t mode)
{
int status;
struct inode *inode = NULL;
const struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
struct ocfs2_cluster_connection *conn;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
}
inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
if (!inode) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ip = DLMFS_I(inode);
conn = user_dlm_register(domain);
if (IS_ERR(conn)) {
status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name);
goto bail;
}
ip->ip_conn = conn;
inc_nlink(dir);
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
status = 0;
bail:
if (status < 0)
iput(inode);
return status;
}
static int dlmfs_create(struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool excl)
{
int status = 0;
struct inode *inode;
const struct qstr *name = &dentry->d_name;
mlog(0, "create %.*s\n", name->len, name->name);
/* verify name is valid and doesn't contain any dlm reserved
* characters */
if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
name->name[0] == '$') {
status = -EINVAL;
mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
name->name);
goto bail;
}
inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
if (!inode) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
bail:
return status;
}
static int dlmfs_unlink(struct inode *dir,
struct dentry *dentry)
{
int status;
struct inode *inode = d_inode(dentry);
mlog(0, "unlink inode %lu\n", inode->i_ino);
/* if there are no current holders, or none that are waiting
* to acquire a lock, this basically destroys our lockres. */
status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
if (status < 0) {
mlog(ML_ERROR, "unlink %pd, error %d from destroy\n",
dentry, status);
goto bail;
}
status = simple_unlink(dir, dentry);
bail:
return status;
}
static int dlmfs_fill_super(struct super_block * sb,
void * data,
int silent)
{
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = DLMFS_MAGIC;
sb->s_op = &dlmfs_ops;
sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
if (!sb->s_root)
return -ENOMEM;
return 0;
}
static const struct file_operations dlmfs_file_operations = {
.open = dlmfs_file_open,
.release = dlmfs_file_release,
.poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
.llseek = default_llseek,
};
static const struct inode_operations dlmfs_dir_inode_operations = {
.create = dlmfs_create,
.lookup = simple_lookup,
.unlink = dlmfs_unlink,
};
/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
.lookup = simple_lookup,
.mkdir = dlmfs_mkdir,
.rmdir = simple_rmdir,
};
static const struct super_operations dlmfs_ops = {
.statfs = simple_statfs,
.alloc_inode = dlmfs_alloc_inode,
.free_inode = dlmfs_free_inode,
.evict_inode = dlmfs_evict_inode,
.drop_inode = generic_delete_inode,
};
static const struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
.setattr = dlmfs_file_setattr,
};
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}
static struct file_system_type dlmfs_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2_dlmfs",
.mount = dlmfs_mount,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("ocfs2_dlmfs");
static int __init init_dlmfs_fs(void)
{
int status;
int cleanup_inode = 0, cleanup_worker = 0;
dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
dlmfs_init_once);
if (!dlmfs_inode_cache) {
status = -ENOMEM;
goto bail;
}
cleanup_inode = 1;
user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
if (!user_dlm_worker) {
status = -ENOMEM;
goto bail;
}
cleanup_worker = 1;
user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type);
bail:
if (status) {
if (cleanup_inode)
kmem_cache_destroy(dlmfs_inode_cache);
if (cleanup_worker)
destroy_workqueue(user_dlm_worker);
} else
printk("OCFS2 User DLM kernel interface loaded\n");
return status;
}
static void __exit exit_dlmfs_fs(void)
{
unregister_filesystem(&dlmfs_fs_type);
destroy_workqueue(user_dlm_worker);
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(dlmfs_inode_cache);
}
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 DLM-Filesystem");
module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)
| linux-master | fs/ocfs2/dlmfs/dlmfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* userdlm.c
*
* Code which implements the kernel side of a minimal userspace
* interface to our DLM.
*
* Many of the functions here are pared down versions of dlmglue.c
* functions.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*/
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/crc32.h>
#include "../ocfs2_lockingver.h"
#include "../stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "../cluster/masklog.h"
static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
return container_of(lksb, struct user_lock_res, l_lksb);
}
static inline int user_check_wait_flag(struct user_lock_res *lockres,
int flag)
{
int ret;
spin_lock(&lockres->l_lock);
ret = lockres->l_flags & flag;
spin_unlock(&lockres->l_lock);
return ret;
}
static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
{
wait_event(lockres->l_event,
!user_check_wait_flag(lockres, USER_LOCK_BUSY));
}
static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
wait_event(lockres->l_event,
!user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
/* I heart container_of... */
static inline struct ocfs2_cluster_connection *
cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
struct dlmfs_inode_private *ip;
ip = container_of(lockres,
struct dlmfs_inode_private,
ip_lockres);
return ip->ip_conn;
}
static struct inode *
user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
{
struct dlmfs_inode_private *ip;
ip = container_of(lockres,
struct dlmfs_inode_private,
ip_lockres);
return &ip->ip_vfs_inode;
}
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
spin_lock(&lockres->l_lock);
lockres->l_flags &= ~USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
}
#define user_log_dlm_error(_func, _stat, _lockres) do { \
mlog(ML_ERROR, "Dlm error %d while calling %s on " \
"resource %.*s\n", _stat, _func, \
_lockres->l_namelen, _lockres->l_name); \
} while (0)
/* WARNING: This function lives in a world where the only three lock
* levels are EX, PR, and NL. It *will* have to be adjusted when more
* lock types are added. */
static inline int user_highest_compat_lock_level(int level)
{
int new_level = DLM_LOCK_EX;
if (level == DLM_LOCK_EX)
new_level = DLM_LOCK_NL;
else if (level == DLM_LOCK_PR)
new_level = DLM_LOCK_PR;
return new_level;
}
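/*
* Worked example for the helper above: if another node blocks us with
* DLM_LOCK_EX we can keep at most DLM_LOCK_NL, while a blocking
* DLM_LOCK_PR still allows us to hold DLM_LOCK_PR, since shared readers
* are compatible with each other.
*/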
static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
int status;
mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
lockres->l_namelen, lockres->l_name, lockres->l_level,
lockres->l_requested);
spin_lock(&lockres->l_lock);
status = ocfs2_dlm_lock_status(&lockres->l_lksb);
if (status) {
mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
status, lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
return;
}
mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
"Lockres %.*s, requested ivmode. flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
/* we're downconverting. */
if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <=
user_highest_compat_lock_level(lockres->l_blocking)) {
lockres->l_blocking = DLM_LOCK_NL;
lockres->l_flags &= ~USER_LOCK_BLOCKED;
}
}
lockres->l_level = lockres->l_requested;
lockres->l_requested = DLM_LOCK_IV;
lockres->l_flags |= USER_LOCK_ATTACHED;
lockres->l_flags &= ~USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
wake_up(&lockres->l_event);
}
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
struct inode *inode;
inode = user_dlm_inode_from_user_lockres(lockres);
if (!igrab(inode))
BUG();
}
static void user_dlm_unblock_lock(struct work_struct *work);
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
user_dlm_grab_inode_ref(lockres);
INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
queue_work(user_dlm_worker, &lockres->l_work);
lockres->l_flags |= USER_LOCK_QUEUED;
}
}
static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
{
int queue = 0;
if (!(lockres->l_flags & USER_LOCK_BLOCKED))
return;
switch (lockres->l_blocking) {
case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
queue = 1;
break;
case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
queue = 1;
break;
default:
BUG();
}
if (queue)
__user_dlm_queue_lockres(lockres);
}
static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
lockres->l_namelen, lockres->l_name, level, lockres->l_level);
spin_lock(&lockres->l_lock);
lockres->l_flags |= USER_LOCK_BLOCKED;
if (level > lockres->l_blocking)
lockres->l_blocking = level;
__user_dlm_queue_lockres(lockres);
spin_unlock(&lockres->l_lock);
wake_up(&lockres->l_event);
}
static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
if (status)
mlog(ML_ERROR, "dlm returns status %d\n", status);
spin_lock(&lockres->l_lock);
/* The teardown flag gets set early during the unlock process,
* so test the cancel flag to make sure that this ast isn't
* for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
lockres->l_level = DLM_LOCK_IV;
} else if (status == DLM_CANCELGRANT) {
/* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the
* ast should've done this already. */
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
goto out_noclear;
} else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */
lockres->l_requested = DLM_LOCK_IV; /* cancel an
* upconvert
* request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
/* we want the unblock thread to look at it again
* now. */
if (lockres->l_flags & USER_LOCK_BLOCKED)
__user_dlm_queue_lockres(lockres);
}
lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
spin_unlock(&lockres->l_lock);
wake_up(&lockres->l_event);
}
/*
* This is the userdlmfs locking protocol version.
*
* See fs/ocfs2/dlmglue.c for more details on locking versions.
*/
static struct ocfs2_locking_protocol user_dlm_lproto = {
.lp_max_version = {
.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
},
.lp_lock_ast = user_ast,
.lp_blocking_ast = user_bast,
.lp_unlock_ast = user_unlock_ast,
};
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
struct inode *inode;
inode = user_dlm_inode_from_user_lockres(lockres);
iput(inode);
}
static void user_dlm_unblock_lock(struct work_struct *work)
{
int new_level, status;
struct user_lock_res *lockres =
container_of(work, struct user_lock_res, l_work);
struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
"Lockres %.*s, flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
* set, we want user_ast to clear it. */
lockres->l_flags &= ~USER_LOCK_QUEUED;
/* It's valid to get here and no longer be blocked - if we get
* several basts in a row, we might be queued by the first
* one, the unblock thread might run and clear the queued
* flag, and finally we might get another bast which re-queues
* us before our ast for the downconvert is called. */
if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_BUSY) {
if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
lockres->l_flags |= USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock);
status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
DLM_LKF_CANCEL);
if (status)
user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto drop_ref;
}
/* If there are still incompat holders, we can exit safely
* without worrying about re-queueing this lock as that will
* happen on the last call to user_cluster_unlock. */
if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
spin_unlock(&lockres->l_lock);
mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
lockres->l_namelen, lockres->l_name,
lockres->l_ex_holders, lockres->l_ro_holders);
goto drop_ref;
}
if ((lockres->l_blocking == DLM_LOCK_PR)
&& lockres->l_ex_holders) {
spin_unlock(&lockres->l_lock);
mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
lockres->l_namelen, lockres->l_name,
lockres->l_ex_holders);
goto drop_ref;
}
/* yay, we can downconvert now. */
new_level = user_highest_compat_lock_level(lockres->l_blocking);
lockres->l_requested = new_level;
lockres->l_flags |= USER_LOCK_BUSY;
mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
spin_unlock(&lockres->l_lock);
/* need lock downconvert request now... */
status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
DLM_LKF_CONVERT|DLM_LKF_VALBLK,
lockres->l_name,
lockres->l_namelen);
if (status) {
user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
user_recover_from_dlm_error(lockres);
}
drop_ref:
user_dlm_drop_inode_ref(lockres);
}
static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
BUG();
}
}
/* predict what lock level we'll be dropping down to on behalf
* of another node, and return true if the currently wanted
* level will be compatible with it. */
static inline int
user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
int wanted)
{
BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
}
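/*
* Worked example for the helper above: with lock levels ordered
* NL < PR < EX, a blocking PR request gives a highest compatible level
* of PR, so a wanted PR (PR <= PR) may proceed while a wanted EX
* (EX > PR) must wait for the downconvert.
*/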
int user_dlm_cluster_lock(struct user_lock_res *lockres,
int level,
int lkm_flags)
{
int status, local_flags;
struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
if (level != DLM_LOCK_EX &&
level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
status = -EINVAL;
goto bail;
}
mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
lockres->l_namelen, lockres->l_name, level, lkm_flags);
again:
if (signal_pending(current)) {
status = -ERESTARTSYS;
goto bail;
}
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
spin_unlock(&lockres->l_lock);
status = -EAGAIN;
goto bail;
}
/* We only compare against the currently granted level
* here. If the lock is blocked waiting on a downconvert,
* we'll get caught below. */
if ((lockres->l_flags & USER_LOCK_BUSY) &&
(level > lockres->l_level)) {
/* is someone sitting in dlm_lock? If so, wait on
* them. */
spin_unlock(&lockres->l_lock);
user_wait_on_busy_lock(lockres);
goto again;
}
if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
(!user_may_continue_on_blocked_lock(lockres, level))) {
/* the lock is currently blocked on behalf of
* another node */
spin_unlock(&lockres->l_lock);
user_wait_on_blocked_lock(lockres);
goto again;
}
if (level > lockres->l_level) {
local_flags = lkm_flags | DLM_LKF_VALBLK;
if (lockres->l_level != DLM_LOCK_IV)
local_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
/* call dlm_lock to upgrade lock now */
status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
local_flags, lockres->l_name,
lockres->l_namelen);
if (status) {
if ((lkm_flags & DLM_LKF_NOQUEUE) &&
(status != -EAGAIN))
user_log_dlm_error("ocfs2_dlm_lock",
status, lockres);
user_recover_from_dlm_error(lockres);
goto bail;
}
user_wait_on_busy_lock(lockres);
goto again;
}
user_dlm_inc_holders(lockres, level);
spin_unlock(&lockres->l_lock);
status = 0;
bail:
return status;
}
static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
default:
BUG();
}
}
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
int level)
{
if (level != DLM_LOCK_EX &&
level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
return;
}
spin_lock(&lockres->l_lock);
user_dlm_dec_holders(lockres, level);
__user_dlm_cond_queue_lockres(lockres);
spin_unlock(&lockres->l_lock);
}
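/*
* Illustrative in-kernel pairing (hypothetical caller, not part of this
* file): take and drop a shared hold around a read of protected state.
*
*	ret = user_dlm_cluster_lock(lockres, DLM_LOCK_PR, 0);
*	if (!ret) {
*		// ... read state protected by the lock ...
*		user_dlm_cluster_unlock(lockres, DLM_LOCK_PR);
*	}
*/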
void user_dlm_write_lvb(struct inode *inode,
const char *val,
unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
char *lvb;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
BUG_ON(lockres->l_level < DLM_LOCK_EX);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(lvb, val, len);
spin_unlock(&lockres->l_lock);
}
bool user_dlm_read_lvb(struct inode *inode, char *val)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
char *lvb;
bool ret = true;
spin_lock(&lockres->l_lock);
BUG_ON(lockres->l_level < DLM_LOCK_PR);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(val, lvb, DLM_LVB_LEN);
} else
ret = false;
spin_unlock(&lockres->l_lock);
return ret;
}
void user_dlm_lock_res_init(struct user_lock_res *lockres,
struct dentry *dentry)
{
memset(lockres, 0, sizeof(*lockres));
spin_lock_init(&lockres->l_lock);
init_waitqueue_head(&lockres->l_event);
lockres->l_level = DLM_LOCK_IV;
lockres->l_requested = DLM_LOCK_IV;
lockres->l_blocking = DLM_LOCK_IV;
/* should have been checked before getting here. */
BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
memcpy(lockres->l_name,
dentry->d_name.name,
dentry->d_name.len);
lockres->l_namelen = dentry->d_name.len;
}
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
int status = -EBUSY;
struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
spin_unlock(&lockres->l_lock);
goto bail;
}
lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
while (lockres->l_flags & USER_LOCK_BUSY) {
spin_unlock(&lockres->l_lock);
user_wait_on_busy_lock(lockres);
spin_lock(&lockres->l_lock);
}
if (lockres->l_ro_holders || lockres->l_ex_holders) {
lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
spin_unlock(&lockres->l_lock);
goto bail;
}
status = 0;
if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
/*
* lock is never requested, leave USER_LOCK_IN_TEARDOWN set
* to avoid new lock request coming in.
*/
spin_unlock(&lockres->l_lock);
goto bail;
}
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
if (status) {
spin_lock(&lockres->l_lock);
lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
lockres->l_flags &= ~USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto bail;
}
user_wait_on_busy_lock(lockres);
status = 0;
bail:
return status;
}
static void user_dlm_recovery_handler_noop(int node_num,
void *recovery_data)
{
/* We ignore recovery events */
return;
}
void user_dlm_set_locking_protocol(void)
{
ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name)
{
int rc;
struct ocfs2_cluster_connection *conn;
rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
&user_dlm_lproto,
user_dlm_recovery_handler_noop,
NULL, &conn);
if (rc)
mlog_errno(rc);
return rc ? ERR_PTR(rc) : conn;
}
void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
ocfs2_cluster_disconnect(conn, 0);
}
| linux-master | fs/ocfs2/dlmfs/userdlm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmconvert.c
*
* underlying calls for lock conversion
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmconvert.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
/* NOTE: __dlmconvert_master is the only function in here that
* needs a spinlock held on entry (res->spinlock) and it is the
* only one that holds a lock on exit (res->spinlock).
* All other functions in here need no locks and drop all of
* the locks that they acquire. */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags,
int type, int *call_ast,
int *kick_thread);
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags, int type);
/*
* this is only called directly by dlmlock(), and only when the
* local node is the owner of the lockres
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: see __dlmconvert_master
*/
enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags, int type)
{
int call_ast = 0, kick_thread = 0;
enum dlm_status status;
spin_lock(&res->spinlock);
/* we are not in a network handler, this is fine */
__dlm_wait_on_lockres(res);
__dlm_lockres_reserve_ast(res);
res->state |= DLM_LOCK_RES_IN_PROGRESS;
status = __dlmconvert_master(dlm, res, lock, flags, type,
&call_ast, &kick_thread);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
dlm_error(status);
/* either queue the ast or release it */
if (call_ast)
dlm_queue_ast(dlm, lock);
else
dlm_lockres_release_ast(dlm, res);
if (kick_thread)
dlm_kick_thread(dlm, res);
return status;
}
/* performs lock conversion at the lockres master site
* locking:
* caller needs: res->spinlock
* taken: takes and drops lock->spinlock
* held on exit: res->spinlock
* returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
* call_ast: whether ast should be called for this lock
* kick_thread: whether dlm_kick_thread should be called
*/
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags,
int type, int *call_ast,
int *kick_thread)
{
enum dlm_status status = DLM_NORMAL;
struct dlm_lock *tmplock=NULL;
assert_spin_locked(&res->spinlock);
mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
lock->ml.type, lock->ml.convert_type, type);
spin_lock(&lock->spinlock);
/* already converting? */
if (lock->ml.convert_type != LKM_IVMODE) {
mlog(ML_ERROR, "attempted to convert a lock with a lock "
"conversion pending\n");
status = DLM_DENIED;
goto unlock_exit;
}
/* must be on grant queue to convert */
if (!dlm_lock_on_list(&res->granted, lock)) {
mlog(ML_ERROR, "attempted to convert a lock not on grant "
"queue\n");
status = DLM_DENIED;
goto unlock_exit;
}
if (flags & LKM_VALBLK) {
switch (lock->ml.type) {
case LKM_EXMODE:
/* EX + LKM_VALBLK + convert == set lvb */
mlog(0, "will set lvb: converting %s->%s\n",
dlm_lock_mode_name(lock->ml.type),
dlm_lock_mode_name(type));
lock->lksb->flags |= DLM_LKSB_PUT_LVB;
break;
case LKM_PRMODE:
case LKM_NLMODE:
/* refetch if new level is not NL */
if (type > LKM_NLMODE) {
mlog(0, "will fetch new value into "
"lvb: converting %s->%s\n",
dlm_lock_mode_name(lock->ml.type),
dlm_lock_mode_name(type));
lock->lksb->flags |= DLM_LKSB_GET_LVB;
} else {
mlog(0, "will NOT fetch new value "
"into lvb: converting %s->%s\n",
dlm_lock_mode_name(lock->ml.type),
dlm_lock_mode_name(type));
flags &= ~(LKM_VALBLK);
}
break;
}
}
/* in-place downconvert? */
if (type <= lock->ml.type)
goto grant;
/* upconvert from here on */
status = DLM_NORMAL;
list_for_each_entry(tmplock, &res->granted, list) {
if (tmplock == lock)
continue;
if (!dlm_lock_compatible(tmplock->ml.type, type))
goto switch_queues;
}
list_for_each_entry(tmplock, &res->converting, list) {
if (!dlm_lock_compatible(tmplock->ml.type, type))
goto switch_queues;
/* existing conversion requests take precedence */
if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
goto switch_queues;
}
/* fall thru to grant */
grant:
mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
res->lockname.name, dlm_lock_mode_name(type));
/* immediately grant the new lock type */
lock->lksb->status = DLM_NORMAL;
if (lock->ml.node == dlm->node_num)
mlog(0, "doing in-place convert for nonlocal lock\n");
lock->ml.type = type;
if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
/*
* Move the lock to the tail because it may be the only lock which has
* an invalid lvb.
*/
list_move_tail(&lock->list, &res->granted);
status = DLM_NORMAL;
*call_ast = 1;
goto unlock_exit;
switch_queues:
if (flags & LKM_NOQUEUE) {
mlog(0, "failed to convert NOQUEUE lock %.*s from "
"%d to %d...\n", res->lockname.len, res->lockname.name,
lock->ml.type, type);
status = DLM_NOTQUEUED;
goto unlock_exit;
}
mlog(0, "res %.*s, queueing...\n", res->lockname.len,
res->lockname.name);
lock->ml.convert_type = type;
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, &res->converting);
unlock_exit:
spin_unlock(&lock->spinlock);
if (status == DLM_DENIED) {
__dlm_print_one_lock_resource(res);
}
if (status == DLM_NORMAL)
*kick_thread = 1;
return status;
}
void dlm_revert_pending_convert(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE;
lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}
/* messages the master site to do lock conversion
* locking:
* caller needs: none
* taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
* held on exit: none
* returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
*/
enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
mlog(0, "bailing out early since res is RECOVERING "
"on secondary queue\n");
/* __dlm_print_one_lock_resource(res); */
status = DLM_RECOVERING;
goto bail;
}
/* will exit this call with spinlock held */
__dlm_wait_on_lockres(res);
if (lock->ml.convert_type != LKM_IVMODE) {
__dlm_print_one_lock_resource(res);
mlog(ML_ERROR, "converting a remote lock that is already "
"converting! (cookie=%u:%llu, conv=%d)\n",
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ml.convert_type);
status = DLM_DENIED;
goto bail;
}
if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
mlog(0, "last convert request returned DLM_RECOVERING, but "
"owner has already queued and sent ast to me. res %.*s, "
"(cookie=%u:%llu, type=%d, conv=%d)\n",
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ml.type, lock->ml.convert_type);
status = DLM_NORMAL;
goto bail;
}
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, &res->converting);
lock->convert_pending = 1;
lock->ml.convert_type = type;
if (flags & LKM_VALBLK) {
if (lock->ml.type == LKM_EXMODE) {
flags |= LKM_PUT_LVB;
lock->lksb->flags |= DLM_LKSB_PUT_LVB;
} else {
if (lock->ml.convert_type == LKM_NLMODE)
flags &= ~LKM_VALBLK;
else {
flags |= LKM_GET_LVB;
lock->lksb->flags |= DLM_LKSB_GET_LVB;
}
}
}
spin_unlock(&res->spinlock);
/* no locks held here.
* need to wait for a reply as to whether it got queued or not. */
status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
/* if it failed, move it back to granted queue.
* if master returns DLM_NORMAL and then down before sending ast,
* it may have already been moved to granted queue, reset to
* DLM_RECOVERING and retry convert */
if (status != DLM_NORMAL) {
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
} else if (!lock->convert_pending) {
mlog(0, "%s: res %.*s, owner died and lock has been moved back "
"to granted list, retry convert.\n",
dlm->name, res->lockname.len, res->lockname.name);
status = DLM_RECOVERING;
}
lock->convert_pending = 0;
bail:
spin_unlock(&res->spinlock);
/* TODO: should this be a wake_one? */
/* wake up any IN_PROGRESS waiters */
wake_up(&res->wq);
return status;
}
/* sends DLM_CONVERT_LOCK_MSG to master site
* locking:
* caller needs: none
* taken: none
* held on exit: none
* returns: DLM_NOLOCKMGR, status from remote node
*/
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags, int type)
{
struct dlm_convert_lock convert;
int tmpret;
enum dlm_status ret;
int status = 0;
struct kvec vec[2];
size_t veclen = 1;
mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
memset(&convert, 0, sizeof(struct dlm_convert_lock));
convert.node_idx = dlm->node_num;
convert.requested_type = type;
convert.cookie = lock->ml.cookie;
convert.namelen = res->lockname.len;
convert.flags = cpu_to_be32(flags);
memcpy(convert.name, res->lockname.name, convert.namelen);
vec[0].iov_len = sizeof(struct dlm_convert_lock);
vec[0].iov_base = &convert;
if (flags & LKM_PUT_LVB) {
/* extra data to send if we are updating lvb */
vec[1].iov_len = DLM_LVB_LEN;
vec[1].iov_base = lock->lksb->lvb;
veclen++;
}
tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
vec, veclen, res->owner, &status);
if (tmpret >= 0) {
// successfully sent and received
ret = status; // this is already a dlm_status
if (ret == DLM_RECOVERING) {
mlog(0, "node %u returned DLM_RECOVERING from convert "
"message!\n", res->owner);
} else if (ret == DLM_MIGRATING) {
mlog(0, "node %u returned DLM_MIGRATING from convert "
"message!\n", res->owner);
} else if (ret == DLM_FORWARD) {
mlog(0, "node %u returned DLM_FORWARD from convert "
"message!\n", res->owner);
} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
dlm_error(ret);
} else {
mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
"node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
res->owner);
if (dlm_is_host_down(tmpret)) {
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
* to notice the node is dead. times out after 5s. */
dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
"from convert message!\n", res->owner);
} else {
ret = dlm_err_to_dlm_status(tmpret);
}
}
return ret;
}
/* handler for DLM_CONVERT_LOCK_MSG on master site
* locking:
* caller needs: none
* taken: takes and drop res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
* status from __dlmconvert_master
*/
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *lock = NULL;
struct dlm_lock *tmp_lock;
struct dlm_lockstatus *lksb;
enum dlm_status status = DLM_NORMAL;
u32 flags;
int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;
if (!dlm_grab(dlm)) {
dlm_error(DLM_REJECTED);
return DLM_REJECTED;
}
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
status = DLM_IVBUFLEN;
dlm_error(status);
goto leave;
}
flags = be32_to_cpu(cnv->flags);
if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
(LKM_PUT_LVB|LKM_GET_LVB)) {
mlog(ML_ERROR, "both PUT and GET lvb specified\n");
status = DLM_BADARGS;
goto leave;
}
mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
(flags & LKM_GET_LVB ? "get lvb" : "none"));
status = DLM_IVLOCKID;
res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
if (!res) {
dlm_error(status);
goto leave;
}
spin_lock(&res->spinlock);
status = __dlm_lockres_state_to_status(res);
if (status != DLM_NORMAL) {
spin_unlock(&res->spinlock);
dlm_error(status);
goto leave;
}
list_for_each_entry(tmp_lock, &res->granted, list) {
if (tmp_lock->ml.cookie == cnv->cookie &&
tmp_lock->ml.node == cnv->node_idx) {
lock = tmp_lock;
dlm_lock_get(lock);
break;
}
}
spin_unlock(&res->spinlock);
if (!lock) {
status = DLM_IVLOCKID;
mlog(ML_ERROR, "did not find lock to convert on grant queue! "
"cookie=%u:%llu\n",
dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
dlm_print_one_lock_resource(res);
goto leave;
}
/* found the lock */
lksb = lock->lksb;
/* see if caller needed to get/put lvb */
if (flags & LKM_PUT_LVB) {
BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
lksb->flags |= DLM_LKSB_PUT_LVB;
memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
} else if (flags & LKM_GET_LVB) {
BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
lksb->flags |= DLM_LKSB_GET_LVB;
}
spin_lock(&res->spinlock);
status = __dlm_lockres_state_to_status(res);
if (status == DLM_NORMAL) {
__dlm_lockres_reserve_ast(res);
ast_reserved = 1;
res->state |= DLM_LOCK_RES_IN_PROGRESS;
status = __dlmconvert_master(dlm, res, lock, flags,
cnv->requested_type,
&call_ast, &kick_thread);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
wake = 1;
}
spin_unlock(&res->spinlock);
if (wake)
wake_up(&res->wq);
if (status != DLM_NORMAL) {
if (status != DLM_NOTQUEUED)
dlm_error(status);
lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}
leave:
if (lock)
dlm_lock_put(lock);
/* either queue the ast or release it, if reserved */
if (call_ast)
dlm_queue_ast(dlm, lock);
else if (ast_reserved)
dlm_lockres_release_ast(dlm, res);
if (kick_thread)
dlm_kick_thread(dlm, res);
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return status;
}
| linux-master | fs/ocfs2/dlm/dlmconvert.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmdomain.c
*
* defines domain join / leave apis
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
#include "../cluster/masklog.h"
/*
* ocfs2 node maps are arrays of long int, which makes it unsafe to send
* them across the wire as-is due to endianness issues. To work around
* this, we convert the long ints to byte arrays. The following three
* routines are helpers to set/test/copy bits within those byte arrays.
*/
static inline void byte_set_bit(u8 nr, u8 map[])
{
map[nr >> 3] |= (1UL << (nr & 7));
}
static inline int byte_test_bit(u8 nr, u8 map[])
{
return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
}
static inline void byte_copymap(u8 dmap[], unsigned long smap[],
unsigned int sz)
{
unsigned int nn;
if (!sz)
return;
memset(dmap, 0, ((sz + 7) >> 3));
for (nn = 0 ; nn < sz; nn++)
if (test_bit(nn, smap))
byte_set_bit(nn, dmap);
}
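/*
* Worked example: byte_set_bit(10, map) sets bit (10 & 7) == 2 of byte
* map[10 >> 3] == map[1], i.e. map[1] |= 0x04; byte_test_bit(10, map)
* reads the same bit back, independent of host endianness.
*/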
static void dlm_free_pagevec(void **vec, int pages)
{
while (pages--)
free_page((unsigned long)vec[pages]);
kfree(vec);
}
static void **dlm_alloc_pagevec(int pages)
{
void **vec = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
int i;
if (!vec)
return NULL;
for (i = 0; i < pages; i++)
if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
goto out_free;
mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
pages, (unsigned long)DLM_HASH_PAGES,
(unsigned long)DLM_BUCKETS_PER_PAGE);
return vec;
out_free:
dlm_free_pagevec(vec, i);
return NULL;
}
/*
*
* spinlock lock ordering: if multiple locks are needed, obey this ordering:
* dlm_domain_lock
* struct dlm_ctxt->spinlock
* struct dlm_lock_resource->spinlock
* struct dlm_ctxt->master_lock
* struct dlm_ctxt->ast_lock
* dlm_master_list_entry->spinlock
* dlm_lock->spinlock
*
*/
DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
/*
* The supported protocol version for DLM communication. Running domains
* will have a negotiated version with the same major number and a minor
* number equal or smaller. The dlm_ctxt->dlm_locking_proto field should
* be used to determine what a running domain is actually using.
*
* New in version 1.1:
* - Message DLM_QUERY_REGION added to support global heartbeat
* - Message DLM_QUERY_NODEINFO added to allow online node removes
* New in version 1.2:
* - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain
* New in version 1.3:
* - Message DLM_DEREF_LOCKRES_DONE added to inform non-master that the
* refmap is cleared
*/
static const struct dlm_protocol_version dlm_protocol = {
.pv_major = 1,
.pv_minor = 3,
};
#define DLM_DOMAIN_BACKOFF_MS 200
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
void *data, void **ret_data);
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
static int dlm_protocol_compare(struct dlm_protocol_version *existing,
struct dlm_protocol_version *request);
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
if (hlist_unhashed(&res->hash_node))
return;
mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
hlist_del_init(&res->hash_node);
dlm_lockres_put(res);
}
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
assert_spin_locked(&dlm->spinlock);
bucket = dlm_lockres_hash(dlm, res->lockname.hash);
/* get a reference for our hashtable */
dlm_lockres_get(res);
hlist_add_head(&res->hash_node, bucket);
mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
}
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
const char *name,
unsigned int len,
unsigned int hash)
{
struct hlist_head *bucket;
struct dlm_lock_resource *res;
mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
bucket = dlm_lockres_hash(dlm, hash);
hlist_for_each_entry(res, bucket, hash_node) {
if (res->lockname.name[0] != name[0])
continue;
if (unlikely(res->lockname.len != len))
continue;
if (memcmp(res->lockname.name + 1, name + 1, len - 1))
continue;
dlm_lockres_get(res);
return res;
}
return NULL;
}
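/*
 * Note on the compare order above: the single-byte and length checks are
 * cheap filters that reject most non-matching entries in the bucket
 * before paying for a full memcmp(); only names that agree on both fall
 * through to the byte-wise compare, which can then skip byte 0.
 */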
/* intended to be called by functions which do not care about lock
* resources which are being purged (most net _handler functions).
* this will return NULL for any lock resource which is found but
* currently in the process of dropping its mastery reference.
* use __dlm_lookup_lockres_full when you need the lock resource
* regardless (e.g. dlm_get_lock_resource) */
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int len,
unsigned int hash)
{
struct dlm_lock_resource *res = NULL;
mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
res = __dlm_lookup_lockres_full(dlm, name, len, hash);
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
return NULL;
}
spin_unlock(&res->spinlock);
}
return res;
}
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int len)
{
struct dlm_lock_resource *res;
unsigned int hash = dlm_lockid_hash(name, len);
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, len, hash);
spin_unlock(&dlm->spinlock);
return res;
}
static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
{
struct dlm_ctxt *tmp;
assert_spin_locked(&dlm_domain_lock);
/* tmp->name here is always NULL terminated,
* but domain may not be! */
list_for_each_entry(tmp, &dlm_domains, list) {
if (strlen(tmp->name) == len &&
memcmp(tmp->name, domain, len) == 0)
return tmp;
}
return NULL;
}
/* For null terminated domain strings ONLY */
static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
{
assert_spin_locked(&dlm_domain_lock);
return __dlm_lookup_domain_full(domain, strlen(domain));
}
/* returns true on one of two conditions:
* 1) the domain does not exist
 * 2) the domain exists and its state is "joined" */
static int dlm_wait_on_domain_helper(const char *domain)
{
int ret = 0;
struct dlm_ctxt *tmp = NULL;
spin_lock(&dlm_domain_lock);
tmp = __dlm_lookup_domain(domain);
if (!tmp)
ret = 1;
else if (tmp->dlm_state == DLM_CTXT_JOINED)
ret = 1;
spin_unlock(&dlm_domain_lock);
return ret;
}
static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
{
dlm_destroy_debugfs_subroot(dlm);
if (dlm->lockres_hash)
dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
if (dlm->master_hash)
dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
kfree(dlm->name);
kfree(dlm);
}
/* A little strange - this function will be called while holding
 * dlm_domain_lock and is expected to be holding it on the way out. We
 * will, however, drop and reacquire it multiple times. */
static void dlm_ctxt_release(struct kref *kref)
{
struct dlm_ctxt *dlm;
dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
BUG_ON(dlm->num_joins);
BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
/* we may still be in the list if we hit an error during join. */
list_del_init(&dlm->list);
spin_unlock(&dlm_domain_lock);
mlog(0, "freeing memory from domain %s\n", dlm->name);
wake_up(&dlm_domain_events);
dlm_free_ctxt_mem(dlm);
spin_lock(&dlm_domain_lock);
}
void dlm_put(struct dlm_ctxt *dlm)
{
spin_lock(&dlm_domain_lock);
kref_put(&dlm->dlm_refs, dlm_ctxt_release);
spin_unlock(&dlm_domain_lock);
}
static void __dlm_get(struct dlm_ctxt *dlm)
{
kref_get(&dlm->dlm_refs);
}
/* given a questionable reference to a dlm object, gets a reference if
* it can find it in the list, otherwise returns NULL in which case
* you shouldn't trust your pointer. */
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
{
struct dlm_ctxt *target;
struct dlm_ctxt *ret = NULL;
spin_lock(&dlm_domain_lock);
list_for_each_entry(target, &dlm_domains, list) {
if (target == dlm) {
__dlm_get(target);
ret = target;
break;
}
}
spin_unlock(&dlm_domain_lock);
return ret;
}
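/*
 * Typical caller pattern (used by the message handlers below): treat the
 * incoming pointer as untrusted until dlm_grab() validates it, and
 * balance every successful grab with dlm_put():
 *
 *	if (!dlm_grab(dlm))
 *		return 0;
 *	// ... safe to dereference dlm here ...
 *	dlm_put(dlm);
 */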
int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
{
int ret;
spin_lock(&dlm_domain_lock);
ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
(dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
spin_unlock(&dlm_domain_lock);
return ret;
}
static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
{
if (dlm->dlm_worker) {
destroy_workqueue(dlm->dlm_worker);
dlm->dlm_worker = NULL;
}
}
static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
/* We've left the domain. Now we can take ourselves out of the
* list and allow the kref stuff to help us free the
* memory. */
spin_lock(&dlm_domain_lock);
list_del_init(&dlm->list);
spin_unlock(&dlm_domain_lock);
/* Wake up anyone waiting for us to remove this domain */
wake_up(&dlm_domain_events);
}
static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
{
int i, num, n, ret = 0;
struct dlm_lock_resource *res;
struct hlist_node *iter;
struct hlist_head *bucket;
int dropped;
mlog(0, "Migrating locks from domain %s\n", dlm->name);
num = 0;
spin_lock(&dlm->spinlock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
n = 0;
bucket = dlm_lockres_hash(dlm, i);
iter = bucket->first;
while (iter) {
n++;
res = hlist_entry(iter, struct dlm_lock_resource,
hash_node);
dlm_lockres_get(res);
/* migrate, if necessary. this will drop the dlm
* spinlock and retake it if it does migration. */
dropped = dlm_empty_lockres(dlm, res);
spin_lock(&res->spinlock);
if (dropped)
__dlm_lockres_calc_usage(dlm, res);
else
iter = res->hash_node.next;
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
if (dropped) {
cond_resched_lock(&dlm->spinlock);
goto redo_bucket;
}
}
cond_resched_lock(&dlm->spinlock);
num += n;
}
if (!num) {
if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
mlog(0, "%s: perhaps there are more lock resources "
"need to be migrated after dlm recovery\n", dlm->name);
ret = -EAGAIN;
} else {
mlog(0, "%s: we won't do dlm recovery after migrating "
"all lock resources\n", dlm->name);
dlm->migrate_done = 1;
}
}
spin_unlock(&dlm->spinlock);
wake_up(&dlm->dlm_thread_wq);
/* let the dlm thread take care of purging, keep scanning until
* nothing remains in the hash */
if (num) {
mlog(0, "%s: %d lock resources in hash last pass\n",
dlm->name, num);
ret = -EAGAIN;
}
mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
return ret;
}
static int dlm_no_joining_node(struct dlm_ctxt *dlm)
{
int ret;
spin_lock(&dlm->spinlock);
ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
spin_unlock(&dlm->spinlock);
return ret;
}
static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len,
void *data, void **ret_data)
{
struct dlm_ctxt *dlm = data;
unsigned int node;
struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
if (!dlm_grab(dlm))
return 0;
node = exit_msg->node_idx;
mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
spin_lock(&dlm->spinlock);
set_bit(node, dlm->exit_domain_map);
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
return 0;
}
static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
{
/* Yikes, a double spinlock! I need domain_lock for the dlm
* state and the dlm spinlock for join state... Sorry! */
again:
spin_lock(&dlm_domain_lock);
spin_lock(&dlm->spinlock);
if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "Node %d is joining, we wait on it.\n",
dlm->joining_node);
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
goto again;
}
dlm->dlm_state = DLM_CTXT_LEAVING;
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
}
static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
int node = -1, num = 0;
assert_spin_locked(&dlm->spinlock);
printk("( ");
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
printk("%d ", node);
++num;
}
printk(") %u nodes\n", num);
}
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
unsigned int node;
struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
mlog(0, "%p %u %p", msg, len, data);
if (!dlm_grab(dlm))
return 0;
node = exit_msg->node_idx;
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
clear_bit(node, dlm->exit_domain_map);
printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
__dlm_print_nodes(dlm);
/* notify anything attached to the heartbeat events */
dlm_hb_event_notify_attached(dlm, node, 0);
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
return 0;
}
static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
unsigned int node)
{
int status;
struct dlm_exit_domain leave_msg;
mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
msg_type, node);
memset(&leave_msg, 0, sizeof(leave_msg));
leave_msg.node_idx = dlm->node_num;
status = o2net_send_message(msg_type, dlm->key, &leave_msg,
sizeof(leave_msg), node, NULL);
if (status < 0)
mlog(ML_ERROR, "Error %d sending domain exit message %u "
"to node %u on domain %s\n", status, msg_type, node,
dlm->name);
return status;
}
static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
{
int node = -1;
/* Support for begin exit domain was added in 1.2 */
if (dlm->dlm_locking_proto.pv_major == 1 &&
dlm->dlm_locking_proto.pv_minor < 2)
return;
/*
* Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely
 * informational: if a node does not receive the message, so be it.
*/
spin_lock(&dlm->spinlock);
while (1) {
node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
if (node >= O2NM_MAX_NODES)
break;
if (node == dlm->node_num)
continue;
spin_unlock(&dlm->spinlock);
dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
spin_lock(&dlm->spinlock);
}
spin_unlock(&dlm->spinlock);
}
static void dlm_leave_domain(struct dlm_ctxt *dlm)
{
int node, clear_node, status;
/* At this point we've migrated away all our locks and won't
* accept mastership of new ones. The dlm is responsible for
* almost nothing now. We make sure not to confuse any joining
* nodes and then commence shutdown procedure. */
spin_lock(&dlm->spinlock);
/* Clear ourselves from the domain map */
clear_bit(dlm->node_num, dlm->domain_map);
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
0)) < O2NM_MAX_NODES) {
/* Drop the dlm spinlock. This is safe wrt the domain_map.
* -nodes cannot be added now as the
* query_join_handlers knows to respond with OK_NO_MAP
* -we catch the right network errors if a node is
* removed from the map while we're sending him the
* exit message. */
spin_unlock(&dlm->spinlock);
clear_node = 1;
status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
node);
if (status < 0 &&
status != -ENOPROTOOPT &&
status != -ENOTCONN) {
mlog(ML_NOTICE, "Error %d sending domain exit message "
"to node %d\n", status, node);
/* Not sure what to do here but let's sleep for
 * a bit in case this was a transient
 * error... */
msleep(DLM_DOMAIN_BACKOFF_MS);
clear_node = 0;
}
spin_lock(&dlm->spinlock);
/* If we're not clearing the node bit then we intend
* to loop back around to try again. */
if (clear_node)
clear_bit(node, dlm->domain_map);
}
spin_unlock(&dlm->spinlock);
}
void dlm_unregister_domain(struct dlm_ctxt *dlm)
{
int leave = 0;
struct dlm_lock_resource *res;
spin_lock(&dlm_domain_lock);
BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
BUG_ON(!dlm->num_joins);
dlm->num_joins--;
if (!dlm->num_joins) {
/* We mark it "in shutdown" now so new register
* requests wait until we've completely left the
* domain. Don't use DLM_CTXT_LEAVING yet as we still
* want new domain joins to communicate with us at
* least until we've completed migration of our
* resources. */
dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
leave = 1;
}
spin_unlock(&dlm_domain_lock);
if (leave) {
mlog(0, "shutting down domain %s\n", dlm->name);
dlm_begin_exit_domain(dlm);
/* We changed dlm state, notify the thread */
dlm_kick_thread(dlm, NULL);
while (dlm_migrate_all_locks(dlm)) {
/* Give dlm_thread time to purge the lockres' */
msleep(500);
mlog(0, "%s: more migration to do\n", dlm->name);
}
/* This list should be empty. If not, print remaining lockres */
if (!list_empty(&dlm->tracking_list)) {
mlog(ML_ERROR, "Following lockres' are still on the "
"tracking list:\n");
list_for_each_entry(res, &dlm->tracking_list, tracking)
dlm_print_one_lock_resource(res);
}
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
}
EXPORT_SYMBOL_GPL(dlm_unregister_domain);
static int dlm_query_join_proto_check(char *proto_type, int node,
struct dlm_protocol_version *ours,
struct dlm_protocol_version *request)
{
int rc;
struct dlm_protocol_version proto = *request;
if (!dlm_protocol_compare(ours, &proto)) {
mlog(0,
"node %u wanted to join with %s locking protocol "
"%u.%u, we respond with %u.%u\n",
node, proto_type,
request->pv_major,
request->pv_minor,
proto.pv_major, proto.pv_minor);
request->pv_minor = proto.pv_minor;
rc = 0;
} else {
mlog(ML_NOTICE,
"Node %u wanted to join with %s locking "
"protocol %u.%u, but we have %u.%u, disallowing\n",
node, proto_type,
request->pv_major,
request->pv_minor,
ours->pv_major,
ours->pv_minor);
rc = 1;
}
return rc;
}
/*
* struct dlm_query_join_packet is made up of four one-byte fields. They
* are effectively in big-endian order already. However, little-endian
* machines swap them before putting the packet on the wire (because
* query_join's response is a status, and that status is treated as a u32
* on the wire). Thus, a big-endian and little-endian machines will treat
* this structure differently.
*
* The solution is to have little-endian machines swap the structure when
* converting from the structure to the u32 representation. This will
* result in the structure having the correct format on the wire no matter
* the host endian format.
*/
static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
u32 *wire)
{
union dlm_query_join_response response;
response.packet = *packet;
*wire = be32_to_cpu(response.intval);
}
static void dlm_query_join_wire_to_packet(u32 wire,
struct dlm_query_join_packet *packet)
{
union dlm_query_join_response response;
response.intval = cpu_to_be32(wire);
*packet = response.packet;
}
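/*
 * Illustrative userspace sketch (not part of this file's build) of the
 * round trip above: the sender folds the packet bytes through a
 * big-endian-to-cpu conversion and the receiver applies the inverse, so
 * the struct comes back intact on any host. The struct layout here only
 * loosely mirrors the real dlm_query_join_packet.
 *
 *	#include <stdio.h>
 *	#include <arpa/inet.h>
 *
 *	struct pkt { unsigned char code, dlm_minor, fs_minor, pad; };
 *	union resp { struct pkt p; unsigned int intval; };
 *
 *	int main(void)
 *	{
 *		union resp out = { .p = { 1, 3, 2, 0 } };
 *		unsigned int wire = ntohl(out.intval);		// ~ be32_to_cpu()
 *		union resp in = { .intval = htonl(wire) };	// ~ cpu_to_be32()
 *
 *		printf("%u %u %u\n", in.p.code, in.p.dlm_minor, in.p.fs_minor);
 *		return 0;
 *	}
 */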
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_query_join_request *query;
struct dlm_query_join_packet packet = {
.code = JOIN_DISALLOW,
};
struct dlm_ctxt *dlm = NULL;
u32 response;
u8 nodenum;
query = (struct dlm_query_join_request *) msg->buf;
mlog(0, "node %u wants to join domain %s\n", query->node_idx,
query->domain);
/*
* If heartbeat doesn't consider the node live, tell it
* to back off and try again. This gives heartbeat a chance
* to catch up.
*/
if (!o2hb_check_node_heartbeating_no_sem(query->node_idx)) {
mlog(0, "node %u is not in our live map yet\n",
query->node_idx);
packet.code = JOIN_DISALLOW;
goto respond;
}
packet.code = JOIN_OK_NO_MAP;
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
if (!dlm)
goto unlock_respond;
/*
 * There is a small window where the joining node may not see the
 * node(s) that just left but are still part of the cluster. DISALLOW
 * the join request if the joining node has a different node map.
*/
nodenum = 0;
while (nodenum < O2NM_MAX_NODES) {
if (test_bit(nodenum, dlm->domain_map)) {
if (!byte_test_bit(nodenum, query->node_map)) {
mlog(0, "disallow join as node %u does not "
"have node %u in its nodemap\n",
query->node_idx, nodenum);
packet.code = JOIN_DISALLOW;
goto unlock_respond;
}
}
nodenum++;
}
/* Once the dlm ctxt is marked as leaving then we don't want
* to be put in someone's domain map.
* Also, explicitly disallow joining at certain troublesome
* times (ie. during recovery). */
if (dlm->dlm_state != DLM_CTXT_LEAVING) {
int bit = query->node_idx;
spin_lock(&dlm->spinlock);
if (dlm->dlm_state == DLM_CTXT_NEW &&
dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
/* If this is a brand new context and we
* haven't started our join process yet, then
* the other node won the race. */
packet.code = JOIN_OK_NO_MAP;
} else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
/* Disallow parallel joins. */
packet.code = JOIN_DISALLOW;
} else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
mlog(0, "node %u trying to join, but recovery "
"is ongoing.\n", bit);
packet.code = JOIN_DISALLOW;
} else if (test_bit(bit, dlm->recovery_map)) {
mlog(0, "node %u trying to join, but it "
"still needs recovery.\n", bit);
packet.code = JOIN_DISALLOW;
} else if (test_bit(bit, dlm->domain_map)) {
mlog(0, "node %u trying to join, but it "
"is still in the domain! needs recovery?\n",
bit);
packet.code = JOIN_DISALLOW;
} else {
/* Alright, we're fully a part of this domain
 * so we keep some state as to who's joining
 * and indicate to him what needs to be fixed
 * up. */
/* Make sure we speak compatible locking protocols. */
if (dlm_query_join_proto_check("DLM", bit,
&dlm->dlm_locking_proto,
&query->dlm_proto)) {
packet.code = JOIN_PROTOCOL_MISMATCH;
} else if (dlm_query_join_proto_check("fs", bit,
&dlm->fs_locking_proto,
&query->fs_proto)) {
packet.code = JOIN_PROTOCOL_MISMATCH;
} else {
packet.dlm_minor = query->dlm_proto.pv_minor;
packet.fs_minor = query->fs_proto.pv_minor;
packet.code = JOIN_OK;
__dlm_set_joining_node(dlm, query->node_idx);
}
}
spin_unlock(&dlm->spinlock);
}
unlock_respond:
spin_unlock(&dlm_domain_lock);
respond:
mlog(0, "We respond with %u\n", packet.code);
dlm_query_join_packet_to_wire(&packet, &response);
return response;
}
static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_assert_joined *assert;
struct dlm_ctxt *dlm = NULL;
assert = (struct dlm_assert_joined *) msg->buf;
mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
assert->domain);
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
/* XXX should we consider no dlm ctxt an error? */
if (dlm) {
spin_lock(&dlm->spinlock);
/* Alright, this node has officially joined our
* domain. Set him in the map and clean up our
* leftover join state. */
BUG_ON(dlm->joining_node != assert->node_idx);
if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
mlog(0, "dlm recovery is ongoing, disallow join\n");
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
return -EAGAIN;
}
set_bit(assert->node_idx, dlm->domain_map);
clear_bit(assert->node_idx, dlm->exit_domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
/* notify anything attached to the heartbeat events */
dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
spin_unlock(&dlm->spinlock);
}
spin_unlock(&dlm_domain_lock);
return 0;
}
static int dlm_match_regions(struct dlm_ctxt *dlm,
struct dlm_query_region *qr,
char *local, int locallen)
{
char *remote = qr->qr_regions;
char *l, *r;
int localnr, i, j, foundit;
int status = 0;
if (!o2hb_global_heartbeat_active()) {
if (qr->qr_numregions) {
mlog(ML_ERROR, "Domain %s: Joining node %d has global "
"heartbeat enabled but local node %d does not\n",
qr->qr_domain, qr->qr_node, dlm->node_num);
status = -EINVAL;
}
goto bail;
}
if (o2hb_global_heartbeat_active() && !qr->qr_numregions) {
mlog(ML_ERROR, "Domain %s: Local node %d has global "
"heartbeat enabled but joining node %d does not\n",
qr->qr_domain, dlm->node_num, qr->qr_node);
status = -EINVAL;
goto bail;
}
r = remote;
for (i = 0; i < qr->qr_numregions; ++i) {
mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, r);
r += O2HB_MAX_REGION_NAME_LEN;
}
localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
localnr = o2hb_get_all_regions(local, (u8)localnr);
/* compare local regions with remote */
l = local;
for (i = 0; i < localnr; ++i) {
foundit = 0;
r = remote;
for (j = 0; j < qr->qr_numregions; ++j) {
if (!memcmp(l, r, O2HB_MAX_REGION_NAME_LEN)) {
foundit = 1;
break;
}
r += O2HB_MAX_REGION_NAME_LEN;
}
if (!foundit) {
status = -EINVAL;
mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
"in local node %d but not in joining node %d\n",
qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, l,
dlm->node_num, qr->qr_node);
goto bail;
}
l += O2HB_MAX_REGION_NAME_LEN;
}
/* compare remote with local regions */
r = remote;
for (i = 0; i < qr->qr_numregions; ++i) {
foundit = 0;
l = local;
for (j = 0; j < localnr; ++j) {
if (!memcmp(r, l, O2HB_MAX_REGION_NAME_LEN)) {
foundit = 1;
break;
}
l += O2HB_MAX_REGION_NAME_LEN;
}
if (!foundit) {
status = -EINVAL;
mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
"in joining node %d but not in local node %d\n",
qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, r,
qr->qr_node, dlm->node_num);
goto bail;
}
r += O2HB_MAX_REGION_NAME_LEN;
}
bail:
return status;
}
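/*
 * The two passes above amount to a set-equality check over fixed-width
 * region names: the first proves every local region exists remotely, the
 * second proves every remote region exists locally, so a region
 * configured on exactly one side fails the join with -EINVAL.
 */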
static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
{
struct dlm_query_region *qr = NULL;
int status, ret = 0, i;
char *p;
if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
if (!qr) {
ret = -ENOMEM;
mlog_errno(ret);
goto bail;
}
qr->qr_node = dlm->node_num;
qr->qr_namelen = strlen(dlm->name);
memcpy(qr->qr_domain, dlm->name, qr->qr_namelen);
/* if local hb, the numregions will be zero */
if (o2hb_global_heartbeat_active())
qr->qr_numregions = o2hb_get_all_regions(qr->qr_regions,
O2NM_MAX_REGIONS);
p = qr->qr_regions;
for (i = 0; i < qr->qr_numregions; ++i, p += O2HB_MAX_REGION_NAME_LEN)
mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, p);
i = -1;
while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
i + 1)) < O2NM_MAX_NODES) {
if (i == dlm->node_num)
continue;
mlog(0, "Sending regions to node %d\n", i);
ret = o2net_send_message(DLM_QUERY_REGION, DLM_MOD_KEY, qr,
sizeof(struct dlm_query_region),
i, &status);
if (ret >= 0)
ret = status;
if (ret) {
mlog(ML_ERROR, "Region mismatch %d, node %d\n",
ret, i);
break;
}
}
bail:
kfree(qr);
return ret;
}
static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
void *data, void **ret_data)
{
struct dlm_query_region *qr;
struct dlm_ctxt *dlm = NULL;
char *local = NULL;
int status = 0;
qr = (struct dlm_query_region *) msg->buf;
mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
qr->qr_domain);
/* buffer used in dlm_match_regions() */
local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
if (!local)
return -ENOMEM;
status = -EINVAL;
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen);
if (!dlm) {
mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
"before join domain\n", qr->qr_node, qr->qr_domain);
goto out_domain_lock;
}
spin_lock(&dlm->spinlock);
if (dlm->joining_node != qr->qr_node) {
mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
"but joining node is %d\n", qr->qr_node, qr->qr_domain,
dlm->joining_node);
goto out_dlm_lock;
}
/* Support for global heartbeat was added in 1.1 */
if (dlm->dlm_locking_proto.pv_major == 1 &&
dlm->dlm_locking_proto.pv_minor == 0) {
mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
"but active dlm protocol is %d.%d\n", qr->qr_node,
qr->qr_domain, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
goto out_dlm_lock;
}
status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
out_dlm_lock:
spin_unlock(&dlm->spinlock);
out_domain_lock:
spin_unlock(&dlm_domain_lock);
kfree(local);
return status;
}
static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn)
{
struct o2nm_node *local;
struct dlm_node_info *remote;
int i, j;
int status = 0;
for (j = 0; j < qn->qn_numnodes; ++j)
mlog(0, "Node %3d, %pI4:%u\n", qn->qn_nodes[j].ni_nodenum,
&(qn->qn_nodes[j].ni_ipv4_address),
ntohs(qn->qn_nodes[j].ni_ipv4_port));
for (i = 0; i < O2NM_MAX_NODES && !status; ++i) {
local = o2nm_get_node_by_num(i);
remote = NULL;
for (j = 0; j < qn->qn_numnodes; ++j) {
if (qn->qn_nodes[j].ni_nodenum == i) {
remote = &(qn->qn_nodes[j]);
break;
}
}
if (!local && !remote)
continue;
if ((local && !remote) || (!local && remote))
status = -EINVAL;
if (!status &&
((remote->ni_nodenum != local->nd_num) ||
(remote->ni_ipv4_port != local->nd_ipv4_port) ||
(remote->ni_ipv4_address != local->nd_ipv4_address)))
status = -EINVAL;
if (status) {
if (remote && !local)
mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
"registered in joining node %d but not in "
"local node %d\n", qn->qn_domain,
remote->ni_nodenum,
&(remote->ni_ipv4_address),
ntohs(remote->ni_ipv4_port),
qn->qn_nodenum, dlm->node_num);
if (local && !remote)
mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
"registered in local node %d but not in "
"joining node %d\n", qn->qn_domain,
local->nd_num, &(local->nd_ipv4_address),
ntohs(local->nd_ipv4_port),
dlm->node_num, qn->qn_nodenum);
BUG_ON((!local && !remote));
}
if (local)
o2nm_node_put(local);
}
return status;
}
static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
{
struct dlm_query_nodeinfo *qn = NULL;
struct o2nm_node *node;
int ret = 0, status, count, i;
if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);
if (!qn) {
ret = -ENOMEM;
mlog_errno(ret);
goto bail;
}
for (i = 0, count = 0; i < O2NM_MAX_NODES; ++i) {
node = o2nm_get_node_by_num(i);
if (!node)
continue;
qn->qn_nodes[count].ni_nodenum = node->nd_num;
qn->qn_nodes[count].ni_ipv4_port = node->nd_ipv4_port;
qn->qn_nodes[count].ni_ipv4_address = node->nd_ipv4_address;
mlog(0, "Node %3d, %pI4:%u\n", node->nd_num,
&(node->nd_ipv4_address), ntohs(node->nd_ipv4_port));
++count;
o2nm_node_put(node);
}
qn->qn_nodenum = dlm->node_num;
qn->qn_numnodes = count;
qn->qn_namelen = strlen(dlm->name);
memcpy(qn->qn_domain, dlm->name, qn->qn_namelen);
i = -1;
while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
i + 1)) < O2NM_MAX_NODES) {
if (i == dlm->node_num)
continue;
mlog(0, "Sending nodeinfo to node %d\n", i);
ret = o2net_send_message(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
qn, sizeof(struct dlm_query_nodeinfo),
i, &status);
if (ret >= 0)
ret = status;
if (ret) {
mlog(ML_ERROR, "node mismatch %d, node %d\n", ret, i);
break;
}
}
bail:
kfree(qn);
return ret;
}
static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
void *data, void **ret_data)
{
struct dlm_query_nodeinfo *qn;
struct dlm_ctxt *dlm = NULL;
int locked = 0, status = -EINVAL;
qn = (struct dlm_query_nodeinfo *) msg->buf;
mlog(0, "Node %u queries nodes on domain %s\n", qn->qn_nodenum,
qn->qn_domain);
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen);
if (!dlm) {
mlog(ML_ERROR, "Node %d queried nodes on domain %s before "
"join domain\n", qn->qn_nodenum, qn->qn_domain);
goto bail;
}
spin_lock(&dlm->spinlock);
locked = 1;
if (dlm->joining_node != qn->qn_nodenum) {
mlog(ML_ERROR, "Node %d queried nodes on domain %s but "
"joining node is %d\n", qn->qn_nodenum, qn->qn_domain,
dlm->joining_node);
goto bail;
}
/* Support for node query was added in 1.1 */
if (dlm->dlm_locking_proto.pv_major == 1 &&
dlm->dlm_locking_proto.pv_minor == 0) {
mlog(ML_ERROR, "Node %d queried nodes on domain %s "
"but active dlm protocol is %d.%d\n", qn->qn_nodenum,
qn->qn_domain, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
goto bail;
}
status = dlm_match_nodes(dlm, qn);
bail:
if (locked)
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
return status;
}
static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_cancel_join *cancel;
struct dlm_ctxt *dlm = NULL;
cancel = (struct dlm_cancel_join *) msg->buf;
mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
cancel->domain);
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
if (dlm) {
spin_lock(&dlm->spinlock);
/* Yikes, this guy wants to cancel his join. No
 * problem, we simply clean up our join state. */
BUG_ON(dlm->joining_node != cancel->node_idx);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
spin_unlock(&dlm->spinlock);
}
spin_unlock(&dlm_domain_lock);
return 0;
}
static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
unsigned int node)
{
int status;
struct dlm_cancel_join cancel_msg;
memset(&cancel_msg, 0, sizeof(cancel_msg));
cancel_msg.node_idx = dlm->node_num;
cancel_msg.name_len = strlen(dlm->name);
memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
&cancel_msg, sizeof(cancel_msg), node,
NULL);
if (status < 0) {
mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
"node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
node);
goto bail;
}
bail:
return status;
}
/* map_size should be in bytes. */
static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
unsigned long *node_map,
unsigned int map_size)
{
int status, tmpstat;
int node;
if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
sizeof(unsigned long))) {
mlog(ML_ERROR,
"map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES));
return -EINVAL;
}
status = 0;
node = -1;
while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
tmpstat = dlm_send_one_join_cancel(dlm, node);
if (tmpstat) {
mlog(ML_ERROR, "Error return %d cancelling join on "
"node %d\n", tmpstat, node);
if (!status)
status = tmpstat;
}
}
if (status)
mlog_errno(status);
return status;
}
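/*
 * A worked example of the map_size check above, assuming 64-bit longs
 * and an O2NM_MAX_NODES of 255: BITS_TO_LONGS(255) = 4, so the only
 * accepted map_size is 4 * sizeof(unsigned long) = 32 bytes, which is
 * exactly sizeof(ctxt->yes_resp_map) as passed by
 * dlm_try_to_join_domain() below.
 */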
static int dlm_request_join(struct dlm_ctxt *dlm,
int node,
enum dlm_query_join_response_code *response)
{
int status;
struct dlm_query_join_request join_msg;
struct dlm_query_join_packet packet;
u32 join_resp;
mlog(0, "querying node %d\n", node);
memset(&join_msg, 0, sizeof(join_msg));
join_msg.node_idx = dlm->node_num;
join_msg.name_len = strlen(dlm->name);
memcpy(join_msg.domain, dlm->name, join_msg.name_len);
join_msg.dlm_proto = dlm->dlm_locking_proto;
join_msg.fs_proto = dlm->fs_locking_proto;
/* copy live node map to join message */
byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
sizeof(join_msg), node, &join_resp);
if (status < 0 && status != -ENOPROTOOPT) {
mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
"node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
node);
goto bail;
}
dlm_query_join_wire_to_packet(join_resp, &packet);
/* -ENOPROTOOPT from the net code means the other side isn't
listening for our message type -- that's fine, it means
his dlm isn't up, so we can consider him a 'yes' but not
joined into the domain. */
if (status == -ENOPROTOOPT) {
status = 0;
*response = JOIN_OK_NO_MAP;
} else {
*response = packet.code;
switch (packet.code) {
case JOIN_DISALLOW:
case JOIN_OK_NO_MAP:
break;
case JOIN_PROTOCOL_MISMATCH:
mlog(ML_NOTICE,
"This node requested DLM locking protocol %u.%u and "
"filesystem locking protocol %u.%u. At least one of "
"the protocol versions on node %d is not compatible, "
"disconnecting\n",
dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor,
dlm->fs_locking_proto.pv_major,
dlm->fs_locking_proto.pv_minor,
node);
status = -EPROTO;
break;
case JOIN_OK:
/* Use the same locking protocol as the remote node */
dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
dlm->fs_locking_proto.pv_minor = packet.fs_minor;
mlog(0,
"Node %d responds JOIN_OK with DLM locking protocol "
"%u.%u and fs locking protocol %u.%u\n",
node,
dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor,
dlm->fs_locking_proto.pv_major,
dlm->fs_locking_proto.pv_minor);
break;
default:
status = -EINVAL;
mlog(ML_ERROR, "invalid response %d from node %u\n",
packet.code, node);
/* Reset response to JOIN_DISALLOW */
*response = JOIN_DISALLOW;
break;
}
}
mlog(0, "status %d, node %d response is %d\n", status, node,
*response);
bail:
return status;
}
static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
unsigned int node)
{
int status;
int ret;
struct dlm_assert_joined assert_msg;
mlog(0, "Sending join assert to node %u\n", node);
memset(&assert_msg, 0, sizeof(assert_msg));
assert_msg.node_idx = dlm->node_num;
assert_msg.name_len = strlen(dlm->name);
memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
&assert_msg, sizeof(assert_msg), node,
&ret);
if (status < 0)
mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
"node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
node);
else
status = ret;
return status;
}
static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
unsigned long *node_map)
{
int status, node, live;
status = 0;
node = -1;
while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
do {
/* It is very important that this message be
* received so we spin until either the node
* has died or it gets the message. */
status = dlm_send_one_join_assert(dlm, node);
spin_lock(&dlm->spinlock);
live = test_bit(node, dlm->live_nodes_map);
spin_unlock(&dlm->spinlock);
if (status) {
mlog(ML_ERROR, "Error return %d asserting "
"join on node %d\n", status, node);
/* give us some time between errors... */
if (live)
msleep(DLM_DOMAIN_BACKOFF_MS);
}
} while (status && live);
}
}
struct domain_join_ctxt {
unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
static int dlm_should_restart_join(struct dlm_ctxt *dlm,
struct domain_join_ctxt *ctxt,
enum dlm_query_join_response_code response)
{
int ret;
if (response == JOIN_DISALLOW) {
mlog(0, "Latest response of disallow -- should restart\n");
return 1;
}
spin_lock(&dlm->spinlock);
/* For now, we restart the process if the node maps have
* changed at all */
ret = !bitmap_equal(ctxt->live_map, dlm->live_nodes_map,
O2NM_MAX_NODES);
spin_unlock(&dlm->spinlock);
if (ret)
mlog(0, "Node maps changed -- should restart\n");
return ret;
}
static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
{
int status = 0, tmpstat, node;
struct domain_join_ctxt *ctxt;
enum dlm_query_join_response_code response = JOIN_DISALLOW;
mlog(0, "%p", dlm);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
/* group sem locking should work for us here -- we're already
* registered for heartbeat events so filling this should be
* atomic wrt getting those handlers called. */
o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES);
spin_lock(&dlm->spinlock);
bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES);
__dlm_set_joining_node(dlm, dlm->node_num);
spin_unlock(&dlm->spinlock);
node = -1;
while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
status = dlm_request_join(dlm, node, &response);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* Ok, either we got a response or the node doesn't have a
* dlm up. */
if (response == JOIN_OK)
set_bit(node, ctxt->yes_resp_map);
if (dlm_should_restart_join(dlm, ctxt, response)) {
status = -EAGAIN;
goto bail;
}
}
mlog(0, "Yay, done querying nodes!\n");
/* Yay, everyone agrees we can join the domain. My domain is
 * comprised of all nodes who were put in the
 * yes_resp_map. Copy that into our domain map and send a join
 * assert message to clean up everyone else's state. */
spin_lock(&dlm->spinlock);
bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES);
set_bit(dlm->node_num, dlm->domain_map);
spin_unlock(&dlm->spinlock);
/* Support for global heartbeat and node info was added in 1.1 */
if (dlm->dlm_locking_proto.pv_major > 1 ||
dlm->dlm_locking_proto.pv_minor > 0) {
status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
if (status) {
mlog_errno(status);
goto bail;
}
status = dlm_send_regions(dlm, ctxt->yes_resp_map);
if (status) {
mlog_errno(status);
goto bail;
}
}
dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
/* Joined state *must* be set before the joining node
* information, otherwise the query_join handler may read no
* current joiner but a state of NEW and tell joining nodes
* we're not in the domain. */
spin_lock(&dlm_domain_lock);
dlm->dlm_state = DLM_CTXT_JOINED;
dlm->num_joins++;
spin_unlock(&dlm_domain_lock);
bail:
spin_lock(&dlm->spinlock);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
if (!status) {
printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
__dlm_print_nodes(dlm);
}
spin_unlock(&dlm->spinlock);
if (ctxt) {
/* Do we need to send a cancel message to any nodes? */
if (status < 0) {
tmpstat = dlm_send_join_cancels(dlm,
ctxt->yes_resp_map,
sizeof(ctxt->yes_resp_map));
if (tmpstat < 0)
mlog_errno(tmpstat);
}
kfree(ctxt);
}
mlog(0, "returning %d\n", status);
return status;
}
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
{
o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
}
static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
{
int status;
mlog(0, "registering handlers.\n");
o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
if (status)
goto bail;
status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
if (status)
goto bail;
status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
sizeof(struct dlm_master_request),
dlm_master_request_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
sizeof(struct dlm_assert_master),
dlm_assert_master_handler,
dlm, dlm_assert_master_post_handler,
&dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
sizeof(struct dlm_create_lock),
dlm_create_lock_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
DLM_CONVERT_LOCK_MAX_LEN,
dlm_convert_lock_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
DLM_UNLOCK_LOCK_MAX_LEN,
dlm_unlock_lock_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
DLM_PROXY_AST_MAX_LEN,
dlm_proxy_ast_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
sizeof(struct dlm_exit_domain),
dlm_exit_domain_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
sizeof(struct dlm_deref_lockres),
dlm_deref_lockres_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
sizeof(struct dlm_migrate_request),
dlm_migrate_request_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
DLM_MIG_LOCKRES_MAX_LEN,
dlm_mig_lockres_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
sizeof(struct dlm_master_requery),
dlm_master_requery_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
sizeof(struct dlm_lock_request),
dlm_request_all_locks_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
sizeof(struct dlm_reco_data_done),
dlm_reco_data_done_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
sizeof(struct dlm_begin_reco),
dlm_begin_reco_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
sizeof(struct dlm_finalize_reco),
dlm_finalize_reco_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
sizeof(struct dlm_exit_domain),
dlm_begin_exit_domain_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_DEREF_LOCKRES_DONE, dlm->key,
sizeof(struct dlm_deref_lockres_done),
dlm_deref_lockres_done_handler,
dlm, NULL, &dlm->dlm_domain_handlers);
bail:
if (status)
dlm_unregister_domain_handlers(dlm);
return status;
}
static int dlm_join_domain(struct dlm_ctxt *dlm)
{
int status;
unsigned int backoff;
unsigned int total_backoff = 0;
char wq_name[O2NM_MAX_NAME_LEN];
BUG_ON(!dlm);
mlog(0, "Join domain %s\n", dlm->name);
status = dlm_register_domain_handlers(dlm);
if (status) {
mlog_errno(status);
goto bail;
}
status = dlm_launch_thread(dlm);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = dlm_launch_recovery_thread(dlm);
if (status < 0) {
mlog_errno(status);
goto bail;
}
dlm_debug_init(dlm);
snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
if (!dlm->dlm_worker) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
do {
status = dlm_try_to_join_domain(dlm);
/* If we're racing another node to the join, then we
* need to back off temporarily and let them
* complete. */
#define DLM_JOIN_TIMEOUT_MSECS 90000
if (status == -EAGAIN) {
if (signal_pending(current)) {
status = -ERESTARTSYS;
goto bail;
}
if (total_backoff > DLM_JOIN_TIMEOUT_MSECS) {
status = -ERESTARTSYS;
mlog(ML_NOTICE, "Timed out joining dlm domain "
"%s after %u msecs\n", dlm->name,
total_backoff);
goto bail;
}
/*
* <chip> After you!
* <dale> No, after you!
* <chip> I insist!
* <dale> But you first!
* ...
*/
backoff = (unsigned int)(jiffies & 0x3);
backoff *= DLM_DOMAIN_BACKOFF_MS;
total_backoff += backoff;
mlog(0, "backoff %d\n", backoff);
msleep(backoff);
}
} while (status == -EAGAIN);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = 0;
bail:
wake_up(&dlm_domain_events);
if (status) {
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
}
return status;
}
static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
u32 key)
{
int i;
int ret;
struct dlm_ctxt *dlm = NULL;
dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
if (!dlm) {
ret = -ENOMEM;
mlog_errno(ret);
goto leave;
}
dlm->name = kstrdup(domain, GFP_KERNEL);
if (dlm->name == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto leave;
}
dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
if (!dlm->lockres_hash) {
ret = -ENOMEM;
mlog_errno(ret);
goto leave;
}
for (i = 0; i < DLM_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
dlm->master_hash = (struct hlist_head **)
dlm_alloc_pagevec(DLM_HASH_PAGES);
if (!dlm->master_hash) {
ret = -ENOMEM;
mlog_errno(ret);
goto leave;
}
for (i = 0; i < DLM_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
dlm->key = key;
dlm->node_num = o2nm_this_node();
dlm_create_debugfs_subroot(dlm);
spin_lock_init(&dlm->spinlock);
spin_lock_init(&dlm->master_lock);
spin_lock_init(&dlm->ast_lock);
spin_lock_init(&dlm->track_lock);
INIT_LIST_HEAD(&dlm->list);
INIT_LIST_HEAD(&dlm->dirty_list);
INIT_LIST_HEAD(&dlm->reco.resources);
INIT_LIST_HEAD(&dlm->reco.node_data);
INIT_LIST_HEAD(&dlm->purge_list);
INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
INIT_LIST_HEAD(&dlm->tracking_list);
dlm->reco.state = 0;
INIT_LIST_HEAD(&dlm->pending_asts);
INIT_LIST_HEAD(&dlm->pending_basts);
mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
dlm->recovery_map, &(dlm->recovery_map[0]));
bitmap_zero(dlm->recovery_map, O2NM_MAX_NODES);
bitmap_zero(dlm->live_nodes_map, O2NM_MAX_NODES);
bitmap_zero(dlm->domain_map, O2NM_MAX_NODES);
dlm->dlm_thread_task = NULL;
dlm->dlm_reco_thread_task = NULL;
dlm->dlm_worker = NULL;
init_waitqueue_head(&dlm->dlm_thread_wq);
init_waitqueue_head(&dlm->dlm_reco_thread_wq);
init_waitqueue_head(&dlm->reco.event);
init_waitqueue_head(&dlm->ast_wq);
init_waitqueue_head(&dlm->migration_wq);
INIT_LIST_HEAD(&dlm->mle_hb_events);
dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
init_waitqueue_head(&dlm->dlm_join_events);
dlm->migrate_done = 0;
dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
atomic_set(&dlm->res_tot_count, 0);
atomic_set(&dlm->res_cur_count, 0);
for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
atomic_set(&dlm->mle_tot_count[i], 0);
atomic_set(&dlm->mle_cur_count[i], 0);
}
spin_lock_init(&dlm->work_lock);
INIT_LIST_HEAD(&dlm->work_list);
INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
kref_init(&dlm->dlm_refs);
dlm->dlm_state = DLM_CTXT_NEW;
INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
mlog(0, "context init: refcount %u\n",
kref_read(&dlm->dlm_refs));
ret = 0;
leave:
if (ret < 0 && dlm) {
if (dlm->master_hash)
dlm_free_pagevec((void **)dlm->master_hash,
DLM_HASH_PAGES);
if (dlm->lockres_hash)
dlm_free_pagevec((void **)dlm->lockres_hash,
DLM_HASH_PAGES);
kfree(dlm->name);
kfree(dlm);
dlm = NULL;
}
return dlm;
}
/*
* Compare a requested locking protocol version against the current one.
*
* If the major numbers are different, they are incompatible.
* If the current minor is greater than the request, they are incompatible.
* If the current minor is less than or equal to the request, they are
* compatible, and the requester should run at the current minor version.
*/
static int dlm_protocol_compare(struct dlm_protocol_version *existing,
struct dlm_protocol_version *request)
{
if (existing->pv_major != request->pv_major)
return 1;
if (existing->pv_minor > request->pv_minor)
return 1;
if (existing->pv_minor < request->pv_minor)
request->pv_minor = existing->pv_minor;
return 0;
}
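/*
 * Worked examples of the rules above: with an existing protocol of 1.1,
 * a request for 1.3 succeeds and the request is clamped down to 1.1;
 * with an existing protocol of 1.3, a request for 1.1 fails because the
 * current minor exceeds the request; and any 2.x request against a 1.x
 * protocol fails on the major number alone.
 */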
/*
* dlm_register_domain: one-time setup per "domain".
*
* The filesystem passes in the requested locking version via proto.
* If registration was successful, proto will contain the negotiated
* locking protocol.
*/
struct dlm_ctxt * dlm_register_domain(const char *domain,
u32 key,
struct dlm_protocol_version *fs_proto)
{
int ret;
struct dlm_ctxt *dlm = NULL;
struct dlm_ctxt *new_ctxt = NULL;
if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
ret = -ENAMETOOLONG;
mlog(ML_ERROR, "domain name length too long\n");
goto leave;
}
mlog(0, "register called for domain \"%s\"\n", domain);
retry:
dlm = NULL;
if (signal_pending(current)) {
ret = -ERESTARTSYS;
mlog_errno(ret);
goto leave;
}
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain(domain);
if (dlm) {
if (dlm->dlm_state != DLM_CTXT_JOINED) {
spin_unlock(&dlm_domain_lock);
mlog(0, "This ctxt is not joined yet!\n");
wait_event_interruptible(dlm_domain_events,
dlm_wait_on_domain_helper(
domain));
goto retry;
}
if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
spin_unlock(&dlm_domain_lock);
mlog(ML_ERROR,
"Requested locking protocol version is not "
"compatible with already registered domain "
"\"%s\"\n", domain);
ret = -EPROTO;
goto leave;
}
__dlm_get(dlm);
dlm->num_joins++;
spin_unlock(&dlm_domain_lock);
ret = 0;
goto leave;
}
/* doesn't exist */
if (!new_ctxt) {
spin_unlock(&dlm_domain_lock);
new_ctxt = dlm_alloc_ctxt(domain, key);
if (new_ctxt)
goto retry;
ret = -ENOMEM;
mlog_errno(ret);
goto leave;
}
/* a little variable switch-a-roo here... */
dlm = new_ctxt;
new_ctxt = NULL;
/* add the new domain */
list_add_tail(&dlm->list, &dlm_domains);
spin_unlock(&dlm_domain_lock);
/*
* Pass the locking protocol version into the join. If the join
* succeeds, it will have the negotiated protocol set.
*/
dlm->dlm_locking_proto = dlm_protocol;
dlm->fs_locking_proto = *fs_proto;
ret = dlm_join_domain(dlm);
if (ret) {
mlog_errno(ret);
dlm_put(dlm);
goto leave;
}
/* Tell the caller what locking protocol we negotiated */
*fs_proto = dlm->fs_locking_proto;
ret = 0;
leave:
if (new_ctxt)
dlm_free_ctxt_mem(new_ctxt);
if (ret < 0)
dlm = ERR_PTR(ret);
return dlm;
}
EXPORT_SYMBOL_GPL(dlm_register_domain);
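/*
 * A minimal caller sketch (error handling elided; the domain name, key
 * and protocol numbers are illustrative, not ocfs2's real values):
 *
 *	struct dlm_protocol_version fs_proto = { .pv_major = 1, .pv_minor = 0 };
 *	struct dlm_ctxt *dlm;
 *
 *	dlm = dlm_register_domain("mydomain", 0x12345678, &fs_proto);
 *	if (IS_ERR(dlm))
 *		return PTR_ERR(dlm);
 *	// fs_proto now holds the negotiated minor version
 *	...
 *	dlm_unregister_domain(dlm);
 */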
static LIST_HEAD(dlm_join_handlers);
static void dlm_unregister_net_handlers(void)
{
o2net_unregister_handler_list(&dlm_join_handlers);
}
static int dlm_register_net_handlers(void)
{
int status = 0;
status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
sizeof(struct dlm_query_join_request),
dlm_query_join_handler,
NULL, NULL, &dlm_join_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
sizeof(struct dlm_assert_joined),
dlm_assert_joined_handler,
NULL, NULL, &dlm_join_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
sizeof(struct dlm_cancel_join),
dlm_cancel_join_handler,
NULL, NULL, &dlm_join_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_QUERY_REGION, DLM_MOD_KEY,
sizeof(struct dlm_query_region),
dlm_query_region_handler,
NULL, NULL, &dlm_join_handlers);
if (status)
goto bail;
status = o2net_register_handler(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
sizeof(struct dlm_query_nodeinfo),
dlm_query_nodeinfo_handler,
NULL, NULL, &dlm_join_handlers);
bail:
if (status < 0)
dlm_unregister_net_handlers();
return status;
}
/* Domain eviction callback handling.
*
* The file system requires notification of node death *before* the
 * dlm completes its recovery work, otherwise it may be able to
 * acquire locks on resources requiring recovery. Since the dlm can
 * evict a node from its domain *before* heartbeat fires, a similar
* mechanism is required. */
/* Eviction is not expected to happen often, so a per-domain lock is
* not necessary. Eviction callbacks are allowed to sleep for short
* periods of time. */
static DECLARE_RWSEM(dlm_callback_sem);
void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
int node_num)
{
struct dlm_eviction_cb *cb;
down_read(&dlm_callback_sem);
list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
cb->ec_func(node_num, cb->ec_data);
}
up_read(&dlm_callback_sem);
}
void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
dlm_eviction_func *f,
void *data)
{
INIT_LIST_HEAD(&cb->ec_item);
cb->ec_func = f;
cb->ec_data = data;
}
EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
struct dlm_eviction_cb *cb)
{
down_write(&dlm_callback_sem);
list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
{
down_write(&dlm_callback_sem);
list_del_init(&cb->ec_item);
up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
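/*
 * A hedged usage sketch of the eviction callback API above (the handler
 * name and body are hypothetical; the callback signature is assumed to
 * match dlm_eviction_func as invoked by
 * dlm_fire_domain_eviction_callbacks()):
 *
 *	static void my_evict_cb(int node_num, void *data)
 *	{
 *		pr_info("node %d evicted from domain\n", node_num);
 *	}
 *
 *	static struct dlm_eviction_cb my_cb;
 *
 *	dlm_setup_eviction_cb(&my_cb, my_evict_cb, NULL);
 *	dlm_register_eviction_cb(dlm, &my_cb);
 *	// ...
 *	dlm_unregister_eviction_cb(&my_cb);
 *
 * Callbacks run under a shared rwsem and may sleep briefly, but must not
 * call back into the register/unregister paths.
 */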
static int __init dlm_init(void)
{
int status;
status = dlm_init_mle_cache();
if (status) {
mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
goto error;
}
status = dlm_init_master_caches();
if (status) {
mlog(ML_ERROR, "Could not create o2dlm_lockres and "
"o2dlm_lockname slabcaches\n");
goto error;
}
status = dlm_init_lock_cache();
if (status) {
mlog(ML_ERROR, "Count not create o2dlm_lock slabcache\n");
goto error;
}
status = dlm_register_net_handlers();
if (status) {
mlog(ML_ERROR, "Unable to register network handlers\n");
goto error;
}
dlm_create_debugfs_root();
return 0;
error:
dlm_unregister_net_handlers();
dlm_destroy_lock_cache();
dlm_destroy_master_caches();
dlm_destroy_mle_cache();
return -1;
}
static void __exit dlm_exit (void)
{
dlm_destroy_debugfs_root();
dlm_unregister_net_handlers();
dlm_destroy_lock_cache();
dlm_destroy_master_caches();
dlm_destroy_mle_cache();
}
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 Distributed Lock Management");
module_init(dlm_init);
module_exit(dlm_exit);
| linux-master | fs/ocfs2/dlm/dlmdomain.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmdebug.c
*
* debug functionality for the dlm
*
* Copyright (C) 2004, 2008 Oracle. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
static int stringify_lockname(const char *lockname, int locklen, char *buf,
int len);
void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
{
spin_lock(&res->spinlock);
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
}
static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
{
int bit;
assert_spin_locked(&res->spinlock);
printk(" refmap nodes: [ ");
bit = 0;
while (1) {
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
if (bit >= O2NM_MAX_NODES)
break;
printk("%u ", bit);
bit++;
}
printk("], inflight=%u\n", res->inflight_locks);
}
static void __dlm_print_lock(struct dlm_lock *lock)
{
spin_lock(&lock->spinlock);
printk(" type=%d, conv=%d, node=%u, cookie=%u:%llu, "
"ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), "
"pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n",
lock->ml.type, lock->ml.convert_type, lock->ml.node,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
kref_read(&lock->lock_refs),
(list_empty(&lock->ast_list) ? 'y' : 'n'),
(lock->ast_pending ? 'y' : 'n'),
(list_empty(&lock->bast_list) ? 'y' : 'n'),
(lock->bast_pending ? 'y' : 'n'),
(lock->convert_pending ? 'y' : 'n'),
(lock->lock_pending ? 'y' : 'n'),
(lock->cancel_pending ? 'y' : 'n'),
(lock->unlock_pending ? 'y' : 'n'));
spin_unlock(&lock->spinlock);
}
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
{
struct dlm_lock *lock;
char buf[DLM_LOCKID_NAME_MAX];
assert_spin_locked(&res->spinlock);
stringify_lockname(res->lockname.name, res->lockname.len,
buf, sizeof(buf));
printk("lockres: %s, owner=%u, state=%u\n",
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
res->last_used, kref_read(&res->refs),
list_empty(&res->purge) ? "no" : "yes");
printk(" on dirty list: %s, on reco list: %s, "
"migrating pending: %s\n",
list_empty(&res->dirty) ? "no" : "yes",
list_empty(&res->recovering) ? "no" : "yes",
res->migration_pending ? "yes" : "no");
printk(" inflight locks: %d, asts reserved: %d\n",
res->inflight_locks, atomic_read(&res->asts_reserved));
dlm_print_lockres_refmap(res);
printk(" granted queue:\n");
list_for_each_entry(lock, &res->granted, list) {
__dlm_print_lock(lock);
}
printk(" converting queue:\n");
list_for_each_entry(lock, &res->converting, list) {
__dlm_print_lock(lock);
}
printk(" blocked queue:\n");
list_for_each_entry(lock, &res->blocked, list) {
__dlm_print_lock(lock);
}
}
void dlm_print_one_lock(struct dlm_lock *lockid)
{
dlm_print_one_lock_resource(lockid->lockres);
}
EXPORT_SYMBOL_GPL(dlm_print_one_lock);
static const char *dlm_errnames[] = {
[DLM_NORMAL] = "DLM_NORMAL",
[DLM_GRANTED] = "DLM_GRANTED",
[DLM_DENIED] = "DLM_DENIED",
[DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS",
[DLM_WORKING] = "DLM_WORKING",
[DLM_BLOCKED] = "DLM_BLOCKED",
[DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN",
[DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD",
[DLM_SYSERR] = "DLM_SYSERR",
[DLM_NOSUPPORT] = "DLM_NOSUPPORT",
[DLM_CANCELGRANT] = "DLM_CANCELGRANT",
[DLM_IVLOCKID] = "DLM_IVLOCKID",
[DLM_SYNC] = "DLM_SYNC",
[DLM_BADTYPE] = "DLM_BADTYPE",
[DLM_BADRESOURCE] = "DLM_BADRESOURCE",
[DLM_MAXHANDLES] = "DLM_MAXHANDLES",
[DLM_NOCLINFO] = "DLM_NOCLINFO",
[DLM_NOLOCKMGR] = "DLM_NOLOCKMGR",
[DLM_NOPURGED] = "DLM_NOPURGED",
[DLM_BADARGS] = "DLM_BADARGS",
[DLM_VOID] = "DLM_VOID",
[DLM_NOTQUEUED] = "DLM_NOTQUEUED",
[DLM_IVBUFLEN] = "DLM_IVBUFLEN",
[DLM_CVTUNGRANT] = "DLM_CVTUNGRANT",
[DLM_BADPARAM] = "DLM_BADPARAM",
[DLM_VALNOTVALID] = "DLM_VALNOTVALID",
[DLM_REJECTED] = "DLM_REJECTED",
[DLM_ABORT] = "DLM_ABORT",
[DLM_CANCEL] = "DLM_CANCEL",
[DLM_IVRESHANDLE] = "DLM_IVRESHANDLE",
[DLM_DEADLOCK] = "DLM_DEADLOCK",
[DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS",
[DLM_FORWARD] = "DLM_FORWARD",
[DLM_TIMEOUT] = "DLM_TIMEOUT",
[DLM_IVGROUPID] = "DLM_IVGROUPID",
[DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT",
[DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH",
[DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION",
[DLM_NO_CONTROL_DEVICE] = "DLM_NO_CONTROL_DEVICE",
[DLM_RECOVERING] = "DLM_RECOVERING",
[DLM_MIGRATING] = "DLM_MIGRATING",
[DLM_MAXSTATS] = "DLM_MAXSTATS",
};
static const char *dlm_errmsgs[] = {
[DLM_NORMAL] = "request in progress",
[DLM_GRANTED] = "request granted",
[DLM_DENIED] = "request denied",
[DLM_DENIED_NOLOCKS] = "request denied, out of system resources",
[DLM_WORKING] = "async request in progress",
[DLM_BLOCKED] = "lock request blocked",
[DLM_BLOCKED_ORPHAN] = "lock request blocked by an orphan lock",
[DLM_DENIED_GRACE_PERIOD] = "topological change in progress",
[DLM_SYSERR] = "system error",
[DLM_NOSUPPORT] = "unsupported",
[DLM_CANCELGRANT] = "can't cancel convert: already granted",
[DLM_IVLOCKID] = "bad lockid",
[DLM_SYNC] = "synchronous request granted",
[DLM_BADTYPE] = "bad resource type",
[DLM_BADRESOURCE] = "bad resource handle",
[DLM_MAXHANDLES] = "no more resource handles",
[DLM_NOCLINFO] = "can't contact cluster manager",
[DLM_NOLOCKMGR] = "can't contact lock manager",
[DLM_NOPURGED] = "can't contact purge daemon",
[DLM_BADARGS] = "bad api args",
[DLM_VOID] = "no status",
[DLM_NOTQUEUED] = "NOQUEUE was specified and request failed",
[DLM_IVBUFLEN] = "invalid resource name length",
[DLM_CVTUNGRANT] = "attempted to convert ungranted lock",
[DLM_BADPARAM] = "invalid lock mode specified",
[DLM_VALNOTVALID] = "value block has been invalidated",
[DLM_REJECTED] = "request rejected, unrecognized client",
[DLM_ABORT] = "blocked lock request cancelled",
[DLM_CANCEL] = "conversion request cancelled",
[DLM_IVRESHANDLE] = "invalid resource handle",
[DLM_DEADLOCK] = "deadlock recovery refused this request",
[DLM_DENIED_NOASTS] = "failed to allocate AST",
[DLM_FORWARD] = "request must wait for primary's response",
[DLM_TIMEOUT] = "timeout value for lock has expired",
[DLM_IVGROUPID] = "invalid group specification",
[DLM_VERS_CONFLICT] = "version conflicts prevent request handling",
[DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong",
[DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device",
[DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device ",
[DLM_RECOVERING] = "lock resource being recovered",
[DLM_MIGRATING] = "lock resource being migrated",
[DLM_MAXSTATS] = "invalid error number",
};
const char *dlm_errmsg(enum dlm_status err)
{
if (err >= DLM_MAXSTATS || err < 0)
return dlm_errmsgs[DLM_MAXSTATS];
return dlm_errmsgs[err];
}
EXPORT_SYMBOL_GPL(dlm_errmsg);
const char *dlm_errname(enum dlm_status err)
{
if (err >= DLM_MAXSTATS || err < 0)
return dlm_errnames[DLM_MAXSTATS];
return dlm_errnames[err];
}
EXPORT_SYMBOL_GPL(dlm_errname);
/* NOTE: This function converts a lockname into a string. It uses knowledge
* of the format of the lockname that should be outside the purview of the dlm.
* We are adding it only to make dlm debugging slightly easier.
*
* For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h.
*/
static int stringify_lockname(const char *lockname, int locklen, char *buf,
int len)
{
int out = 0;
__be64 inode_blkno_be;
#define OCFS2_DENTRY_LOCK_INO_START 18
if (*lockname == 'N') {
memcpy((__be64 *)&inode_blkno_be,
(char *)&lockname[OCFS2_DENTRY_LOCK_INO_START],
sizeof(__be64));
out += scnprintf(buf + out, len - out, "%.*s%08x",
OCFS2_DENTRY_LOCK_INO_START - 1, lockname,
(unsigned int)be64_to_cpu(inode_blkno_be));
} else
out += scnprintf(buf + out, len - out, "%.*s",
locklen, lockname);
return out;
}
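/*
* Illustrative use (a sketch; buffer sizing mirrors the callers in
* this file):
*
*	char buf[DLM_LOCKID_NAME_MAX];
*
*	stringify_lockname(res->lockname.name, res->lockname.len,
*			   buf, sizeof(buf));
*
* For a dentry lock ('N' prefix), this prints the leading
* OCFS2_DENTRY_LOCK_INO_START - 1 bytes of the name followed by the
* low 32 bits of the big-endian inode block number embedded at byte
* OCFS2_DENTRY_LOCK_INO_START; any other lockname is printed verbatim.
*/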
static int stringify_nodemap(unsigned long *nodemap, int maxnodes,
char *buf, int len)
{
int out = 0;
int i = -1;
while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes)
out += scnprintf(buf + out, len - out, "%d ", i);
return out;
}
static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len)
{
int out = 0;
char *mle_type;
if (mle->type == DLM_MLE_BLOCK)
mle_type = "BLK";
else if (mle->type == DLM_MLE_MASTER)
mle_type = "MAS";
else
mle_type = "MIG";
out += stringify_lockname(mle->mname, mle->mnamelen, buf + out, len - out);
out += scnprintf(buf + out, len - out,
"\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n",
mle_type, mle->master, mle->new_master,
!list_empty(&mle->hb_events),
!!mle->inuse,
kref_read(&mle->mle_refs));
out += scnprintf(buf + out, len - out, "Maybe=");
out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
out += scnprintf(buf + out, len - out, "Vote=");
out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
out += scnprintf(buf + out, len - out, "Response=");
out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
out += scnprintf(buf + out, len - out, "Node=");
out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
out += scnprintf(buf + out, len - out, "\n");
return out;
}
void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
char *buf;
buf = (char *) get_zeroed_page(GFP_ATOMIC);
if (buf) {
dump_mle(mle, buf, PAGE_SIZE - 1);
free_page((unsigned long)buf);
}
}
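/*
* Note: GFP_ATOMIC above suggests dlm_print_one_mle() may be called
* with spinlocks held; if the atomic page allocation fails, the dump
* is silently skipped rather than sleeping.
*/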
#ifdef CONFIG_DEBUG_FS
static struct dentry *dlm_debugfs_root;
#define DLM_DEBUGFS_DIR "o2dlm"
#define DLM_DEBUGFS_DLM_STATE "dlm_state"
#define DLM_DEBUGFS_LOCKING_STATE "locking_state"
#define DLM_DEBUGFS_MLE_STATE "mle_state"
#define DLM_DEBUGFS_PURGE_LIST "purge_list"
/* begin - utils funcs */
static int debug_release(struct inode *inode, struct file *file)
{
free_page((unsigned long)file->private_data);
return 0;
}
static ssize_t debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
i_size_read(file->f_mapping->host));
}
/* end - util funcs */
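/*
* The debugfs files below share one lifecycle: open() renders the dump
* into a freshly zeroed page with a *_print() helper and stashes it in
* file->private_data, debug_read() serves it via
* simple_read_from_buffer(), and debug_release() frees the page. A
* typical consumer (hypothetical path, assuming debugfs is mounted in
* the usual location) would be:
*
*	cat /sys/kernel/debug/o2dlm/<domain>/dlm_state
*/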
/* begin - purge list funcs */
static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
{
struct dlm_lock_resource *res;
int out = 0;
unsigned long total = 0;
out += scnprintf(buf + out, len - out,
"Dumping Purgelist for Domain: %s\n", dlm->name);
spin_lock(&dlm->spinlock);
list_for_each_entry(res, &dlm->purge_list, purge) {
++total;
if (len - out < 100)
continue;
spin_lock(&res->spinlock);
out += stringify_lockname(res->lockname.name,
res->lockname.len,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\t%ld\n",
(jiffies - res->last_used)/HZ);
spin_unlock(&res->spinlock);
}
spin_unlock(&dlm->spinlock);
out += scnprintf(buf + out, len - out, "Total on list: %lu\n", total);
return out;
}
static int debug_purgelist_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
char *buf = NULL;
buf = (char *) get_zeroed_page(GFP_NOFS);
if (!buf)
goto bail;
i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
file->private_data = buf;
return 0;
bail:
return -ENOMEM;
}
static const struct file_operations debug_purgelist_fops = {
.open = debug_purgelist_open,
.release = debug_release,
.read = debug_read,
.llseek = generic_file_llseek,
};
/* end - purge list funcs */
/* begin - debug mle funcs */
static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
{
struct dlm_master_list_entry *mle;
struct hlist_head *bucket;
int i, out = 0;
unsigned long total = 0, longest = 0, bucket_count = 0;
out += scnprintf(buf + out, len - out,
"Dumping MLEs for Domain: %s\n", dlm->name);
spin_lock(&dlm->master_lock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
hlist_for_each_entry(mle, bucket, master_hash_node) {
++total;
++bucket_count;
if (len - out < 200)
continue;
out += dump_mle(mle, buf + out, len - out);
}
longest = max(longest, bucket_count);
bucket_count = 0;
}
spin_unlock(&dlm->master_lock);
out += scnprintf(buf + out, len - out,
"Total: %lu, Longest: %lu\n", total, longest);
return out;
}
static int debug_mle_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
char *buf = NULL;
buf = (char *) get_zeroed_page(GFP_NOFS);
if (!buf)
goto bail;
i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
file->private_data = buf;
return 0;
bail:
return -ENOMEM;
}
static const struct file_operations debug_mle_fops = {
.open = debug_mle_open,
.release = debug_release,
.read = debug_read,
.llseek = generic_file_llseek,
};
/* end - debug mle funcs */
/* begin - debug lockres funcs */
static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len)
{
int out;
#define DEBUG_LOCK_VERSION 1
spin_lock(&lock->spinlock);
out = scnprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d,"
"%d,%d,%d,%d\n",
DEBUG_LOCK_VERSION,
list_type, lock->ml.type, lock->ml.convert_type,
lock->ml.node,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
!list_empty(&lock->ast_list),
!list_empty(&lock->bast_list),
lock->ast_pending, lock->bast_pending,
lock->convert_pending, lock->lock_pending,
lock->cancel_pending, lock->unlock_pending,
kref_read(&lock->lock_refs));
spin_unlock(&lock->spinlock);
return out;
}
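/*
* Example of one emitted record (values are illustrative; field order
* follows the format string above):
*
*	LOCK:1,0,5,-1,3,3:1234,0,0,0,0,0,0,0,0,2
*
* i.e. version 1, granted list, LKM_EXMODE, no conversion in progress,
* held by node 3, cookie 3:1234, empty ast/bast lists, nothing
* pending, refcount 2.
*/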
static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
{
struct dlm_lock *lock;
int i;
int out = 0;
out += scnprintf(buf + out, len - out, "NAME:");
out += stringify_lockname(res->lockname.name, res->lockname.len,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
#define DEBUG_LRES_VERSION 1
out += scnprintf(buf + out, len - out,
"LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n",
DEBUG_LRES_VERSION,
res->owner, res->state, res->last_used,
!list_empty(&res->purge),
!list_empty(&res->dirty),
!list_empty(&res->recovering),
res->inflight_locks, res->migration_pending,
atomic_read(&res->asts_reserved),
kref_read(&res->refs));
/* refmap */
out += scnprintf(buf + out, len - out, "RMAP:");
out += stringify_nodemap(res->refmap, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
/* lvb */
out += scnprintf(buf + out, len - out, "LVBX:");
for (i = 0; i < DLM_LVB_LEN; i++)
out += scnprintf(buf + out, len - out,
"%02x", (unsigned char)res->lvb[i]);
out += scnprintf(buf + out, len - out, "\n");
/* granted */
list_for_each_entry(lock, &res->granted, list)
out += dump_lock(lock, 0, buf + out, len - out);
/* converting */
list_for_each_entry(lock, &res->converting, list)
out += dump_lock(lock, 1, buf + out, len - out);
/* blocked */
list_for_each_entry(lock, &res->blocked, list)
out += dump_lock(lock, 2, buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
return out;
}
static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
{
struct debug_lockres *dl = m->private;
struct dlm_ctxt *dlm = dl->dl_ctxt;
struct dlm_lock_resource *oldres = dl->dl_res;
struct dlm_lock_resource *res = NULL, *iter;
struct list_head *track_list;
spin_lock(&dlm->track_lock);
if (oldres)
track_list = &oldres->tracking;
else {
track_list = &dlm->tracking_list;
if (list_empty(track_list)) {
dl = NULL;
spin_unlock(&dlm->track_lock);
goto bail;
}
}
list_for_each_entry(iter, track_list, tracking) {
if (&iter->tracking != &dlm->tracking_list) {
dlm_lockres_get(iter);
res = iter;
}
break;
}
spin_unlock(&dlm->track_lock);
if (oldres)
dlm_lockres_put(oldres);
dl->dl_res = res;
if (res) {
spin_lock(&res->spinlock);
dump_lockres(res, dl->dl_buf, dl->dl_len - 1);
spin_unlock(&res->spinlock);
} else
dl = NULL;
bail:
/* passed to seq_show */
return dl;
}
static void lockres_seq_stop(struct seq_file *m, void *v)
{
}
static void *lockres_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
return NULL;
}
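/*
* Note on the seq_file wiring: ->next always returns NULL, so every
* read() iteration re-enters lockres_seq_start(), which advances from
* the previously dumped resource (dl->dl_res) along the domain's
* tracking list; one lockres is rendered per pass into dl->dl_buf and
* emitted by lockres_seq_show().
*/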
static int lockres_seq_show(struct seq_file *s, void *v)
{
struct debug_lockres *dl = (struct debug_lockres *)v;
seq_printf(s, "%s", dl->dl_buf);
return 0;
}
static const struct seq_operations debug_lockres_ops = {
.start = lockres_seq_start,
.stop = lockres_seq_stop,
.next = lockres_seq_next,
.show = lockres_seq_show,
};
static int debug_lockres_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
struct debug_lockres *dl;
void *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
goto bail;
dl = __seq_open_private(file, &debug_lockres_ops, sizeof(*dl));
if (!dl)
goto bailfree;
dl->dl_len = PAGE_SIZE;
dl->dl_buf = buf;
dlm_grab(dlm);
dl->dl_ctxt = dlm;
return 0;
bailfree:
kfree(buf);
bail:
mlog_errno(-ENOMEM);
return -ENOMEM;
}
static int debug_lockres_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct debug_lockres *dl = (struct debug_lockres *)seq->private;
if (dl->dl_res)
dlm_lockres_put(dl->dl_res);
dlm_put(dl->dl_ctxt);
kfree(dl->dl_buf);
return seq_release_private(inode, file);
}
static const struct file_operations debug_lockres_fops = {
.open = debug_lockres_open,
.release = debug_lockres_release,
.read = seq_read,
.llseek = seq_lseek,
};
/* end - debug lockres funcs */
/* begin - debug state funcs */
static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
{
int out = 0;
struct dlm_reco_node_data *node;
char *state;
int cur_mles = 0, tot_mles = 0;
int i;
spin_lock(&dlm->spinlock);
switch (dlm->dlm_state) {
case DLM_CTXT_NEW:
state = "NEW"; break;
case DLM_CTXT_JOINED:
state = "JOINED"; break;
case DLM_CTXT_IN_SHUTDOWN:
state = "SHUTDOWN"; break;
case DLM_CTXT_LEAVING:
state = "LEAVING"; break;
default:
state = "UNKNOWN"; break;
}
/* Domain: xxxxxxxxxx Key: 0xdfbac769 */
out += scnprintf(buf + out, len - out,
"Domain: %s Key: 0x%08x Protocol: %d.%d\n",
dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
/* Thread Pid: xxx Node: xxx State: xxxxx */
out += scnprintf(buf + out, len - out,
"Thread Pid: %d Node: %d State: %s\n",
task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
/* Number of Joins: xxx Joining Node: xxx */
out += scnprintf(buf + out, len - out,
"Number of Joins: %d Joining Node: %d\n",
dlm->num_joins, dlm->joining_node);
/* Domain Map: xx xx xx */
out += scnprintf(buf + out, len - out, "Domain Map: ");
out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
/* Exit Domain Map: xx xx xx */
out += scnprintf(buf + out, len - out, "Exit Domain Map: ");
out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
/* Live Map: xx xx xx */
out += scnprintf(buf + out, len - out, "Live Map: ");
out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
/* Lock Resources: xxx (xxx) */
out += scnprintf(buf + out, len - out,
"Lock Resources: %d (%d)\n",
atomic_read(&dlm->res_cur_count),
atomic_read(&dlm->res_tot_count));
for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
tot_mles += atomic_read(&dlm->mle_tot_count[i]);
for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
cur_mles += atomic_read(&dlm->mle_cur_count[i]);
/* MLEs: xxx (xxx) */
out += scnprintf(buf + out, len - out,
"MLEs: %d (%d)\n", cur_mles, tot_mles);
/* Blocking: xxx (xxx) */
out += scnprintf(buf + out, len - out,
" Blocking: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
/* Mastery: xxx (xxx) */
out += scnprintf(buf + out, len - out,
" Mastery: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
/* Migration: xxx (xxx) */
out += scnprintf(buf + out, len - out,
" Migration: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
/* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */
out += scnprintf(buf + out, len - out,
"Lists: Dirty=%s Purge=%s PendingASTs=%s "
"PendingBASTs=%s\n",
(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
(list_empty(&dlm->purge_list) ? "Empty" : "InUse"),
(list_empty(&dlm->pending_asts) ? "Empty" : "InUse"),
(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
/* Purge Count: xxx Refs: xxx */
out += scnprintf(buf + out, len - out,
"Purge Count: %d Refs: %d\n", dlm->purge_count,
kref_read(&dlm->dlm_refs));
/* Dead Node: xxx */
out += scnprintf(buf + out, len - out,
"Dead Node: %d\n", dlm->reco.dead_node);
/* What about DLM_RECO_STATE_FINALIZE? */
if (dlm->reco.state == DLM_RECO_STATE_ACTIVE)
state = "ACTIVE";
else
state = "INACTIVE";
/* Recovery Pid: xxxx Master: xxx State: xxxx */
out += scnprintf(buf + out, len - out,
"Recovery Pid: %d Master: %d State: %s\n",
task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.new_master, state);
/* Recovery Map: xx xx */
out += scnprintf(buf + out, len - out, "Recovery Map: ");
out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
buf + out, len - out);
out += scnprintf(buf + out, len - out, "\n");
/* Recovery Node State: */
out += scnprintf(buf + out, len - out, "Recovery Node State:\n");
list_for_each_entry(node, &dlm->reco.node_data, list) {
switch (node->state) {
case DLM_RECO_NODE_DATA_INIT:
state = "INIT";
break;
case DLM_RECO_NODE_DATA_REQUESTING:
state = "REQUESTING";
break;
case DLM_RECO_NODE_DATA_DEAD:
state = "DEAD";
break;
case DLM_RECO_NODE_DATA_RECEIVING:
state = "RECEIVING";
break;
case DLM_RECO_NODE_DATA_REQUESTED:
state = "REQUESTED";
break;
case DLM_RECO_NODE_DATA_DONE:
state = "DONE";
break;
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
state = "FINALIZE-SENT";
break;
default:
state = "BAD";
break;
}
out += scnprintf(buf + out, len - out, "\t%u - %s\n",
node->node_num, state);
}
spin_unlock(&dlm->spinlock);
return out;
}
static int debug_state_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
char *buf = NULL;
buf = (char *) get_zeroed_page(GFP_NOFS);
if (!buf)
goto bail;
i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
file->private_data = buf;
return 0;
bail:
return -ENOMEM;
}
static const struct file_operations debug_state_fops = {
.open = debug_state_open,
.release = debug_release,
.read = debug_read,
.llseek = generic_file_llseek,
};
/* end - debug state funcs */
/* files in subroot */
void dlm_debug_init(struct dlm_ctxt *dlm)
{
/* for dumping dlm_ctxt */
debugfs_create_file(DLM_DEBUGFS_DLM_STATE, S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot, dlm, &debug_state_fops);
/* for dumping lockres */
debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot, dlm, &debug_lockres_fops);
/* for dumping mles */
debugfs_create_file(DLM_DEBUGFS_MLE_STATE, S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot, dlm, &debug_mle_fops);
/* for dumping lockres on the purge list */
debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot, dlm,
&debug_purgelist_fops);
}
/* subroot - domain dir */
void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
dlm_debugfs_root);
}
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
debugfs_remove_recursive(dlm->dlm_debugfs_subroot);
}
/* debugfs root */
void dlm_create_debugfs_root(void)
{
dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL);
}
void dlm_destroy_debugfs_root(void)
{
debugfs_remove(dlm_debugfs_root);
}
#endif /* CONFIG_DEBUG_FS */
| linux-master | fs/ocfs2/dlm/dlmdebug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmrecovery.c
*
* recovery stuff
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "../cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
const char *lockname, int namelen,
int total_locks, u64 cookie,
u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
struct dlm_migratable_lockres *mres,
u8 send_to,
struct dlm_lock_resource *res,
int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 *real_master);
static u64 dlm_get_next_mig_cookie(void);
static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
u64 c;
spin_lock(&dlm_mig_cookie_lock);
c = dlm_mig_cookie;
if (dlm_mig_cookie == (~0ULL))
dlm_mig_cookie = 1;
else
dlm_mig_cookie++;
spin_unlock(&dlm_mig_cookie_lock);
return c;
}
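/*
* The cookie wraps back to 1 rather than 0: dlm_send_one_lockres()
* uses a zero mig_cookie for lock resources that fit in a single
* message, so a nonzero value unambiguously marks a multi-message
* migration.
*/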
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
u8 dead_node)
{
assert_spin_locked(&dlm->spinlock);
if (dlm->reco.dead_node != dead_node)
mlog(0, "%s: changing dead_node from %u to %u\n",
dlm->name, dlm->reco.dead_node, dead_node);
dlm->reco.dead_node = dead_node;
}
static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
u8 master)
{
assert_spin_locked(&dlm->spinlock);
mlog(0, "%s: changing new_master from %u to %u\n",
dlm->name, dlm->reco.new_master, master);
dlm->reco.new_master = master;
}
static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
assert_spin_locked(&dlm->spinlock);
clear_bit(dlm->reco.dead_node, dlm->recovery_map);
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
struct dlm_ctxt *dlm =
container_of(work, struct dlm_ctxt, dispatched_work);
LIST_HEAD(tmp_list);
struct dlm_work_item *item, *next;
dlm_workfunc_t *workfunc;
int tot=0;
spin_lock(&dlm->work_lock);
list_splice_init(&dlm->work_list, &tmp_list);
spin_unlock(&dlm->work_lock);
list_for_each_entry(item, &tmp_list, list) {
tot++;
}
mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
list_for_each_entry_safe(item, next, &tmp_list, list) {
workfunc = item->func;
list_del_init(&item->list);
/* already have ref on dlm to avoid having
* it disappear. just double-check. */
BUG_ON(item->dlm != dlm);
/* this is allowed to sleep and
* call network stuff */
workfunc(item, item->data);
dlm_put(dlm);
kfree(item);
}
}
/*
* RECOVERY THREAD
*/
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
/* wake the recovery thread
* this will wake the reco thread in one of three places
* 1) sleeping with no recovery happening
* 2) sleeping with recovery mastered elsewhere
* 3) recovery mastered here, waiting on reco data */
wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
mlog(0, "starting dlm recovery thread...\n");
dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
"dlm_reco-%s", dlm->name);
if (IS_ERR(dlm->dlm_reco_thread_task)) {
mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
dlm->dlm_reco_thread_task = NULL;
return -EINVAL;
}
return 0;
}
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
if (dlm->dlm_reco_thread_task) {
mlog(0, "waiting for dlm recovery thread to exit\n");
kthread_stop(dlm->dlm_reco_thread_task);
dlm->dlm_reco_thread_task = NULL;
}
}
/*
* this is lame, but here's how recovery works...
* 1) all recovery threads cluster wide will work on recovering
* ONE node at a time
* 2) negotiate who will take over all the locks for the dead node.
* that's right... ALL the locks.
* 3) once a new master is chosen, everyone scans all locks
* and moves aside those mastered by the dead guy
* 4) each of these locks should be locked until recovery is done
* 5) the new master collects up all of the secondary lock queue info
* one lock at a time, forcing each node to communicate back
* before continuing
* 6) each secondary lock queue responds with the full known lock info
* 7) once the new master has run all its locks, it sends an ALLDONE!
* message to everyone
* 8) upon receiving this message, the secondary queue node unlocks
* and responds to the ALLDONE
* 9) once the new master gets responses from everyone, it unlocks
* everything and recovery for this dead node is done
* 10) go back to 2) while there are still dead nodes
*
*/
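/*
* A condensed view of the per-node state machine driven below
* (DLM_RECO_NODE_DATA_* states; transitions as implemented in
* dlm_remaster_locks() and dlm_reco_data_done_handler(), with
* FINALIZE_SENT set when the finalize message goes out):
*
*	INIT       -> REQUESTING   (lock info requested from node)
*	REQUESTING -> REQUESTED    (request acked)
*	REQUESTED  -> RECEIVING    (migratable lockres data arriving)
*	REQUESTING/REQUESTED/RECEIVING
*	           -> DONE         (DATA DONE message received)
*	any        -> DEAD         (node died mid-recovery)
*	DONE       -> FINALIZE_SENT
*/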
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
struct dlm_reco_node_data *ndata;
struct dlm_lock_resource *res;
mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
dlm->reco.dead_node, dlm->reco.new_master);
list_for_each_entry(ndata, &dlm->reco.node_data, list) {
char *st = "unknown";
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
st = "init";
break;
case DLM_RECO_NODE_DATA_REQUESTING:
st = "requesting";
break;
case DLM_RECO_NODE_DATA_DEAD:
st = "dead";
break;
case DLM_RECO_NODE_DATA_RECEIVING:
st = "receiving";
break;
case DLM_RECO_NODE_DATA_REQUESTED:
st = "requested";
break;
case DLM_RECO_NODE_DATA_DONE:
st = "done";
break;
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
st = "finalize-sent";
break;
default:
st = "bad";
break;
}
mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
dlm->name, ndata->node_num, st);
}
list_for_each_entry(res, &dlm->reco.resources, recovering) {
mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
dlm->name, res->lockname.len, res->lockname.name);
}
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
static int dlm_recovery_thread(void *data)
{
int status;
struct dlm_ctxt *dlm = data;
unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
if (dlm_domain_fully_joined(dlm)) {
status = dlm_do_recovery(dlm);
if (status == -EAGAIN) {
/* do not sleep, recheck immediately. */
continue;
}
if (status < 0)
mlog_errno(status);
}
wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
kthread_should_stop(),
timeout);
}
mlog(0, "quitting DLM recovery thread\n");
return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
int ready;
spin_lock(&dlm->spinlock);
ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
spin_unlock(&dlm->spinlock);
return ready;
}
/* returns true if node is no longer in the domain
* could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
int dead;
spin_lock(&dlm->spinlock);
dead = !test_bit(node, dlm->domain_map);
spin_unlock(&dlm->spinlock);
return dead;
}
/* returns true if node is no longer in the domain
* could be dead or just not joined */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
int recovered;
spin_lock(&dlm->spinlock);
recovered = !test_bit(node, dlm->recovery_map);
spin_unlock(&dlm->spinlock);
return recovered;
}
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
if (dlm_is_node_dead(dlm, node))
return;
printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
"domain %s\n", node, dlm->name);
if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm, node),
msecs_to_jiffies(timeout));
else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm, node));
}
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
if (dlm_is_node_recovered(dlm, node))
return;
printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
"domain %s\n", node, dlm->name);
if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_is_node_recovered(dlm, node),
msecs_to_jiffies(timeout));
else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_recovered(dlm, node));
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
* block on the dlm->reco.event when recovery is in progress.
* the dlm recovery thread will set this state when it begins
* recovering a dead node (as the new master or not) and clear
* the state and wake as soon as all affected lock resources have
* been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
int in_recovery;
spin_lock(&dlm->spinlock);
in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
spin_unlock(&dlm->spinlock);
return in_recovery;
}
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
if (dlm_in_recovery(dlm)) {
mlog(0, "%s: reco thread %d in recovery: "
"state=%d, master=%u, dead=%u\n",
dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.state, dlm->reco.new_master,
dlm->reco.dead_node);
}
wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
assert_spin_locked(&dlm->spinlock);
BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
dlm->name, dlm->reco.dead_node);
dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
}
static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
wake_up(&dlm->reco.event);
}
static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
"dead node %u in domain %s\n", dlm->reco.new_master,
(dlm->node_num == dlm->reco.new_master ? "me" : "he"),
dlm->reco.dead_node, dlm->name);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
int status = 0;
int ret;
spin_lock(&dlm->spinlock);
if (dlm->migrate_done) {
mlog(0, "%s: no need do recovery after migrating all "
"lock resources\n", dlm->name);
spin_unlock(&dlm->spinlock);
return 0;
}
/* check to see if the new master has died */
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
test_bit(dlm->reco.new_master, dlm->recovery_map)) {
mlog(0, "new master %u died while recovering %u!\n",
dlm->reco.new_master, dlm->reco.dead_node);
/* unset the new_master, leave dead_node */
dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}
/* select a target to recover */
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
int bit;
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES || bit < 0)
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
else
dlm_set_reco_dead_node(dlm, bit);
} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
/* BUG? */
mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
dlm->reco.dead_node);
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
}
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
// mlog(0, "nothing to recover! sleeping now!\n");
spin_unlock(&dlm->spinlock);
/* return to main thread loop and sleep. */
return 0;
}
mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.dead_node);
/* take write barrier */
/* (stops the list reshuffling thread, proxy ast handling) */
dlm_begin_recovery(dlm);
spin_unlock(&dlm->spinlock);
if (dlm->reco.new_master == dlm->node_num)
goto master_here;
if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
/* choose a new master, returns 0 if this node
* is the master, -EEXIST if it's another node.
* this does not return until a new master is chosen
* or recovery completes entirely. */
ret = dlm_pick_recovery_master(dlm);
if (!ret) {
/* already notified everyone. go. */
goto master_here;
}
mlog(0, "another node will master this recovery session.\n");
}
dlm_print_recovery_master(dlm);
/* it is safe to start everything back up here
* because all of the dead node's lock resources
* have been marked as in-recovery */
dlm_end_recovery(dlm);
/* sleep out in main dlm_recovery_thread loop. */
return 0;
master_here:
dlm_print_recovery_master(dlm);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
/* we should never hit this anymore */
mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
"retrying.\n", dlm->name, status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
* to get handled on remaining nodes */
msleep(100);
} else {
/* success! see if any other nodes need recovery */
mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
dlm->name, dlm->reco.dead_node, dlm->node_num);
spin_lock(&dlm->spinlock);
__dlm_reset_recovery(dlm);
dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
spin_unlock(&dlm->spinlock);
}
dlm_end_recovery(dlm);
/* continue and look for another dead node */
return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
int status = 0;
struct dlm_reco_node_data *ndata;
int all_nodes_done;
int destroy = 0;
int pass = 0;
do {
/* we have become recovery master. there is no escaping
* this, so just keep trying until we get it. */
status = dlm_init_recovery_area(dlm, dead_node);
if (status < 0) {
mlog(ML_ERROR, "%s: failed to alloc recovery area, "
"retrying\n", dlm->name);
msleep(1000);
}
} while (status != 0);
/* safe to access the node data list without a lock, since this
* process is the only one to change the list */
list_for_each_entry(ndata, &dlm->reco.node_data, list) {
BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
ndata->node_num);
if (ndata->node_num == dlm->node_num) {
ndata->state = DLM_RECO_NODE_DATA_DONE;
continue;
}
do {
status = dlm_request_all_locks(dlm, ndata->node_num,
dead_node);
if (status < 0) {
mlog_errno(status);
if (dlm_is_host_down(status)) {
/* node died, ignore it for recovery */
status = 0;
ndata->state = DLM_RECO_NODE_DATA_DEAD;
/* wait for the domain map to catch up
* with the network state. */
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm,
ndata->node_num),
msecs_to_jiffies(1000));
mlog(0, "waited 1 sec for %u, "
"dead? %s\n", ndata->node_num,
dlm_is_node_dead(dlm, ndata->node_num) ?
"yes" : "no");
} else {
/* -ENOMEM on the other node */
mlog(0, "%s: node %u returned "
"%d during recovery, retrying "
"after a short wait\n",
dlm->name, ndata->node_num,
status);
msleep(100);
}
}
} while (status != 0);
spin_lock(&dlm_reco_state_lock);
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
case DLM_RECO_NODE_DATA_REQUESTED:
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
mlog(0, "node %u died after requesting "
"recovery info for node %u\n",
ndata->node_num, dead_node);
/* fine. don't need this node's info.
* continue without it. */
break;
case DLM_RECO_NODE_DATA_REQUESTING:
ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
mlog(0, "now receiving recovery data from "
"node %u for dead node %u\n",
ndata->node_num, dead_node);
break;
case DLM_RECO_NODE_DATA_RECEIVING:
mlog(0, "already receiving recovery data from "
"node %u for dead node %u\n",
ndata->node_num, dead_node);
break;
case DLM_RECO_NODE_DATA_DONE:
mlog(0, "already DONE receiving recovery data "
"from node %u for dead node %u\n",
ndata->node_num, dead_node);
break;
}
spin_unlock(&dlm_reco_state_lock);
}
mlog(0, "%s: Done requesting all lock info\n", dlm->name);
/* nodes should be sending reco data now
* just need to wait */
while (1) {
/* check all the nodes now to see if we are
* done, or if anyone died */
all_nodes_done = 1;
spin_lock(&dlm_reco_state_lock);
list_for_each_entry(ndata, &dlm->reco.node_data, list) {
mlog(0, "checking recovery state of node %u\n",
ndata->node_num);
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
case DLM_RECO_NODE_DATA_REQUESTING:
mlog(ML_ERROR, "bad ndata state for "
"node %u: state=%d\n",
ndata->node_num, ndata->state);
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
mlog(0, "node %u died after "
"requesting recovery info for "
"node %u\n", ndata->node_num,
dead_node);
break;
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
mlog(0, "%s: node %u still in state %s\n",
dlm->name, ndata->node_num,
ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
"receiving" : "requested");
all_nodes_done = 0;
break;
case DLM_RECO_NODE_DATA_DONE:
mlog(0, "%s: node %u state is done\n",
dlm->name, ndata->node_num);
break;
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
mlog(0, "%s: node %u state is finalize\n",
dlm->name, ndata->node_num);
break;
}
}
spin_unlock(&dlm_reco_state_lock);
mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
all_nodes_done?"yes":"no");
if (all_nodes_done) {
int ret;
/* Set this flag on the recovery master to prevent a
* new recovery for another dead node from starting
* before this recovery is done; otherwise recovery
* may hang. */
spin_lock(&dlm->spinlock);
dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
spin_unlock(&dlm->spinlock);
/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
* just send a finalize message to everyone and
* clean up */
mlog(0, "all nodes are done! send finalize\n");
ret = dlm_send_finalize_reco_message(dlm);
if (ret < 0)
mlog_errno(ret);
spin_lock(&dlm->spinlock);
dlm_finish_local_lockres_recovery(dlm, dead_node,
dlm->node_num);
spin_unlock(&dlm->spinlock);
mlog(0, "should be done with recovery!\n");
mlog(0, "finishing recovery of %s at %lu, "
"dead=%u, this=%u, new=%u\n", dlm->name,
jiffies, dlm->reco.dead_node,
dlm->node_num, dlm->reco.new_master);
destroy = 1;
status = 0;
/* rescan everything marked dirty along the way */
dlm_kick_thread(dlm, NULL);
break;
}
/* wait to be signalled, with periodic timeout
* to check for node death */
wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
kthread_should_stop(),
msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
}
if (destroy)
dlm_destroy_recovery_area(dlm);
return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
int num=0;
struct dlm_reco_node_data *ndata;
spin_lock(&dlm->spinlock);
bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES);
/* nodes can only be removed (by dying) after dropping
* this lock, and death will be trapped later, so this should do */
spin_unlock(&dlm->spinlock);
while (1) {
num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
if (num >= O2NM_MAX_NODES) {
break;
}
BUG_ON(num == dead_node);
ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
if (!ndata) {
dlm_destroy_recovery_area(dlm);
return -ENOMEM;
}
ndata->node_num = num;
ndata->state = DLM_RECO_NODE_DATA_INIT;
spin_lock(&dlm_reco_state_lock);
list_add_tail(&ndata->list, &dlm->reco.node_data);
spin_unlock(&dlm_reco_state_lock);
num++;
}
return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
struct dlm_reco_node_data *ndata, *next;
LIST_HEAD(tmplist);
spin_lock(&dlm_reco_state_lock);
list_splice_init(&dlm->reco.node_data, &tmplist);
spin_unlock(&dlm_reco_state_lock);
list_for_each_entry_safe(ndata, next, &tmplist, list) {
list_del_init(&ndata->list);
kfree(ndata);
}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
u8 dead_node)
{
struct dlm_lock_request lr;
int ret;
int status;
mlog(0, "\n");
mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
"to %u\n", dead_node, request_from);
memset(&lr, 0, sizeof(lr));
lr.node_idx = dlm->node_num;
lr.dead_node = dead_node;
// send message
ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
&lr, sizeof(lr), request_from, &status);
/* negative status is handled by caller */
if (ret < 0)
mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
"to recover dead node %u\n", dlm->name, ret,
request_from, dead_node);
else
ret = status;
// return from here, then
// sleep until all received or error
return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
char *buf = NULL;
struct dlm_work_item *item = NULL;
if (!dlm_grab(dlm))
return -EINVAL;
if (lr->dead_node != dlm->reco.dead_node) {
mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
"dead_node is %u\n", dlm->name, lr->node_idx,
lr->dead_node, dlm->reco.dead_node);
dlm_print_reco_node_status(dlm);
/* this is a hack */
dlm_put(dlm);
return -ENOMEM;
}
BUG_ON(lr->dead_node != dlm->reco.dead_node);
item = kzalloc(sizeof(*item), GFP_NOFS);
if (!item) {
dlm_put(dlm);
return -ENOMEM;
}
/* this will get freed by dlm_request_all_locks_worker */
buf = (char *) __get_free_page(GFP_NOFS);
if (!buf) {
kfree(item);
dlm_put(dlm);
return -ENOMEM;
}
/* queue up work for dlm_request_all_locks_worker */
dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
item->u.ral.reco_master = lr->node_idx;
item->u.ral.dead_node = lr->dead_node;
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
queue_work(dlm->dlm_worker, &dlm->dispatched_work);
dlm_put(dlm);
return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
struct dlm_migratable_lockres *mres;
struct dlm_lock_resource *res;
struct dlm_ctxt *dlm;
LIST_HEAD(resources);
int ret;
u8 dead_node, reco_master;
int skip_all_done = 0;
dlm = item->dlm;
dead_node = item->u.ral.dead_node;
reco_master = item->u.ral.reco_master;
mres = (struct dlm_migratable_lockres *)data;
mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
dlm->name, dead_node, reco_master);
if (dead_node != dlm->reco.dead_node ||
reco_master != dlm->reco.new_master) {
/* worker could have been created before the recovery master
* died. if so, do not continue, but do not error. */
if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
mlog(ML_NOTICE, "%s: will not send recovery state, "
"recovery master %u died, thread=(dead=%u,mas=%u)"
" current=(dead=%u,mas=%u)\n", dlm->name,
reco_master, dead_node, reco_master,
dlm->reco.dead_node, dlm->reco.new_master);
} else {
mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
"master=%u), request(dead=%u, master=%u)\n",
dlm->name, dlm->reco.dead_node,
dlm->reco.new_master, dead_node, reco_master);
}
goto leave;
}
/* lock resources should have already been moved to the
* dlm->reco.resources list. now move items from that list
* to a temp list if the dead owner matches. note that the
* whole cluster recovers only one node at a time, so we
* can safely move UNKNOWN lock resources for each recovery
* session. */
dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
/* now we can begin blasting lockreses without the dlm lock */
/* any errors returned will be due to the new_master dying,
* the dlm_reco_thread should detect this */
list_for_each_entry(res, &resources, recovering) {
ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
DLM_MRES_RECOVERY);
if (ret < 0) {
mlog(ML_ERROR, "%s: node %u went down while sending "
"recovery state for dead node %u, ret=%d\n", dlm->name,
reco_master, dead_node, ret);
skip_all_done = 1;
break;
}
}
/* move the resources back to the list */
spin_lock(&dlm->spinlock);
list_splice_init(&resources, &dlm->reco.resources);
spin_unlock(&dlm->spinlock);
if (!skip_all_done) {
ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
if (ret < 0) {
mlog(ML_ERROR, "%s: node %u went down while sending "
"recovery all-done for dead node %u, ret=%d\n",
dlm->name, reco_master, dead_node, ret);
}
}
leave:
free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
int ret, tmpret;
struct dlm_reco_data_done done_msg;
memset(&done_msg, 0, sizeof(done_msg));
done_msg.node_idx = dlm->node_num;
done_msg.dead_node = dead_node;
mlog(0, "sending DATA DONE message to %u, "
"my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
done_msg.dead_node);
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
"to recover dead node %u\n", dlm->name, ret, send_to,
dead_node);
if (!dlm_is_host_down(ret)) {
BUG();
}
} else
ret = tmpret;
return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
struct dlm_reco_node_data *ndata = NULL;
int ret = -EINVAL;
if (!dlm_grab(dlm))
return -EINVAL;
mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
"node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
"node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
spin_lock(&dlm_reco_state_lock);
list_for_each_entry(ndata, &dlm->reco.node_data, list) {
if (ndata->node_num != done->node_idx)
continue;
switch (ndata->state) {
/* should have moved beyond INIT but not to FINALIZE yet */
case DLM_RECO_NODE_DATA_INIT:
case DLM_RECO_NODE_DATA_DEAD:
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
mlog(ML_ERROR, "bad ndata state for node %u:"
" state=%d\n", ndata->node_num,
ndata->state);
BUG();
break;
/* these states are possible at this point, anywhere along
* the line of recovery */
case DLM_RECO_NODE_DATA_DONE:
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
case DLM_RECO_NODE_DATA_REQUESTING:
mlog(0, "node %u is DONE sending "
"recovery data!\n",
ndata->node_num);
ndata->state = DLM_RECO_NODE_DATA_DONE;
ret = 0;
break;
}
}
spin_unlock(&dlm_reco_state_lock);
/* wake the recovery thread, some node is done */
if (!ret)
dlm_kick_recovery_thread(dlm);
if (ret < 0)
mlog(ML_ERROR, "failed to find recovery node data for node "
"%u\n", done->node_idx);
dlm_put(dlm);
mlog(0, "leaving reco data done handler, ret=%d\n", ret);
return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
struct list_head *list,
u8 dead_node)
{
struct dlm_lock_resource *res, *next;
struct dlm_lock *lock;
spin_lock(&dlm->spinlock);
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
spin_lock(&res->spinlock);
list_for_each_entry(lock, &res->granted, list) {
if (lock->ml.node == dead_node) {
mlog(0, "AHA! there was "
"a $RECOVERY lock for dead "
"node %u (%s)!\n",
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* Can't schedule DLM_UNLOCK_FREE_LOCK
* - do manually */
dlm_lock_put(lock);
break;
}
}
spin_unlock(&res->spinlock);
continue;
}
if (res->owner == dead_node) {
mlog(0, "found lockres owned by dead node while "
"doing recovery for node %u. sending it.\n",
dead_node);
list_move_tail(&res->recovering, list);
} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "found UNKNOWN owner while doing recovery "
"for node %u. sending it.\n", dead_node);
list_move_tail(&res->recovering, list);
}
}
spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
int total_locks = 0;
struct list_head *iter, *queue = &res->granted;
int i;
for (i=0; i<3; i++) {
list_for_each(iter, queue)
total_locks++;
queue++;
}
return total_locks;
}
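/*
* Note: the queue++ above walks res->granted, res->converting and
* res->blocked in order, which relies on those three list heads being
* laid out consecutively in struct dlm_lock_resource.
*/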
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
struct dlm_migratable_lockres *mres,
u8 send_to,
struct dlm_lock_resource *res,
int total_locks)
{
u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
int mres_total_locks = be32_to_cpu(mres->total_locks);
int ret = 0, status = 0;
u8 orig_flags = mres->flags,
orig_master = mres->master;
BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
if (!mres->num_locks)
return 0;
/* add an all-done flag if we reached the last lock */
orig_flags = mres->flags;
BUG_ON(total_locks > mres_total_locks);
if (total_locks == mres_total_locks)
mres->flags |= DLM_MRES_ALL_DONE;
mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
dlm->name, res->lockname.len, res->lockname.name,
orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
send_to);
/* send it */
ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
struct_size(mres, ml, mres->num_locks),
send_to, &status);
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
"node %u (%s)\n", dlm->name, mres->lockname_len,
mres->lockname, ret, send_to,
(orig_flags & DLM_MRES_MIGRATION ?
"migration" : "recovery"));
} else {
/* might get an -ENOMEM back here */
ret = status;
if (ret < 0) {
mlog_errno(ret);
if (ret == -EFAULT) {
mlog(ML_ERROR, "node %u told me to kill "
"myself!\n", send_to);
BUG();
}
}
}
/* zero and reinit the message buffer */
dlm_init_migratable_lockres(mres, res->lockname.name,
res->lockname.len, mres_total_locks,
mig_cookie, orig_flags, orig_master);
return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
const char *lockname, int namelen,
int total_locks, u64 cookie,
u8 flags, u8 master)
{
/* mres here is one full page */
clear_page(mres);
mres->lockname_len = namelen;
memcpy(mres->lockname, lockname, namelen);
mres->num_locks = 0;
mres->total_locks = cpu_to_be32(total_locks);
mres->mig_cookie = cpu_to_be64(cookie);
mres->flags = flags;
mres->master = master;
}
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
struct dlm_migratable_lockres *mres,
int queue)
{
if (!lock->lksb)
return;
/* Ignore lvb in all locks in the blocked list */
if (queue == DLM_BLOCKED_LIST)
return;
/* Only consider lvbs in locks with granted EX or PR lock levels */
if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
return;
if (dlm_lvb_is_empty(mres->lvb)) {
memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
return;
}
/* Ensure the lvb copied for migration matches in other valid locks */
if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
return;
mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
"node=%u\n",
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->lockres->lockname.len, lock->lockres->lockname.name,
lock->ml.node);
dlm_print_one_lock_resource(lock->lockres);
BUG();
}
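/*
* In short: only granted or converting locks held at EX or PR level
* contribute an lvb to the migration message, and every contributing
* lock must carry the same lvb contents; any mismatch indicates
* corrupted lock state and is fatal.
*/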
/* returns 1 if this lock fills the network structure,
* 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
struct dlm_migratable_lockres *mres, int queue)
{
struct dlm_migratable_lock *ml;
int lock_num = mres->num_locks;
ml = &(mres->ml[lock_num]);
ml->cookie = lock->ml.cookie;
ml->type = lock->ml.type;
ml->convert_type = lock->ml.convert_type;
ml->highest_blocked = lock->ml.highest_blocked;
ml->list = queue;
if (lock->lksb) {
ml->flags = lock->lksb->flags;
dlm_prepare_lvb_for_migration(lock, mres, queue);
}
ml->node = lock->ml.node;
mres->num_locks++;
/* we reached the max, send this network message */
if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
return 1;
return 0;
}
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
struct dlm_migratable_lockres *mres)
{
struct dlm_lock dummy;
memset(&dummy, 0, sizeof(dummy));
dummy.ml.cookie = 0;
dummy.ml.type = LKM_IVMODE;
dummy.ml.convert_type = LKM_IVMODE;
dummy.ml.highest_blocked = LKM_IVMODE;
dummy.lksb = NULL;
dummy.ml.node = dlm->node_num;
dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}
static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
struct dlm_migratable_lock *ml,
u8 *nodenum)
{
if (unlikely(ml->cookie == 0 &&
ml->type == LKM_IVMODE &&
ml->convert_type == LKM_IVMODE &&
ml->highest_blocked == LKM_IVMODE &&
ml->list == DLM_BLOCKED_LIST)) {
*nodenum = ml->node;
return 1;
}
return 0;
}
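/*
* The dummy lock is recognized purely by this otherwise impossible
* field combination (zero cookie, LKM_IVMODE everywhere, queued on the
* blocked list); its only payload is ml->node, telling the recovery
* master that the sending node holds a reference on the lock resource.
*/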
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_migratable_lockres *mres,
u8 send_to, u8 flags)
{
struct list_head *queue;
int total_locks, i;
u64 mig_cookie = 0;
struct dlm_lock *lock;
int ret = 0;
BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
mlog(0, "sending to %u\n", send_to);
total_locks = dlm_num_locks_in_lockres(res);
if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
/* rare, but possible */
mlog(0, "argh. lockres has %d locks. this will "
"require more than one network packet to "
"migrate\n", total_locks);
mig_cookie = dlm_get_next_mig_cookie();
}
dlm_init_migratable_lockres(mres, res->lockname.name,
res->lockname.len, total_locks,
mig_cookie, flags, res->owner);
total_locks = 0;
for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
queue = dlm_list_idx_to_ptr(res, i);
list_for_each_entry(lock, queue, list) {
/* add another lock. */
total_locks++;
if (!dlm_add_lock_to_array(lock, mres, i))
continue;
/* this filled the lock message,
* we must send it immediately. */
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
res, total_locks);
if (ret < 0)
goto error;
}
}
if (total_locks == 0) {
/* send a dummy lock to indicate a mastery reference only */
mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
dlm->name, res->lockname.len, res->lockname.name,
send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
"migration");
dlm_add_dummy_lock(dlm, mres);
}
/* flush any remaining locks */
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
if (ret < 0)
goto error;
return ret;
error:
mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
dlm->name, ret);
if (!dlm_is_host_down(ret))
BUG();
mlog(0, "%s: node %u went down while sending %s "
"lockres %.*s\n", dlm->name, send_to,
flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
res->lockname.len, res->lockname.name);
return ret;
}
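/*
* Batching sketch: dlm_add_lock_to_array() returns 1 once mres holds
* DLM_MAX_MIGRATABLE_LOCKS entries, at which point the partially
* filled message is flushed with dlm_send_mig_lockres_msg() and the
* buffer reinitialized (keeping the same mig_cookie) for the next
* batch; the flush after the loop sends whatever remains, including
* the dummy-lock-only case.
*/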
/*
* this message will contain no more than one page worth of
* recovery data, and it will work on only one lockres.
* there may be many locks in this page, and we may need to wait
* for additional packets to complete all the locks (rare, but
* possible).
*/
/*
* NOTE: the allocation error cases here are scary.
* We really cannot afford to fail an alloc in recovery:
* do we spin? Returning an error only delays the problem, really.
*/
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_migratable_lockres *mres =
(struct dlm_migratable_lockres *)msg->buf;
int ret = 0;
u8 real_master;
u8 extra_refs = 0;
char *buf = NULL;
struct dlm_work_item *item = NULL;
struct dlm_lock_resource *res = NULL;
unsigned int hash;
if (!dlm_grab(dlm))
return -EINVAL;
if (!dlm_joined(dlm)) {
mlog(ML_ERROR, "Domain %s not joined! "
"lockres %.*s, master %u\n",
dlm->name, mres->lockname_len,
mres->lockname, mres->master);
dlm_put(dlm);
return -EINVAL;
}
BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
real_master = mres->master;
if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
/* cannot migrate a lockres with no master */
BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
}
mlog(0, "%s message received from node %u\n",
(mres->flags & DLM_MRES_RECOVERY) ?
"recovery" : "migration", mres->master);
if (mres->flags & DLM_MRES_ALL_DONE)
mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
item = kzalloc(sizeof(*item), GFP_NOFS);
if (!buf || !item)
goto leave;
	/* look up the lockres to see if we are already tracking it...
	 * if so, just add the locks in and it will have its owner
	 * and RECOVERY flag changed when it completes. */
hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
hash);
if (res) {
/* this will get a ref on res */
/* mark it as recovering/migrating and hash it */
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
mlog(0, "%s: node is attempting to migrate "
"lockres %.*s, but marked as dropping "
" ref!\n", dlm->name,
mres->lockname_len, mres->lockname);
ret = -EINVAL;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
dlm_lockres_put(res);
goto leave;
}
if (mres->flags & DLM_MRES_RECOVERY) {
res->state |= DLM_LOCK_RES_RECOVERING;
} else {
if (res->state & DLM_LOCK_RES_MIGRATING) {
/* this is at least the second
* lockres message */
mlog(0, "lock %.*s is already migrating\n",
mres->lockname_len,
mres->lockname);
} else if (res->state & DLM_LOCK_RES_RECOVERING) {
/* caller should BUG */
mlog(ML_ERROR, "node is attempting to migrate "
"lock %.*s, but marked as recovering!\n",
mres->lockname_len, mres->lockname);
ret = -EFAULT;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
dlm_lockres_put(res);
goto leave;
}
res->state |= DLM_LOCK_RES_MIGRATING;
}
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
} else {
spin_unlock(&dlm->spinlock);
/* need to allocate, just like if it was
* mastered here normally */
res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
if (!res)
goto leave;
/* to match the ref that we would have gotten if
* dlm_lookup_lockres had succeeded */
dlm_lockres_get(res);
/* mark it as recovering/migrating and hash it */
if (mres->flags & DLM_MRES_RECOVERY)
res->state |= DLM_LOCK_RES_RECOVERING;
else
res->state |= DLM_LOCK_RES_MIGRATING;
spin_lock(&dlm->spinlock);
__dlm_insert_lockres(dlm, res);
spin_unlock(&dlm->spinlock);
/* Add an extra ref for this lock-less lockres lest the
* dlm_thread purges it before we get the chance to add
* locks to it */
dlm_lockres_get(res);
/* There are three refs that need to be put.
* 1. Taken above.
* 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
* 3. dlm_lookup_lockres()
* The first one is handled at the end of this function. The
* other two are handled in the worker thread after locks have
* been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash via __dlm_insert_lockres() */
extra_refs++;
/* now that the new lockres is inserted,
* make it usable by other processes */
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
}
/* at this point we have allocated everything we need,
* and we have a hashed lockres with an extra ref and
* the proper res->state flags. */
ret = 0;
spin_lock(&res->spinlock);
/* drop this either when master requery finds a different master
* or when a lock is added by the recovery worker */
dlm_lockres_grab_inflight_ref(dlm, res);
if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
/* migration cannot have an unknown master */
BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
mlog(0, "recovery has passed me a lockres with an "
"unknown owner.. will need to requery: "
"%.*s\n", mres->lockname_len, mres->lockname);
} else {
/* take a reference now to pin the lockres, drop it
* when locks are added in the worker */
dlm_change_lockres_owner(dlm, res, dlm->node_num);
}
spin_unlock(&res->spinlock);
/* queue up work for dlm_mig_lockres_worker */
dlm_grab(dlm); /* get an extra ref for the work item */
memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
item->u.ml.lockres = res; /* already have a ref */
item->u.ml.real_master = real_master;
item->u.ml.extra_ref = extra_refs;
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
queue_work(dlm->dlm_worker, &dlm->dispatched_work);
leave:
/* One extra ref taken needs to be put here */
if (extra_refs)
dlm_lockres_put(res);
dlm_put(dlm);
if (ret < 0) {
kfree(buf);
kfree(item);
mlog_errno(ret);
}
return ret;
}
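/*
 * Worker half of dlm_mig_lockres_handler(). Runs in process context so it
 * may block: requery the master if it was unknown, attach the received
 * locks to the lockres, and finish the migration once the ALL_DONE flag
 * arrives.
 */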
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
struct dlm_ctxt *dlm;
struct dlm_migratable_lockres *mres;
int ret = 0;
struct dlm_lock_resource *res;
u8 real_master;
u8 extra_ref;
dlm = item->dlm;
mres = (struct dlm_migratable_lockres *)data;
res = item->u.ml.lockres;
real_master = item->u.ml.real_master;
extra_ref = item->u.ml.extra_ref;
if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
/* this case is super-rare. only occurs if
* node death happens during migration. */
again:
ret = dlm_lockres_master_requery(dlm, res, &real_master);
if (ret < 0) {
mlog(0, "dlm_lockres_master_requery ret=%d\n",
ret);
goto again;
}
if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "lockres %.*s not claimed. "
"this node will take it.\n",
res->lockname.len, res->lockname.name);
} else {
spin_lock(&res->spinlock);
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
mlog(0, "master needs to respond to sender "
"that node %u still owns %.*s\n",
real_master, res->lockname.len,
res->lockname.name);
/* cannot touch this lockres */
goto leave;
}
}
ret = dlm_process_recovery_data(dlm, res, mres);
if (ret < 0)
mlog(0, "dlm_process_recovery_data returned %d\n", ret);
else
mlog(0, "dlm_process_recovery_data succeeded\n");
if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
(DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
ret = dlm_finish_migration(dlm, res, mres->master);
if (ret < 0)
mlog_errno(ret);
}
leave:
/* See comment in dlm_mig_lockres_handler() */
if (res) {
if (extra_ref)
dlm_lockres_put(res);
dlm_lockres_put(res);
}
kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 *real_master)
{
struct dlm_node_iter iter;
int nodenum;
int ret = 0;
*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
/* we only reach here if one of the two nodes in a
* migration died while the migration was in progress.
* at this point we need to requery the master. we
* know that the new_master got as far as creating
* an mle on at least one node, but we do not know
* if any nodes had actually cleared the mle and set
* the master to the new_master. the old master
* is supposed to set the owner to UNKNOWN in the
* event of a new_master death, so the only possible
* responses that we can get from nodes here are
* that the master is new_master, or that the master
* is UNKNOWN.
* if all nodes come back with UNKNOWN then we know
* the lock needs remastering here.
* if any node comes back with a valid master, check
* to see if that master is the one that we are
* recovering. if so, then the new_master died and
* we need to remaster this lock. if not, then the
* new_master survived and that node will respond to
* other nodes about the owner.
* if there is an owner, this node needs to dump this
* lockres and alert the sender that this lockres
* was rejected. */
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
/* do not send to self */
if (nodenum == dlm->node_num)
continue;
ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
if (ret < 0) {
mlog_errno(ret);
if (!dlm_is_host_down(ret))
BUG();
/* host is down, so answer for that node would be
* DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
}
if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "lock master is %u\n", *real_master);
break;
}
}
return ret;
}
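/*
 * Ask a single node who it believes masters this lockres. Retries if the
 * remote node is out of memory; a send failure is returned to the caller,
 * and any other negative remote status is a bug.
 */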
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 nodenum, u8 *real_master)
{
int ret;
struct dlm_master_requery req;
int status = DLM_LOCK_RES_OWNER_UNKNOWN;
memset(&req, 0, sizeof(req));
req.node_idx = dlm->node_num;
req.namelen = res->lockname.len;
memcpy(req.name, res->lockname.name, res->lockname.len);
resend:
ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
&req, sizeof(req), nodenum, &status);
if (ret < 0)
mlog(ML_ERROR, "Error %d when sending message %u (key "
"0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
dlm->key, nodenum);
else if (status == -ENOMEM) {
mlog_errno(status);
msleep(50);
goto resend;
} else {
BUG_ON(status < 0);
BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
*real_master = (u8) (status & 0xff);
mlog(0, "node %u responded to master requery with %u\n",
nodenum, *real_master);
ret = 0;
}
return ret;
}
/* this function cannot error, so unless the sending
* or receiving of the message failed, the owner can
* be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
struct dlm_lock_resource *res = NULL;
unsigned int hash;
int master = DLM_LOCK_RES_OWNER_UNKNOWN;
u32 flags = DLM_ASSERT_MASTER_REQUERY;
int dispatched = 0;
if (!dlm_grab(dlm)) {
/* since the domain has gone away on this
* node, the proper response is UNKNOWN */
return master;
}
hash = dlm_lockid_hash(req->name, req->namelen);
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
if (res) {
spin_lock(&res->spinlock);
master = res->owner;
if (master == dlm->node_num) {
int ret = dlm_dispatch_assert_master(dlm, res,
0, 0, flags);
if (ret < 0) {
mlog_errno(ret);
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
/* sender will take care of this and retry */
return ret;
} else {
dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
spin_unlock(&res->spinlock);
}
} else {
			/* drop the lookup ref since we are not the master */
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
}
}
spin_unlock(&dlm->spinlock);
if (!dispatched)
dlm_put(dlm);
return master;
}
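/*
 * Map a secondary queue index (granted/converting/blocked) to the matching
 * list head. Relies on the three list heads being laid out consecutively
 * in struct dlm_lock_resource.
 */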
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
struct list_head *ret;
BUG_ON(list_num < 0);
BUG_ON(list_num > 2);
ret = &(res->granted);
ret += list_num;
return ret;
}
/* TODO: do ast flush business
* TODO: do MIGRATING and RECOVERING spinning
*/
/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS. So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set. In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well. For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first. This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master. Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_migratable_lockres *mres)
{
struct dlm_migratable_lock *ml;
struct list_head *queue, *iter;
struct list_head *tmpq = NULL;
struct dlm_lock *newlock = NULL;
struct dlm_lockstatus *lksb = NULL;
int ret = 0;
int i, j, bad;
struct dlm_lock *lock;
u8 from = O2NM_MAX_NODES;
__be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks);
for (i=0; i<mres->num_locks; i++) {
ml = &(mres->ml[i]);
if (dlm_is_dummy_lock(dlm, ml, &from)) {
/* placeholder, just need to set the refmap bit */
BUG_ON(mres->num_locks != 1);
mlog(0, "%s:%.*s: dummy lock for %u\n",
dlm->name, mres->lockname_len, mres->lockname,
from);
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock);
break;
}
BUG_ON(ml->highest_blocked != LKM_IVMODE);
newlock = NULL;
lksb = NULL;
queue = dlm_list_num_to_pointer(res, ml->list);
tmpq = NULL;
/* if the lock is for the local node it needs to
* be moved to the proper location within the queue.
* do not allocate a new lock structure. */
if (ml->node == dlm->node_num) {
/* MIGRATION ONLY! */
BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
lock = NULL;
spin_lock(&res->spinlock);
for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
tmpq = dlm_list_idx_to_ptr(res, j);
list_for_each(iter, tmpq) {
lock = list_entry(iter,
struct dlm_lock, list);
if (lock->ml.cookie == ml->cookie)
break;
lock = NULL;
}
if (lock)
break;
}
/* lock is always created locally first, and
* destroyed locally last. it must be on the list */
if (!lock) {
c = ml->cookie;
mlog(ML_ERROR, "Could not find local lock "
"with cookie %u:%llu, node %u, "
"list %u, flags 0x%x, type %d, "
"conv %d, highest blocked %d\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
dlm_get_lock_cookie_seq(be64_to_cpu(c)),
ml->node, ml->list, ml->flags, ml->type,
ml->convert_type, ml->highest_blocked);
__dlm_print_one_lock_resource(res);
BUG();
}
if (lock->ml.node != ml->node) {
c = lock->ml.cookie;
mlog(ML_ERROR, "Mismatched node# in lock "
"cookie %u:%llu, name %.*s, node %u\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
dlm_get_lock_cookie_seq(be64_to_cpu(c)),
res->lockname.len, res->lockname.name,
lock->ml.node);
c = ml->cookie;
mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
"node %u, list %u, flags 0x%x, type %d, "
"conv %d, highest blocked %d\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
dlm_get_lock_cookie_seq(be64_to_cpu(c)),
ml->node, ml->list, ml->flags, ml->type,
ml->convert_type, ml->highest_blocked);
__dlm_print_one_lock_resource(res);
BUG();
}
if (tmpq != queue) {
c = ml->cookie;
mlog(0, "Lock cookie %u:%llu was on list %u "
"instead of list %u for %.*s\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
dlm_get_lock_cookie_seq(be64_to_cpu(c)),
j, ml->list, res->lockname.len,
res->lockname.name);
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
continue;
}
/* see NOTE above about why we do not update
* to match the master here */
/* move the lock to its proper place */
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, queue);
spin_unlock(&res->spinlock);
mlog(0, "just reordered a local lock!\n");
continue;
}
/* lock is for another node. */
newlock = dlm_new_lock(ml->type, ml->node,
be64_to_cpu(ml->cookie), NULL);
if (!newlock) {
ret = -ENOMEM;
goto leave;
}
lksb = newlock->lksb;
dlm_lock_attach_lockres(newlock, res);
if (ml->convert_type != LKM_IVMODE) {
BUG_ON(queue != &res->converting);
newlock->ml.convert_type = ml->convert_type;
}
lksb->flags |= (ml->flags &
(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
if (ml->type == LKM_NLMODE)
goto skip_lvb;
/*
* If the lock is in the blocked list it can't have a valid lvb,
* so skip it
*/
if (ml->list == DLM_BLOCKED_LIST)
goto skip_lvb;
if (!dlm_lvb_is_empty(mres->lvb)) {
if (lksb->flags & DLM_LKSB_PUT_LVB) {
/* other node was trying to update
* lvb when node died. recreate the
* lksb with the updated lvb. */
memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
/* the lock resource lvb update must happen
* NOW, before the spinlock is dropped.
* we no longer wait for the AST to update
* the lvb. */
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
} else {
/* otherwise, the node is sending its
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
if (!dlm_lvb_is_empty(res->lvb) &&
(ml->type == LKM_EXMODE ||
memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
int i;
mlog(ML_ERROR, "%s:%.*s: received bad "
"lvb! type=%d\n", dlm->name,
res->lockname.len,
res->lockname.name, ml->type);
printk("lockres lvb=[");
for (i=0; i<DLM_LVB_LEN; i++)
printk("%02x", res->lvb[i]);
printk("]\nmigrated lvb=[");
for (i=0; i<DLM_LVB_LEN; i++)
printk("%02x", mres->lvb[i]);
printk("]\n");
dlm_print_one_lock_resource(res);
BUG();
}
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
}
}
skip_lvb:
/* NOTE:
* wrt lock queue ordering and recovery:
* 1. order of locks on granted queue is
* meaningless.
* 2. order of locks on converting queue is
* LOST with the node death. sorry charlie.
* 3. order of locks on the blocked queue is
* also LOST.
* order of locks does not affect integrity, it
* just means that a lock request may get pushed
* back in line as a result of the node death.
* also note that for a given node the lock order
* for its secondary queue locks is preserved
* relative to each other, but clearly *not*
* preserved relative to locks from other nodes.
*/
bad = 0;
spin_lock(&res->spinlock);
list_for_each_entry(lock, queue, list) {
if (lock->ml.cookie == ml->cookie) {
c = lock->ml.cookie;
mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
"exists on this lockres!\n", dlm->name,
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(c)),
dlm_get_lock_cookie_seq(be64_to_cpu(c)));
mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
"node=%u, cookie=%u:%llu, queue=%d\n",
ml->type, ml->convert_type, ml->node,
dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
ml->list);
__dlm_print_one_lock_resource(res);
bad = 1;
break;
}
}
if (!bad) {
dlm_lock_get(newlock);
if (mres->flags & DLM_MRES_RECOVERY &&
ml->list == DLM_CONVERTING_LIST &&
newlock->ml.type >
newlock->ml.convert_type) {
/* newlock is doing downconvert, add it to the
* head of converting list */
list_add(&newlock->list, queue);
} else
list_add_tail(&newlock->list, queue);
mlog(0, "%s:%.*s: added lock for node %u, "
"setting refmap bit\n", dlm->name,
res->lockname.len, res->lockname.name, ml->node);
dlm_lockres_set_refmap_bit(dlm, res, ml->node);
}
spin_unlock(&res->spinlock);
}
mlog(0, "done running all the locks\n");
leave:
/* balance the ref taken when the work was queued */
spin_lock(&res->spinlock);
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
if (ret < 0)
mlog_errno(ret);
return ret;
}
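/*
 * Move a lockres mastered by a dead node onto the recovery list and
 * resolve any lock, convert, unlock or cancel operations that were still
 * pending against the dead master. Caller must hold dlm->spinlock and
 * res->spinlock.
 */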
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int i;
struct list_head *queue;
struct dlm_lock *lock, *next;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
res->state |= DLM_LOCK_RES_RECOVERING;
if (!list_empty(&res->recovering)) {
mlog(0,
"Recovering res %s:%.*s, is already on recovery list!\n",
dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->recovering);
dlm_lockres_put(res);
}
/* We need to hold a reference while on the recovery list */
dlm_lockres_get(res);
list_add_tail(&res->recovering, &dlm->reco.resources);
/* find any pending locks and put them back on proper list */
for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
queue = dlm_list_idx_to_ptr(res, i);
list_for_each_entry_safe(lock, next, queue, list) {
dlm_lock_get(lock);
if (lock->convert_pending) {
/* move converting lock back to granted */
mlog(0, "node died with convert pending "
"on %.*s. move back to granted list.\n",
res->lockname.len, res->lockname.name);
dlm_revert_pending_convert(res, lock);
lock->convert_pending = 0;
} else if (lock->lock_pending) {
/* remove pending lock requests completely */
BUG_ON(i != DLM_BLOCKED_LIST);
mlog(0, "node died with lock pending "
"on %.*s. remove from blocked list and skip.\n",
res->lockname.len, res->lockname.name);
/* lock will be floating until ref in
* dlmlock_remote is freed after the network
* call returns. ok for it to not be on any
* list since no ast can be called
* (the master is dead). */
dlm_revert_pending_lock(res, lock);
lock->lock_pending = 0;
} else if (lock->unlock_pending) {
/* if an unlock was in progress, treat as
* if this had completed successfully
* before sending this lock state to the
* new master. note that the dlm_unlock
* call is still responsible for calling
* the unlockast. that will happen after
* the network call times out. for now,
* just move lists to prepare the new
* recovery master. */
BUG_ON(i != DLM_GRANTED_LIST);
mlog(0, "node died with unlock pending "
"on %.*s. remove from blocked list and skip.\n",
res->lockname.len, res->lockname.name);
dlm_commit_pending_unlock(res, lock);
lock->unlock_pending = 0;
} else if (lock->cancel_pending) {
/* if a cancel was in progress, treat as
* if this had completed successfully
* before sending this lock state to the
* new master */
BUG_ON(i != DLM_CONVERTING_LIST);
mlog(0, "node died with cancel pending "
"on %.*s. move back to granted list.\n",
res->lockname.len, res->lockname.name);
dlm_commit_pending_cancel(res, lock);
lock->cancel_pending = 0;
}
dlm_lock_put(lock);
}
}
}
/* removes all recovered locks from the recovery list.
* sets the res->owner to the new master.
* unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
u8 dead_node, u8 new_master)
{
int i;
struct hlist_head *bucket;
struct dlm_lock_resource *res, *next;
assert_spin_locked(&dlm->spinlock);
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
if (res->owner == dead_node) {
mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
dlm->name, res->lockname.len, res->lockname.name,
res->owner, new_master);
list_del_init(&res->recovering);
spin_lock(&res->spinlock);
/* new_master has our reference from
* the lock state sent during recovery */
dlm_change_lockres_owner(dlm, res, new_master);
res->state &= ~DLM_LOCK_RES_RECOVERING;
if (__dlm_lockres_has_locks(res))
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
dlm_lockres_put(res);
}
}
/* this will become unnecessary eventually, but
* for now we need to run the whole hash, clear
* the RECOVERING state and set the owner
* if necessary */
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, bucket, hash_node) {
if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
}
if (!(res->state & DLM_LOCK_RES_RECOVERING))
continue;
if (res->owner != dead_node &&
res->owner != dlm->node_num)
continue;
if (!list_empty(&res->recovering)) {
list_del_init(&res->recovering);
dlm_lockres_put(res);
}
/* new_master has our reference from
* the lock state sent during recovery */
mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
dlm->name, res->lockname.len, res->lockname.name,
res->owner, new_master);
spin_lock(&res->spinlock);
dlm_change_lockres_owner(dlm, res, new_master);
res->state &= ~DLM_LOCK_RES_RECOVERING;
if (__dlm_lockres_has_locks(res))
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
}
}
}
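/*
 * An lvb can only be trusted if it was covered by an EX or PR lock. For
 * locks held locally, anything below PR means the cached lvb may be stale;
 * for locks that belonged to the dead node, only an EX holder could have
 * been modifying the lvb.
 */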
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
if (local) {
if (lock->ml.type != LKM_EXMODE &&
lock->ml.type != LKM_PRMODE)
return 1;
} else if (lock->ml.type == LKM_EXMODE)
return 1;
return 0;
}
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, u8 dead_node)
{
struct list_head *queue;
struct dlm_lock *lock;
int blank_lvb = 0, local = 0;
int i;
u8 search_node;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (res->owner == dlm->node_num)
/* if this node owned the lockres, and if the dead node
* had an EX when he died, blank out the lvb */
search_node = dead_node;
else {
/* if this is a secondary lockres, and we had no EX or PR
* locks granted, we can no longer trust the lvb */
search_node = dlm->node_num;
local = 1; /* check local state for valid lvb */
}
for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
queue = dlm_list_idx_to_ptr(res, i);
list_for_each_entry(lock, queue, list) {
if (lock->ml.node == search_node) {
if (dlm_lvb_needs_invalidation(lock, local)) {
/* zero the lksb lvb and lockres lvb */
blank_lvb = 1;
memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
}
}
}
}
if (blank_lvb) {
mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
res->lockname.len, res->lockname.name, dead_node);
memset(res->lvb, 0, DLM_LVB_LEN);
}
}
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, u8 dead_node)
{
struct dlm_lock *lock, *next;
unsigned int freed = 0;
/* this node is the lockres master:
* 1) remove any stale locks for the dead node
* 2) if the dead node had an EX when he died, blank out the lvb
*/
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
	/* We do two dlm_lock_put() calls. One drops the list reference and
	 * the other forces the DLM_UNLOCK_FREE_LOCK action to free the lock */
/* TODO: check pending_asts, pending_basts here */
list_for_each_entry_safe(lock, next, &res->granted, list) {
if (lock->ml.node == dead_node) {
list_del_init(&lock->list);
dlm_lock_put(lock);
/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
dlm_lock_put(lock);
freed++;
}
}
list_for_each_entry_safe(lock, next, &res->converting, list) {
if (lock->ml.node == dead_node) {
list_del_init(&lock->list);
dlm_lock_put(lock);
/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
dlm_lock_put(lock);
freed++;
}
}
list_for_each_entry_safe(lock, next, &res->blocked, list) {
if (lock->ml.node == dead_node) {
list_del_init(&lock->list);
dlm_lock_put(lock);
/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
dlm_lock_put(lock);
freed++;
}
}
if (freed) {
mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
"dropping ref from lockres\n", dlm->name,
res->lockname.len, res->lockname.name, freed, dead_node);
		if (!test_bit(dead_node, res->refmap)) {
mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
"but ref was not set\n", dlm->name,
res->lockname.len, res->lockname.name, freed, dead_node);
__dlm_print_one_lock_resource(res);
}
res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
"no locks and had not purged before dying\n", dlm->name,
res->lockname.len, res->lockname.name, dead_node);
dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
}
/* do not kick thread yet */
__dlm_dirty_lockres(dlm, res);
}
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
struct dlm_lock_resource *res;
int i;
struct hlist_head *bucket;
struct hlist_node *tmp;
struct dlm_lock *lock;
/* purge any stale mles */
dlm_clean_master_list(dlm, dead_node);
/*
* now clean up all lock resources. there are two rules:
*
* 1) if the dead node was the master, move the lockres
* to the recovering list. set the RECOVERING flag.
* this lockres needs to be cleaned up before it can
* be used further.
*
* 2) if this node was the master, remove all locks from
* each of the lockres queues that were owned by the
* dead node. once recovery finishes, the dlm thread
* can be kicked again to see if any ASTs or BASTs
* need to be fired as a result.
*/
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
spin_lock(&res->spinlock);
list_for_each_entry(lock, &res->granted, list) {
if (lock->ml.node == dead_node) {
mlog(0, "AHA! there was "
"a $RECOVERY lock for dead "
"node %u (%s)!\n",
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* Can't schedule
* DLM_UNLOCK_FREE_LOCK
* - do manually */
dlm_lock_put(lock);
break;
}
}
if ((res->owner == dead_node) &&
(res->state & DLM_LOCK_RES_DROPPING_REF)) {
dlm_lockres_get(res);
__dlm_do_purge_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
dlm_lockres_put(res);
continue;
} else if (res->owner == dlm->node_num)
dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
spin_unlock(&res->spinlock);
continue;
}
spin_lock(&res->spinlock);
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
mlog(0, "%s:%.*s: owned by "
"dead node %u, this node was "
"dropping its ref when master died. "
"continue, purging the lockres.\n",
dlm->name, res->lockname.len,
res->lockname.name, dead_node);
dlm_lockres_get(res);
__dlm_do_purge_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
dlm_lockres_put(res);
continue;
}
dlm_move_lockres_to_recovery_list(dlm, res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
"no locks and had not purged before dying\n",
dlm->name, res->lockname.len,
res->lockname.name, dead_node);
dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
}
}
spin_unlock(&res->spinlock);
}
}
}
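/*
 * Handle a node death with dlm->spinlock held: reset recovery state if the
 * dead node was the recovery master, clear the node from the various node
 * maps, run local cleanup, and flag the node for recovery.
 */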
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
assert_spin_locked(&dlm->spinlock);
if (dlm->reco.new_master == idx) {
mlog(0, "%s: recovery master %d just died\n",
dlm->name, idx);
if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
/* finalize1 was reached, so it is safe to clear
* the new_master and dead_node. that recovery
* is complete. */
mlog(0, "%s: dead master %d had reached "
"finalize1 state, clearing\n", dlm->name, idx);
dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
__dlm_reset_recovery(dlm);
}
}
/* Clean up join state on node death. */
if (dlm->joining_node == idx) {
mlog(0, "Clearing join state for node %u\n", idx);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
}
/* check to see if the node is already considered dead */
if (!test_bit(idx, dlm->live_nodes_map)) {
mlog(0, "for domain %s, node %d is already dead. "
"another node likely did recovery already.\n",
dlm->name, idx);
return;
}
/* check to see if we do not care about this node */
if (!test_bit(idx, dlm->domain_map)) {
/* This also catches the case that we get a node down
* but haven't joined the domain yet. */
mlog(0, "node %u already removed from domain!\n", idx);
return;
}
clear_bit(idx, dlm->live_nodes_map);
/* make sure local cleanup occurs before the heartbeat events */
if (!test_bit(idx, dlm->recovery_map))
dlm_do_local_recovery_cleanup(dlm, idx);
/* notify anything attached to the heartbeat events */
dlm_hb_event_notify_attached(dlm, idx, 0);
mlog(0, "node %u being removed from domain map!\n", idx);
clear_bit(idx, dlm->domain_map);
clear_bit(idx, dlm->exit_domain_map);
/* wake up migration waiters if a node goes down.
* perhaps later we can genericize this for other waiters. */
wake_up(&dlm->migration_wq);
set_bit(idx, dlm->recovery_map);
}
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
struct dlm_ctxt *dlm = data;
if (!dlm_grab(dlm))
return;
/*
* This will notify any dlm users that a node in our domain
* went away without notifying us first.
*/
if (test_bit(idx, dlm->domain_map))
dlm_fire_domain_eviction_callbacks(dlm, idx);
spin_lock(&dlm->spinlock);
__dlm_hb_node_down(dlm, idx);
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
}
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
struct dlm_ctxt *dlm = data;
if (!dlm_grab(dlm))
return;
spin_lock(&dlm->spinlock);
set_bit(idx, dlm->live_nodes_map);
/* do NOT notify mle attached to the heartbeat events.
* new nodes are not interesting in mastery until joined. */
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
}
static void dlm_reco_ast(void *astdata)
{
struct dlm_ctxt *dlm = astdata;
mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
struct dlm_ctxt *dlm = astdata;
mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
mlog(0, "unlockast for recovery lock fired!\n");
}
/*
* dlm_pick_recovery_master will continually attempt to use
* dlmlock() on the special "$RECOVERY" lockres with the
* LKM_NOQUEUE flag to get an EX. every thread that enters
* this function on each node racing to become the recovery
* master will not stop attempting this until either:
* a) this node gets the EX (and becomes the recovery master),
* or b) dlm->reco.new_master gets set to some nodenum
* != O2NM_INVALID_NODE_NUM (another node will do the reco).
* so each time a recovery master is needed, the entire cluster
* will sync at this point. if the new master dies, that will
* be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
enum dlm_status ret;
struct dlm_lockstatus lksb;
int status = -EINVAL;
mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
memset(&lksb, 0, sizeof(lksb));
ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
dlm_reco_ast, dlm, dlm_reco_bast);
mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
dlm->name, ret, lksb.status);
if (ret == DLM_NORMAL) {
mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
dlm->name, dlm->node_num);
/* got the EX lock. check to see if another node
* just became the reco master */
if (dlm_reco_master_ready(dlm)) {
mlog(0, "%s: got reco EX lock, but %u will "
"do the recovery\n", dlm->name,
dlm->reco.new_master);
status = -EEXIST;
} else {
status = 0;
/* see if recovery was already finished elsewhere */
spin_lock(&dlm->spinlock);
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
status = -EINVAL;
mlog(0, "%s: got reco EX lock, but "
"node got recovered already\n", dlm->name);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(ML_ERROR, "%s: new master is %u "
"but no dead node!\n",
dlm->name, dlm->reco.new_master);
BUG();
}
}
spin_unlock(&dlm->spinlock);
}
/* if this node has actually become the recovery master,
* set the master and send the messages to begin recovery */
if (!status) {
mlog(0, "%s: dead=%u, this=%u, sending "
"begin_reco now\n", dlm->name,
dlm->reco.dead_node, dlm->node_num);
status = dlm_send_begin_reco_message(dlm,
dlm->reco.dead_node);
/* this always succeeds */
BUG_ON(status);
/* set the new_master to this node */
spin_lock(&dlm->spinlock);
dlm_set_reco_master(dlm, dlm->node_num);
spin_unlock(&dlm->spinlock);
}
/* recovery lock is a special case. ast will not get fired,
* so just go ahead and unlock it. */
ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
if (ret == DLM_DENIED) {
mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
}
if (ret != DLM_NORMAL) {
/* this would really suck. this could only happen
* if there was a network error during the unlock
* because of node death. this means the unlock
* is actually "done" and the lock structure is
* even freed. we can continue, but only
* because this specific lock name is special. */
mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
}
} else if (ret == DLM_NOTQUEUED) {
mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
dlm->name, dlm->node_num);
/* another node is master. wait on
* reco.new_master != O2NM_INVALID_NODE_NUM
* for at most one second */
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_reco_master_ready(dlm),
msecs_to_jiffies(1000));
if (!dlm_reco_master_ready(dlm)) {
mlog(0, "%s: reco master taking awhile\n",
dlm->name);
goto again;
}
/* another node has informed this one that it is reco master */
mlog(0, "%s: reco master %u is ready to recover %u\n",
dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
status = -EEXIST;
} else if (ret == DLM_RECOVERING) {
mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
dlm->name, dlm->node_num);
goto again;
} else {
struct dlm_lock_resource *res;
/* dlmlock returned something other than NOTQUEUED or NORMAL */
mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
"lksb.status=%s\n", dlm->name, dlm_errname(ret),
dlm_errname(lksb.status));
res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
DLM_RECOVERY_LOCK_NAME_LEN);
if (res) {
dlm_print_one_lock_resource(res);
dlm_lockres_put(res);
} else {
mlog(ML_ERROR, "recovery lock not found\n");
}
BUG();
}
return status;
}
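/*
 * Broadcast to every live node in the domain that this node is acting as
 * recovery master for dead_node. Transient failures are retried forever;
 * a node that dies during the broadcast is simply skipped.
 */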
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
struct dlm_begin_reco br;
int ret = 0;
struct dlm_node_iter iter;
int nodenum;
int status;
mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
clear_bit(dead_node, iter.node_map);
memset(&br, 0, sizeof(br));
br.node_idx = dlm->node_num;
br.dead_node = dead_node;
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = 0;
if (nodenum == dead_node) {
mlog(0, "not sending begin reco to dead node "
"%u\n", dead_node);
continue;
}
if (nodenum == dlm->node_num) {
mlog(0, "not sending begin reco to self\n");
continue;
}
retry:
mlog(0, "attempting to send begin reco msg to %d\n",
nodenum);
ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
&br, sizeof(br), nodenum, &status);
/* negative status is handled ok by caller here */
if (ret >= 0)
ret = status;
if (dlm_is_host_down(ret)) {
/* node is down. not involved in recovery
* so just keep going */
mlog(ML_NOTICE, "%s: node %u was down when sending "
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
/*
* Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
* dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
* We are handling both for compatibility reasons.
*/
if (ret == -EAGAIN || ret == EAGAIN) {
mlog(0, "%s: trying to start recovery of node "
"%u, but node %u is waiting for last recovery "
"to complete, backoff for a bit\n", dlm->name,
dead_node, nodenum);
msleep(100);
goto retry;
}
if (ret < 0) {
struct dlm_lock_resource *res;
/* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
"returned %d\n", dlm->name, nodenum, ret);
res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
DLM_RECOVERY_LOCK_NAME_LEN);
if (res) {
dlm_print_one_lock_resource(res);
dlm_lockres_put(res);
} else {
mlog(ML_ERROR, "recovery lock not found\n");
}
/* sleep for a bit in hopes that we can avoid
* another ENOMEM */
msleep(100);
goto retry;
}
}
return ret;
}
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
/* ok to return 0, domain has gone away */
if (!dlm_grab(dlm))
return 0;
spin_lock(&dlm->spinlock);
if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
"but this node is in finalize state, waiting on finalize2\n",
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
spin_unlock(&dlm->spinlock);
dlm_put(dlm);
return -EAGAIN;
}
spin_unlock(&dlm->spinlock);
mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
mlog(0, "%s: new_master %u died, changing "
"to %u\n", dlm->name, dlm->reco.new_master,
br->node_idx);
} else {
mlog(0, "%s: new_master %u NOT DEAD, changing "
"to %u\n", dlm->name, dlm->reco.new_master,
br->node_idx);
/* may not have seen the new master as dead yet */
}
}
if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
"node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
dlm_set_reco_master(dlm, br->node_idx);
dlm_set_reco_dead_node(dlm, br->dead_node);
if (!test_bit(br->dead_node, dlm->recovery_map)) {
mlog(0, "recovery master %u sees %u as dead, but this "
"node has not yet. marking %u as dead\n",
br->node_idx, br->dead_node, br->dead_node);
if (!test_bit(br->dead_node, dlm->domain_map) ||
!test_bit(br->dead_node, dlm->live_nodes_map))
mlog(0, "%u not in domain/live_nodes map "
"so setting it in reco map manually\n",
br->dead_node);
/* force the recovery cleanup in __dlm_hb_node_down
* both of these will be cleared in a moment */
set_bit(br->dead_node, dlm->domain_map);
set_bit(br->dead_node, dlm->live_nodes_map);
__dlm_hb_node_down(dlm, br->dead_node);
}
spin_unlock(&dlm->spinlock);
dlm_kick_recovery_thread(dlm);
mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
dlm_put(dlm);
return 0;
}
#define DLM_FINALIZE_STAGE2 0x01
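/*
 * Tell all nodes that recovery of the dead node is complete. This is done
 * in two stages: finalize1 lets each node reassign ownership of the
 * recovered lock resources, finalize2 clears the FINALIZE state and resets
 * the recovery bookkeeping.
 */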
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
int ret = 0;
struct dlm_finalize_reco fr;
struct dlm_node_iter iter;
int nodenum;
int status;
int stage = 1;
mlog(0, "finishing recovery for node %s:%u, "
"stage %d\n", dlm->name, dlm->reco.dead_node, stage);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
stage2:
memset(&fr, 0, sizeof(fr));
fr.node_idx = dlm->node_num;
fr.dead_node = dlm->reco.dead_node;
if (stage == 2)
fr.flags |= DLM_FINALIZE_STAGE2;
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
if (nodenum == dlm->node_num)
continue;
ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
&fr, sizeof(fr), nodenum, &status);
if (ret >= 0)
ret = status;
if (ret < 0) {
mlog(ML_ERROR, "Error %d when sending message %u (key "
"0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
dlm->key, nodenum);
if (dlm_is_host_down(ret)) {
/* this has no effect on this recovery
* session, so set the status to zero to
* finish out the last recovery */
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
ret = 0;
continue;
}
break;
}
}
if (stage == 1) {
/* reset the node_iter back to the top and send finalize2 */
iter.curnode = -1;
stage = 2;
goto stage2;
}
return ret;
}
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
int stage = 1;
/* ok to return 0, domain has gone away */
if (!dlm_grab(dlm))
return 0;
if (fr->flags & DLM_FINALIZE_STAGE2)
stage = 2;
mlog(0, "%s: node %u finalizing recovery stage%d of "
"node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != fr->node_idx) {
mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
"%u is supposed to be the new master, dead=%u\n",
fr->node_idx, dlm->reco.new_master, fr->dead_node);
BUG();
}
if (dlm->reco.dead_node != fr->dead_node) {
mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
"node %u, but node %u is supposed to be dead\n",
fr->node_idx, fr->dead_node, dlm->reco.dead_node);
BUG();
}
switch (stage) {
case 1:
dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
mlog(ML_ERROR, "%s: received finalize1 from "
"new master %u for dead node %u, but "
"this node has already received it!\n",
dlm->name, fr->node_idx, fr->dead_node);
dlm_print_reco_node_status(dlm);
BUG();
}
dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
spin_unlock(&dlm->spinlock);
break;
case 2:
if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
mlog(ML_ERROR, "%s: received finalize2 from "
"new master %u for dead node %u, but "
"this node did not have finalize1!\n",
dlm->name, fr->node_idx, fr->dead_node);
dlm_print_reco_node_status(dlm);
BUG();
}
dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
__dlm_reset_recovery(dlm);
spin_unlock(&dlm->spinlock);
dlm_kick_recovery_thread(dlm);
break;
}
mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
dlm_put(dlm);
return 0;
}
| linux-master | fs/ocfs2/dlm/dlmrecovery.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmast.c
*
* AST and BAST functionality for local and remote nodes
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
/* Should be called as an ast gets queued to see if the new
* lock level will obsolete a pending bast.
* For example, if dlm_thread queued a bast for an EX lock that
* was blocking another EX, but before sending the bast the
* lock owner downconverted to NL, the bast is now obsolete.
* Only the ast should be sent.
* This is needed because the lock and convert paths can queue
* asts out-of-band (not waiting for dlm_thread) in order to
* allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
assert_spin_locked(&dlm->ast_lock);
assert_spin_locked(&lock->spinlock);
if (lock->ml.highest_blocked == LKM_IVMODE)
return 0;
BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
if (lock->bast_pending &&
list_empty(&lock->bast_list))
/* old bast already sent, ok */
return 0;
if (lock->ml.type == LKM_EXMODE)
/* EX blocks anything left, any bast still valid */
return 0;
else if (lock->ml.type == LKM_NLMODE)
/* NL blocks nothing, no reason to send any bast, cancel it */
return 1;
else if (lock->ml.highest_blocked != LKM_EXMODE)
/* PR only blocks EX */
return 1;
return 0;
}
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
struct dlm_lock_resource *res;
BUG_ON(!dlm);
BUG_ON(!lock);
res = lock->lockres;
assert_spin_locked(&dlm->ast_lock);
if (!list_empty(&lock->ast_list)) {
mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
"AST list not empty, pending %d, newlevel %d\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ast_pending, lock->ml.type);
BUG();
}
if (lock->ast_pending)
mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
/* putting lock on list, add a ref */
dlm_lock_get(lock);
spin_lock(&lock->spinlock);
/* check to see if this ast obsoletes the bast */
if (dlm_should_cancel_bast(dlm, lock)) {
mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lock->bast_pending = 0;
list_del_init(&lock->bast_list);
lock->ml.highest_blocked = LKM_IVMODE;
/* removing lock from list, remove a ref. guaranteed
* this won't be the last ref because of the get above,
* so res->spinlock will not be taken here */
dlm_lock_put(lock);
/* free up the reserved bast that we are cancelling.
* guaranteed that this will not be the last reserved
* ast because *both* an ast and a bast were reserved
* to get to this point. the res->spinlock will not be
* taken here */
dlm_lockres_release_ast(dlm, res);
}
list_add_tail(&lock->ast_list, &dlm->pending_asts);
lock->ast_pending = 1;
spin_unlock(&lock->spinlock);
}
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
BUG_ON(!dlm);
BUG_ON(!lock);
spin_lock(&dlm->ast_lock);
__dlm_queue_ast(dlm, lock);
spin_unlock(&dlm->ast_lock);
}
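/*
 * Queue a BAST for delivery by dlm_thread. Caller must hold dlm->ast_lock
 * and the lock must not already be on the bast list.
 */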
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
struct dlm_lock_resource *res;
BUG_ON(!dlm);
BUG_ON(!lock);
assert_spin_locked(&dlm->ast_lock);
res = lock->lockres;
BUG_ON(!list_empty(&lock->bast_list));
if (lock->bast_pending)
mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
/* putting lock on list, add a ref */
dlm_lock_get(lock);
spin_lock(&lock->spinlock);
list_add_tail(&lock->bast_list, &dlm->pending_basts);
lock->bast_pending = 1;
spin_unlock(&lock->spinlock);
}
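/*
 * Copy lvb data between the lockres and the lksb according to the lksb
 * flags, then clear those flags. Only the lockres master copies out of
 * the shared lvb; put requests are applied at downconvert time instead.
 */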
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
struct dlm_lockstatus *lksb = lock->lksb;
BUG_ON(!lksb);
/* only updates if this node masters the lockres */
spin_lock(&res->spinlock);
if (res->owner == dlm->node_num) {
/* check the lksb flags for the direction */
if (lksb->flags & DLM_LKSB_GET_LVB) {
mlog(0, "getting lvb from lockres for %s node\n",
lock->ml.node == dlm->node_num ? "master" :
"remote");
memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
}
/* Do nothing for lvb put requests - they should be done in
* place when the lock is downconverted - otherwise we risk
* racing gets and puts which could result in old lvb data
* being propagated. We leave the put flag set and clear it
* here. In the future we might want to clear it at the time
* the put is actually done.
*/
}
spin_unlock(&res->spinlock);
/* reset any lvb flags on the lksb */
lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
dlm_astlockfunc_t *fn;
mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
fn = lock->ast;
BUG_ON(lock->ml.node != dlm->node_num);
dlm_update_lvb(dlm, res, lock);
(*fn)(lock->astdata);
}
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
int ret;
struct dlm_lockstatus *lksb;
int lksbflags;
mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lksb = lock->lksb;
BUG_ON(lock->ml.node == dlm->node_num);
lksbflags = lksb->flags;
dlm_update_lvb(dlm, res, lock);
/* lock request came from another node
* go do the ast over there */
ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
return ret;
}
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock, int blocked_type)
{
dlm_bastlockfunc_t *fn = lock->bast;
BUG_ON(lock->ml.node != dlm->node_num);
mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
blocked_type);
(*fn)(lock->astdata, blocked_type);
}
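/*
 * Network handler for ASTs and BASTs sent by a remote lockres master.
 * Finds the local lock the message refers to, moves it to the granted
 * queue for an AST (completing any pending convert), and fires the
 * caller's ast/bast function.
 */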
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
int ret;
unsigned int locklen;
struct dlm_ctxt *dlm = data;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *lock = NULL;
struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
char *name;
struct list_head *head = NULL;
__be64 cookie;
u32 flags;
u8 node;
if (!dlm_grab(dlm)) {
dlm_error(DLM_REJECTED);
return DLM_REJECTED;
}
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
name = past->name;
locklen = past->namelen;
cookie = past->cookie;
flags = be32_to_cpu(past->flags);
node = past->node_idx;
if (locklen > DLM_LOCKID_NAME_MAX) {
ret = DLM_IVBUFLEN;
mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
"handler!\n", locklen);
goto leave;
}
if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
(LKM_PUT_LVB|LKM_GET_LVB)) {
mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
flags);
ret = DLM_BADARGS;
goto leave;
}
mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
(flags & LKM_GET_LVB ? "get lvb" : "none"));
mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);
if (past->type != DLM_AST &&
past->type != DLM_BAST) {
mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu"
"name=%.*s, node=%u\n", past->type,
dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
locklen, name, node);
ret = DLM_IVLOCKID;
goto leave;
}
res = dlm_lookup_lockres(dlm, name, locklen);
if (!res) {
mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
"name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
locklen, name, node);
ret = DLM_IVLOCKID;
goto leave;
}
/* cannot get a proxy ast message if this node owns it */
BUG_ON(res->owner == dlm->node_num);
mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
mlog(0, "Responding with DLM_RECOVERING!\n");
ret = DLM_RECOVERING;
goto unlock_out;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
mlog(0, "Responding with DLM_MIGRATING!\n");
ret = DLM_MIGRATING;
goto unlock_out;
}
/* try convert queue for both ast/bast */
head = &res->converting;
lock = NULL;
list_for_each_entry(lock, head, list) {
if (lock->ml.cookie == cookie)
goto do_ast;
}
/* if not on convert, try blocked for ast, granted for bast */
if (past->type == DLM_AST)
head = &res->blocked;
else
head = &res->granted;
list_for_each_entry(lock, head, list) {
/* if lock is found but unlock is pending ignore the bast */
if (lock->ml.cookie == cookie) {
if (lock->unlock_pending)
break;
goto do_ast;
}
}
mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
"node=%u\n", past->type == DLM_AST ? "" : "b",
dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
locklen, name, node);
ret = DLM_NORMAL;
unlock_out:
spin_unlock(&res->spinlock);
goto leave;
do_ast:
ret = DLM_NORMAL;
if (past->type == DLM_AST) {
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, &res->granted);
mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
lock->ml.type, lock->ml.convert_type);
if (lock->ml.convert_type != LKM_IVMODE) {
lock->ml.type = lock->ml.convert_type;
lock->ml.convert_type = LKM_IVMODE;
} else {
			/* type should already be set correctly */
}
lock->lksb->status = DLM_NORMAL;
/* if we requested the lvb, fetch it into our lksb now */
if (flags & LKM_GET_LVB) {
BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
}
}
spin_unlock(&res->spinlock);
if (past->type == DLM_AST)
dlm_do_local_ast(dlm, res, lock);
else
dlm_do_local_bast(dlm, res, lock, past->blocked_type);
leave:
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return ret;
}
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock, int msg_type,
int blocked_type, int flags)
{
int ret = 0;
struct dlm_proxy_ast past;
struct kvec vec[2];
size_t veclen = 1;
int status;
mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
blocked_type);
memset(&past, 0, sizeof(struct dlm_proxy_ast));
past.node_idx = dlm->node_num;
past.type = msg_type;
past.blocked_type = blocked_type;
past.namelen = res->lockname.len;
memcpy(past.name, res->lockname.name, past.namelen);
past.cookie = lock->ml.cookie;
vec[0].iov_len = sizeof(struct dlm_proxy_ast);
vec[0].iov_base = &past;
if (flags & DLM_LKSB_GET_LVB) {
be32_add_cpu(&past.flags, LKM_GET_LVB);
vec[1].iov_len = DLM_LVB_LEN;
vec[1].iov_base = lock->lksb->lvb;
veclen++;
}
ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
lock->ml.node, &status);
if (ret < 0)
mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
dlm->name, res->lockname.len, res->lockname.name, ret,
lock->ml.node);
else {
if (status == DLM_RECOVERING) {
mlog(ML_ERROR, "sent AST to node %u, it thinks this "
"node is dead!\n", lock->ml.node);
BUG();
} else if (status == DLM_MIGRATING) {
mlog(ML_ERROR, "sent AST to node %u, it returned "
"DLM_MIGRATING!\n", lock->ml.node);
BUG();
} else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
mlog(ML_ERROR, "AST to node %u returned %d!\n",
lock->ml.node, status);
/* ignore it */
}
ret = 0;
}
return ret;
}
| linux-master | fs/ocfs2/dlm/dlmast.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmlock.c
*
* underlying calls for lock creation
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmconvert.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
static struct kmem_cache *dlm_lock_cache;
static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);
int dlm_init_lock_cache(void)
{
dlm_lock_cache = kmem_cache_create("o2dlm_lock",
sizeof(struct dlm_lock),
0, SLAB_HWCACHE_ALIGN, NULL);
if (dlm_lock_cache == NULL)
return -ENOMEM;
return 0;
}
void dlm_destroy_lock_cache(void)
{
kmem_cache_destroy(dlm_lock_cache);
}
/* Tell us whether we can grant a new lock request.
* locking:
* caller needs: res->spinlock
* taken: none
* held on exit: none
* returns: 1 if the lock can be granted, 0 otherwise.
*/
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
struct dlm_lock *tmplock;
list_for_each_entry(tmplock, &res->granted, list) {
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
}
list_for_each_entry(tmplock, &res->converting, list) {
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
if (!dlm_lock_compatible(tmplock->ml.convert_type,
lock->ml.type))
return 0;
}
return 1;
}
/* performs lock creation at the lockres master site
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_NOTQUEUED
*/
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
int call_ast = 0, kick_thread = 0;
enum dlm_status status = DLM_NORMAL;
mlog(0, "type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
/* if called from dlm_create_lock_handler, need to
* ensure it will not sleep in dlm_wait_on_lockres */
status = __dlm_lockres_state_to_status(res);
if (status != DLM_NORMAL &&
lock->ml.node != dlm->node_num) {
/* erf. state changed after lock was dropped. */
spin_unlock(&res->spinlock);
dlm_error(status);
return status;
}
__dlm_wait_on_lockres(res);
__dlm_lockres_reserve_ast(res);
if (dlm_can_grant_new_lock(res, lock)) {
mlog(0, "I can grant this lock right away\n");
/* got it right away */
lock->lksb->status = DLM_NORMAL;
status = DLM_NORMAL;
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->granted);
/* for the recovery lock, we can't allow the ast
* to be queued since the dlmthread is already
* frozen. but the recovery lock is always locked
* with LKM_NOQUEUE so we do not need the ast in
* this special case */
if (!dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
kick_thread = 1;
call_ast = 1;
} else {
mlog(0, "%s: returning DLM_NORMAL to "
"node %u for reco lock\n", dlm->name,
lock->ml.node);
}
} else {
/* for NOQUEUE request, unless we get the
* lock right away, return DLM_NOTQUEUED */
if (flags & LKM_NOQUEUE) {
status = DLM_NOTQUEUED;
if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
mlog(0, "%s: returning NOTQUEUED to "
"node %u for reco lock\n", dlm->name,
lock->ml.node);
}
} else {
status = DLM_NORMAL;
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->blocked);
kick_thread = 1;
}
}
spin_unlock(&res->spinlock);
wake_up(&res->wq);
/* either queue the ast or release it */
if (call_ast)
dlm_queue_ast(dlm, lock);
else
dlm_lockres_release_ast(dlm, res);
dlm_lockres_calc_usage(dlm, res);
if (kick_thread)
dlm_kick_thread(dlm, res);
return status;
}
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
/* remove from local queue if it failed */
list_del_init(&lock->list);
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}
/*
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_DENIED, DLM_RECOVERING, or net status
*/
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
enum dlm_status status = DLM_DENIED;
int lockres_changed = 1;
mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
lock->ml.type, res->lockname.len,
res->lockname.name, flags);
/*
* Wait if resource is getting recovered, remastered, etc.
* If the resource was remastered and new owner is self, then exit.
*/
spin_lock(&res->spinlock);
__dlm_wait_on_lockres(res);
if (res->owner == dlm->node_num) {
spin_unlock(&res->spinlock);
return DLM_RECOVERING;
}
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* add lock to local (secondary) queue */
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->blocked);
lock->lock_pending = 1;
spin_unlock(&res->spinlock);
/* spec seems to say that you will get DLM_NORMAL when the lock
* has been queued, meaning we need to wait for a reply here. */
status = dlm_send_remote_lock_request(dlm, res, lock, flags);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
if (status != DLM_NORMAL) {
if (status == DLM_RECOVERING &&
dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* recovery lock was mastered by dead node.
* we need to have calc_usage shoot down this
* lockres and completely remaster it. */
mlog(0, "%s: recovery lock was owned by "
"dead node %u, remaster it now.\n",
dlm->name, res->owner);
} else if (status != DLM_NOTQUEUED) {
/*
* DO NOT call calc_usage, as this would unhash
* the remote lockres before we ever get to use
* it. treat as if we never made any change to
* the lockres.
*/
lockres_changed = 0;
dlm_error(status);
}
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
} else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
* there will never be an AST delivered to put
* this lock on the proper secondary queue
* (granted), so do it manually. */
mlog(0, "%s: $RECOVERY lock for this node (%u) is "
"mastered by %u; got lock, manually granting (no ast)\n",
dlm->name, dlm->node_num, res->owner);
list_move_tail(&lock->list, &res->granted);
}
spin_unlock(&res->spinlock);
if (lockres_changed)
dlm_lockres_calc_usage(dlm, res);
wake_up(&res->wq);
return status;
}
/* for remote lock creation.
* locking:
* caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
* taken: none
* held on exit: none
* returns: DLM_NOLOCKMGR, or net status
*/
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
struct dlm_create_lock create;
int tmpret, status = 0;
enum dlm_status ret;
memset(&create, 0, sizeof(create));
create.node_idx = dlm->node_num;
create.requested_type = lock->ml.type;
create.cookie = lock->ml.cookie;
create.namelen = res->lockname.len;
create.flags = cpu_to_be32(flags);
memcpy(create.name, res->lockname.name, create.namelen);
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
ret = status;
if (ret == DLM_REJECTED) {
mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
"owned by node %u. That node is coming back up "
"currently.\n", dlm->name, create.namelen,
create.name, res->owner);
dlm_print_one_lock_resource(res);
BUG();
}
} else {
mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
"node %u\n", dlm->name, create.namelen, create.name,
tmpret, res->owner);
if (dlm_is_host_down(tmpret))
ret = DLM_RECOVERING;
else
ret = dlm_err_to_dlm_status(tmpret);
}
return ret;
}
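/* reference counting for struct dlm_lock; the final put frees the lock */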
void dlm_lock_get(struct dlm_lock *lock)
{
kref_get(&lock->lock_refs);
}
void dlm_lock_put(struct dlm_lock *lock)
{
kref_put(&lock->lock_refs, dlm_lock_release);
}
static void dlm_lock_release(struct kref *kref)
{
struct dlm_lock *lock;
lock = container_of(kref, struct dlm_lock, lock_refs);
BUG_ON(!list_empty(&lock->list));
BUG_ON(!list_empty(&lock->ast_list));
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
dlm_lock_detach_lockres(lock);
if (lock->lksb_kernel_allocated) {
mlog(0, "freeing kernel-allocated lksb\n");
kfree(lock->lksb);
}
kmem_cache_free(dlm_lock_cache, lock);
}
/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
struct dlm_lock_resource *res)
{
dlm_lockres_get(res);
lock->lockres = res;
}
/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
struct dlm_lock_resource *res;
res = lock->lockres;
if (res) {
lock->lockres = NULL;
mlog(0, "removing lock's lockres reference\n");
dlm_lockres_put(res);
}
}
static void dlm_init_lock(struct dlm_lock *newlock, int type,
u8 node, u64 cookie)
{
INIT_LIST_HEAD(&newlock->list);
INIT_LIST_HEAD(&newlock->ast_list);
INIT_LIST_HEAD(&newlock->bast_list);
spin_lock_init(&newlock->spinlock);
newlock->ml.type = type;
newlock->ml.convert_type = LKM_IVMODE;
newlock->ml.highest_blocked = LKM_IVMODE;
newlock->ml.node = node;
newlock->ml.pad1 = 0;
newlock->ml.list = 0;
newlock->ml.flags = 0;
newlock->ast = NULL;
newlock->bast = NULL;
newlock->astdata = NULL;
newlock->ml.cookie = cpu_to_be64(cookie);
newlock->ast_pending = 0;
newlock->bast_pending = 0;
newlock->convert_pending = 0;
newlock->lock_pending = 0;
newlock->unlock_pending = 0;
newlock->cancel_pending = 0;
newlock->lksb_kernel_allocated = 0;
kref_init(&newlock->lock_refs);
}
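/*
* Allocate and initialize a new lock. If the caller does not supply an
* lksb, one is allocated here and freed again in dlm_lock_release().
*/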
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
struct dlm_lockstatus *lksb)
{
struct dlm_lock *lock;
int kernel_allocated = 0;
lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
if (!lock)
return NULL;
if (!lksb) {
/* zero memory only if kernel-allocated */
lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
if (!lksb) {
kmem_cache_free(dlm_lock_cache, lock);
return NULL;
}
kernel_allocated = 1;
}
dlm_init_lock(lock, type, node, cookie);
if (kernel_allocated)
lock->lksb_kernel_allocated = 1;
lock->lksb = lksb;
lksb->lockid = lock;
return lock;
}
/* handler for lock creation net message
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
*/
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *newlock = NULL;
struct dlm_lockstatus *lksb = NULL;
enum dlm_status status = DLM_NORMAL;
char *name;
unsigned int namelen;
BUG_ON(!dlm);
if (!dlm_grab(dlm))
return DLM_REJECTED;
name = create->name;
namelen = create->namelen;
status = DLM_REJECTED;
if (!dlm_domain_fully_joined(dlm)) {
mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
"sending a create_lock message for lock %.*s!\n",
dlm->name, create->node_idx, namelen, name);
dlm_error(status);
goto leave;
}
status = DLM_IVBUFLEN;
if (namelen > DLM_LOCKID_NAME_MAX) {
dlm_error(status);
goto leave;
}
status = DLM_SYSERR;
newlock = dlm_new_lock(create->requested_type,
create->node_idx,
be64_to_cpu(create->cookie), NULL);
if (!newlock) {
dlm_error(status);
goto leave;
}
lksb = newlock->lksb;
if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
lksb->flags |= DLM_LKSB_GET_LVB;
mlog(0, "set DLM_LKSB_GET_LVB flag\n");
}
status = DLM_IVLOCKID;
res = dlm_lookup_lockres(dlm, name, namelen);
if (!res) {
dlm_error(status);
goto leave;
}
spin_lock(&res->spinlock);
status = __dlm_lockres_state_to_status(res);
spin_unlock(&res->spinlock);
if (status != DLM_NORMAL) {
mlog(0, "lockres recovering/migrating/in-progress\n");
goto leave;
}
dlm_lock_attach_lockres(newlock, res);
status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
if (status != DLM_NORMAL && newlock)
dlm_lock_put(newlock);
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return status;
}
/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
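/*
* Illustrative example (values chosen for this comment only): node 3
* with sequence 0x1234 packs to 0x0300000000001234. Handlers recover
* the fields with dlm_get_lock_cookie_node()/_seq() after be64_to_cpu().
*/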
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
u64 tmpnode = node_num;
/* shift single byte of node num into top 8 bits */
tmpnode <<= 56;
spin_lock(&dlm_cookie_lock);
*cookie = (dlm_next_cookie | tmpnode);
if (++dlm_next_cookie & 0xff00000000000000ull) {
mlog(0, "This node's cookie will now wrap!\n");
dlm_next_cookie = 1;
}
spin_unlock(&dlm_cookie_lock);
}
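/*
* dlmlock() is the entry point for both new lock requests and convert
* requests (LKM_CONVERT). It retries internally while the lockres is
* recovering, migrating or being forwarded.
*/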
enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
struct dlm_lockstatus *lksb, int flags,
const char *name, int namelen, dlm_astlockfunc_t *ast,
void *data, dlm_bastlockfunc_t *bast)
{
enum dlm_status status;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *lock = NULL;
int convert = 0, recovery = 0;
/* yes this function is a mess.
* TODO: clean this up. lots of common code in the
* lock and convert paths, especially in the retry blocks */
if (!lksb) {
dlm_error(DLM_BADARGS);
return DLM_BADARGS;
}
status = DLM_BADPARAM;
if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
dlm_error(status);
goto error;
}
if (flags & ~LKM_VALID_FLAGS) {
dlm_error(status);
goto error;
}
convert = (flags & LKM_CONVERT);
recovery = (flags & LKM_RECOVERY);
if (recovery &&
(!dlm_is_recovery_lock(name, namelen) || convert)) {
dlm_error(status);
goto error;
}
if (convert && (flags & LKM_LOCAL)) {
mlog(ML_ERROR, "strange LOCAL convert request!\n");
goto error;
}
if (convert) {
/* CONVERT request */
/* if converting, must pass in a valid dlm_lock */
lock = lksb->lockid;
if (!lock) {
mlog(ML_ERROR, "NULL lock pointer in convert "
"request\n");
goto error;
}
res = lock->lockres;
if (!res) {
mlog(ML_ERROR, "NULL lockres pointer in convert "
"request\n");
goto error;
}
dlm_lockres_get(res);
/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
* static after the original lock call. convert requests will
* ensure that everything is the same, or return DLM_BADARGS.
* this means that DLM_DENIED_NOASTS will never be returned.
*/
if (lock->lksb != lksb || lock->ast != ast ||
lock->bast != bast || lock->astdata != data) {
status = DLM_BADARGS;
mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
"astdata=%p\n", lksb, ast, bast, data);
mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
"astdata=%p\n", lock->lksb, lock->ast,
lock->bast, lock->astdata);
goto error;
}
retry_convert:
dlm_wait_for_recovery(dlm);
if (res->owner == dlm->node_num)
status = dlmconvert_master(dlm, res, lock, flags, mode);
else
status = dlmconvert_remote(dlm, res, lock, flags, mode);
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
/* for now, see how this works without sleeping
* and just retry right away. I suspect the reco
* or migration will complete fast enough that
* no waiting will be necessary */
mlog(0, "retrying convert with migration/recovery/"
"in-progress\n");
msleep(100);
goto retry_convert;
}
} else {
u64 tmpcookie;
/* LOCK request */
status = DLM_BADARGS;
if (!name) {
dlm_error(status);
goto error;
}
status = DLM_IVBUFLEN;
if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
dlm_error(status);
goto error;
}
dlm_get_next_cookie(dlm->node_num, &tmpcookie);
lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
if (!lock) {
dlm_error(status);
goto error;
}
if (!recovery)
dlm_wait_for_recovery(dlm);
/* find or create the lock resource */
res = dlm_get_lock_resource(dlm, name, namelen, flags);
if (!res) {
status = DLM_IVLOCKID;
dlm_error(status);
goto error;
}
mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
dlm_lock_attach_lockres(lock, res);
lock->ast = ast;
lock->bast = bast;
lock->astdata = data;
retry_lock:
if (flags & LKM_VALBLK) {
mlog(0, "LKM_VALBLK passed by caller\n");
/* LVB requests for non PR, PW or EX locks are
* ignored. */
if (mode < LKM_PRMODE)
flags &= ~LKM_VALBLK;
else {
flags |= LKM_GET_LVB;
lock->lksb->flags |= DLM_LKSB_GET_LVB;
}
}
if (res->owner == dlm->node_num)
status = dlmlock_master(dlm, res, lock, flags);
else
status = dlmlock_remote(dlm, res, lock, flags);
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
msleep(100);
if (recovery) {
if (status != DLM_RECOVERING)
goto retry_lock;
/* wait to see the node go down, then
* drop down and allow the lockres to
* get cleaned up. need to remaster. */
dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
} else {
dlm_wait_for_recovery(dlm);
goto retry_lock;
}
}
/* Inflight taken in dlm_get_lock_resource() is dropped here */
spin_lock(&res->spinlock);
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
dlm_lockres_calc_usage(dlm, res);
dlm_kick_thread(dlm, res);
if (status != DLM_NORMAL) {
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
if (status != DLM_NOTQUEUED)
dlm_error(status);
goto error;
}
}
error:
if (status != DLM_NORMAL) {
if (lock && !convert)
dlm_lock_put(lock);
/* redundant with the return value, but set lksb->status anyway */
lksb->status = status;
}
/* put lockres ref from the convert path
* or from dlm_get_lock_resource */
if (res)
dlm_lockres_put(res);
return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
| linux-master | fs/ocfs2/dlm/dlmlock.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmmaster.c
*
* lock mastering for the standalone DLM module
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "../cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
struct o2nm_node *node,
int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
struct o2nm_node *node,
int idx);
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
const char *name,
unsigned int namelen)
{
if (dlm != mle->dlm)
return 0;
if (namelen != mle->mnamelen ||
memcmp(name, mle->mname, namelen) != 0)
return 0;
return 1;
}
static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;
static struct kmem_cache *dlm_mle_cache;
static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
enum dlm_mle_type type,
struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
const char *name,
unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
struct dlm_master_list_entry **mle,
char *name, unsigned int namelen);
static int dlm_do_master_request(struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle, int to);
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
struct dlm_master_list_entry **oldmle,
const char *name, unsigned int namelen,
u8 new_master, u8 master);
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
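/* classify a net errno: nonzero means the remote node is unreachable */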
int dlm_is_host_down(int errno)
{
switch (errno) {
case -EBADF:
case -ECONNREFUSED:
case -ENOTCONN:
case -ECONNRESET:
case -EPIPE:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ETIMEDOUT:
case -ECONNABORTED:
case -ENETDOWN:
case -ENETUNREACH:
case -ENETRESET:
case -ESHUTDOWN:
case -ENOPROTOOPT:
case -EINVAL: /* if returned from our tcp code,
this means there is no socket */
return 1;
}
return 0;
}
/*
* MASTER LIST FUNCTIONS
*/
/*
* regarding master list entries and heartbeat callbacks:
*
* in order to avoid sleeping and allocation that occurs in
* heartbeat, master list entries are simply attached to the
* dlm's established heartbeat callbacks. the mle is attached
* when it is created, and since the dlm->spinlock is held at
* that time, any heartbeat event will be properly discovered
* by the mle. the mle needs to be detached from the
* dlm->mle_hb_events list as soon as heartbeat events are no
* longer useful to the mle, and before the mle is freed.
*
* as a general rule, heartbeat events are no longer needed by
* the mle once an "answer" regarding the lock master has been
* received.
*/
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle)
{
assert_spin_locked(&dlm->spinlock);
list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}
static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle)
{
if (!list_empty(&mle->hb_events))
list_del_init(&mle->hb_events);
}
static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle)
{
spin_lock(&dlm->spinlock);
__dlm_mle_detach_hb_events(dlm, mle);
spin_unlock(&dlm->spinlock);
}
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
struct dlm_ctxt *dlm;
dlm = mle->dlm;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
mle->inuse++;
kref_get(&mle->mle_refs);
}
static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
struct dlm_ctxt *dlm;
dlm = mle->dlm;
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
mle->inuse--;
__dlm_put_mle(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
struct dlm_ctxt *dlm;
dlm = mle->dlm;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
if (!kref_read(&mle->mle_refs)) {
/* this may or may not crash, but who cares.
* it's a BUG. */
mlog(ML_ERROR, "bad mle: %p\n", mle);
dlm_print_one_mle(mle);
BUG();
} else
kref_put(&mle->mle_refs, dlm_mle_release);
}
/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
struct dlm_ctxt *dlm;
dlm = mle->dlm;
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
__dlm_put_mle(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
}
static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
enum dlm_mle_type type,
struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
const char *name,
unsigned int namelen)
{
assert_spin_locked(&dlm->spinlock);
mle->dlm = dlm;
mle->type = type;
INIT_HLIST_NODE(&mle->master_hash_node);
INIT_LIST_HEAD(&mle->hb_events);
bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
spin_lock_init(&mle->spinlock);
init_waitqueue_head(&mle->wq);
atomic_set(&mle->woken, 0);
kref_init(&mle->mle_refs);
bitmap_zero(mle->response_map, O2NM_MAX_NODES);
mle->master = O2NM_MAX_NODES;
mle->new_master = O2NM_MAX_NODES;
mle->inuse = 0;
BUG_ON(mle->type != DLM_MLE_BLOCK &&
mle->type != DLM_MLE_MASTER &&
mle->type != DLM_MLE_MIGRATION);
if (mle->type == DLM_MLE_MASTER) {
BUG_ON(!res);
mle->mleres = res;
memcpy(mle->mname, res->lockname.name, res->lockname.len);
mle->mnamelen = res->lockname.len;
mle->mnamehash = res->lockname.hash;
} else {
BUG_ON(!name);
mle->mleres = NULL;
memcpy(mle->mname, name, namelen);
mle->mnamelen = namelen;
mle->mnamehash = dlm_lockid_hash(name, namelen);
}
atomic_inc(&dlm->mle_tot_count[mle->type]);
atomic_inc(&dlm->mle_cur_count[mle->type]);
/* copy off the node_map and register hb callbacks on our copy */
bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES);
bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES);
clear_bit(dlm->node_num, mle->vote_map);
clear_bit(dlm->node_num, mle->node_map);
/* attach the mle to the domain node up/down events */
__dlm_mle_attach_hb_events(dlm, mle);
}
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
if (!hlist_unhashed(&mle->master_hash_node))
hlist_del_init(&mle->master_hash_node);
}
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
struct hlist_head *bucket;
assert_spin_locked(&dlm->master_lock);
bucket = dlm_master_hash(dlm, mle->mnamehash);
hlist_add_head(&mle->master_hash_node, bucket);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
struct dlm_master_list_entry **mle,
char *name, unsigned int namelen)
{
struct dlm_master_list_entry *tmpmle;
struct hlist_head *bucket;
unsigned int hash;
assert_spin_locked(&dlm->master_lock);
hash = dlm_lockid_hash(name, namelen);
bucket = dlm_master_hash(dlm, hash);
hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
continue;
dlm_get_mle(tmpmle);
*mle = tmpmle;
return 1;
}
return 0;
}
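/* propagate a node up/down event to every mle attached to heartbeat */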
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
struct dlm_master_list_entry *mle;
assert_spin_locked(&dlm->spinlock);
list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
if (node_up)
dlm_mle_node_up(dlm, mle, NULL, idx);
else
dlm_mle_node_down(dlm, mle, NULL, idx);
}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
struct o2nm_node *node, int idx)
{
spin_lock(&mle->spinlock);
if (!test_bit(idx, mle->node_map))
mlog(0, "node %u already removed from nodemap!\n", idx);
else
clear_bit(idx, mle->node_map);
spin_unlock(&mle->spinlock);
}
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
struct o2nm_node *node, int idx)
{
spin_lock(&mle->spinlock);
if (test_bit(idx, mle->node_map))
mlog(0, "node %u already in node map!\n", idx);
else
set_bit(idx, mle->node_map);
spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
dlm_mle_cache = kmem_cache_create("o2dlm_mle",
sizeof(struct dlm_master_list_entry),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (dlm_mle_cache == NULL)
return -ENOMEM;
return 0;
}
void dlm_destroy_mle_cache(void)
{
kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
{
struct dlm_master_list_entry *mle;
struct dlm_ctxt *dlm;
mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
dlm = mle->dlm;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
mle->type);
/* remove from list if not already */
__dlm_unlink_mle(dlm, mle);
/* detach the mle from the domain node up/down events */
__dlm_mle_detach_hb_events(dlm, mle);
atomic_dec(&dlm->mle_cur_count[mle->type]);
/* NOTE: kfree under spinlock here.
* if this is bad, we can move this to a freelist. */
kmem_cache_free(dlm_mle_cache, mle);
}
/*
* LOCK RESOURCE FUNCTIONS
*/
int dlm_init_master_caches(void)
{
dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
sizeof(struct dlm_lock_resource),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!dlm_lockres_cache)
goto bail;
dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
DLM_LOCKID_NAME_MAX, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!dlm_lockname_cache)
goto bail;
return 0;
bail:
dlm_destroy_master_caches();
return -ENOMEM;
}
void dlm_destroy_master_caches(void)
{
kmem_cache_destroy(dlm_lockname_cache);
dlm_lockname_cache = NULL;
kmem_cache_destroy(dlm_lockres_cache);
dlm_lockres_cache = NULL;
}
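/*
* Final teardown of a lockres when its refcount hits zero. By this
* point the resource must be unhashed and off every queue.
*/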
static void dlm_lockres_release(struct kref *kref)
{
struct dlm_lock_resource *res;
struct dlm_ctxt *dlm;
res = container_of(kref, struct dlm_lock_resource, refs);
dlm = res->dlm;
/* This should not happen -- all lockres' have a name
* associated with them at init time. */
BUG_ON(!res->lockname.name);
mlog(0, "destroying lockres %.*s\n", res->lockname.len,
res->lockname.name);
atomic_dec(&dlm->res_cur_count);
if (!hlist_unhashed(&res->hash_node) ||
!list_empty(&res->granted) ||
!list_empty(&res->converting) ||
!list_empty(&res->blocked) ||
!list_empty(&res->dirty) ||
!list_empty(&res->recovering) ||
!list_empty(&res->purge)) {
mlog(ML_ERROR,
"Going to BUG for resource %.*s."
" We're on a list! [%c%c%c%c%c%c%c]\n",
res->lockname.len, res->lockname.name,
!hlist_unhashed(&res->hash_node) ? 'H' : ' ',
!list_empty(&res->granted) ? 'G' : ' ',
!list_empty(&res->converting) ? 'C' : ' ',
!list_empty(&res->blocked) ? 'B' : ' ',
!list_empty(&res->dirty) ? 'D' : ' ',
!list_empty(&res->recovering) ? 'R' : ' ',
!list_empty(&res->purge) ? 'P' : ' ');
dlm_print_one_lock_resource(res);
}
/* By the time we're ready to blow this guy away, we shouldn't
* be on any lists. */
BUG_ON(!hlist_unhashed(&res->hash_node));
BUG_ON(!list_empty(&res->granted));
BUG_ON(!list_empty(&res->converting));
BUG_ON(!list_empty(&res->blocked));
BUG_ON(!list_empty(&res->dirty));
BUG_ON(!list_empty(&res->recovering));
BUG_ON(!list_empty(&res->purge));
kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
kmem_cache_free(dlm_lockres_cache, res);
}
void dlm_lockres_put(struct dlm_lock_resource *res)
{
kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
const char *name, unsigned int namelen)
{
char *qname;
/* If we memset here, we lose our reference to the kmalloc'd
* res->lockname.name, so be sure to init every field
* correctly! */
qname = (char *) res->lockname.name;
memcpy(qname, name, namelen);
res->lockname.len = namelen;
res->lockname.hash = dlm_lockid_hash(name, namelen);
init_waitqueue_head(&res->wq);
spin_lock_init(&res->spinlock);
INIT_HLIST_NODE(&res->hash_node);
INIT_LIST_HEAD(&res->granted);
INIT_LIST_HEAD(&res->converting);
INIT_LIST_HEAD(&res->blocked);
INIT_LIST_HEAD(&res->dirty);
INIT_LIST_HEAD(&res->recovering);
INIT_LIST_HEAD(&res->purge);
INIT_LIST_HEAD(&res->tracking);
atomic_set(&res->asts_reserved, 0);
res->migration_pending = 0;
res->inflight_locks = 0;
res->inflight_assert_workers = 0;
res->dlm = dlm;
kref_init(&res->refs);
atomic_inc(&dlm->res_tot_count);
atomic_inc(&dlm->res_cur_count);
/* just for consistency */
spin_lock(&res->spinlock);
dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
spin_unlock(&res->spinlock);
res->state = DLM_LOCK_RES_IN_PROGRESS;
res->last_used = 0;
spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
bitmap_zero(res->refmap, O2NM_MAX_NODES);
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int namelen)
{
struct dlm_lock_resource *res = NULL;
res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
if (!res)
goto error;
res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
if (!res->lockname.name)
goto error;
dlm_init_lockres(dlm, res, name, namelen);
return res;
error:
if (res)
kmem_cache_free(dlm_lockres_cache, res);
return NULL;
}
void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, int bit)
{
assert_spin_locked(&res->spinlock);
mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
res->lockname.name, bit, __builtin_return_address(0));
set_bit(bit, res->refmap);
}
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, int bit)
{
assert_spin_locked(&res->spinlock);
mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
res->lockname.name, bit, __builtin_return_address(0));
clear_bit(bit, res->refmap);
}
static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
res->inflight_locks++;
mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
res->lockname.len, res->lockname.name, res->inflight_locks,
__builtin_return_address(0));
}
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
__dlm_lockres_grab_inflight_ref(dlm, res);
}
void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
BUG_ON(res->inflight_locks == 0);
res->inflight_locks--;
mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
res->lockname.len, res->lockname.name, res->inflight_locks,
__builtin_return_address(0));
wake_up(&res->wq);
}
void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
res->inflight_assert_workers++;
mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
dlm->name, res->lockname.len, res->lockname.name,
res->inflight_assert_workers);
}
static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
BUG_ON(res->inflight_assert_workers == 0);
res->inflight_assert_workers--;
mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
dlm->name, res->lockname.len, res->lockname.name,
res->inflight_assert_workers);
}
static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
spin_lock(&res->spinlock);
__dlm_lockres_drop_inflight_worker(dlm, res);
spin_unlock(&res->spinlock);
}
/*
* lookup a lock resource by name.
* may already exist in the hashtable.
* lockid is null terminated
*
* if not, allocate enough for the lockres and for
* the temporary structure used in doing the mastering.
*
* also, do a lookup in the dlm->master_list to see
* if another node has begun mastering the same lock.
* if so, there should be a block entry in there
* for this name, and we should *not* attempt to master
* the lock here. need to wait around for that node
* to assert_master (or die).
*
*/
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
const char *lockid,
int namelen,
int flags)
{
struct dlm_lock_resource *tmpres=NULL, *res=NULL;
struct dlm_master_list_entry *mle = NULL;
struct dlm_master_list_entry *alloc_mle = NULL;
int blocked = 0;
int ret, nodenum;
struct dlm_node_iter iter;
unsigned int hash;
int tries = 0;
int bit, wait_on_recovery = 0;
BUG_ON(!lockid);
hash = dlm_lockid_hash(lockid, namelen);
mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
lookup:
spin_lock(&dlm->spinlock);
tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
if (tmpres) {
spin_unlock(&dlm->spinlock);
spin_lock(&tmpres->spinlock);
/*
* Right after dlm spinlock was released, dlm_thread could have
* purged the lockres. Check if lockres got unhashed. If so
* start over.
*/
if (hlist_unhashed(&tmpres->hash_node)) {
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
/* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
/* Wait on the resource purge to complete before continuing */
if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
BUG_ON(tmpres->owner == dlm->node_num);
__dlm_wait_on_lockres_flags(tmpres,
DLM_LOCK_RES_DROPPING_REF);
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
/* Grab inflight ref to pin the resource */
dlm_lockres_grab_inflight_ref(dlm, tmpres);
spin_unlock(&tmpres->spinlock);
if (res) {
spin_lock(&dlm->track_lock);
if (!list_empty(&res->tracking))
list_del_init(&res->tracking);
else
mlog(ML_ERROR, "Resource %.*s not "
"on the Tracking list\n",
res->lockname.len,
res->lockname.name);
spin_unlock(&dlm->track_lock);
dlm_lockres_put(res);
}
res = tmpres;
goto leave;
}
if (!res) {
spin_unlock(&dlm->spinlock);
mlog(0, "allocating a new resource\n");
/* nothing found and we need to allocate one. */
alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!alloc_mle)
goto leave;
res = dlm_new_lockres(dlm, lockid, namelen);
if (!res)
goto leave;
goto lookup;
}
mlog(0, "no lockres found, allocated our own: %p\n", res);
if (flags & LKM_LOCAL) {
/* caller knows it's safe to assume it's not mastered elsewhere
* DONE! return right away */
spin_lock(&res->spinlock);
dlm_change_lockres_owner(dlm, res, dlm->node_num);
__dlm_insert_lockres(dlm, res);
dlm_lockres_grab_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
/* lockres still marked IN_PROGRESS */
goto wake_waiters;
}
/* check master list to see if another node has started mastering it */
spin_lock(&dlm->master_lock);
/* if we found a block, wait for lock to be mastered by another node */
blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
if (blocked) {
int mig;
if (mle->type == DLM_MLE_MASTER) {
mlog(ML_ERROR, "master entry for nonexistent lock!\n");
BUG();
}
mig = (mle->type == DLM_MLE_MIGRATION);
/* if there is a migration in progress, let the migration
* finish before continuing. we can wait for the absence
* of the MIGRATION mle: either the migrate finished or
* one of the nodes died and the mle was cleaned up.
* if there is a BLOCK here, but it already has a master
* set, we are too late. the master does not have a ref
* for us in the refmap. detach the mle and drop it.
* either way, go back to the top and start over. */
if (mig || mle->master != O2NM_MAX_NODES) {
BUG_ON(mig && mle->master == dlm->node_num);
/* we arrived too late. the master does not
* have a ref for us. retry. */
mlog(0, "%s:%.*s: late on %s\n",
dlm->name, namelen, lockid,
mig ? "MIGRATION" : "BLOCK");
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
/* master is known, detach */
if (!mig)
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
mle = NULL;
/* this is lame, but we can't wait on either
* the mle or lockres waitqueue here */
if (mig)
msleep(100);
goto lookup;
}
} else {
/* go ahead and try to master lock on this node */
mle = alloc_mle;
/* make sure this does not get freed below */
alloc_mle = NULL;
dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
set_bit(dlm->node_num, mle->maybe_map);
__dlm_insert_mle(dlm, mle);
/* still holding the dlm spinlock, check the recovery map
* to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
}
}
/* at this point there is either a DLM_MLE_BLOCK or a
* DLM_MLE_MASTER on the master list, so it's safe to add the
* lockres to the hashtable. anyone who finds the lock will
* still have to wait on the IN_PROGRESS. */
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
/* since this lockres is new it doesn't require the res->spinlock */
__dlm_lockres_grab_inflight_ref(dlm, res);
/* get an extra ref on the mle in case this is a BLOCK
* if so, the creator of the BLOCK may try to put the last
* ref at this time in the assert master handler, so we
* need an extra one to keep from a bad ptr deref. */
dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
redo_request:
while (wait_on_recovery) {
/* any cluster changes that occurred after dropping the
* dlm spinlock would be detectable by a change on the mle,
* so we only need to clear out the recovery map once. */
if (dlm_is_recovery_lock(lockid, namelen)) {
mlog(0, "%s: Recovery map is not empty, but must "
"master $RECOVERY lock now\n", dlm->name);
if (!dlm_pre_master_reco_lockres(dlm, res))
wait_on_recovery = 0;
else {
mlog(0, "%s: waiting 500ms for heartbeat state "
"change\n", dlm->name);
msleep(500);
}
continue;
}
dlm_kick_recovery_thread(dlm);
msleep(1000);
dlm_wait_for_recovery(dlm);
spin_lock(&dlm->spinlock);
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
} else
wait_on_recovery = 0;
spin_unlock(&dlm->spinlock);
if (wait_on_recovery)
dlm_wait_for_node_recovery(dlm, bit, 10000);
}
/* must wait for lock to be mastered elsewhere */
if (blocked)
goto wait;
ret = -EINVAL;
dlm_node_iter_init(mle->vote_map, &iter);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = dlm_do_master_request(res, mle, nodenum);
if (ret < 0)
mlog_errno(ret);
if (mle->master != O2NM_MAX_NODES) {
/* found a master ! */
if (mle->master <= nodenum)
break;
/* if our master request has not reached the master
* yet, keep going until it does. this is how the
* master will know that asserts are needed back to
* the lower nodes. */
mlog(0, "%s: res %.*s, Requests only up to %u but "
"master is %u, keep going\n", dlm->name, namelen,
lockid, nodenum, mle->master);
}
}
wait:
/* keep going until the response map includes all nodes */
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
wait_on_recovery = 1;
mlog(0, "%s: res %.*s, Node map changed, redo the master "
"request now, blocked=%d\n", dlm->name, res->lockname.len,
res->lockname.name, blocked);
if (++tries > 20) {
mlog(ML_ERROR, "%s: res %.*s, Spinning on "
"dlm_wait_for_lock_mastery, blocked = %d\n",
dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
dlm_print_one_mle(mle);
tries = 0;
}
goto redo_request;
}
mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
res->lockname.name, res->owner);
/* make sure we never continue without this */
BUG_ON(res->owner == O2NM_MAX_NODES);
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
/* put the extra ref */
dlm_put_mle_inuse(mle);
wake_waiters:
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
leave:
/* need to free the unused mle */
if (alloc_mle)
kmem_cache_free(dlm_mle_cache, alloc_mle);
return res;
}
#define DLM_MASTERY_TIMEOUT_MS 5000
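/*
* Poll (in DLM_MASTERY_TIMEOUT_MS slices) until all nodes in the vote
* map have responded and a master has been established, restarting
* mastery whenever the node map changes underneath us.
*/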
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
int *blocked)
{
u8 m;
int ret, bit;
int map_changed, voting_done;
int assert, sleep;
recheck:
ret = 0;
assert = 0;
/* check if another node has already become the owner */
spin_lock(&res->spinlock);
if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
res->lockname.len, res->lockname.name, res->owner);
spin_unlock(&res->spinlock);
/* this will cause the master to re-assert across
* the whole cluster, freeing up mles */
if (res->owner != dlm->node_num) {
ret = dlm_do_master_request(res, mle, res->owner);
if (ret < 0) {
/* give recovery a chance to run */
mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
msleep(500);
goto recheck;
}
}
ret = 0;
goto leave;
}
spin_unlock(&res->spinlock);
spin_lock(&mle->spinlock);
m = mle->master;
map_changed = !bitmap_equal(mle->vote_map, mle->node_map,
O2NM_MAX_NODES);
voting_done = bitmap_equal(mle->vote_map, mle->response_map,
O2NM_MAX_NODES);
/* restart if we hit any errors */
if (map_changed) {
int b;
mlog(0, "%s: %.*s: node map changed, restarting\n",
dlm->name, res->lockname.len, res->lockname.name);
ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
b = (mle->type == DLM_MLE_BLOCK);
if ((*blocked && !b) || (!*blocked && b)) {
mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
dlm->name, res->lockname.len, res->lockname.name,
*blocked, b);
*blocked = b;
}
spin_unlock(&mle->spinlock);
if (ret < 0) {
mlog_errno(ret);
goto leave;
}
mlog(0, "%s:%.*s: restart lock mastery succeeded, "
"rechecking now\n", dlm->name, res->lockname.len,
res->lockname.name);
goto recheck;
} else {
if (!voting_done) {
mlog(0, "map not changed and voting not done "
"for %s:%.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
}
}
if (m != O2NM_MAX_NODES) {
/* another node has done an assert!
* all done! */
sleep = 0;
} else {
sleep = 1;
/* have all nodes responded? */
if (voting_done && !*blocked) {
bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (dlm->node_num <= bit) {
/* my node number is lowest.
* now tell other nodes that I am
* mastering this. */
mle->master = dlm->node_num;
/* ref was grabbed in get_lock_resource
* will be dropped in dlmlock_master */
assert = 1;
sleep = 0;
}
/* if voting is done, but we have not received
* an assert master yet, we must sleep */
}
}
spin_unlock(&mle->spinlock);
/* sleep if we haven't finished voting yet */
if (sleep) {
unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
atomic_set(&mle->woken, 0);
(void)wait_event_timeout(mle->wq,
(atomic_read(&mle->woken) == 1),
timeo);
if (res->owner == O2NM_MAX_NODES) {
mlog(0, "%s:%.*s: waiting again\n", dlm->name,
res->lockname.len, res->lockname.name);
goto recheck;
}
mlog(0, "done waiting, master is %u\n", res->owner);
ret = 0;
goto leave;
}
ret = 0; /* done */
if (assert) {
m = dlm->node_num;
mlog(0, "about to master %.*s here, this=%u\n",
res->lockname.len, res->lockname.name, m);
ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
if (ret) {
/* This is a failure in the network path,
* not in the response to the assert_master
* (any nonzero response is a BUG on this node).
* Most likely a socket just got disconnected
* due to node death. */
mlog_errno(ret);
}
/* no longer need to restart lock mastery.
* all living nodes have been contacted. */
ret = 0;
}
/* set the lockres owner */
spin_lock(&res->spinlock);
/* mastery reference obtained either during
* assert_master_handler or in get_lock_resource */
dlm_change_lockres_owner(dlm, res, m);
spin_unlock(&res->spinlock);
leave:
return ret;
}
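/*
* Iterator over the nodes whose state differs between two bitmaps,
* reporting each changed node as NODE_UP or NODE_DOWN.
*/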
struct dlm_bitmap_diff_iter
{
int curnode;
unsigned long *orig_bm;
unsigned long *cur_bm;
unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
enum dlm_node_state_change
{
NODE_DOWN = -1,
NODE_NO_CHANGE = 0,
NODE_UP
};
static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
unsigned long *orig_bm,
unsigned long *cur_bm)
{
unsigned long p1, p2;
int i;
iter->curnode = -1;
iter->orig_bm = orig_bm;
iter->cur_bm = cur_bm;
for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
p1 = *(iter->orig_bm + i);
p2 = *(iter->cur_bm + i);
iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
}
}
static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
enum dlm_node_state_change *state)
{
int bit;
if (iter->curnode >= O2NM_MAX_NODES)
return -ENOENT;
bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
iter->curnode+1);
if (bit >= O2NM_MAX_NODES) {
iter->curnode = O2NM_MAX_NODES;
return -ENOENT;
}
/* if it was there in the original then this node died */
if (test_bit(bit, iter->orig_bm))
*state = NODE_DOWN;
else
*state = NODE_UP;
iter->curnode = bit;
return bit;
}
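/*
* The node map changed while mastery was in flight: re-request from
* nodes that came up and forget votes from nodes that died. Returns
* -EAGAIN so the caller redoes the master requests.
*/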
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
int blocked)
{
struct dlm_bitmap_diff_iter bdi;
enum dlm_node_state_change sc;
int node;
int ret = 0;
mlog(0, "something happened such that the "
"master process may need to be restarted!\n");
assert_spin_locked(&mle->spinlock);
dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
while (node >= 0) {
if (sc == NODE_UP) {
/* a node came up. clear any old vote from
* the response map and set it in the vote map
* then restart the mastery. */
mlog(ML_NOTICE, "node %d up while restarting\n", node);
/* redo the master request, but only for the new node */
mlog(0, "sending request to new node\n");
clear_bit(node, mle->response_map);
set_bit(node, mle->vote_map);
} else {
mlog(ML_ERROR, "node down! %d\n", node);
if (blocked) {
int lowest = find_first_bit(mle->maybe_map,
O2NM_MAX_NODES);
/* act like it was never there */
clear_bit(node, mle->maybe_map);
if (node == lowest) {
mlog(0, "expected master %u died"
" while this node was blocked "
"waiting on it!\n", node);
lowest = find_next_bit(mle->maybe_map,
O2NM_MAX_NODES,
lowest+1);
if (lowest < O2NM_MAX_NODES) {
mlog(0, "%s:%.*s:still "
"blocked. waiting on %u "
"now\n", dlm->name,
res->lockname.len,
res->lockname.name,
lowest);
} else {
/* mle is an MLE_BLOCK, but
* there is now nothing left to
* block on. we need to return
* all the way back out and try
* again with an MLE_MASTER.
* dlm_do_local_recovery_cleanup
* has already run, so the mle
* refcount is ok */
mlog(0, "%s:%.*s: no "
"longer blocking. try to "
"master this here\n",
dlm->name,
res->lockname.len,
res->lockname.name);
mle->type = DLM_MLE_MASTER;
mle->mleres = res;
}
}
}
/* now blank out everything, as if we had never
* contacted anyone */
bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
bitmap_zero(mle->response_map, O2NM_MAX_NODES);
/* reset the vote_map to the current node_map */
bitmap_copy(mle->vote_map, mle->node_map,
O2NM_MAX_NODES);
/* put myself into the maybe map */
if (mle->type != DLM_MLE_BLOCK)
set_bit(dlm->node_num, mle->maybe_map);
}
ret = -EAGAIN;
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
}
return ret;
}
/*
* DLM_MASTER_REQUEST_MSG
*
* returns: 0 on success,
* -errno on a network error
*
* on error, the caller should assume the target node is "dead"
*
*/
static int dlm_do_master_request(struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle, int to)
{
struct dlm_ctxt *dlm = mle->dlm;
struct dlm_master_request request;
int ret, response=0, resend;
memset(&request, 0, sizeof(request));
request.node_idx = dlm->node_num;
BUG_ON(mle->type == DLM_MLE_MIGRATION);
request.namelen = (u8)mle->mnamelen;
memcpy(request.name, mle->mname, request.namelen);
again:
ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
sizeof(request), to, &response);
if (ret < 0) {
if (ret == -ESRCH) {
/* should never happen */
mlog(ML_ERROR, "TCP stack not ready!\n");
BUG();
} else if (ret == -EINVAL) {
mlog(ML_ERROR, "bad args passed to o2net!\n");
BUG();
} else if (ret == -ENOMEM) {
mlog(ML_ERROR, "out of memory while trying to send "
"network message! retrying\n");
/* this is totally crude */
msleep(50);
goto again;
} else if (!dlm_is_host_down(ret)) {
/* not a network error. bad. */
mlog_errno(ret);
mlog(ML_ERROR, "unhandled error!");
BUG();
}
/* all other errors should be network errors,
* and likely indicate node death */
mlog(ML_ERROR, "link to %d went down!\n", to);
goto out;
}
ret = 0;
resend = 0;
spin_lock(&mle->spinlock);
switch (response) {
case DLM_MASTER_RESP_YES:
set_bit(to, mle->response_map);
mlog(0, "node %u is the master, response=YES\n", to);
mlog(0, "%s:%.*s: master node %u now knows I have a "
"reference\n", dlm->name, res->lockname.len,
res->lockname.name, to);
mle->master = to;
break;
case DLM_MASTER_RESP_NO:
mlog(0, "node %u not master, response=NO\n", to);
set_bit(to, mle->response_map);
break;
case DLM_MASTER_RESP_MAYBE:
mlog(0, "node %u not master, response=MAYBE\n", to);
set_bit(to, mle->response_map);
set_bit(to, mle->maybe_map);
break;
case DLM_MASTER_RESP_ERROR:
mlog(0, "node %u hit an error, resending\n", to);
resend = 1;
response = 0;
break;
default:
mlog(ML_ERROR, "bad response! %u\n", response);
BUG();
}
spin_unlock(&mle->spinlock);
if (resend) {
/* this is also totally crude */
msleep(50);
goto again;
}
out:
return ret;
}
/*
* locks that can be taken here:
* dlm->spinlock
* res->spinlock
* mle->spinlock
* dlm->master_list
*
* if possible, TRIM THIS DOWN!!!
*/
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
u8 response = DLM_MASTER_RESP_MAYBE;
struct dlm_ctxt *dlm = data;
struct dlm_lock_resource *res = NULL;
struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
char *name;
unsigned int namelen, hash;
int found, ret;
int set_maybe;
int dispatch_assert = 0;
int dispatched = 0;
if (!dlm_grab(dlm))
return DLM_MASTER_RESP_NO;
if (!dlm_domain_fully_joined(dlm)) {
response = DLM_MASTER_RESP_NO;
goto send_response;
}
name = request->name;
namelen = request->namelen;
hash = dlm_lockid_hash(name, namelen);
if (namelen > DLM_LOCKID_NAME_MAX) {
response = DLM_IVBUFLEN;
goto send_response;
}
way_up_top:
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, namelen, hash);
if (res) {
spin_unlock(&dlm->spinlock);
/* take care of the easy cases up front */
spin_lock(&res->spinlock);
/*
* Right after dlm spinlock was released, dlm_thread could have
* purged the lockres. Check if lockres got unhashed. If so
* start over.
*/
if (hlist_unhashed(&res->hash_node)) {
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
goto way_up_top;
}
if (res->state & (DLM_LOCK_RES_RECOVERING|
DLM_LOCK_RES_MIGRATING)) {
spin_unlock(&res->spinlock);
mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
"being recovered/migrated\n");
response = DLM_MASTER_RESP_ERROR;
if (mle)
kmem_cache_free(dlm_mle_cache, mle);
goto send_response;
}
if (res->owner == dlm->node_num) {
dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
spin_unlock(&res->spinlock);
response = DLM_MASTER_RESP_YES;
if (mle)
kmem_cache_free(dlm_mle_cache, mle);
/* this node is the owner.
* there is some extra work that needs to
* happen now. the requesting node has
* caused all nodes up to this one to
* create mles. this node now needs to
* go back and clean those up. */
dispatch_assert = 1;
goto send_response;
} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
spin_unlock(&res->spinlock);
// mlog(0, "node %u is the master\n", res->owner);
response = DLM_MASTER_RESP_NO;
if (mle)
kmem_cache_free(dlm_mle_cache, mle);
goto send_response;
}
/* ok, there is no owner. either this node is
* being blocked, or it is actively trying to
* master this lock. */
if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
mlog(ML_ERROR, "lock with no owner should be "
"in-progress!\n");
BUG();
}
// mlog(0, "lockres is in progress...\n");
spin_lock(&dlm->master_lock);
found = dlm_find_mle(dlm, &tmpmle, name, namelen);
if (!found) {
mlog(ML_ERROR, "no mle found for this lock!\n");
BUG();
}
set_maybe = 1;
spin_lock(&tmpmle->spinlock);
if (tmpmle->type == DLM_MLE_BLOCK) {
// mlog(0, "this node is waiting for "
// "lockres to be mastered\n");
response = DLM_MASTER_RESP_NO;
} else if (tmpmle->type == DLM_MLE_MIGRATION) {
mlog(0, "node %u is master, but trying to migrate to "
"node %u.\n", tmpmle->master, tmpmle->new_master);
if (tmpmle->master == dlm->node_num) {
mlog(ML_ERROR, "no owner on lockres, but this "
"node is trying to migrate it to %u?!\n",
tmpmle->new_master);
BUG();
} else {
/* the real master can respond on its own */
response = DLM_MASTER_RESP_NO;
}
} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
set_maybe = 0;
if (tmpmle->master == dlm->node_num) {
response = DLM_MASTER_RESP_YES;
/* this node will be the owner.
* go back and clean the mles on any
* other nodes */
dispatch_assert = 1;
dlm_lockres_set_refmap_bit(dlm, res,
request->node_idx);
} else
response = DLM_MASTER_RESP_NO;
} else {
// mlog(0, "this node is attempting to "
// "master lockres\n");
response = DLM_MASTER_RESP_MAYBE;
}
if (set_maybe)
set_bit(request->node_idx, tmpmle->maybe_map);
spin_unlock(&tmpmle->spinlock);
spin_unlock(&dlm->master_lock);
spin_unlock(&res->spinlock);
/* keep the mle attached to heartbeat events */
dlm_put_mle(tmpmle);
if (mle)
kmem_cache_free(dlm_mle_cache, mle);
goto send_response;
}
/*
* lockres doesn't exist on this node
* if there is an MLE_BLOCK, return NO
* if there is an MLE_MASTER, return MAYBE
* otherwise, add an MLE_BLOCK, return NO
*/
spin_lock(&dlm->master_lock);
found = dlm_find_mle(dlm, &tmpmle, name, namelen);
if (!found) {
/* this lockid has never been seen on this node yet */
// mlog(0, "no mle found\n");
if (!mle) {
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
response = DLM_MASTER_RESP_ERROR;
mlog_errno(-ENOMEM);
goto send_response;
}
goto way_up_top;
}
// mlog(0, "this is second time thru, already allocated, "
// "add the block.\n");
dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
set_bit(request->node_idx, mle->maybe_map);
__dlm_insert_mle(dlm, mle);
response = DLM_MASTER_RESP_NO;
} else {
spin_lock(&tmpmle->spinlock);
if (tmpmle->master == dlm->node_num) {
mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
BUG();
}
if (tmpmle->type == DLM_MLE_BLOCK)
response = DLM_MASTER_RESP_NO;
else if (tmpmle->type == DLM_MLE_MIGRATION) {
mlog(0, "migration mle was found (%u->%u)\n",
tmpmle->master, tmpmle->new_master);
/* real master can respond on its own */
response = DLM_MASTER_RESP_NO;
} else
response = DLM_MASTER_RESP_MAYBE;
set_bit(request->node_idx, tmpmle->maybe_map);
spin_unlock(&tmpmle->spinlock);
}
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
if (found) {
/* keep the mle attached to heartbeat events */
dlm_put_mle(tmpmle);
}
send_response:
/*
* __dlm_lookup_lockres() grabbed a reference to this lockres.
* The reference is released by dlm_assert_master_worker() under
* the call to dlm_dispatch_assert_master(). If
* dlm_assert_master_worker() isn't called, we drop it here.
*/
if (dispatch_assert) {
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
spin_lock(&res->spinlock);
ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n");
response = DLM_MASTER_RESP_ERROR;
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
} else {
dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
spin_unlock(&res->spinlock);
}
} else {
if (res)
dlm_lockres_put(res);
}
if (!dispatched)
dlm_put(dlm);
return response;
}
/*
* DLM_ASSERT_MASTER_MSG
*/
/*
* NOTE: this can be used for debugging
* can periodically run all locks owned by this node
* and re-assert across the cluster...
*/
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
void *nodemap, u32 flags)
{
struct dlm_assert_master assert;
int to, tmpret;
struct dlm_node_iter iter;
int ret = 0;
int reassert;
const char *lockname = res->lockname.name;
unsigned int namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
spin_lock(&res->spinlock);
res->state |= DLM_LOCK_RES_SETREF_INPROG;
spin_unlock(&res->spinlock);
again:
reassert = 0;
/* note that if this nodemap is empty, it returns 0 */
dlm_node_iter_init(nodemap, &iter);
while ((to = dlm_node_iter_next(&iter)) >= 0) {
int r = 0;
struct dlm_master_list_entry *mle = NULL;
mlog(0, "sending assert master to %d (%.*s)\n", to,
namelen, lockname);
memset(&assert, 0, sizeof(assert));
assert.node_idx = dlm->node_num;
assert.namelen = namelen;
memcpy(assert.name, lockname, namelen);
assert.flags = cpu_to_be32(flags);
tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
mlog(ML_ERROR, "Error %d when sending message %u (key "
"0x%x) to node %u\n", tmpret,
DLM_ASSERT_MASTER_MSG, dlm->key, to);
if (!dlm_is_host_down(tmpret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
BUG();
}
/* a node died. finish out the rest of the nodes. */
mlog(0, "link to %d went down!\n", to);
/* any nonzero status return will do */
ret = tmpret;
r = 0;
} else if (r < 0) {
/* ok, something is horribly messed up. kill thyself. */
mlog(ML_ERROR,"during assert master of %.*s to %u, "
"got %d.\n", namelen, lockname, to, r);
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
if (dlm_find_mle(dlm, &mle, (char *)lockname,
namelen)) {
dlm_print_one_mle(mle);
__dlm_put_mle(mle);
}
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
BUG();
}
if (r & DLM_ASSERT_RESPONSE_REASSERT &&
!(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
mlog(ML_ERROR, "%.*s: very strange, "
"master MLE but no lockres on %u\n",
namelen, lockname, to);
}
if (r & DLM_ASSERT_RESPONSE_REASSERT) {
mlog(0, "%.*s: node %u create mles on other "
"nodes and requests a re-assert\n",
namelen, lockname, to);
reassert = 1;
}
if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
mlog(0, "%.*s: node %u has a reference to this "
"lockres, set the bit in the refmap\n",
namelen, lockname, to);
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, to);
spin_unlock(&res->spinlock);
}
}
if (reassert)
goto again;
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
return ret;
}
/*
* locks that can be taken here:
* dlm->spinlock
* res->spinlock
* mle->spinlock
* dlm->master_list
*
* if possible, TRIM THIS DOWN!!!
*/
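/*
 * Process an assert_master from another node: sanity-check it against
 * any matching mle and the current lockres owner (inconsistencies are
 * fatal, or kill the sender), record the asserting node as master,
 * finish a pending migration if one is in flight, and tell the sender
 * whether it must re-assert (REASSERT) and whether this node holds a
 * reference to the lockres (MASTERY_REF).
 */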
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_master_list_entry *mle = NULL;
struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
struct dlm_lock_resource *res = NULL;
char *name;
unsigned int namelen, hash;
u32 flags;
int master_request = 0, have_lockres_ref = 0;
int ret = 0;
if (!dlm_grab(dlm))
return 0;
name = assert->name;
namelen = assert->namelen;
hash = dlm_lockid_hash(name, namelen);
flags = be32_to_cpu(assert->flags);
if (namelen > DLM_LOCKID_NAME_MAX) {
mlog(ML_ERROR, "Invalid name length!");
goto done;
}
spin_lock(&dlm->spinlock);
if (flags)
mlog(0, "assert_master with flags: %u\n", flags);
/* find the MLE */
spin_lock(&dlm->master_lock);
if (!dlm_find_mle(dlm, &mle, name, namelen)) {
/* not an error, could be master just re-asserting */
mlog(0, "just got an assert_master from %u, but no "
"MLE for it! (%.*s)\n", assert->node_idx,
namelen, name);
} else {
int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES) {
/* not necessarily an error, though less likely.
* could be master just re-asserting. */
mlog(0, "no bits set in the maybe_map, but %u "
"is asserting! (%.*s)\n", assert->node_idx,
namelen, name);
} else if (bit != assert->node_idx) {
if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
mlog(0, "master %u was found, %u should "
"back off\n", assert->node_idx, bit);
} else {
/* with the fix for bug 569, a higher node
* number winning the mastery will respond
* YES to mastery requests, but this node
* had no way of knowing. let it pass. */
mlog(0, "%u is the lowest node, "
"%u is asserting. (%.*s) %u must "
"have begun after %u won.\n", bit,
assert->node_idx, namelen, name, bit,
assert->node_idx);
}
}
if (mle->type == DLM_MLE_MIGRATION) {
if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
mlog(0, "%s:%.*s: got cleanup assert"
" from %u for migration\n",
dlm->name, namelen, name,
assert->node_idx);
} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
mlog(0, "%s:%.*s: got unrelated assert"
" from %u for migration, ignoring\n",
dlm->name, namelen, name,
assert->node_idx);
__dlm_put_mle(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
goto done;
}
}
}
spin_unlock(&dlm->master_lock);
/* ok everything checks out with the MLE
* now check to see if there is a lockres */
res = __dlm_lookup_lockres(dlm, name, namelen, hash);
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
mlog(ML_ERROR, "%u asserting but %.*s is "
"RECOVERING!\n", assert->node_idx, namelen, name);
goto kill;
}
if (!mle) {
if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
res->owner != assert->node_idx) {
mlog(ML_ERROR, "DIE! Mastery assert from %u, "
"but current owner is %u! (%.*s)\n",
assert->node_idx, res->owner, namelen,
name);
__dlm_print_one_lock_resource(res);
BUG();
}
} else if (mle->type != DLM_MLE_MIGRATION) {
if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
/* owner is just re-asserting */
if (res->owner == assert->node_idx) {
mlog(0, "owner %u re-asserting on "
"lock %.*s\n", assert->node_idx,
namelen, name);
goto ok;
}
mlog(ML_ERROR, "got assert_master from "
"node %u, but %u is the owner! "
"(%.*s)\n", assert->node_idx,
res->owner, namelen, name);
goto kill;
}
if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
mlog(ML_ERROR, "got assert from %u, but lock "
"with no owner should be "
"in-progress! (%.*s)\n",
assert->node_idx,
namelen, name);
goto kill;
}
} else /* mle->type == DLM_MLE_MIGRATION */ {
/* should only be getting an assert from new master */
if (assert->node_idx != mle->new_master) {
mlog(ML_ERROR, "got assert from %u, but "
"new master is %u, and old master "
"was %u (%.*s)\n",
assert->node_idx, mle->new_master,
mle->master, namelen, name);
goto kill;
}
}
ok:
spin_unlock(&res->spinlock);
}
// mlog(0, "woo! got an assert_master from node %u!\n",
// assert->node_idx);
if (mle) {
int extra_ref = 0;
int nn = -1;
int rr, err = 0;
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
extra_ref = 1;
else {
/* MASTER mle: if any bits set in the response map
* then the calling node needs to re-assert to clear
* up nodes that this node contacted */
while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
nn+1)) < O2NM_MAX_NODES) {
if (nn != dlm->node_num && nn != assert->node_idx) {
master_request = 1;
break;
}
}
}
mle->master = assert->node_idx;
atomic_set(&mle->woken, 1);
wake_up(&mle->wq);
spin_unlock(&mle->spinlock);
if (res) {
int wake = 0;
spin_lock(&res->spinlock);
if (mle->type == DLM_MLE_MIGRATION) {
mlog(0, "finishing off migration of lockres %.*s, "
"from %u to %u\n",
res->lockname.len, res->lockname.name,
dlm->node_num, mle->new_master);
res->state &= ~DLM_LOCK_RES_MIGRATING;
wake = 1;
dlm_change_lockres_owner(dlm, res, mle->new_master);
BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
} else {
dlm_change_lockres_owner(dlm, res, mle->master);
}
spin_unlock(&res->spinlock);
have_lockres_ref = 1;
if (wake)
wake_up(&res->wq);
}
/* master is known, detach if not already detached.
* ensures that only one assert_master call will happen
* on this mle. */
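/* sanity check the mle refcount before the final puts. we expect
 * at least one ref from the dlm_find_mle() above, one more if the
 * mle is marked inuse, and one more for BLOCK/MIGRATION mles (the
 * extra ref given by the master/migration request message).
 * anything lower means a reference was dropped too early. */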
spin_lock(&dlm->master_lock);
rr = kref_read(&mle->mle_refs);
if (mle->inuse > 0) {
if (extra_ref && rr < 3)
err = 1;
else if (!extra_ref && rr < 2)
err = 1;
} else {
if (extra_ref && rr < 2)
err = 1;
else if (!extra_ref && rr < 1)
err = 1;
}
if (err) {
mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
"that will mess up this node, refs=%d, extra=%d, "
"inuse=%d\n", dlm->name, namelen, name,
assert->node_idx, rr, extra_ref, mle->inuse);
dlm_print_one_mle(mle);
}
__dlm_unlink_mle(dlm, mle);
__dlm_mle_detach_hb_events(dlm, mle);
__dlm_put_mle(mle);
if (extra_ref) {
/* the assert master message now balances the extra
* ref given by the master / migration request message.
* if this is the last put, it will be removed
* from the list. */
__dlm_put_mle(mle);
}
spin_unlock(&dlm->master_lock);
} else if (res) {
if (res->owner != assert->node_idx) {
mlog(0, "assert_master from %u, but current "
"owner is %u (%.*s), no mle\n", assert->node_idx,
res->owner, namelen, name);
}
}
spin_unlock(&dlm->spinlock);
done:
ret = 0;
if (res) {
spin_lock(&res->spinlock);
res->state |= DLM_LOCK_RES_SETREF_INPROG;
spin_unlock(&res->spinlock);
*ret_data = (void *)res;
}
dlm_put(dlm);
if (master_request) {
mlog(0, "need to tell master to reassert\n");
/* positive. negative would shoot down the node. */
ret |= DLM_ASSERT_RESPONSE_REASSERT;
if (!have_lockres_ref) {
mlog(ML_ERROR, "strange, got assert from %u, MASTER "
"mle present here for %s:%.*s, but no lockres!\n",
assert->node_idx, dlm->name, namelen, name);
}
}
if (have_lockres_ref) {
/* let the master know we have a reference to the lockres */
ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
dlm->name, namelen, name, assert->node_idx);
}
return ret;
kill:
/* kill the caller! */
mlog(ML_ERROR, "Bad message received from another node. Dumping state "
"and killing the other node now! This node is OK and can continue.\n");
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
spin_lock(&dlm->master_lock);
if (mle)
__dlm_put_mle(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
*ret_data = (void *)res;
dlm_put(dlm);
return -EINVAL;
}
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
if (ret_data) {
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
dlm_lockres_put(res);
}
return;
}
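/*
 * Queue an assert_master for this lockres on the dlm work queue. The
 * caller must already hold a reference on the lockres; it is released
 * by dlm_assert_master_worker() when the work item runs.
 */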
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
int ignore_higher, u8 request_from, u32 flags)
{
struct dlm_work_item *item;
item = kzalloc(sizeof(*item), GFP_ATOMIC);
if (!item)
return -ENOMEM;
/* queue up work for dlm_assert_master_worker */
dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
item->u.am.lockres = res; /* already have a ref */
/* can optionally ignore node numbers higher than this node */
item->u.am.ignore_higher = ignore_higher;
item->u.am.request_from = request_from;
item->u.am.flags = flags;
if (ignore_higher)
mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
res->lockname.name);
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
queue_work(dlm->dlm_worker, &dlm->dispatched_work);
return 0;
}
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
struct dlm_ctxt *dlm = data;
int ret = 0;
struct dlm_lock_resource *res;
unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
int ignore_higher;
int bit;
u8 request_from;
u32 flags;
dlm = item->dlm;
res = item->u.am.lockres;
ignore_higher = item->u.am.ignore_higher;
request_from = item->u.am.request_from;
flags = item->u.am.flags;
spin_lock(&dlm->spinlock);
bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES);
spin_unlock(&dlm->spinlock);
clear_bit(dlm->node_num, nodemap);
if (ignore_higher) {
/* if this is just to clear up mles for nodes below
* this node, do not send the message to the original
* caller or any node number higher than this */
clear_bit(request_from, nodemap);
bit = dlm->node_num;
while (1) {
bit = find_next_bit(nodemap, O2NM_MAX_NODES,
bit+1);
if (bit >= O2NM_MAX_NODES)
break;
clear_bit(bit, nodemap);
}
}
/*
* If we're migrating this lock to someone else, we are no
* longer allowed to assert our own mastery. OTOH, we need to
* prevent migration from starting while we're still asserting
* our dominance. The reserved ast delays migration.
*/
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_MIGRATING) {
mlog(0, "Someone asked us to assert mastery, but we're "
"in the middle of migration. Skipping assert, "
"the new master will handle that.\n");
spin_unlock(&res->spinlock);
goto put;
} else
__dlm_lockres_reserve_ast(res);
spin_unlock(&res->spinlock);
/* this call now finishes out the nodemap
* even if one or more nodes die */
mlog(0, "worker about to master %.*s here, this=%u\n",
res->lockname.len, res->lockname.name, dlm->node_num);
ret = dlm_do_assert_master(dlm, res, nodemap, flags);
if (ret < 0) {
/* no need to restart, we are done */
if (!dlm_is_host_down(ret))
mlog_errno(ret);
}
/* Ok, we've asserted ourselves. Let's let migration start. */
dlm_lockres_release_ast(dlm, res);
put:
dlm_lockres_drop_inflight_worker(dlm, res);
dlm_lockres_put(res);
mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
* We cannot wait for node recovery to complete to begin mastering this
* lockres because this lockres is used to kick off recovery! ;-)
* So, do a pre-check on all living nodes to see if any of those nodes
* think that $RECOVERY is currently mastered by a dead node. If so,
* we wait a short time to allow that node to get notified by its own
* heartbeat stack, then check again. All $RECOVERY lock resources
* mastered by dead nodes are purged when the heartbeat callback is
* fired, so we can know for sure that it is safe to continue once
* the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
struct dlm_node_iter iter;
int nodenum;
int ret = 0;
u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
/* do not send to self */
if (nodenum == dlm->node_num)
continue;
ret = dlm_do_master_requery(dlm, res, nodenum, &master);
if (ret < 0) {
mlog_errno(ret);
if (!dlm_is_host_down(ret))
BUG();
/* host is down, so answer for that node would be
* DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
ret = 0;
}
if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
/* check to see if this master is in the recovery map */
spin_lock(&dlm->spinlock);
if (test_bit(master, dlm->recovery_map)) {
mlog(ML_NOTICE, "%s: node %u has not seen "
"node %u go down yet, and thinks the "
"dead node is mastering the recovery "
"lock. must wait.\n", dlm->name,
nodenum, master);
ret = -EAGAIN;
}
spin_unlock(&dlm->spinlock);
mlog(0, "%s: reco lock master is %u\n", dlm->name,
master);
break;
}
}
return ret;
}
/*
* DLM_DEREF_LOCKRES_MSG
*/
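/*
 * Sent by a non-master node to ask the master to clear this node's
 * bit in the lockres refmap, once the local node no longer needs the
 * lockres.
 */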
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct dlm_deref_lockres deref;
int ret = 0, r;
const char *lockname;
unsigned int namelen;
lockname = res->lockname.name;
namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
memset(&deref, 0, sizeof(deref));
deref.node_idx = dlm->node_num;
deref.namelen = namelen;
memcpy(deref.name, lockname, namelen);
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
dlm->name, namelen, lockname, ret, res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
if (r == -ENOMEM)
BUG();
} else
ret = r;
return ret;
}
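/*
 * Handle a DEREF message from a non-master node by clearing that
 * node's bit in the refmap. If an assert_master response is still in
 * the middle of setting the ref (SETREF_INPROG), defer the clear to
 * dlm_deref_lockres_worker so it is guaranteed to happen after the
 * set.
 */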
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
struct dlm_lock_resource *res = NULL;
char *name;
unsigned int namelen;
int ret = -EINVAL;
u8 node;
unsigned int hash;
struct dlm_work_item *item;
int cleared = 0;
int dispatch = 0;
if (!dlm_grab(dlm))
return 0;
name = deref->name;
namelen = deref->namelen;
node = deref->node_idx;
if (namelen > DLM_LOCKID_NAME_MAX) {
mlog(ML_ERROR, "Invalid name length!");
goto done;
}
if (deref->node_idx >= O2NM_MAX_NODES) {
mlog(ML_ERROR, "Invalid node number: %u\n", node);
goto done;
}
hash = dlm_lockid_hash(name, namelen);
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
if (!res) {
spin_unlock(&dlm->spinlock);
mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
dlm->name, namelen, name);
goto done;
}
spin_unlock(&dlm->spinlock);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_SETREF_INPROG)
dispatch = 1;
else {
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
}
spin_unlock(&res->spinlock);
if (!dispatch) {
if (cleared)
dlm_lockres_calc_usage(dlm, res);
else {
mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
"but it is already dropped!\n", dlm->name,
res->lockname.len, res->lockname.name, node);
dlm_print_one_lock_resource(res);
}
ret = DLM_DEREF_RESPONSE_DONE;
goto done;
}
item = kzalloc(sizeof(*item), GFP_NOFS);
if (!item) {
ret = -ENOMEM;
mlog_errno(ret);
goto done;
}
dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
item->u.dl.deref_res = res;
item->u.dl.deref_node = node;
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
queue_work(dlm->dlm_worker, &dlm->dispatched_work);
return DLM_DEREF_RESPONSE_INPROG;
done:
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return ret;
}
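/*
 * Handle a DEREF DONE message from the master: our DEREF has been
 * processed, so it is now safe to finish purging the lockres locally.
 */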
int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_deref_lockres_done *deref
= (struct dlm_deref_lockres_done *)msg->buf;
struct dlm_lock_resource *res = NULL;
char *name;
unsigned int namelen;
int ret = -EINVAL;
u8 node;
unsigned int hash;
if (!dlm_grab(dlm))
return 0;
name = deref->name;
namelen = deref->namelen;
node = deref->node_idx;
if (namelen > DLM_LOCKID_NAME_MAX) {
mlog(ML_ERROR, "Invalid name length!");
goto done;
}
if (deref->node_idx >= O2NM_MAX_NODES) {
mlog(ML_ERROR, "Invalid node number: %u\n", node);
goto done;
}
hash = dlm_lockid_hash(name, namelen);
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
if (!res) {
spin_unlock(&dlm->spinlock);
mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
dlm->name, namelen, name);
goto done;
}
spin_lock(&res->spinlock);
if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
"but it is already derefed!\n", dlm->name,
res->lockname.len, res->lockname.name, node);
ret = 0;
goto done;
}
__dlm_do_purge_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
spin_unlock(&dlm->spinlock);
ret = 0;
done:
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return ret;
}
static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, u8 node)
{
struct dlm_deref_lockres_done deref;
int ret = 0, r;
const char *lockname;
unsigned int namelen;
lockname = res->lockname.name;
namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
memset(&deref, 0, sizeof(deref));
deref.node_idx = dlm->node_num;
deref.namelen = namelen;
memcpy(deref.name, lockname, namelen);
ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
&deref, sizeof(deref), node, &r);
if (ret < 0) {
mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
" to node %u\n", dlm->name, namelen,
lockname, ret, node);
} else if (r < 0) {
/* ignore the error */
mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
dlm->name, namelen, lockname, node, r);
dlm_print_one_lock_resource(res);
}
}
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
struct dlm_ctxt *dlm;
struct dlm_lock_resource *res;
u8 node;
u8 cleared = 0;
dlm = item->dlm;
res = item->u.dl.deref_res;
node = item->u.dl.deref_node;
spin_lock(&res->spinlock);
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
if (test_bit(node, res->refmap)) {
dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
spin_unlock(&res->spinlock);
dlm_drop_lockres_ref_done(dlm, res, node);
if (cleared) {
mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
dlm->name, res->lockname.len, res->lockname.name, node);
dlm_lockres_calc_usage(dlm, res);
} else {
mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
"but it is already dropped!\n", dlm->name,
res->lockname.len, res->lockname.name, node);
dlm_print_one_lock_resource(res);
}
dlm_lockres_put(res);
}
/*
* A migratable resource is one that is:
* 1. locally mastered, and,
* 2. zero local locks, and,
* 3. one or more non-local locks, or, one or more references
* Returns 1 if yes, 0 if not.
*/
static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
enum dlm_lockres_list idx;
int nonlocal = 0, node_ref;
struct list_head *queue;
struct dlm_lock *lock;
u64 cookie;
assert_spin_locked(&res->spinlock);
/* delay migration when the lockres is in MIGRATING state */
if (res->state & DLM_LOCK_RES_MIGRATING)
return 0;
/* delay migration when the lockres is in RECOVERING state */
if (res->state & (DLM_LOCK_RES_RECOVERING|
DLM_LOCK_RES_RECOVERY_WAITING))
return 0;
if (res->owner != dlm->node_num)
return 0;
for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
queue = dlm_list_idx_to_ptr(res, idx);
list_for_each_entry(lock, queue, list) {
if (lock->ml.node != dlm->node_num) {
nonlocal++;
continue;
}
cookie = be64_to_cpu(lock->ml.cookie);
mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
"%s list\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(cookie),
dlm_get_lock_cookie_seq(cookie),
dlm_list_in_text(idx));
return 0;
}
}
if (!nonlocal) {
node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (node_ref >= O2NM_MAX_NODES)
return 0;
}
mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
res->lockname.name);
return 1;
}
/*
* DLM_MIGRATE_LOCKRES
*/
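/*
 * Migrating a locally mastered lockres to another node proceeds in
 * stages: preallocate the migration structures, register a MIGRATION
 * mle so concurrent mastery attempts block, set the MIGRATING flag
 * and flush outstanding asts, send the full lock state to the target,
 * then wait for the target to assert mastery before switching the
 * owner. A failure at any stage clears MIGRATING and re-dirties the
 * lockres.
 */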
static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, u8 target)
{
struct dlm_master_list_entry *mle = NULL;
struct dlm_master_list_entry *oldmle = NULL;
struct dlm_migratable_lockres *mres = NULL;
int ret = 0;
const char *name;
unsigned int namelen;
int mle_added = 0;
int wake = 0;
if (!dlm_grab(dlm))
return -EINVAL;
name = res->lockname.name;
namelen = res->lockname.len;
mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
target);
/* preallocate up front. if this fails, abort */
ret = -ENOMEM;
mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
if (!mres) {
mlog_errno(ret);
goto leave;
}
mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
mlog_errno(ret);
goto leave;
}
ret = 0;
/*
* clear any existing master requests and
* add the migration mle to the list
*/
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
namelen, target, dlm->node_num);
/* get an extra reference on the mle.
* otherwise the assert_master from the new
* master will destroy this.
*/
if (ret != -EEXIST)
dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
if (ret == -EEXIST) {
mlog(0, "another process is already migrating it\n");
goto fail;
}
mle_added = 1;
/*
* set the MIGRATING flag and flush asts
* if we fail after this we need to re-dirty the lockres
*/
if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
"the target went down.\n", res->lockname.len,
res->lockname.name, target);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_MIGRATING;
wake = 1;
spin_unlock(&res->spinlock);
ret = -EINVAL;
}
fail:
if (ret != -EEXIST && oldmle) {
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, oldmle);
dlm_put_mle(oldmle);
}
if (ret < 0) {
if (mle_added) {
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
dlm_put_mle_inuse(mle);
} else if (mle) {
kmem_cache_free(dlm_mle_cache, mle);
mle = NULL;
}
goto leave;
}
/*
* at this point, we have a migration target, an mle
* in the master list, and the MIGRATING flag set on
* the lockres
*/
/* now that remote nodes are spinning on the MIGRATING flag,
* ensure that all assert_master work is flushed. */
flush_workqueue(dlm->dlm_worker);
/* notify new node and send all lock state */
/* call send_one_lockres with migration flag.
* this serves as notice to the target node that a
* migration is starting. */
ret = dlm_send_one_lockres(dlm, res, mres, target,
DLM_MRES_MIGRATION);
if (ret < 0) {
mlog(0, "migration to node %u failed with %d\n",
target, ret);
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
dlm_put_mle_inuse(mle);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_MIGRATING;
wake = 1;
spin_unlock(&res->spinlock);
if (dlm_is_host_down(ret))
dlm_wait_for_node_death(dlm, target,
DLM_NODE_DEATH_WAIT_MAX);
goto leave;
}
/* at this point, the target sends a message to all nodes,
* (using dlm_do_migrate_request). this node is skipped since
* we had to put an mle in the list to begin the process. this
* node now waits for target to do an assert master. this node
* will be the last one notified, ensuring that the migration
* is complete everywhere. if the target dies while this is
* going on, some nodes could potentially see the target as the
* master, so it is important that my recovery finds the migration
* mle and sets the master to UNKNOWN. */
/* wait for new node to assert master */
while (1) {
ret = wait_event_interruptible_timeout(mle->wq,
(atomic_read(&mle->woken) == 1),
msecs_to_jiffies(5000));
if (ret >= 0) {
if (atomic_read(&mle->woken) == 1 ||
res->owner == target)
break;
mlog(0, "%s:%.*s: timed out during migration\n",
dlm->name, res->lockname.len, res->lockname.name);
/* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
mlog(0, "%s:%.*s: expected migration "
"target %u is no longer up, restarting\n",
dlm->name, res->lockname.len,
res->lockname.name, target);
ret = -EINVAL;
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
dlm_put_mle_inuse(mle);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_MIGRATING;
wake = 1;
spin_unlock(&res->spinlock);
goto leave;
}
} else
mlog(0, "%s:%.*s: caught signal during migration\n",
dlm->name, res->lockname.len, res->lockname.name);
}
/* all done, set the owner, clear the flag */
spin_lock(&res->spinlock);
dlm_set_lockres_owner(dlm, res, target);
res->state &= ~DLM_LOCK_RES_MIGRATING;
dlm_remove_nonlocal_locks(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle_inuse(mle);
ret = 0;
dlm_lockres_calc_usage(dlm, res);
leave:
/* re-dirty the lockres if we failed */
if (ret < 0)
dlm_kick_thread(dlm, res);
/* wake up waiters if the MIGRATING flag got set
* but migration failed */
if (wake)
wake_up(&res->wq);
if (mres)
free_page((unsigned long)mres);
dlm_put(dlm);
mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
name, target, ret);
return ret;
}
/*
* Should be called only after beginning the domain leave process.
* There should not be any remaining locks on nonlocal lock resources,
* and there should be no local locks left on locally mastered resources.
*
* Called with the dlm spinlock held, may drop it to do migration, but
* will re-acquire before exit.
*
* Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
*/
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
__must_hold(&dlm->spinlock)
{
int ret;
int lock_dropped = 0;
u8 target = O2NM_MAX_NODES;
assert_spin_locked(&dlm->spinlock);
spin_lock(&res->spinlock);
if (dlm_is_lockres_migratable(dlm, res))
target = dlm_pick_migration_target(dlm, res);
spin_unlock(&res->spinlock);
if (target == O2NM_MAX_NODES)
goto leave;
/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
spin_unlock(&dlm->spinlock);
lock_dropped = 1;
ret = dlm_migrate_lockres(dlm, res, target);
if (ret)
mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
dlm->name, res->lockname.len, res->lockname.name,
target, ret);
spin_lock(&dlm->spinlock);
leave:
return lock_dropped;
}
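/* nonzero once every bast queued for this lock has been fired */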
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
int ret;
spin_lock(&dlm->ast_lock);
spin_lock(&lock->spinlock);
ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
spin_unlock(&lock->spinlock);
spin_unlock(&dlm->ast_lock);
return ret;
}
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 mig_target)
{
int can_proceed;
spin_lock(&res->spinlock);
can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
spin_unlock(&res->spinlock);
/* target has died, so make the caller break out of the
* wait_event, but caller must recheck the domain_map */
spin_lock(&dlm->spinlock);
if (!test_bit(mig_target, dlm->domain_map))
can_proceed = 1;
spin_unlock(&dlm->spinlock);
return can_proceed;
}
static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int ret;
spin_lock(&res->spinlock);
ret = !!(res->state & DLM_LOCK_RES_DIRTY);
spin_unlock(&res->spinlock);
return ret;
}
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 target)
{
int ret = 0;
mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
res->lockname.len, res->lockname.name, dlm->node_num,
target);
/* need to set MIGRATING flag on lockres. this is done by
* ensuring that all asts have been flushed for this lockres. */
spin_lock(&res->spinlock);
BUG_ON(res->migration_pending);
res->migration_pending = 1;
/* strategy is to reserve an extra ast then release
* it below, letting the release do all of the work */
__dlm_lockres_reserve_ast(res);
spin_unlock(&res->spinlock);
/* now flush all the pending asts */
dlm_kick_thread(dlm, res);
/* before waiting on DIRTY, block processes which may
* try to dirty the lockres before MIGRATING is set */
spin_lock(&res->spinlock);
BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
spin_unlock(&res->spinlock);
/* now wait on any pending asts and the DIRTY state */
wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
dlm_lockres_release_ast(dlm, res);
mlog(0, "about to wait on migration_wq, dirty=%s\n",
res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
/* if the extra ref we just put was the final one, this
* will pass thru immediately. otherwise, we need to wait
* for the last ast to finish. */
again:
ret = wait_event_interruptible_timeout(dlm->migration_wq,
dlm_migration_can_proceed(dlm, res, target),
msecs_to_jiffies(1000));
if (ret < 0) {
mlog(0, "woken again: migrating? %s, dead? %s\n",
res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
test_bit(target, dlm->domain_map) ? "no":"yes");
} else {
mlog(0, "all is well: migrating? %s, dead? %s\n",
res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
test_bit(target, dlm->domain_map) ? "no":"yes");
}
if (!dlm_migration_can_proceed(dlm, res, target)) {
mlog(0, "trying again...\n");
goto again;
}
ret = 0;
/* did the target go down or die? */
spin_lock(&dlm->spinlock);
if (!test_bit(target, dlm->domain_map)) {
mlog(ML_ERROR, "aha. migration target %u just went down\n",
target);
ret = -EHOSTDOWN;
}
spin_unlock(&dlm->spinlock);
/*
 * if the target went down, clear DLM_LOCK_RES_BLOCK_DIRTY so another
 * attempt can be made later; otherwise the MIGRATING state is
 * guaranteed to be set, so drop BLOCK_DIRTY, which only existed to
 * hold off threads trying to dirty the lockres
 */
spin_lock(&res->spinlock);
BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
if (!ret)
BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
else
res->migration_pending = 0;
spin_unlock(&res->spinlock);
/*
* at this point:
*
* o the DLM_LOCK_RES_MIGRATING flag is set if target not down
* o there are no pending asts on this lockres
* o all processes trying to reserve an ast on this
* lockres must wait for the MIGRATING flag to clear
*/
return ret;
}
/* last step in the migration process.
* original master calls this to free all of the dlm_lock
* structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
struct list_head *queue = &res->granted;
int i, bit;
struct dlm_lock *lock, *next;
assert_spin_locked(&res->spinlock);
BUG_ON(res->owner == dlm->node_num);
for (i=0; i<3; i++) {
list_for_each_entry_safe(lock, next, queue, list) {
if (lock->ml.node != dlm->node_num) {
mlog(0, "putting lock for node %u\n",
lock->ml.node);
/* be extra careful */
BUG_ON(!list_empty(&lock->ast_list));
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
dlm_lockres_clear_refmap_bit(dlm, res,
lock->ml.node);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* In a normal unlock, we would have added a
* DLM_UNLOCK_FREE_LOCK action. Force it. */
dlm_lock_put(lock);
}
}
queue++;
}
bit = 0;
while (1) {
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
if (bit >= O2NM_MAX_NODES)
break;
/* do not clear the local node reference, if there is a
* process holding this, let it drop the ref itself */
if (bit != dlm->node_num) {
mlog(0, "%s:%.*s: node %u had a ref to this "
"migrating lockres, clearing\n", dlm->name,
res->lockname.len, res->lockname.name, bit);
dlm_lockres_clear_refmap_bit(dlm, res, bit);
}
bit++;
}
}
/*
* Pick a node to migrate the lock resource to. This function selects a
* potential target based first on the locks and then on refmap. It skips
* nodes that are in the process of exiting the domain.
*/
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
enum dlm_lockres_list idx;
struct list_head *queue;
struct dlm_lock *lock;
int noderef;
u8 nodenum = O2NM_MAX_NODES;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
/* Go through all the locks */
for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
queue = dlm_list_idx_to_ptr(res, idx);
list_for_each_entry(lock, queue, list) {
if (lock->ml.node == dlm->node_num)
continue;
if (test_bit(lock->ml.node, dlm->exit_domain_map))
continue;
nodenum = lock->ml.node;
goto bail;
}
}
/* Go thru the refmap */
noderef = -1;
while (1) {
noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
noderef + 1);
if (noderef >= O2NM_MAX_NODES)
break;
if (noderef == dlm->node_num)
continue;
if (test_bit(noderef, dlm->exit_domain_map))
continue;
nodenum = noderef;
goto bail;
}
bail:
return nodenum;
}
/* this is called by the new master once all lockres
* data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
u8 master, u8 new_master,
struct dlm_node_iter *iter)
{
struct dlm_migrate_request migrate;
int ret, skip, status = 0;
int nodenum;
memset(&migrate, 0, sizeof(migrate));
migrate.namelen = res->lockname.len;
memcpy(migrate.name, res->lockname.name, migrate.namelen);
migrate.new_master = new_master;
migrate.master = master;
ret = 0;
/* send message to all nodes, except the master and myself */
while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
if (nodenum == master ||
nodenum == new_master)
continue;
/* We could race exit domain. If exited, skip. */
spin_lock(&dlm->spinlock);
skip = (!test_bit(nodenum, dlm->domain_map));
spin_unlock(&dlm->spinlock);
if (skip) {
clear_bit(nodenum, iter->node_map);
continue;
}
ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
mlog(ML_ERROR, "%s: res %.*s, Error %d send "
"MIGRATE_REQUEST to node %u\n", dlm->name,
migrate.namelen, migrate.name, ret, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
}
clear_bit(nodenum, iter->node_map);
ret = 0;
} else if (status < 0) {
mlog(0, "migrate request (node %u) returned %d!\n",
nodenum, status);
ret = status;
} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
/* during the migration request we short-circuited
* the mastery of the lockres. make sure we have
* a mastery ref for nodenum */
mlog(0, "%s:%.*s: need ref for node %u\n",
dlm->name, res->lockname.len, res->lockname.name,
nodenum);
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, nodenum);
spin_unlock(&res->spinlock);
}
}
if (ret < 0)
mlog_errno(ret);
mlog(0, "returning ret=%d\n", ret);
return ret;
}
/* if there is an existing mle for this lockres, we now know who the master is.
* (the one who sent us *this* message) we can clear it up right away.
* since the process that put the mle on the list still has a reference to it,
* we can unhash it now, set the master and wake the process. as a result,
* we will have no mle in the list to start with. now we can add an mle for
* the migration and this should be the only one found for those scanning the
* list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_lock_resource *res = NULL;
struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
const char *name;
unsigned int namelen, hash;
int ret = 0;
if (!dlm_grab(dlm))
return 0;
name = migrate->name;
namelen = migrate->namelen;
hash = dlm_lockid_hash(name, namelen);
/* preallocate.. if this fails, abort */
mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
ret = -ENOMEM;
goto leave;
}
/* check for pre-existing lock */
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, namelen, hash);
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
/* if all is working ok, this can only mean that we got
* a migrate request from a node that we now see as
* dead. what can we do here? drop it to the floor? */
spin_unlock(&res->spinlock);
mlog(ML_ERROR, "Got a migrate request, but the "
"lockres is marked as recovering!");
kmem_cache_free(dlm_mle_cache, mle);
ret = -EINVAL; /* need a better solution */
goto unlock;
}
res->state |= DLM_LOCK_RES_MIGRATING;
spin_unlock(&res->spinlock);
}
spin_lock(&dlm->master_lock);
/* ignore status. only nonzero status would BUG. */
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
name, namelen,
migrate->new_master,
migrate->master);
if (ret < 0)
kmem_cache_free(dlm_mle_cache, mle);
spin_unlock(&dlm->master_lock);
unlock:
spin_unlock(&dlm->spinlock);
if (oldmle) {
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, oldmle);
dlm_put_mle(oldmle);
}
if (res)
dlm_lockres_put(res);
leave:
dlm_put(dlm);
return ret;
}
/* must be holding dlm->spinlock and dlm->master_lock
* when adding a migration mle, we can clear any other mles
* in the master list because we know with certainty that
* the master is "master". so we remove any old mle from
* the list after setting its master field, and then add
* the new migration mle. this way we can hold with the rule
* of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_master_list_entry *mle,
struct dlm_master_list_entry **oldmle,
const char *name, unsigned int namelen,
u8 new_master, u8 master)
{
int found;
int ret = 0;
*oldmle = NULL;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
/* caller is responsible for any ref taken here on oldmle */
found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
if (found) {
struct dlm_master_list_entry *tmp = *oldmle;
spin_lock(&tmp->spinlock);
if (tmp->type == DLM_MLE_MIGRATION) {
if (master == dlm->node_num) {
/* ah another process raced me to it */
mlog(0, "tried to migrate %.*s, but some "
"process beat me to it\n",
namelen, name);
spin_unlock(&tmp->spinlock);
return -EEXIST;
} else {
/* bad. 2 NODES are trying to migrate! */
mlog(ML_ERROR, "migration error mle: "
"master=%u new_master=%u // request: "
"master=%u new_master=%u // "
"lockres=%.*s\n",
tmp->master, tmp->new_master,
master, new_master,
namelen, name);
BUG();
}
} else {
/* this is essentially what assert_master does */
tmp->master = master;
atomic_set(&tmp->woken, 1);
wake_up(&tmp->wq);
/* remove it so that only one mle will be found */
__dlm_unlink_mle(dlm, tmp);
__dlm_mle_detach_hb_events(dlm, tmp);
if (tmp->type == DLM_MLE_MASTER) {
ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
"telling master to get ref "
"for cleared out mle during "
"migration\n", dlm->name,
namelen, name, master,
new_master);
}
}
spin_unlock(&tmp->spinlock);
}
/* now add a migration mle to the tail of the list */
dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
mle->new_master = new_master;
/* the new master will be sending an assert master for this.
* at that point we will get the refmap reference */
mle->master = master;
/* do this for consistency with other mle types */
set_bit(new_master, mle->maybe_map);
__dlm_insert_mle(dlm, mle);
return ret;
}
/*
* Sets the owner of the lockres, associated to the mle, to UNKNOWN
*/
static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle)
{
struct dlm_lock_resource *res;
/* Find the lockres associated to the mle and set its owner to UNK */
res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
mle->mnamehash);
if (res) {
spin_unlock(&dlm->master_lock);
/* move lockres onto recovery list */
spin_lock(&res->spinlock);
dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
dlm_move_lockres_to_recovery_list(dlm, res);
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
/* about to get rid of mle, detach from heartbeat */
__dlm_mle_detach_hb_events(dlm, mle);
/* dump the mle */
spin_lock(&dlm->master_lock);
__dlm_put_mle(mle);
spin_unlock(&dlm->master_lock);
}
return res;
}
static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle)
{
__dlm_mle_detach_hb_events(dlm, mle);
spin_lock(&mle->spinlock);
__dlm_unlink_mle(dlm, mle);
atomic_set(&mle->woken, 1);
spin_unlock(&mle->spinlock);
wake_up(&mle->wq);
}
static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle, u8 dead_node)
{
int bit;
BUG_ON(mle->type != DLM_MLE_BLOCK);
spin_lock(&mle->spinlock);
bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit != dead_node) {
mlog(0, "mle found, but dead node %u would not have been "
"master\n", dead_node);
spin_unlock(&mle->spinlock);
} else {
/* Must drop the refcount by one since the assert_master will
* never arrive. This may result in the mle being unlinked and
* freed, but there may still be a process waiting in the
* dlmlock path which is fine. */
mlog(0, "node %u was expected master\n", dead_node);
atomic_set(&mle->woken, 1);
spin_unlock(&mle->spinlock);
wake_up(&mle->wq);
/* Do not need events any longer, so detach from heartbeat */
__dlm_mle_detach_hb_events(dlm, mle);
__dlm_put_mle(mle);
}
}
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
struct dlm_master_list_entry *mle;
struct dlm_lock_resource *res;
struct hlist_head *bucket;
struct hlist_node *tmp;
unsigned int i;
mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
assert_spin_locked(&dlm->spinlock);
/* clean the master list */
spin_lock(&dlm->master_lock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
BUG_ON(mle->type != DLM_MLE_BLOCK &&
mle->type != DLM_MLE_MASTER &&
mle->type != DLM_MLE_MIGRATION);
/* MASTER mles are initiated locally. The waiting
* process will notice the node map change shortly.
* Let that happen as normal. */
if (mle->type == DLM_MLE_MASTER)
continue;
/* BLOCK mles are initiated by other nodes. Need to
* clean up if the dead node would have been the
* master. */
if (mle->type == DLM_MLE_BLOCK) {
dlm_clean_block_mle(dlm, mle, dead_node);
continue;
}
/* Everything else is a MIGRATION mle */
/* The rule for MIGRATION mles is that the master
* becomes UNKNOWN if *either* the original or the new
* master dies. All UNKNOWN lockres' are sent to
* whichever node becomes the recovery master. The new
* master is responsible for determining if there is
* still a master for this lockres, or if it needs to
* take over mastery. Either way, this node should
* expect another message to resolve this. */
if (mle->master != dead_node &&
mle->new_master != dead_node)
continue;
if (mle->new_master == dead_node && mle->inuse) {
mlog(ML_NOTICE, "%s: target %u died during "
"migration from %u, the MLE is "
"still keep used, ignore it!\n",
dlm->name, dead_node,
mle->master);
continue;
}
/* If we have reached this point, this mle needs to be
* removed from the list and freed. */
dlm_clean_migration_mle(dlm, mle);
mlog(0, "%s: node %u died during migration from "
"%u to %u!\n", dlm->name, dead_node, mle->master,
mle->new_master);
/* If we find a lockres associated with the mle, we've
* hit this rare case that messes up our lock ordering.
* If so, we need to drop the master lock so that we can
* take the lockres lock, meaning that we will have to
* restart from the head of list. */
res = dlm_reset_mleres_owner(dlm, mle);
if (res)
/* restart */
goto top;
/* This may be the last reference */
__dlm_put_mle(mle);
}
}
spin_unlock(&dlm->master_lock);
}
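/*
 * Called on the new master once all lock state has been received:
 * tell every other node about the ownership change, assert mastery
 * to them, then assert back to the old master to complete the
 * handoff before taking ownership locally.
 */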
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 old_master)
{
struct dlm_node_iter iter;
int ret = 0;
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
clear_bit(old_master, iter.node_map);
clear_bit(dlm->node_num, iter.node_map);
spin_unlock(&dlm->spinlock);
/* ownership of the lockres is changing. account for the
* mastery reference here since old_master will briefly have
* a reference after the migration completes */
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, old_master);
spin_unlock(&res->spinlock);
mlog(0, "now time to do a migrate request to other nodes\n");
ret = dlm_do_migrate_request(dlm, res, old_master,
dlm->node_num, &iter);
if (ret < 0) {
mlog_errno(ret);
goto leave;
}
mlog(0, "doing assert master of %.*s to all except the original node\n",
res->lockname.len, res->lockname.name);
/* this call now finishes out the nodemap
* even if one or more nodes die */
ret = dlm_do_assert_master(dlm, res, iter.node_map,
DLM_ASSERT_MASTER_FINISH_MIGRATION);
if (ret < 0) {
/* no longer need to retry. all living nodes contacted. */
mlog_errno(ret);
ret = 0;
}
bitmap_zero(iter.node_map, O2NM_MAX_NODES);
set_bit(old_master, iter.node_map);
mlog(0, "doing assert master of %.*s back to %u\n",
res->lockname.len, res->lockname.name, old_master);
ret = dlm_do_assert_master(dlm, res, iter.node_map,
DLM_ASSERT_MASTER_FINISH_MIGRATION);
if (ret < 0) {
mlog(0, "assert master to original master failed "
"with %d.\n", ret);
/* the only nonzero status here would be because of
* a dead original node. we're done. */
ret = 0;
}
/* all done, set the owner, clear the flag */
spin_lock(&res->spinlock);
dlm_set_lockres_owner(dlm, res, dlm->node_num);
res->state &= ~DLM_LOCK_RES_MIGRATING;
spin_unlock(&res->spinlock);
/* re-dirty it on the new master */
dlm_kick_thread(dlm, res);
wake_up(&res->wq);
leave:
return ret;
}
/*
* LOCKRES AST REFCOUNT
* this is integral to migration
*/
/* for future intent to call an ast, reserve one ahead of time.
* this should be called only after waiting on the lockres
* with dlm_wait_on_lockres, and while still holding the
* spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
if (res->state & DLM_LOCK_RES_MIGRATING) {
__dlm_print_one_lock_resource(res);
}
BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
atomic_inc(&res->asts_reserved);
}
/*
* used to drop the reserved ast, either because it went unused,
* or because the ast/bast was actually called.
*
* also, if there is a pending migration on this lockres,
* and this was the last pending ast on the lockres,
* atomically set the MIGRATING flag before we drop the lock.
* this is how we ensure that migration can proceed with no
* asts in progress. note that it is ok if the state of the
* queues is such that a lock should be granted in the future
* or that a bast should be fired, because the new master will
* shuffle the lists on this lockres as soon as it is migrated.
*/
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
return;
if (!res->migration_pending) {
spin_unlock(&res->spinlock);
return;
}
BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
res->migration_pending = 0;
res->state |= DLM_LOCK_RES_MIGRATING;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
wake_up(&dlm->migration_wq);
}
void dlm_force_free_mles(struct dlm_ctxt *dlm)
{
int i;
struct hlist_head *bucket;
struct dlm_master_list_entry *mle;
struct hlist_node *tmp;
/*
* We notified all other nodes that we are exiting the domain and
* marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
* around we force free them and wake any processes that are waiting
* on the mles
*/
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES));
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
if (mle->type != DLM_MLE_BLOCK) {
mlog(ML_ERROR, "bad mle: %p\n", mle);
dlm_print_one_mle(mle);
}
atomic_set(&mle->woken, 1);
wake_up(&mle->wq);
__dlm_unlink_mle(dlm, mle);
__dlm_mle_detach_hb_events(dlm, mle);
__dlm_put_mle(mle);
}
}
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
}
| linux-master | fs/ocfs2/dlm/dlmmaster.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmthread.c
*
* standalone DLM module
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "../cluster/masklog.h"
static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
DECLARE_WAITQUEUE(wait, current);
assert_spin_locked(&res->spinlock);
add_wait_queue(&res->wq, &wait);
repeat:
set_current_state(TASK_UNINTERRUPTIBLE);
if (res->state & flags) {
spin_unlock(&res->spinlock);
schedule();
spin_lock(&res->spinlock);
goto repeat;
}
remove_wait_queue(&res->wq, &wait);
__set_current_state(TASK_RUNNING);
}
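/* nonzero if any lock sits on the granted, converting or blocked queue */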
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
if (list_empty(&res->granted) &&
list_empty(&res->converting) &&
list_empty(&res->blocked))
return 0;
return 1;
}
/* "unused": the lockres has no locks, is not on the dirty list,
* has no inflight locks (in the gap between mastery and acquiring
* the first lock), and has no bits in its refmap.
* truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
int bit;
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_has_locks(res))
return 0;
/* Locks are in the process of being created */
if (res->inflight_locks)
return 0;
if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
return 0;
if (res->state & (DLM_LOCK_RES_RECOVERING|
DLM_LOCK_RES_RECOVERY_WAITING))
return 0;
/* Another node has this resource with this node as the master */
bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES)
return 0;
return 1;
}
/* Call whenever you may have added or deleted something from one of
* the lockres queues. This will figure out whether it belongs on the
* unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res)){
if (list_empty(&res->purge)) {
mlog(0, "%s: Adding res %.*s to purge list\n",
dlm->name, res->lockname.len, res->lockname.name);
res->last_used = jiffies;
dlm_lockres_get(res);
list_add_tail(&res->purge, &dlm->purge_list);
dlm->purge_count++;
}
} else if (!list_empty(&res->purge)) {
mlog(0, "%s: Removing res %.*s from purge list\n",
dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
}
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
__dlm_lockres_calc_usage(dlm, res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
}
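/*
 * Illustrative sketch only (hypothetical caller): after dropping the last
 * lock from a resource's queues, recompute its usage so it can move onto
 * the purge list.  Note that dlm_lockres_calc_usage() takes both
 * spinlocks itself, so the caller must hold neither:
 *
 *	(remove the lock from its queue under res->spinlock, then unlock)
 *	dlm_lockres_calc_usage(dlm, res);
 */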
/*
* Do the real purge work:
* unhash the lockres, and
* clear flag DLM_LOCK_RES_DROPPING_REF.
 * It requires the dlm and lockres spinlocks to be held.
*/
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (!list_empty(&res->purge)) {
mlog(0, "%s: Removing res %.*s from purgelist\n",
dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
if (!__dlm_lockres_unused(res)) {
mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
dlm->name, res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
BUG();
}
__dlm_unhash_lockres(dlm, res);
spin_lock(&dlm->track_lock);
if (!list_empty(&res->tracking))
list_del_init(&res->tracking);
else {
mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
dlm->name, res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
}
spin_unlock(&dlm->track_lock);
/*
* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource.
*/
res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int master;
int ret = 0;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
master = (res->owner == dlm->node_num);
mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
res->lockname.len, res->lockname.name, master);
if (!master) {
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
dlm->name, res->lockname.len, res->lockname.name);
spin_unlock(&res->spinlock);
return;
}
res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
spin_lock(&res->spinlock);
		/* This ensures that the refmap clear message is sent only
		 * after any pending refmap set has completed */
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
spin_unlock(&res->spinlock);
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
if (!dlm_is_host_down(ret))
BUG();
}
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
}
if (!list_empty(&res->purge)) {
mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
dlm->name, res->lockname.len, res->lockname.name, master);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
mlog(0, "%s: deref %.*s in progress\n",
dlm->name, res->lockname.len, res->lockname.name);
spin_unlock(&res->spinlock);
return;
}
if (!__dlm_lockres_unused(res)) {
mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
dlm->name, res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
BUG();
}
__dlm_unhash_lockres(dlm, res);
spin_lock(&dlm->track_lock);
if (!list_empty(&res->tracking))
list_del_init(&res->tracking);
else {
mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
}
spin_unlock(&dlm->track_lock);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
if (!master) {
res->state &= ~DLM_LOCK_RES_DROPPING_REF;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
} else
spin_unlock(&res->spinlock);
}
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
int purge_now)
{
unsigned int run_max, unused;
unsigned long purge_jiffies;
struct dlm_lock_resource *lockres;
spin_lock(&dlm->spinlock);
run_max = dlm->purge_count;
	while (run_max && !list_empty(&dlm->purge_list)) {
run_max--;
lockres = list_entry(dlm->purge_list.next,
struct dlm_lock_resource, purge);
spin_lock(&lockres->spinlock);
purge_jiffies = lockres->last_used +
msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
		/* Make sure that we actually want to process this
		 * lockres right now. */
if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anything added after
			 * it will have a greater last_used value */
spin_unlock(&lockres->spinlock);
break;
}
		/* Status of the lockres *might* change so double
		 * check.  If the lockres is unused, holding the dlm
		 * spinlock will prevent anyone from getting any more
		 * refs on it. */
unused = __dlm_lockres_unused(lockres);
if (!unused ||
(lockres->state & DLM_LOCK_RES_MIGRATING) ||
(lockres->inflight_assert_workers != 0)) {
mlog(0, "%s: res %.*s is in use or being remastered, "
"used %d, state %d, assert master workers %u\n",
dlm->name, lockres->lockname.len,
lockres->lockname.name,
!unused, lockres->state,
lockres->inflight_assert_workers);
list_move_tail(&lockres->purge, &dlm->purge_list);
spin_unlock(&lockres->spinlock);
continue;
}
dlm_lockres_get(lockres);
dlm_purge_lockres(dlm, lockres);
dlm_lockres_put(lockres);
/* Avoid adding any scheduling latencies */
cond_resched_lock(&dlm->spinlock);
}
spin_unlock(&dlm->spinlock);
}
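/*
 * A note on the expiry test above: last_used is a jiffies timestamp, so
 * the comparison must use time_after() rather than a plain ">" to stay
 * correct across jiffies wraparound.  Worked example, assuming a
 * DLM_PURGE_INTERVAL_MS of 8000 (the actual value lives in dlmcommon.h
 * and may differ):
 *
 *	purge_jiffies = lockres->last_used + msecs_to_jiffies(8000);
 *	if (time_after(purge_jiffies, jiffies))
 *		(less than 8s since last use: too early, stop scanning)
 *	else
 *		(8s or more have elapsed: eligible for purging)
 */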
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
struct dlm_lock *lock, *target;
int can_grant = 1;
	/*
	 * Because this function is called with the lockres spinlock held,
	 * and because we know that the lockres is not migrating/recovering/
	 * in-progress, it is fine to reserve ASTs and BASTs right before
	 * queueing them all throughout this function.
	 */
assert_spin_locked(&dlm->ast_lock);
assert_spin_locked(&res->spinlock);
BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
DLM_LOCK_RES_RECOVERING|
DLM_LOCK_RES_IN_PROGRESS)));
converting:
if (list_empty(&res->converting))
goto blocked;
mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
res->lockname.len, res->lockname.name);
target = list_entry(res->converting.next, struct dlm_lock, list);
if (target->ml.convert_type == LKM_IVMODE) {
mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
dlm->name, res->lockname.len, res->lockname.name);
BUG();
}
list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
target->ml.convert_type)) {
can_grant = 0;
/* queue the BAST if not already */
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
/* update the highest_blocked if needed */
if (lock->ml.highest_blocked < target->ml.convert_type)
lock->ml.highest_blocked =
target->ml.convert_type;
}
}
list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
target->ml.convert_type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.convert_type)
lock->ml.highest_blocked =
target->ml.convert_type;
}
}
/* we can convert the lock */
if (can_grant) {
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
"%d => %d, node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
target->ml.type,
target->ml.convert_type, target->ml.node);
target->ml.type = target->ml.convert_type;
target->ml.convert_type = LKM_IVMODE;
list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
__dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
blocked:
if (list_empty(&res->blocked))
goto leave;
target = list_entry(res->blocked.next, struct dlm_lock, list);
list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
}
}
list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
}
}
	/* we can grant the blocked lock (only
	 * possible if the converting list is empty) */
if (can_grant) {
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
"node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
target->ml.type, target->ml.node);
/* target->ml.type is already correct */
list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
__dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
leave:
return;
}
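/*
 * For reference when reading dlm_shuffle_lists(): dlm_lock_compatible()
 * (from dlmcommon.h) implements the usual DLM mode compatibility rules.
 * Roughly, for the modes this DLM actually uses:
 *
 *	held \ requested	NL	PR	EX
 *	NL			yes	yes	yes
 *	PR			yes	yes	no
 *	EX			yes	no	no
 *
 * NLMODE is compatible with everything, PRMODE with other readers, and
 * EXMODE only with NLMODE.  An incompatible pair is what forces
 * can_grant to 0 and queues a BAST above.
 */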
/* must hold NO spinlocks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
if (res) {
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
}
wake_up(&dlm->dlm_thread_wq);
}
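/*
 * Illustrative sketch only (hypothetical caller): after attaching a new
 * lock to res->blocked, kick the thread so dlm_shuffle_lists() will
 * consider granting it.  Per the comment above, the res spinlock must be
 * dropped first:
 *
 *	list_add_tail(&newlock->list, &res->blocked);  (under res->spinlock)
 *	spin_unlock(&res->spinlock);
 *	dlm_kick_thread(dlm, res);
 */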
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
/* don't shuffle secondary queues */
if (res->owner == dlm->node_num) {
if (res->state & (DLM_LOCK_RES_MIGRATING |
DLM_LOCK_RES_BLOCK_DIRTY))
return;
if (list_empty(&res->dirty)) {
/* ref for dirty_list */
dlm_lockres_get(res);
list_add_tail(&res->dirty, &dlm->dirty_list);
res->state |= DLM_LOCK_RES_DIRTY;
}
}
mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
}
/* Launch the DLM thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
mlog(0, "Starting dlm_thread...\n");
dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
dlm->name);
if (IS_ERR(dlm->dlm_thread_task)) {
mlog_errno(PTR_ERR(dlm->dlm_thread_task));
dlm->dlm_thread_task = NULL;
return -EINVAL;
}
return 0;
}
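/*
 * Illustrative pairing (a sketch of how domain setup code might use the
 * two functions; the error label is hypothetical):
 *
 *	ret = dlm_launch_thread(dlm);
 *	if (ret < 0)
 *		goto bail;
 *	...
 *	dlm_complete_thread(dlm);	(on domain teardown)
 */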
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
if (dlm->dlm_thread_task) {
mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
kthread_stop(dlm->dlm_thread_task);
dlm->dlm_thread_task = NULL;
}
}
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
int empty;
spin_lock(&dlm->spinlock);
empty = list_empty(&dlm->dirty_list);
spin_unlock(&dlm->spinlock);
return empty;
}
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
int ret;
struct dlm_lock *lock;
struct dlm_lock_resource *res;
u8 hi;
spin_lock(&dlm->ast_lock);
while (!list_empty(&dlm->pending_asts)) {
lock = list_entry(dlm->pending_asts.next,
struct dlm_lock, ast_list);
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
"node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ml.type, lock->ml.node);
BUG_ON(!lock->ast_pending);
/* remove from list (including ref) */
list_del_init(&lock->ast_list);
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
if (lock->ml.node != dlm->node_num) {
ret = dlm_do_remote_ast(dlm, res, lock);
if (ret < 0)
mlog_errno(ret);
} else
dlm_do_local_ast(dlm, res, lock);
spin_lock(&dlm->ast_lock);
/* possible that another ast was queued while
* we were delivering the last one */
if (!list_empty(&lock->ast_list)) {
mlog(0, "%s: res %.*s, AST queued while flushing last "
"one\n", dlm->name, res->lockname.len,
res->lockname.name);
} else
lock->ast_pending = 0;
/* drop the extra ref.
* this may drop it completely. */
dlm_lock_put(lock);
dlm_lockres_release_ast(dlm, res);
}
while (!list_empty(&dlm->pending_basts)) {
lock = list_entry(dlm->pending_basts.next,
struct dlm_lock, bast_list);
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
BUG_ON(!lock->bast_pending);
/* get the highest blocked lock, and reset */
spin_lock(&lock->spinlock);
BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
hi = lock->ml.highest_blocked;
lock->ml.highest_blocked = LKM_IVMODE;
spin_unlock(&lock->spinlock);
/* remove from list (including ref) */
list_del_init(&lock->bast_list);
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
"blocked %d, node %u\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
hi, lock->ml.node);
if (lock->ml.node != dlm->node_num) {
ret = dlm_send_proxy_bast(dlm, res, lock, hi);
if (ret < 0)
mlog_errno(ret);
} else
dlm_do_local_bast(dlm, res, lock, hi);
spin_lock(&dlm->ast_lock);
/* possible that another bast was queued while
* we were delivering the last one */
if (!list_empty(&lock->bast_list)) {
mlog(0, "%s: res %.*s, BAST queued while flushing last "
"one\n", dlm->name, res->lockname.len,
res->lockname.name);
} else
lock->bast_pending = 0;
/* drop the extra ref.
* this may drop it completely. */
dlm_lock_put(lock);
dlm_lockres_release_ast(dlm, res);
}
wake_up(&dlm->ast_wq);
spin_unlock(&dlm->ast_lock);
}
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY 100
static int dlm_thread(void *data)
{
struct dlm_lock_resource *res;
struct dlm_ctxt *dlm = data;
unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
int n = DLM_THREAD_MAX_DIRTY;
/* dlm_shutting_down is very point-in-time, but that
* doesn't matter as we'll just loop back around if we
* get false on the leading edge of a state
* transition. */
dlm_run_purge_list(dlm, dlm_shutting_down(dlm));
/* We really don't want to hold dlm->spinlock while
* calling dlm_shuffle_lists on each lockres that
* needs to have its queues adjusted and AST/BASTs
* run. So let's pull each entry off the dirty_list
* and drop dlm->spinlock ASAP. Once off the list,
* res->spinlock needs to be taken again to protect
* the queues while calling dlm_shuffle_lists. */
spin_lock(&dlm->spinlock);
while (!list_empty(&dlm->dirty_list)) {
int delay = 0;
res = list_entry(dlm->dirty_list.next,
struct dlm_lock_resource, dirty);
/* peel a lockres off, remove it from the list,
* unset the dirty flag and drop the dlm lock */
BUG_ON(!res);
dlm_lockres_get(res);
spin_lock(&res->spinlock);
/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
list_del_init(&res->dirty);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
/* Drop dirty_list ref */
dlm_lockres_put(res);
/* lockres can be re-dirtied/re-added to the
* dirty_list in this gap, but that is ok */
spin_lock(&dlm->ast_lock);
spin_lock(&res->spinlock);
if (res->owner != dlm->node_num) {
__dlm_print_one_lock_resource(res);
mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
" dirty %d\n", dlm->name,
!!(res->state & DLM_LOCK_RES_IN_PROGRESS),
!!(res->state & DLM_LOCK_RES_MIGRATING),
!!(res->state & DLM_LOCK_RES_RECOVERING),
!!(res->state & DLM_LOCK_RES_DIRTY));
}
BUG_ON(res->owner != dlm->node_num);
/* it is now ok to move lockreses in these states
* to the dirty list, assuming that they will only be
* dirty for a short while. */
BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
DLM_LOCK_RES_RECOVERING |
DLM_LOCK_RES_RECOVERY_WAITING)) {
/* move it to the tail and keep going */
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->ast_lock);
mlog(0, "%s: res %.*s, inprogress, delay list "
"shuffle, state %d\n", dlm->name,
res->lockname.len, res->lockname.name,
res->state);
delay = 1;
goto in_progress;
}
/* at this point the lockres is not migrating/
* recovering/in-progress. we have the lockres
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
/* called while holding lockres lock */
dlm_shuffle_lists(dlm, res);
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->ast_lock);
dlm_lockres_calc_usage(dlm, res);
in_progress:
spin_lock(&dlm->spinlock);
/* if the lock was in-progress, stick
* it on the back of the list */
if (delay) {
spin_lock(&res->spinlock);
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
}
dlm_lockres_put(res);
/* unlikely, but we may need to give time to
* other tasks */
if (!--n) {
mlog(0, "%s: Throttling dlm thread\n",
dlm->name);
break;
}
}
spin_unlock(&dlm->spinlock);
dlm_flush_asts(dlm);
/* yield and continue right away if there is more work to do */
if (!n) {
cond_resched();
continue;
}
wait_event_interruptible_timeout(dlm->dlm_thread_wq,
!dlm_dirty_list_empty(dlm) ||
kthread_should_stop(),
timeout);
}
mlog(0, "quitting DLM thread\n");
return 0;
}
| linux-master | fs/ocfs2/dlm/dlmthread.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dlmunlock.c
*
* underlying calls for unlocking locks
*
* Copyright (C) 2004 Oracle. All rights reserved.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
#define DLM_UNLOCK_FREE_LOCK 0x00000001
#define DLM_UNLOCK_CALL_AST 0x00000002
#define DLM_UNLOCK_REMOVE_LOCK 0x00000004
#define DLM_UNLOCK_REGRANT_LOCK 0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010
static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int *actions);
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int flags,
u8 owner);
/*
* according to the spec:
* http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
*
* flags & LKM_CANCEL != 0: must be converting or blocked
* flags & LKM_CANCEL == 0: must be granted
*
* So to unlock a converting lock, you must first cancel the
* convert (passing LKM_CANCEL in flags), then call the unlock
* again (with no LKM_CANCEL in flags).
*/
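/*
 * A worked example of the two-step sequence described above (sketch
 * only; status checking and the ast callbacks are elided):
 *
 *	(lock is currently converting, e.g. PR -> EX)
 *	status = dlmunlock(dlm, lksb, LKM_CANCEL, unlockast, data);
 *	(the convert is cancelled and the lock regranted at PR)
 *	status = dlmunlock(dlm, lksb, 0, unlockast, data);
 */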
/*
* locking:
* caller needs: none
* taken: res->spinlock and lock->spinlock taken and dropped
* held on exit: none
* returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
* all callers should have taken an extra ref on lock coming in
*/
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int flags, int *call_ast,
int master_node)
{
enum dlm_status status;
int actions = 0;
int in_use;
u8 owner;
int recovery_wait = 0;
mlog(0, "master_node = %d, valblk = %d\n", master_node,
flags & LKM_VALBLK);
if (master_node)
BUG_ON(res->owner != dlm->node_num);
else
BUG_ON(res->owner == dlm->node_num);
spin_lock(&dlm->ast_lock);
	/* We want to be sure that we're not freeing a lock
	 * that still has ASTs pending... */
in_use = !list_empty(&lock->ast_list);
spin_unlock(&dlm->ast_lock);
if (in_use && !(flags & LKM_CANCEL)) {
mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
"while waiting for an ast!", res->lockname.len,
res->lockname.name);
return DLM_BADPARAM;
}
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
if (master_node && !(flags & LKM_CANCEL)) {
mlog(ML_ERROR, "lockres in progress!\n");
spin_unlock(&res->spinlock);
return DLM_FORWARD;
}
/* ok for this to sleep if not in a network handler */
__dlm_wait_on_lockres(res);
res->state |= DLM_LOCK_RES_IN_PROGRESS;
}
spin_lock(&lock->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
status = DLM_RECOVERING;
goto leave;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
status = DLM_MIGRATING;
goto leave;
}
/* see above for what the spec says about
* LKM_CANCEL and the lock queue state */
if (flags & LKM_CANCEL)
status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
else
status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
goto leave;
/* By now this has been masked out of cancel requests. */
if (flags & LKM_VALBLK) {
/* make the final update to the lvb */
if (master_node)
memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
else
flags |= LKM_PUT_LVB; /* let the send function
* handle it. */
}
if (!master_node) {
owner = res->owner;
/* drop locks and send message */
if (flags & LKM_CANCEL)
lock->cancel_pending = 1;
else
lock->unlock_pending = 1;
spin_unlock(&lock->spinlock);
spin_unlock(&res->spinlock);
status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
flags, owner);
spin_lock(&res->spinlock);
spin_lock(&lock->spinlock);
/* if the master told us the lock was already granted,
* let the ast handle all of these actions */
if (status == DLM_CANCELGRANT) {
actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
DLM_UNLOCK_REGRANT_LOCK|
DLM_UNLOCK_CLEAR_CONVERT_TYPE);
} else if (status == DLM_RECOVERING ||
status == DLM_MIGRATING ||
status == DLM_FORWARD ||
status == DLM_NOLOCKMGR
) {
/* must clear the actions because this unlock
* is about to be retried. cannot free or do
* any list manipulation. */
mlog(0, "%s:%.*s: clearing actions, %s\n",
dlm->name, res->lockname.len,
res->lockname.name,
status==DLM_RECOVERING?"recovering":
(status==DLM_MIGRATING?"migrating":
(status == DLM_FORWARD ? "forward" :
"nolockmanager")));
actions = 0;
}
if (flags & LKM_CANCEL)
lock->cancel_pending = 0;
else {
if (!lock->unlock_pending)
recovery_wait = 1;
else
lock->unlock_pending = 0;
}
}
	/* get an extra ref on lock.  if we are just switching
	 * lists here, we don't want the lock to go away. */
dlm_lock_get(lock);
if (actions & DLM_UNLOCK_REMOVE_LOCK) {
list_del_init(&lock->list);
dlm_lock_put(lock);
}
if (actions & DLM_UNLOCK_REGRANT_LOCK) {
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->granted);
}
if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
mlog(0, "clearing convert_type at %smaster node\n",
master_node ? "" : "non-");
lock->ml.convert_type = LKM_IVMODE;
}
/* remove the extra ref on lock */
dlm_lock_put(lock);
leave:
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
if (!dlm_lock_on_list(&res->converting, lock))
BUG_ON(lock->ml.convert_type != LKM_IVMODE);
else
BUG_ON(lock->ml.convert_type == LKM_IVMODE);
spin_unlock(&lock->spinlock);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
if (recovery_wait) {
spin_lock(&res->spinlock);
		/* An unlock request succeeds immediately once the owner
		 * dies, and the lock has already been removed from the
		 * grant list.  We must wait for RECOVERING to finish or
		 * we may miss the chance to purge the lockres, since the
		 * removal is much faster than the recovery process.
		 */
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
spin_unlock(&res->spinlock);
}
/* let the caller's final dlm_lock_put handle the actual kfree */
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
kref_read(&lock->lock_refs)-1);
dlm_lock_put(lock);
}
if (actions & DLM_UNLOCK_CALL_AST)
*call_ast = 1;
/* if cancel or unlock succeeded, lvb work is done */
if (status == DLM_NORMAL)
lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
return status;
}
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
/* leave DLM_LKSB_PUT_LVB on the lksb so any final
* update of the lvb will be sent to the new master */
list_del_init(&lock->list);
}
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
list_move_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE;
}
static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int flags,
int *call_ast)
{
return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}
static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int flags, int *call_ast)
{
return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}
/*
* locking:
* caller needs: none
* taken: none
* held on exit: none
* returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
*/
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int flags,
u8 owner)
{
struct dlm_unlock_lock unlock;
int tmpret;
enum dlm_status ret;
int status = 0;
struct kvec vec[2];
size_t veclen = 1;
mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
if (owner == dlm->node_num) {
		/* ended up trying to contact ourselves.  this means
		 * that the lockres had been remote but became local
		 * via a migration.  just retry it, now as local */
mlog(0, "%s:%.*s: this node became the master due to a "
"migration, re-evaluate now\n", dlm->name,
res->lockname.len, res->lockname.name);
return DLM_FORWARD;
}
memset(&unlock, 0, sizeof(unlock));
unlock.node_idx = dlm->node_num;
unlock.flags = cpu_to_be32(flags);
unlock.cookie = lock->ml.cookie;
unlock.namelen = res->lockname.len;
memcpy(unlock.name, res->lockname.name, unlock.namelen);
vec[0].iov_len = sizeof(struct dlm_unlock_lock);
vec[0].iov_base = &unlock;
if (flags & LKM_PUT_LVB) {
/* extra data to send if we are updating lvb */
vec[1].iov_len = DLM_LVB_LEN;
vec[1].iov_base = lock->lksb->lvb;
veclen++;
}
tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
vec, veclen, owner, &status);
if (tmpret >= 0) {
// successfully sent and received
if (status == DLM_FORWARD)
mlog(0, "master was in-progress. retry\n");
ret = status;
} else {
mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
"node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
if (dlm_is_host_down(tmpret)) {
/* NOTE: this seems strange, but it is what we want.
* when the master goes down during a cancel or
* unlock, the recovery code completes the operation
* as if the master had not died, then passes the
* updated state to the recovery master. this thread
* just needs to finish out the operation and call
* the unlockast. */
if (dlm_is_node_dead(dlm, owner))
ret = DLM_NORMAL;
else
ret = DLM_NOLOCKMGR;
} else {
/* something bad. this will BUG in ocfs2 */
ret = dlm_err_to_dlm_status(tmpret);
}
}
return ret;
}
/*
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
* return value from dlmunlock_master
*/
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *lock = NULL, *iter;
enum dlm_status status = DLM_NORMAL;
int i;
struct dlm_lockstatus *lksb = NULL;
int ignore;
u32 flags;
struct list_head *queue;
flags = be32_to_cpu(unlock->flags);
if (flags & LKM_GET_LVB) {
mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
return DLM_BADARGS;
}
if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
"request!\n");
return DLM_BADARGS;
}
if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
return DLM_IVBUFLEN;
}
if (!dlm_grab(dlm))
return DLM_FORWARD;
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
if (!res) {
		/* We assume here that a missing lock resource simply
		 * means that it was migrated away and destroyed before
		 * the other node could detect it. */
mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
status = DLM_FORWARD;
goto not_found;
}
	queue = &res->granted;
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
spin_unlock(&res->spinlock);
mlog(0, "returning DLM_RECOVERING\n");
status = DLM_RECOVERING;
goto leave;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
spin_unlock(&res->spinlock);
mlog(0, "returning DLM_MIGRATING\n");
status = DLM_MIGRATING;
goto leave;
}
if (res->owner != dlm->node_num) {
spin_unlock(&res->spinlock);
mlog(0, "returning DLM_FORWARD -- not master\n");
status = DLM_FORWARD;
goto leave;
}
	for (i = 0; i < 3; i++) {
list_for_each_entry(iter, queue, list) {
if (iter->ml.cookie == unlock->cookie &&
iter->ml.node == unlock->node_idx) {
dlm_lock_get(iter);
lock = iter;
break;
}
}
if (lock)
break;
/* scan granted -> converting -> blocked queues */
queue++;
}
spin_unlock(&res->spinlock);
if (!lock) {
status = DLM_IVLOCKID;
goto not_found;
}
/* lock was found on queue */
lksb = lock->lksb;
if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
lock->ml.type != LKM_EXMODE)
flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
/* unlockast only called on originating node */
if (flags & LKM_PUT_LVB) {
lksb->flags |= DLM_LKSB_PUT_LVB;
memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
}
/* if this is in-progress, propagate the DLM_FORWARD
* all the way back out */
status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
if (status == DLM_FORWARD)
mlog(0, "lockres is in progress\n");
if (flags & LKM_PUT_LVB)
lksb->flags &= ~DLM_LKSB_PUT_LVB;
dlm_lockres_calc_usage(dlm, res);
dlm_kick_thread(dlm, res);
not_found:
if (!lock)
mlog(ML_ERROR, "failed to find lock to unlock! "
"cookie=%u:%llu\n",
dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
else
dlm_lock_put(lock);
leave:
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return status;
}
static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int *actions)
{
enum dlm_status status;
if (dlm_lock_on_list(&res->blocked, lock)) {
/* cancel this outright */
status = DLM_NORMAL;
*actions = (DLM_UNLOCK_CALL_AST |
DLM_UNLOCK_REMOVE_LOCK);
} else if (dlm_lock_on_list(&res->converting, lock)) {
/* cancel the request, put back on granted */
status = DLM_NORMAL;
*actions = (DLM_UNLOCK_CALL_AST |
DLM_UNLOCK_REMOVE_LOCK |
DLM_UNLOCK_REGRANT_LOCK |
DLM_UNLOCK_CLEAR_CONVERT_TYPE);
} else if (dlm_lock_on_list(&res->granted, lock)) {
/* too late, already granted. */
status = DLM_CANCELGRANT;
*actions = DLM_UNLOCK_CALL_AST;
} else {
mlog(ML_ERROR, "lock to cancel is not on any list!\n");
status = DLM_IVLOCKID;
*actions = 0;
}
return status;
}
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock,
struct dlm_lockstatus *lksb,
int *actions)
{
enum dlm_status status;
/* unlock request */
if (!dlm_lock_on_list(&res->granted, lock)) {
status = DLM_DENIED;
dlm_error(status);
*actions = 0;
} else {
/* unlock granted lock */
status = DLM_NORMAL;
*actions = (DLM_UNLOCK_FREE_LOCK |
DLM_UNLOCK_CALL_AST |
DLM_UNLOCK_REMOVE_LOCK);
}
return status;
}
/* there seems to be no point in doing this async
* since (even for the remote case) there is really
* no work to queue up... so just do it and fire the
* unlockast by hand when done... */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
enum dlm_status status;
struct dlm_lock_resource *res;
struct dlm_lock *lock = NULL;
int call_ast, is_master;
if (!lksb) {
dlm_error(DLM_BADARGS);
return DLM_BADARGS;
}
if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
dlm_error(DLM_BADPARAM);
return DLM_BADPARAM;
}
if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
flags &= ~LKM_VALBLK;
}
if (!lksb->lockid || !lksb->lockid->lockres) {
dlm_error(DLM_BADPARAM);
return DLM_BADPARAM;
}
lock = lksb->lockid;
BUG_ON(!lock);
dlm_lock_get(lock);
res = lock->lockres;
BUG_ON(!res);
dlm_lockres_get(res);
retry:
call_ast = 0;
/* need to retry up here because owner may have changed */
mlog(0, "lock=%p res=%p\n", lock, res);
spin_lock(&res->spinlock);
is_master = (res->owner == dlm->node_num);
if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
flags &= ~LKM_VALBLK;
spin_unlock(&res->spinlock);
if (is_master) {
status = dlmunlock_master(dlm, res, lock, lksb, flags,
&call_ast);
mlog(0, "done calling dlmunlock_master: returned %d, "
"call_ast is %d\n", status, call_ast);
} else {
status = dlmunlock_remote(dlm, res, lock, lksb, flags,
&call_ast);
mlog(0, "done calling dlmunlock_remote: returned %d, "
"call_ast is %d\n", status, call_ast);
}
if (status == DLM_RECOVERING ||
status == DLM_MIGRATING ||
status == DLM_FORWARD ||
status == DLM_NOLOCKMGR) {
/* We want to go away for a tiny bit to allow recovery
* / migration to complete on this resource. I don't
* know of any wait queue we could sleep on as this
* may be happening on another node. Perhaps the
* proper solution is to queue up requests on the
* other end? */
/* do we want to yield(); ?? */
msleep(50);
mlog(0, "retrying unlock due to pending recovery/"
"migration/in-progress/reconnect\n");
goto retry;
}
if (call_ast) {
mlog(0, "calling unlockast(%p, %d)\n", data, status);
if (is_master) {
			/* it is possible that there is one last bast
			 * pending.  make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this lock is mastered
			 * remotely, since it has been removed from the
			 * lockres queues and cannot be found. */
dlm_kick_thread(dlm, NULL);
wait_event(dlm->ast_wq,
dlm_lock_basts_flushed(dlm, lock));
}
(*unlockast)(data, status);
}
if (status == DLM_CANCELGRANT)
status = DLM_NORMAL;
if (status == DLM_NORMAL) {
mlog(0, "kicking the thread\n");
dlm_kick_thread(dlm, res);
} else
dlm_error(status);
dlm_lockres_calc_usage(dlm, res);
dlm_lockres_put(res);
dlm_lock_put(lock);
mlog(0, "returning status=%d!\n", status);
return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);
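/*
 * Minimal usage sketch for the exported API above (hypothetical caller;
 * "my_ctx" and the callback name are illustrative, not part of this
 * file).  Because this path is synchronous, a successful unlock fires
 * the unlockast before dlmunlock() returns:
 *
 *	static void my_unlock_ast(void *data, enum dlm_status st)
 *	{
 *		struct my_ctx *ctx = data;
 *		complete(&ctx->unlock_done);
 *	}
 *
 *	status = dlmunlock(dlm, &ctx->lksb, 0, my_unlock_ast, ctx);
 *	if (status != DLM_NORMAL)
 *		(handle DLM_DENIED, DLM_BADPARAM, ...)
 */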
| linux-master | fs/ocfs2/dlm/dlmunlock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* inode.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004,2019 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2004 IBM Inc.
* Copyright (C) 2019 Linux Foundation <[email protected]>
*
* debugfs is for people to use instead of /proc or /sys.
* See ./Documentation/core-api/kernel-api.rst for more details.
*/
#define pr_fmt(fmt) "debugfs: " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
#include <linux/fsnotify.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/security.h>
#include "internal.h"
#define DEBUGFS_DEFAULT_MODE 0700
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
/*
* Don't allow access attributes to be changed whilst the kernel is locked down
* so that we can use the file mode as part of a heuristic to determine whether
* to lock down individual files.
*/
static int debugfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *ia)
{
int ret;
if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
ret = security_locked_down(LOCKDOWN_DEBUGFS);
if (ret)
return ret;
}
return simple_setattr(&nop_mnt_idmap, dentry, ia);
}
static const struct inode_operations debugfs_file_inode_operations = {
.setattr = debugfs_setattr,
};
static const struct inode_operations debugfs_dir_inode_operations = {
.lookup = simple_lookup,
.setattr = debugfs_setattr,
};
static const struct inode_operations debugfs_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = debugfs_setattr,
};
static struct inode *debugfs_get_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
return inode;
}
struct debugfs_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
/* Opt_* bitfield. */
unsigned int opts;
};
enum {
Opt_uid,
Opt_gid,
Opt_mode,
Opt_err
};
static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%o"},
{Opt_err, NULL}
};
struct debugfs_fs_info {
struct debugfs_mount_opts mount_opts;
};
static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
int option;
int token;
kuid_t uid;
kgid_t gid;
char *p;
opts->opts = 0;
opts->mode = DEBUGFS_DEFAULT_MODE;
while ((p = strsep(&data, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return -EINVAL;
uid = make_kuid(current_user_ns(), option);
if (!uid_valid(uid))
return -EINVAL;
opts->uid = uid;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return -EINVAL;
gid = make_kgid(current_user_ns(), option);
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
/*
* We might like to report bad mount options here;
* but traditionally debugfs has ignored all mount options
*/
}
opts->opts |= BIT(token);
}
return 0;
}
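/*
 * The tokens above correspond to the standard debugfs mount options.
 * Illustrative shell usage (the ids and path are examples):
 *
 *	mount -t debugfs -o uid=1000,gid=1000,mode=0750 none /sys/kernel/debug
 *
 * makes the debugfs root owned by uid/gid 1000 with mode 0750; omitted
 * options fall back to root ownership and DEBUGFS_DEFAULT_MODE.
 */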
static void _debugfs_apply_options(struct super_block *sb, bool remount)
{
struct debugfs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
struct debugfs_mount_opts *opts = &fsi->mount_opts;
/*
* On remount, only reset mode/uid/gid if they were provided as mount
* options.
*/
if (!remount || opts->opts & BIT(Opt_mode)) {
inode->i_mode &= ~S_IALLUGO;
inode->i_mode |= opts->mode;
}
if (!remount || opts->opts & BIT(Opt_uid))
inode->i_uid = opts->uid;
if (!remount || opts->opts & BIT(Opt_gid))
inode->i_gid = opts->gid;
}
static void debugfs_apply_options(struct super_block *sb)
{
_debugfs_apply_options(sb, false);
}
static void debugfs_apply_options_remount(struct super_block *sb)
{
_debugfs_apply_options(sb, true);
}
static int debugfs_remount(struct super_block *sb, int *flags, char *data)
{
int err;
struct debugfs_fs_info *fsi = sb->s_fs_info;
sync_filesystem(sb);
err = debugfs_parse_options(data, &fsi->mount_opts);
if (err)
goto fail;
debugfs_apply_options_remount(sb);
fail:
return err;
}
static int debugfs_show_options(struct seq_file *m, struct dentry *root)
{
struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
struct debugfs_mount_opts *opts = &fsi->mount_opts;
if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, opts->uid));
if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, opts->gid));
if (opts->mode != DEBUGFS_DEFAULT_MODE)
seq_printf(m, ",mode=%o", opts->mode);
return 0;
}
static void debugfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
.free_inode = debugfs_free_inode,
};
static void debugfs_release_dentry(struct dentry *dentry)
{
void *fsd = dentry->d_fsdata;
if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
kfree(dentry->d_fsdata);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
debugfs_automount_t f;
f = (debugfs_automount_t)path->dentry->d_fsdata;
return f(path->dentry, d_inode(path->dentry)->i_private);
}
static const struct dentry_operations debugfs_dops = {
.d_delete = always_delete_dentry,
.d_release = debugfs_release_dentry,
.d_automount = debugfs_automount,
};
static int debug_fill_super(struct super_block *sb, void *data, int silent)
{
static const struct tree_descr debug_files[] = {{""}};
struct debugfs_fs_info *fsi;
int err;
fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi) {
err = -ENOMEM;
goto fail;
}
err = debugfs_parse_options(data, &fsi->mount_opts);
if (err)
goto fail;
err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
if (err)
goto fail;
sb->s_op = &debugfs_super_operations;
sb->s_d_op = &debugfs_dops;
debugfs_apply_options(sb);
return 0;
fail:
kfree(fsi);
sb->s_fs_info = NULL;
return err;
}
static struct dentry *debug_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
if (!(debugfs_allow & DEBUGFS_ALLOW_API))
return ERR_PTR(-EPERM);
return mount_single(fs_type, flags, data, debug_fill_super);
}
static struct file_system_type debug_fs_type = {
.owner = THIS_MODULE,
.name = "debugfs",
.mount = debug_mount,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("debugfs");
/**
* debugfs_lookup() - look up an existing debugfs file
* @name: a pointer to a string containing the name of the file to look up.
* @parent: a pointer to the parent dentry of the file.
*
* This function will return a pointer to a dentry if it succeeds. If the file
* doesn't exist or an error occurs, %NULL will be returned. The returned
* dentry must be passed to dput() when it is no longer needed.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
{
struct dentry *dentry;
if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
return NULL;
if (!parent)
parent = debugfs_mount->mnt_root;
dentry = lookup_positive_unlocked(name, parent, strlen(name));
if (IS_ERR(dentry))
return NULL;
return dentry;
}
EXPORT_SYMBOL_GPL(debugfs_lookup);
static struct dentry *start_creating(const char *name, struct dentry *parent)
{
struct dentry *dentry;
int error;
if (!(debugfs_allow & DEBUGFS_ALLOW_API))
return ERR_PTR(-EPERM);
if (!debugfs_initialized())
return ERR_PTR(-ENOENT);
pr_debug("creating file '%s'\n", name);
if (IS_ERR(parent))
return parent;
error = simple_pin_fs(&debug_fs_type, &debugfs_mount,
&debugfs_mount_count);
if (error) {
pr_err("Unable to pin filesystem for file '%s'\n", name);
return ERR_PTR(error);
}
/* If the parent is not specified, we create it in the root.
* We need the root dentry to do this, which is in the super
* block. A pointer to that is in the struct vfsmount that we
* have around.
*/
if (!parent)
parent = debugfs_mount->mnt_root;
inode_lock(d_inode(parent));
if (unlikely(IS_DEADDIR(d_inode(parent))))
dentry = ERR_PTR(-ENOENT);
else
dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
if (d_is_dir(dentry))
pr_err("Directory '%s' with parent '%s' already present!\n",
name, parent->d_name.name);
else
pr_err("File '%s' in directory '%s' already present!\n",
name, parent->d_name.name);
dput(dentry);
dentry = ERR_PTR(-EEXIST);
}
if (IS_ERR(dentry)) {
inode_unlock(d_inode(parent));
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
return dentry;
}
static struct dentry *failed_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return ERR_PTR(-ENOMEM);
}
static struct dentry *end_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
return dentry;
}
static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *proxy_fops,
const struct file_operations *real_fops)
{
struct dentry *dentry;
struct inode *inode;
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
dentry = start_creating(name, parent);
if (IS_ERR(dentry))
return dentry;
if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
failed_creating(dentry);
return ERR_PTR(-EPERM);
}
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create file '%s'\n",
name);
return failed_creating(dentry);
}
inode->i_mode = mode;
inode->i_private = data;
inode->i_op = &debugfs_file_inode_operations;
inode->i_fop = proxy_fops;
dentry->d_fsdata = (void *)((unsigned long)real_fops |
DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
d_instantiate(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
/**
* debugfs_create_file - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
*
* This is the basic "create a file" function for debugfs. It allows for a
* wide range of flexibility in creating a file, or a directory (if you want
* to create a directory, the debugfs_create_dir() function is
* recommended to be used instead.)
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*
* NOTE: it's expected that most callers should _ignore_ the errors returned
* by this function. Other debugfs functions handle the fact that the "dentry"
* passed to them could be an error and they don't crash in that case.
* Drivers should generally work fine even if debugfs fails to init anyway.
*/
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
return __debugfs_create_file(name, mode, parent, data,
fops ? &debugfs_full_proxy_file_operations :
&debugfs_noop_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file);
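/*
 * Typical usage sketch (hypothetical driver code; the "mydrv" names are
 * illustrative, not part of this file).  A driver creates a directory,
 * exposes a read-only file backed by its own file_operations, and tears
 * everything down with a single debugfs_remove() of the directory:
 *
 *	static ssize_t mydrv_state_read(struct file *file, char __user *buf,
 *					size_t count, loff_t *ppos)
 *	{
 *		struct mydrv *drv = file_inode(file)->i_private;
 *		char tmp[16];
 *		int len = scnprintf(tmp, sizeof(tmp), "%d\n", drv->state);
 *
 *		return simple_read_from_buffer(buf, count, ppos, tmp, len);
 *	}
 *
 *	static const struct file_operations mydrv_state_fops = {
 *		.owner = THIS_MODULE,
 *		.read  = mydrv_state_read,
 *	};
 *
 *	drv->dir = debugfs_create_dir("mydrv", NULL);
 *	debugfs_create_file("state", 0444, drv->dir, drv, &mydrv_state_fops);
 *	...
 *	debugfs_remove(drv->dir);	(on driver teardown)
 */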
/**
* debugfs_create_file_unsafe - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
*
* debugfs_create_file_unsafe() is completely analogous to
* debugfs_create_file(), the only difference being that the fops
* handed it will not get protected against file removals by the
* debugfs core.
*
* It is your responsibility to protect your struct file_operation
* methods against file removals by means of debugfs_file_get()
* and debugfs_file_put(). ->open() is still protected by
* debugfs though.
*
* Any struct file_operations defined by means of
* DEFINE_DEBUGFS_ATTRIBUTE() is protected against file removals and
* thus, may be used here.
*/
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
return __debugfs_create_file(name, mode, parent, data,
fops ? &debugfs_open_proxy_file_operations :
&debugfs_noop_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
/**
* debugfs_create_file_size - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
* @file_size: initial file size
*
* This is the basic "create a file" function for debugfs. It allows for a
* wide range of flexibility in creating a file, or a directory (if you want
* to create a directory, the debugfs_create_dir() function is
* recommended to be used instead.)
*/
void debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
loff_t file_size)
{
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
if (!IS_ERR(de))
d_inode(de)->i_size = file_size;
}
EXPORT_SYMBOL_GPL(debugfs_create_file_size);
/**
* debugfs_create_dir - create a directory in the debugfs filesystem
* @name: a pointer to a string containing the name of the directory to
* create.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* directory will be created in the root of the debugfs filesystem.
*
* This function creates a directory in debugfs with the given name.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*
* NOTE: it's expected that most callers should _ignore_ the errors returned
* by this function. Other debugfs functions handle the fact that the "dentry"
* passed to them could be an error and they don't crash in that case.
* Drivers should generally work fine even if debugfs fails to init anyway.
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
{
struct dentry *dentry = start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
failed_creating(dentry);
return ERR_PTR(-EPERM);
}
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create directory '%s'\n",
name);
return failed_creating(dentry);
}
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
inode->i_op = &debugfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
/**
* debugfs_create_automount - create automount point in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @f: function to be called when pathname resolution steps on that one.
* @data: opaque argument to pass to f().
*
* @f should return what ->d_automount() would.
*/
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
void *data)
{
struct dentry *dentry = start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
failed_creating(dentry);
return ERR_PTR(-EPERM);
}
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
return failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
dentry->d_fsdata = (void *)f;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL(debugfs_create_automount);
/**
* debugfs_create_symlink- create a symbolic link in the debugfs filesystem
* @name: a pointer to a string containing the name of the symbolic link to
* create.
* @parent: a pointer to the parent dentry for this symbolic link. This
* should be a directory dentry if set. If this parameter is NULL,
* then the symbolic link will be created in the root of the debugfs
* filesystem.
* @target: a pointer to a string containing the path to the target of the
* symbolic link.
*
* This function creates a symbolic link with the given name in debugfs that
* links to the given target path.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the symbolic
* link is to be removed (no automatic cleanup happens if your module is
* unloaded, you are responsible here.) If an error occurs, ERR_PTR(-ERROR)
* will be returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *target)
{
struct dentry *dentry;
struct inode *inode;
char *link = kstrdup(target, GFP_KERNEL);
if (!link)
return ERR_PTR(-ENOMEM);
dentry = start_creating(name, parent);
if (IS_ERR(dentry)) {
kfree(link);
return dentry;
}
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create symlink '%s'\n",
name);
kfree(link);
return failed_creating(dentry);
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_op = &debugfs_symlink_inode_operations;
inode->i_link = link;
d_instantiate(dentry, inode);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_symlink);
static void __debugfs_file_removed(struct dentry *dentry)
{
struct debugfs_fsdata *fsd;
/*
* Paired with the closing smp_mb() implied by a successful
* cmpxchg() in debugfs_file_get(): either
* debugfs_file_get() must see a dead dentry or we must see a
* debugfs_fsdata instance at ->d_fsdata here (or both).
*/
smp_mb();
fsd = READ_ONCE(dentry->d_fsdata);
if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
return;
if (!refcount_dec_and_test(&fsd->active_users))
wait_for_completion(&fsd->active_users_drained);
}
static void remove_one(struct dentry *victim)
{
if (d_is_reg(victim))
__debugfs_file_removed(victim);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
/**
* debugfs_remove - recursively removes a directory
 * @dentry: a pointer to the dentry of the directory to be removed. If this
* parameter is NULL or an error value, nothing will be done.
*
* This function recursively removes a directory tree in debugfs that
* was previously created with a call to another debugfs function
* (like debugfs_create_file() or variants thereof.)
*
* This function is required to be called in order for the file to be
* removed, no automatic cleanup of files will happen when a module is
* removed, you are responsible here.
*/
void debugfs_remove(struct dentry *dentry)
{
if (IS_ERR_OR_NULL(dentry))
return;
simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count);
simple_recursive_removal(dentry, remove_one);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove);
/**
* debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
* @name: a pointer to a string containing the name of the item to look up.
* @parent: a pointer to the parent dentry of the item.
*
 * This is the equivalent of doing something like
* debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
* handled for the directory being looked up.
*/
void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
{
struct dentry *dentry;
dentry = debugfs_lookup(name, parent);
if (!dentry)
return;
debugfs_remove(dentry);
dput(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
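/*
 * Example (editor's sketch, not part of the original file): removing an
 * entry by name when the creator did not keep the dentry pointer around.
 * The "state" file name is hypothetical.
 */
static inline void example_remove_by_name(struct dentry *parent)
{
	/* Removes the entry and drops the reference taken by the lookup. */
	debugfs_lookup_and_remove("state", parent);
}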
/**
* debugfs_rename - rename a file/directory in the debugfs filesystem
* @old_dir: a pointer to the parent dentry for the renamed object. This
* should be a directory dentry.
* @old_dentry: dentry of an object to be renamed.
* @new_dir: a pointer to the parent dentry where the object should be
* moved. This should be a directory dentry.
* @new_name: a pointer to a string containing the target name.
*
* This function renames a file/directory in debugfs. The target must not
* exist for rename to succeed.
*
* This function will return a pointer to old_dentry (which is updated to
* reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR)
* will be returned.
*
* If debugfs is not enabled in the kernel, the value ERR_PTR(-%ENODEV)
* will be returned.
*/
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name)
{
int error;
struct dentry *dentry = NULL, *trap;
struct name_snapshot old_name;
if (IS_ERR(old_dir))
return old_dir;
if (IS_ERR(new_dir))
return new_dir;
if (IS_ERR_OR_NULL(old_dentry))
return old_dentry;
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
goto exit;
/* Source does not exist, cyclic rename, or mountpoint? */
if (d_really_is_negative(old_dentry) || old_dentry == trap ||
d_mountpoint(old_dentry))
goto exit;
dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
/* Lookup failed, cyclic rename or target exists? */
if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
goto exit;
take_dentry_name_snapshot(&old_name, old_dentry);
error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry,
d_inode(new_dir), dentry, 0);
if (error) {
release_dentry_name_snapshot(&old_name);
goto exit;
}
d_move(old_dentry, dentry);
fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
d_is_dir(old_dentry),
NULL, old_dentry);
release_dentry_name_snapshot(&old_name);
unlock_rename(new_dir, old_dir);
dput(dentry);
return old_dentry;
exit:
if (dentry && !IS_ERR(dentry))
dput(dentry);
unlock_rename(new_dir, old_dir);
if (IS_ERR(dentry))
return dentry;
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(debugfs_rename);
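/*
 * Example (editor's sketch, not part of the original file): moving an
 * entry between two debugfs directories. All dentries are assumed to have
 * been created earlier by the caller; "renamed" is a hypothetical name.
 */
static inline int example_move_entry(struct dentry *old_dir,
				     struct dentry *victim,
				     struct dentry *new_dir)
{
	struct dentry *moved;

	moved = debugfs_rename(old_dir, victim, new_dir, "renamed");
	if (IS_ERR(moved))
		return PTR_ERR(moved);	/* e.g. target already existed */

	return 0;
}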
/**
* debugfs_initialized - Tells whether debugfs has been registered
*/
bool debugfs_initialized(void)
{
return debugfs_registered;
}
EXPORT_SYMBOL_GPL(debugfs_initialized);
static int __init debugfs_kernel(char *str)
{
if (str) {
if (!strcmp(str, "on"))
debugfs_allow = DEBUGFS_ALLOW_API | DEBUGFS_ALLOW_MOUNT;
else if (!strcmp(str, "no-mount"))
debugfs_allow = DEBUGFS_ALLOW_API;
else if (!strcmp(str, "off"))
debugfs_allow = 0;
}
return 0;
}
early_param("debugfs", debugfs_kernel);
static int __init debugfs_init(void)
{
int retval;
if (!(debugfs_allow & DEBUGFS_ALLOW_MOUNT))
return -EPERM;
retval = sysfs_create_mount_point(kernel_kobj, "debug");
if (retval)
return retval;
retval = register_filesystem(&debug_fs_type);
if (retval)
sysfs_remove_mount_point(kernel_kobj, "debug");
else
debugfs_registered = true;
return retval;
}
core_initcall(debugfs_init);
| linux-master | fs/debugfs/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* file.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2004 IBM Inc.
*
* debugfs is for people to use instead of /proc or /sys.
* See Documentation/filesystems/ for more details.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <linux/security.h>
#include "internal.h"
struct poll_table_struct;
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return 0;
}
static ssize_t default_write_file(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return count;
}
const struct file_operations debugfs_noop_file_operations = {
.read = default_read_file,
.write = default_write_file,
.open = simple_open,
.llseek = noop_llseek,
};
#define F_DENTRY(filp) ((filp)->f_path.dentry)
const struct file_operations *debugfs_real_fops(const struct file *filp)
{
struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
/*
* Urgh, we've been called w/o a protecting
* debugfs_file_get().
*/
WARN_ON(1);
return NULL;
}
return fsd->real_fops;
}
EXPORT_SYMBOL_GPL(debugfs_real_fops);
/**
* debugfs_file_get - mark the beginning of file data access
* @dentry: the dentry object whose data is being accessed.
*
* Up to a matching call to debugfs_file_put(), any successive call
* into the file removing functions debugfs_remove() and
* debugfs_remove_recursive() will block. Since associated private
* file data may only get freed after a successful return of any of
* the removal functions, you may safely access it after a successful
* call to debugfs_file_get() without worrying about lifetime issues.
*
* If -%EIO is returned, the file has already been removed and thus,
* it is not safe to access any of its data. If, on the other hand,
* it is allowed to access the file data, zero is returned.
*/
int debugfs_file_get(struct dentry *dentry)
{
struct debugfs_fsdata *fsd;
void *d_fsd;
d_fsd = READ_ONCE(dentry->d_fsdata);
if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
fsd = d_fsd;
} else {
fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
if (!fsd)
return -ENOMEM;
fsd->real_fops = (void *)((unsigned long)d_fsd &
~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
refcount_set(&fsd->active_users, 1);
init_completion(&fsd->active_users_drained);
if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
kfree(fsd);
fsd = READ_ONCE(dentry->d_fsdata);
}
}
/*
* In case of a successful cmpxchg() above, this check is
* strictly necessary and must follow it, see the comment in
* __debugfs_remove_file().
* OTOH, if the cmpxchg() hasn't been executed or wasn't
* successful, this serves the purpose of not starving
* removers.
*/
if (d_unlinked(dentry))
return -EIO;
if (!refcount_inc_not_zero(&fsd->active_users))
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(debugfs_file_get);
/**
* debugfs_file_put - mark the end of file data access
* @dentry: the dentry object formerly passed to
* debugfs_file_get().
*
* Allow any ongoing concurrent call into debugfs_remove() or
* debugfs_remove_recursive() blocked by a former call to
* debugfs_file_get() to proceed and return to its caller.
*/
void debugfs_file_put(struct dentry *dentry)
{
struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata);
if (refcount_dec_and_test(&fsd->active_users))
complete(&fsd->active_users_drained);
}
EXPORT_SYMBOL_GPL(debugfs_file_put);
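/*
 * Example (editor's sketch, not part of the original file): the pairing
 * the two helpers above expect. A ->read() handler for a file created
 * with debugfs_create_file_unsafe() brackets every access to its private
 * data with debugfs_file_get()/debugfs_file_put().
 */
static inline ssize_t example_guarded_read(struct file *file,
					   char __user *buf,
					   size_t count, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	ssize_t ret;

	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;	/* -EIO: the file was already removed */

	/* ... file->private_data may be dereferenced safely here ... */

	debugfs_file_put(dentry);
	return 0;	/* 0 bytes read; a real handler would fill @buf */
}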
/*
* Only permit access to world-readable files when the kernel is locked down.
* We also need to exclude any file that has ways to write or alter it, as
* root can bypass the permissions check.
*/
static int debugfs_locked_down(struct inode *inode,
struct file *filp,
const struct file_operations *real_fops)
{
if ((inode->i_mode & 07777 & ~0444) == 0 &&
!(filp->f_mode & FMODE_WRITE) &&
!real_fops->unlocked_ioctl &&
!real_fops->compat_ioctl &&
!real_fops->mmap)
return 0;
if (security_locked_down(LOCKDOWN_DEBUGFS))
return -EPERM;
return 0;
}
static int open_proxy_open(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = NULL;
int r;
r = debugfs_file_get(dentry);
if (r)
return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
if (!fops_get(real_fops)) {
#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING) {
r = -ENXIO;
goto out;
}
#endif
/* Huh? Module did not clean up after itself at exit? */
WARN(1, "debugfs file owner did not clean up at exit: %pd",
dentry);
r = -ENXIO;
goto out;
}
replace_fops(filp, real_fops);
if (real_fops->open)
r = real_fops->open(inode, filp);
out:
debugfs_file_put(dentry);
return r;
}
const struct file_operations debugfs_open_proxy_file_operations = {
.open = open_proxy_open,
};
#define PROTO(args...) args
#define ARGS(args...) args
#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \
static ret_type full_proxy_ ## name(proto) \
{ \
struct dentry *dentry = F_DENTRY(filp); \
const struct file_operations *real_fops; \
ret_type r; \
\
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
real_fops = debugfs_real_fops(filp); \
r = real_fops->name(args); \
debugfs_file_put(dentry); \
return r; \
}
FULL_PROXY_FUNC(llseek, loff_t, filp,
PROTO(struct file *filp, loff_t offset, int whence),
ARGS(filp, offset, whence));
FULL_PROXY_FUNC(read, ssize_t, filp,
PROTO(struct file *filp, char __user *buf, size_t size,
loff_t *ppos),
ARGS(filp, buf, size, ppos));
FULL_PROXY_FUNC(write, ssize_t, filp,
PROTO(struct file *filp, const char __user *buf, size_t size,
loff_t *ppos),
ARGS(filp, buf, size, ppos));
FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
ARGS(filp, cmd, arg));
static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
__poll_t r = 0;
const struct file_operations *real_fops;
if (debugfs_file_get(dentry))
return EPOLLHUP;
real_fops = debugfs_real_fops(filp);
r = real_fops->poll(filp, wait);
debugfs_file_put(dentry);
return r;
}
static int full_proxy_release(struct inode *inode, struct file *filp)
{
const struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = debugfs_real_fops(filp);
const struct file_operations *proxy_fops = filp->f_op;
int r = 0;
/*
* We must not protect this against removal races here: the
* original releaser should be called unconditionally in order
* not to leak any resources. Releasers must not assume that
* ->i_private is still being meaningful here.
*/
if (real_fops->release)
r = real_fops->release(inode, filp);
replace_fops(filp, d_inode(dentry)->i_fop);
kfree(proxy_fops);
fops_put(real_fops);
return r;
}
static void __full_proxy_fops_init(struct file_operations *proxy_fops,
const struct file_operations *real_fops)
{
proxy_fops->release = full_proxy_release;
if (real_fops->llseek)
proxy_fops->llseek = full_proxy_llseek;
if (real_fops->read)
proxy_fops->read = full_proxy_read;
if (real_fops->write)
proxy_fops->write = full_proxy_write;
if (real_fops->poll)
proxy_fops->poll = full_proxy_poll;
if (real_fops->unlocked_ioctl)
proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl;
}
static int full_proxy_open(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = NULL;
struct file_operations *proxy_fops = NULL;
int r;
r = debugfs_file_get(dentry);
if (r)
return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
if (!fops_get(real_fops)) {
#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING) {
r = -ENXIO;
goto out;
}
#endif
/* Huh? Module did not clean up after itself at exit? */
WARN(1, "debugfs file owner did not clean up at exit: %pd",
dentry);
r = -ENXIO;
goto out;
}
proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL);
if (!proxy_fops) {
r = -ENOMEM;
goto free_proxy;
}
__full_proxy_fops_init(proxy_fops, real_fops);
replace_fops(filp, proxy_fops);
if (real_fops->open) {
r = real_fops->open(inode, filp);
if (r) {
replace_fops(filp, d_inode(dentry)->i_fop);
goto free_proxy;
} else if (filp->f_op != proxy_fops) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
goto free_proxy;
}
}
goto out;
free_proxy:
kfree(proxy_fops);
fops_put(real_fops);
out:
debugfs_file_put(dentry);
return r;
}
const struct file_operations debugfs_full_proxy_file_operations = {
.open = full_proxy_open,
};
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
ret = debugfs_file_get(dentry);
if (unlikely(ret))
return ret;
ret = simple_attr_read(file, buf, len, ppos);
debugfs_file_put(dentry);
return ret;
}
EXPORT_SYMBOL_GPL(debugfs_attr_read);
static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
size_t len, loff_t *ppos, bool is_signed)
{
struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
ret = debugfs_file_get(dentry);
if (unlikely(ret))
return ret;
if (is_signed)
ret = simple_attr_write_signed(file, buf, len, ppos);
else
ret = simple_attr_write(file, buf, len, ppos);
debugfs_file_put(dentry);
return ret;
}
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
}
EXPORT_SYMBOL_GPL(debugfs_attr_write);
ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
}
EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
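/*
 * Example (editor's sketch, not part of the original file): a custom
 * attribute built on the removal-safe debugfs_attr_read()/
 * debugfs_attr_write() wrappers above, mirroring the fops_u* definitions
 * below. "example_val" and the accessors are hypothetical.
 */
static u64 example_val;

static int example_val_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_val_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_val_fops, example_val_get, example_val_set,
			 "%llu\n");

static inline void example_val_expose(struct dentry *parent)
{
	debugfs_create_file_unsafe("example", 0644, parent, &example_val,
				   &example_val_fops);
}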
static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *value,
const struct file_operations *fops,
const struct file_operations *fops_ro,
const struct file_operations *fops_wo)
{
/* if there are no write bits set, make read only */
if (!(mode & S_IWUGO))
return debugfs_create_file_unsafe(name, mode, parent, value,
fops_ro);
/* if there are no read bits set, make write only */
if (!(mode & S_IRUGO))
return debugfs_create_file_unsafe(name, mode, parent, value,
fops_wo);
return debugfs_create_file_unsafe(name, mode, parent, value, fops);
}
static int debugfs_u8_set(void *data, u64 val)
{
*(u8 *)data = val;
return 0;
}
static int debugfs_u8_get(void *data, u64 *val)
{
*val = *(u8 *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
/**
* debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
u8 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
&fops_u8_ro, &fops_u8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u8);
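/*
 * Example (editor's sketch, not part of the original file): exposing a
 * module variable through the helper above. "threshold" and the 0644
 * mode are hypothetical choices; 0444 would yield a read-only file.
 */
static u8 example_threshold;

static inline void example_expose_u8(struct dentry *parent)
{
	debugfs_create_u8("threshold", 0644, parent, &example_threshold);
}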
static int debugfs_u16_set(void *data, u64 val)
{
*(u16 *)data = val;
return 0;
}
static int debugfs_u16_get(void *data, u64 *val)
{
*val = *(u16 *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
/**
* debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
u16 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
&fops_u16_ro, &fops_u16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u16);
static int debugfs_u32_set(void *data, u64 val)
{
*(u32 *)data = val;
return 0;
}
static int debugfs_u32_get(void *data, u64 *val)
{
*val = *(u32 *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
/**
* debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
u32 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u32,
&fops_u32_ro, &fops_u32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u32);
static int debugfs_u64_set(void *data, u64 val)
{
*(u64 *)data = val;
return 0;
}
static int debugfs_u64_get(void *data, u64 *val)
{
*val = *(u64 *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
/**
* debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
u64 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
&fops_u64_ro, &fops_u64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);
static int debugfs_ulong_set(void *data, u64 val)
{
*(unsigned long *)data = val;
return 0;
}
static int debugfs_ulong_get(void *data, u64 *val)
{
*val = *(unsigned long *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong, debugfs_ulong_get, debugfs_ulong_set,
"%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_ro, debugfs_ulong_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n");
/**
* debugfs_create_ulong - create a debugfs file that is used to read and write
* an unsigned long value.
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
unsigned long *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_ulong,
&fops_ulong_ro, &fops_ulong_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_ulong);
DEFINE_DEBUGFS_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set,
"0x%04llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set,
"0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set,
"0x%016llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_ro, debugfs_u64_get, NULL, "0x%016llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n");
/*
* debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value
*
* These functions are exactly the same as the above functions (but use a hex
* output for the decimal challenged). For details look at the above unsigned
* decimal functions.
*/
/**
* debugfs_create_x8 - create a debugfs file that is used to read and write an unsigned 8-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
u8 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
&fops_x8_ro, &fops_x8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x8);
/**
* debugfs_create_x16 - create a debugfs file that is used to read and write an unsigned 16-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
u16 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
&fops_x16_ro, &fops_x16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x16);
/**
* debugfs_create_x32 - create a debugfs file that is used to read and write an unsigned 32-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
u32 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
&fops_x32_ro, &fops_x32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x32);
/**
* debugfs_create_x64 - create a debugfs file that is used to read and write an unsigned 64-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
u64 *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
&fops_x64_ro, &fops_x64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x64);
static int debugfs_size_t_set(void *data, u64 val)
{
*(size_t *)data = val;
return 0;
}
static int debugfs_size_t_get(void *data, u64 *val)
{
*val = *(size_t *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set,
"%llu\n"); /* %llu and %zu are more or less the same */
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_ro, debugfs_size_t_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n");
/**
* debugfs_create_size_t - create a debugfs file that is used to read and write a size_t value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_size_t,
&fops_size_t_ro, &fops_size_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_size_t);
static int debugfs_atomic_t_set(void *data, u64 val)
{
atomic_set((atomic_t *)data, val);
return 0;
}
static int debugfs_atomic_t_get(void *data, u64 *val)
{
*val = atomic_read((atomic_t *)data);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
debugfs_atomic_t_set, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
"%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
"%lld\n");
/**
* debugfs_create_atomic_t - create a debugfs file that is used to read and
* write an atomic_t value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*/
void debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_atomic_t,
&fops_atomic_t_ro, &fops_atomic_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[2];
bool val;
int r;
struct dentry *dentry = F_DENTRY(file);
r = debugfs_file_get(dentry);
if (unlikely(r))
return r;
val = *(bool *)file->private_data;
debugfs_file_put(dentry);
if (val)
buf[0] = 'Y';
else
buf[0] = 'N';
buf[1] = '\n';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
EXPORT_SYMBOL_GPL(debugfs_read_file_bool);
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
bool bv;
int r;
bool *val = file->private_data;
struct dentry *dentry = F_DENTRY(file);
r = kstrtobool_from_user(user_buf, count, &bv);
if (!r) {
r = debugfs_file_get(dentry);
if (unlikely(r))
return r;
*val = bv;
debugfs_file_put(dentry);
}
return count;
}
EXPORT_SYMBOL_GPL(debugfs_write_file_bool);
static const struct file_operations fops_bool = {
.read = debugfs_read_file_bool,
.write = debugfs_write_file_bool,
.open = simple_open,
.llseek = default_llseek,
};
static const struct file_operations fops_bool_ro = {
.read = debugfs_read_file_bool,
.open = simple_open,
.llseek = default_llseek,
};
static const struct file_operations fops_bool_wo = {
.write = debugfs_write_file_bool,
.open = simple_open,
.llseek = default_llseek,
};
/**
* debugfs_create_bool - create a debugfs file that is used to read and write a boolean value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
bool *value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_bool,
&fops_bool_ro, &fops_bool_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_bool);
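/*
 * Example (editor's sketch, not part of the original file): a boolean
 * knob backed by the helpers above. Reads yield "Y\n" or "N\n"; writes
 * accept the usual kstrtobool() spellings. "enabled" is hypothetical.
 */
static bool example_enabled = true;

static inline void example_expose_bool(struct dentry *parent)
{
	debugfs_create_bool("enabled", 0644, parent, &example_enabled);
}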
ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dentry *dentry = F_DENTRY(file);
char *str, *copy = NULL;
int copy_len, len;
ssize_t ret;
ret = debugfs_file_get(dentry);
if (unlikely(ret))
return ret;
str = *(char **)file->private_data;
len = strlen(str) + 1;
copy = kmalloc(len, GFP_KERNEL);
if (!copy) {
debugfs_file_put(dentry);
return -ENOMEM;
}
copy_len = strscpy(copy, str, len);
debugfs_file_put(dentry);
if (copy_len < 0) {
kfree(copy);
return copy_len;
}
copy[copy_len] = '\n';
ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
kfree(copy);
return ret;
}
static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dentry *dentry = F_DENTRY(file);
char *old, *new = NULL;
int pos = *ppos;
int r;
r = debugfs_file_get(dentry);
if (unlikely(r))
return r;
old = *(char **)file->private_data;
/* only allow strict concatenation */
r = -EINVAL;
if (pos && pos != strlen(old))
goto error;
r = -E2BIG;
if (pos + count + 1 > PAGE_SIZE)
goto error;
r = -ENOMEM;
new = kmalloc(pos + count + 1, GFP_KERNEL);
if (!new)
goto error;
if (pos)
memcpy(new, old, pos);
r = -EFAULT;
if (copy_from_user(new + pos, user_buf, count))
goto error;
new[pos + count] = '\0';
strim(new);
rcu_assign_pointer(*(char **)file->private_data, new);
synchronize_rcu();
kfree(old);
debugfs_file_put(dentry);
return count;
error:
kfree(new);
debugfs_file_put(dentry);
return r;
}
static const struct file_operations fops_str = {
.read = debugfs_read_file_str,
.write = debugfs_write_file_str,
.open = simple_open,
.llseek = default_llseek,
};
static const struct file_operations fops_str_ro = {
.read = debugfs_read_file_str,
.open = simple_open,
.llseek = default_llseek,
};
static const struct file_operations fops_str_wo = {
.write = debugfs_write_file_str,
.open = simple_open,
.llseek = default_llseek,
};
/**
* debugfs_create_str - create a debugfs file that is used to read and write a string value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read from and
* write to.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
*/
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value)
{
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_str,
&fops_str_ro, &fops_str_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_str);
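/*
 * Example (editor's sketch, not part of the original file): a read-only
 * string knob. For a writable file the backing pointer must refer to
 * kmalloc()ed memory, since a write kfree()s the old string. "label" is
 * hypothetical.
 */
static char *example_label = "inactive";

static inline void example_expose_str(struct dentry *parent)
{
	debugfs_create_str("label", 0444, parent, &example_label);
}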
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct debugfs_blob_wrapper *blob = file->private_data;
struct dentry *dentry = F_DENTRY(file);
ssize_t r;
r = debugfs_file_get(dentry);
if (unlikely(r))
return r;
r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
blob->size);
debugfs_file_put(dentry);
return r;
}
static const struct file_operations fops_blob = {
.read = read_file_blob,
.open = simple_open,
.llseek = default_llseek,
};
/**
* debugfs_create_blob - create a debugfs file that is used to read a binary blob
* @name: a pointer to a string containing the name of the file to create.
* @mode: the read permission that the file should have (other permissions are
* masked out)
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer
* to the blob data and the size of the data.
*
* This function creates a file in debugfs with the given name that exports
* @blob->data as a binary blob. If the @mode variable is so set it can be
* read from. Writing is not supported.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded;
* you are responsible for it). If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
return debugfs_create_file_unsafe(name, mode & 0444, parent, blob, &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
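/*
 * Example (editor's sketch, not part of the original file): exporting a
 * buffer as a read-only blob. Both the wrapper and the data it points to
 * must outlive the file. Names and sizes are hypothetical.
 */
static u8 example_dump[64];
static struct debugfs_blob_wrapper example_blob = {
	.data = example_dump,
	.size = sizeof(example_dump),
};

static inline void example_expose_blob(struct dentry *parent)
{
	debugfs_create_blob("dump", 0444, parent, &example_blob);
}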
static size_t u32_format_array(char *buf, size_t bufsize,
u32 *array, int array_size)
{
size_t ret = 0;
while (--array_size >= 0) {
size_t len;
char term = array_size ? ' ' : '\n';
len = snprintf(buf, bufsize, "%u%c", *array++, term);
ret += len;
buf += len;
bufsize -= len;
}
return ret;
}
static int u32_array_open(struct inode *inode, struct file *file)
{
struct debugfs_u32_array *data = inode->i_private;
int size, elements = data->n_elements;
char *buf;
/*
* Max size:
* - 10 digits + ' '/'\n' = 11 bytes per number
* - terminating NUL character
*/
size = elements*11;
buf = kmalloc(size+1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[size] = 0;
file->private_data = buf;
u32_format_array(buf, size, data->array, data->n_elements);
return nonseekable_open(inode, file);
}
static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
size_t size = strlen(file->private_data);
return simple_read_from_buffer(buf, len, ppos,
file->private_data, size);
}
static int u32_array_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static const struct file_operations u32_array_fops = {
.owner = THIS_MODULE,
.open = u32_array_open,
.release = u32_array_release,
.read = u32_array_read,
.llseek = no_llseek,
};
/**
* debugfs_create_u32_array - create a debugfs file that is used to read a
* u32 array.
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @array: wrapper struct containing data pointer and size of the array.
*
* This function creates a file in debugfs with the given name that exports
* @array as data. If the @mode variable is so set it can be read from.
* Writing is not supported. Seek within the file is also not supported.
* Once the array is created, its size cannot be changed.
*/
void debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_u32_array *array)
{
debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
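/*
 * Example (editor's sketch, not part of the original file): publishing a
 * fixed-size u32 array; as noted above, the size must not change once
 * the file exists. Names are hypothetical.
 */
static u32 example_counters[4];
static struct debugfs_u32_array example_counter_array = {
	.array = example_counters,
	.n_elements = ARRAY_SIZE(example_counters),
};

static inline void example_expose_u32_array(struct dentry *parent)
{
	debugfs_create_u32_array("counters", 0444, parent,
				 &example_counter_array);
}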
#ifdef CONFIG_HAS_IOMEM
/*
* The regset32 stuff is used to print 32-bit registers using the
* seq_file utilities. We offer printing a register set in an already-opened
* sequential file or create a debugfs file that only prints a regset32.
*/
/**
* debugfs_print_regs32 - use seq_print to describe a set of registers
* @s: the seq_file structure being used to generate output
* @regs: an array of struct debugfs_reg32 structures
* @nregs: the length of the above array
* @base: the base address to be used in reading the registers
* @prefix: a string to be prefixed to every output line
*
* This function outputs a text block describing the current values of
* some 32-bit hardware registers. It is meant to be used within debugfs
* files based on seq_file that need to show registers, intermixed with other
* information. The prefix argument may be used to specify a leading string,
* because some peripherals have several blocks of identical registers,
* for example the configuration of DMA channels.
*/
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix)
{
int i;
for (i = 0; i < nregs; i++, regs++) {
if (prefix)
seq_printf(s, "%s", prefix);
seq_printf(s, "%s = 0x%08x\n", regs->name,
readl(base + regs->offset));
if (seq_has_overflowed(s))
break;
}
}
EXPORT_SYMBOL_GPL(debugfs_print_regs32);
static int debugfs_regset32_show(struct seq_file *s, void *data)
{
struct debugfs_regset32 *regset = s->private;
if (regset->dev)
pm_runtime_get_sync(regset->dev);
debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
if (regset->dev)
pm_runtime_put(regset->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_regset32);
/**
* debugfs_create_regset32 - create a debugfs file that returns register values
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @regset: a pointer to a struct debugfs_regset32, which contains a pointer
* to an array of register definitions, the array size and the base
* address where the register bank is to be found.
*
* This function creates a file in debugfs with the given name that reports
* the names and values of a set of 32-bit registers. If the @mode variable
* is so set it can be read from. Writing is not supported.
*/
void debugfs_create_regset32(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_regset32 *regset)
{
debugfs_create_file(name, mode, parent, regset, &debugfs_regset32_fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_regset32);
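/*
 * Example (editor's sketch, not part of the original file): describing a
 * small register bank for the helper above. Register names, offsets and
 * the mapped base address are hypothetical.
 */
static const struct debugfs_reg32 example_regs[] = {
	{ .name = "ctrl",   .offset = 0x00 },
	{ .name = "status", .offset = 0x04 },
};

static inline void example_expose_regset(struct dentry *parent,
					 void __iomem *base)
{
	static struct debugfs_regset32 regset;

	regset.regs = example_regs;
	regset.nregs = ARRAY_SIZE(example_regs);
	regset.base = base;
	debugfs_create_regset32("regs", 0444, parent, &regset);
}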
#endif /* CONFIG_HAS_IOMEM */
struct debugfs_devm_entry {
int (*read)(struct seq_file *seq, void *data);
struct device *dev;
};
static int debugfs_devm_entry_open(struct inode *inode, struct file *f)
{
struct debugfs_devm_entry *entry = inode->i_private;
return single_open(f, entry->read, entry->dev);
}
static const struct file_operations debugfs_devm_entry_ops = {
.owner = THIS_MODULE,
.open = debugfs_devm_entry_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek
};
/**
* debugfs_create_devm_seqfile - create a debugfs file that is bound to a device.
*
* @dev: device related to this debugfs file.
* @name: name of the debugfs file.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @read_fn: function pointer called to print the seq_file content.
*/
void debugfs_create_devm_seqfile(struct device *dev, const char *name,
struct dentry *parent,
int (*read_fn)(struct seq_file *s, void *data))
{
struct debugfs_devm_entry *entry;
if (IS_ERR(parent))
return;
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
return;
entry->read = read_fn;
entry->dev = dev;
debugfs_create_file(name, S_IRUGO, parent, entry,
&debugfs_devm_entry_ops);
}
EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile);
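/*
 * Example (editor's sketch, not part of the original file): a device-bound
 * seq_file entry that is cleaned up with the device. Per the open helper
 * above, the show callback receives the device as s->private. Names are
 * hypothetical.
 */
static int example_status_show(struct seq_file *s, void *data)
{
	struct device *dev = s->private;

	seq_printf(s, "%s: ok\n", dev_name(dev));
	return 0;
}

static inline void example_expose_status(struct device *dev,
					 struct dentry *parent)
{
	debugfs_create_devm_seqfile(dev, "status", parent,
				    example_status_show);
}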
| linux-master | fs/debugfs/file.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/hfsplus/super.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>
static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_free_inode(struct inode *inode);
#include "hfsplus_fs.h"
#include "xattr.h"
static int hfsplus_system_read_inode(struct inode *inode)
{
struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
hfsplus_inode_read_fork(inode, &vhdr->ext_file);
inode->i_mapping->a_ops = &hfsplus_btree_aops;
break;
case HFSPLUS_CAT_CNID:
hfsplus_inode_read_fork(inode, &vhdr->cat_file);
inode->i_mapping->a_ops = &hfsplus_btree_aops;
break;
case HFSPLUS_ALLOC_CNID:
hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
inode->i_mapping->a_ops = &hfsplus_aops;
break;
case HFSPLUS_START_CNID:
hfsplus_inode_read_fork(inode, &vhdr->start_file);
break;
case HFSPLUS_ATTR_CNID:
hfsplus_inode_read_fork(inode, &vhdr->attr_file);
inode->i_mapping->a_ops = &hfsplus_btree_aops;
break;
default:
return -EIO;
}
return 0;
}
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
struct hfs_find_data fd;
struct inode *inode;
int err;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
mutex_init(&HFSPLUS_I(inode)->extents_lock);
HFSPLUS_I(inode)->flags = 0;
HFSPLUS_I(inode)->extent_state = 0;
HFSPLUS_I(inode)->rsrc_inode = NULL;
atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID) {
err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
if (!err) {
err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
if (!err)
err = hfsplus_cat_read_inode(inode, &fd);
hfs_find_exit(&fd);
}
} else {
err = hfsplus_system_read_inode(inode);
}
if (err) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
return inode;
}
static int hfsplus_system_write_inode(struct inode *inode)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
struct hfsplus_vh *vhdr = sbi->s_vhdr;
struct hfsplus_fork_raw *fork;
struct hfs_btree *tree = NULL;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
fork = &vhdr->ext_file;
tree = sbi->ext_tree;
break;
case HFSPLUS_CAT_CNID:
fork = &vhdr->cat_file;
tree = sbi->cat_tree;
break;
case HFSPLUS_ALLOC_CNID:
fork = &vhdr->alloc_file;
break;
case HFSPLUS_START_CNID:
fork = &vhdr->start_file;
break;
case HFSPLUS_ATTR_CNID:
fork = &vhdr->attr_file;
tree = sbi->attr_tree;
break;
default:
return -EIO;
}
if (fork->total_size != cpu_to_be64(inode->i_size)) {
set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
hfsplus_mark_mdb_dirty(inode->i_sb);
}
hfsplus_inode_write_fork(inode, fork);
if (tree) {
int err = hfs_btree_write(tree);
if (err) {
pr_err("b-tree write err: %d, ino %lu\n",
err, inode->i_ino);
return err;
}
}
return 0;
}
static int hfsplus_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
int err;
hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
err = hfsplus_ext_write_extent(inode);
if (err)
return err;
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID)
return hfsplus_cat_write_inode(inode);
else
return hfsplus_system_write_inode(inode);
}
static void hfsplus_evict_inode(struct inode *inode)
{
hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (HFSPLUS_IS_RSRC(inode)) {
HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFSPLUS_I(inode)->rsrc_inode);
}
}
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_vh *vhdr = sbi->s_vhdr;
int write_backup = 0;
int error, error2;
if (!wait)
return 0;
hfs_dbg(SUPER, "hfsplus_sync_fs\n");
/*
* Explicitly write out the special metadata inodes.
*
* While these special inodes are marked as hashed and written
* out periodically by the flusher threads, we redirty them
* during writeout of normal inodes, and thus the resulting livelock
* prevents us from getting the latest state to disk.
*/
error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
if (!error)
error = error2;
if (sbi->attr_tree) {
error2 =
filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
if (!error)
error = error2;
}
error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
if (!error)
error = error2;
mutex_lock(&sbi->vh_mutex);
mutex_lock(&sbi->alloc_mutex);
vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
vhdr->folder_count = cpu_to_be32(sbi->folder_count);
vhdr->file_count = cpu_to_be32(sbi->file_count);
if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
write_backup = 1;
}
error2 = hfsplus_submit_bio(sb,
sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
sbi->s_vhdr_buf, NULL, REQ_OP_WRITE |
REQ_SYNC);
if (!error)
error = error2;
if (!write_backup)
goto out;
error2 = hfsplus_submit_bio(sb,
sbi->part_start + sbi->sect_count - 2,
sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE |
REQ_SYNC);
if (!error)
	error = error2;
out:
mutex_unlock(&sbi->alloc_mutex);
mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(sb->s_bdev);
return error;
}
static void delayed_sync_fs(struct work_struct *work)
{
int err;
struct hfsplus_sb_info *sbi;
sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
spin_lock(&sbi->work_lock);
sbi->work_queued = 0;
spin_unlock(&sbi->work_lock);
err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
if (err)
pr_err("delayed sync fs err %d\n", err);
}
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
unsigned long delay;
if (sb_rdonly(sb))
return;
spin_lock(&sbi->work_lock);
if (!sbi->work_queued) {
delay = msecs_to_jiffies(dirty_writeback_interval * 10);
queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
sbi->work_queued = 1;
}
spin_unlock(&sbi->work_lock);
}
static void hfsplus_put_super(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
hfs_dbg(SUPER, "hfsplus_put_super\n");
cancel_delayed_work_sync(&sbi->sync_work);
if (!sb_rdonly(sb) && sbi->s_vhdr) {
struct hfsplus_vh *vhdr = sbi->s_vhdr;
vhdr->modify_date = hfsp_now2mt();
vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
hfsplus_sync_fs(sb, 1);
}
iput(sbi->alloc_file);
iput(sbi->hidden_dir);
hfs_btree_close(sbi->attr_tree);
hfs_btree_close(sbi->cat_tree);
hfs_btree_close(sbi->ext_tree);
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
unload_nls(sbi->nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = HFSPLUS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
buf->f_bavail = buf->f_bfree;
buf->f_files = 0xFFFFFFFF;
buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
buf->f_fsid = u64_to_fsid(id);
buf->f_namelen = HFSPLUS_MAX_STRLEN;
return 0;
}
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
if (!(*flags & SB_RDONLY)) {
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
int force = 0;
if (!hfsplus_parse_options_remount(data, &force))
return -EINVAL;
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
*flags |= SB_RDONLY;
} else if (force) {
/* nothing */
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
*flags |= SB_RDONLY;
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
pr_warn("filesystem is marked journaled, leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
*flags |= SB_RDONLY;
}
}
return 0;
}
static const struct super_operations hfsplus_sops = {
.alloc_inode = hfsplus_alloc_inode,
.free_inode = hfsplus_free_inode,
.write_inode = hfsplus_write_inode,
.evict_inode = hfsplus_evict_inode,
.put_super = hfsplus_put_super,
.sync_fs = hfsplus_sync_fs,
.statfs = hfsplus_statfs,
.remount_fs = hfsplus_remount,
.show_options = hfsplus_show_options,
};
static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
struct hfsplus_vh *vhdr;
struct hfsplus_sb_info *sbi;
hfsplus_cat_entry entry;
struct hfs_find_data fd;
struct inode *root, *inode;
struct qstr str;
struct nls_table *nls = NULL;
u64 last_fs_block, last_fs_page;
int err;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto out;
sb->s_fs_info = sbi;
mutex_init(&sbi->alloc_mutex);
mutex_init(&sbi->vh_mutex);
spin_lock_init(&sbi->work_lock);
INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
hfsplus_fill_defaults(sbi);
err = -EINVAL;
if (!hfsplus_parse_options(data, sbi)) {
pr_err("unable to parse mount options\n");
goto out_unload_nls;
}
/* temporarily use utf8 to correctly find the hidden dir below */
nls = sbi->nls;
sbi->nls = load_nls("utf8");
if (!sbi->nls) {
pr_err("unable to load nls for utf8\n");
goto out_unload_nls;
}
/* Grab the volume header */
if (hfsplus_read_wrapper(sb)) {
if (!silent)
pr_warn("unable to find HFS+ superblock\n");
goto out_unload_nls;
}
vhdr = sbi->s_vhdr;
/* Copy parts of the volume header into the superblock */
sb->s_magic = HFSPLUS_VOLHEAD_SIG;
if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
pr_err("wrong filesystem version\n");
goto out_free_vhdr;
}
sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
sbi->file_count = be32_to_cpu(vhdr->file_count);
sbi->folder_count = be32_to_cpu(vhdr->folder_count);
sbi->data_clump_blocks =
be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
if (!sbi->data_clump_blocks)
sbi->data_clump_blocks = 1;
sbi->rsrc_clump_blocks =
be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
if (!sbi->rsrc_clump_blocks)
sbi->rsrc_clump_blocks = 1;
err = -EFBIG;
last_fs_block = sbi->total_blocks - 1;
last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
PAGE_SHIFT;
if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
(last_fs_page > (pgoff_t)(~0ULL))) {
pr_err("filesystem size too large\n");
goto out_free_vhdr;
}
/* Set up operations so we can load metadata */
sb->s_op = &hfsplus_sops;
sb->s_maxbytes = MAX_LFS_FILESIZE;
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
sb->s_flags |= SB_RDONLY;
} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
/* nothing */
} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("Filesystem is marked locked, mounting read-only.\n");
sb->s_flags |= SB_RDONLY;
} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
!sb_rdonly(sb)) {
pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
sb->s_flags |= SB_RDONLY;
}
err = -EINVAL;
/* Load metadata objects (B*Trees) */
sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
if (!sbi->ext_tree) {
pr_err("failed to load extents file\n");
goto out_free_vhdr;
}
sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
if (!sbi->cat_tree) {
pr_err("failed to load catalog file\n");
goto out_close_ext_tree;
}
atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
if (vhdr->attr_file.total_blocks != 0) {
sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
if (!sbi->attr_tree) {
pr_err("failed to load attributes file\n");
goto out_close_cat_tree;
}
atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
}
sb->s_xattr = hfsplus_xattr_handlers;
inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
if (IS_ERR(inode)) {
pr_err("failed to load allocation file\n");
err = PTR_ERR(inode);
goto out_close_attr_tree;
}
sbi->alloc_file = inode;
/* Load the root directory */
root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
if (IS_ERR(root)) {
pr_err("failed to load root directory\n");
err = PTR_ERR(root);
goto out_put_alloc_file;
}
sb->s_d_op = &hfsplus_dentry_operations;
sb->s_root = d_make_root(root);
if (!sb->s_root) {
err = -ENOMEM;
goto out_put_alloc_file;
}
str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
str.name = HFSP_HIDDENDIR_NAME;
err = hfs_find_init(sbi->cat_tree, &fd);
if (err)
goto out_put_root;
err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
if (unlikely(err < 0)) {
	/* release the find data on the error path */
	hfs_find_exit(&fd);
	goto out_put_root;
}
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
err = -EINVAL;
goto out_put_root;
}
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_put_root;
}
sbi->hidden_dir = inode;
} else
hfs_find_exit(&fd);
if (!sb_rdonly(sb)) {
/*
* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
* all three are registered with Apple for our use
*/
vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
vhdr->modify_date = hfsp_now2mt();
be32_add_cpu(&vhdr->write_count, 1);
vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
hfsplus_sync_fs(sb, 1);
if (!sbi->hidden_dir) {
mutex_lock(&sbi->vh_mutex);
sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
if (!sbi->hidden_dir) {
mutex_unlock(&sbi->vh_mutex);
err = -ENOMEM;
goto out_put_root;
}
err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
&str, sbi->hidden_dir);
if (err) {
mutex_unlock(&sbi->vh_mutex);
goto out_put_hidden_dir;
}
err = hfsplus_init_security(sbi->hidden_dir,
root, &str);
if (err == -EOPNOTSUPP)
err = 0; /* Operation is not supported. */
else if (err) {
/*
* Try to delete anyway without
* error analysis.
*/
hfsplus_delete_cat(sbi->hidden_dir->i_ino,
root, &str);
mutex_unlock(&sbi->vh_mutex);
goto out_put_hidden_dir;
}
mutex_unlock(&sbi->vh_mutex);
hfsplus_mark_inode_dirty(sbi->hidden_dir,
HFSPLUS_I_CAT_DIRTY);
}
}
unload_nls(sbi->nls);
sbi->nls = nls;
return 0;
out_put_hidden_dir:
cancel_delayed_work_sync(&sbi->sync_work);
iput(sbi->hidden_dir);
out_put_root:
dput(sb->s_root);
sb->s_root = NULL;
out_put_alloc_file:
iput(sbi->alloc_file);
out_close_attr_tree:
hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
unload_nls(sbi->nls);
unload_nls(nls);
kfree(sbi);
out:
return err;
}
MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");
static struct kmem_cache *hfsplus_inode_cachep;
static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
struct hfsplus_inode_info *i;
i = alloc_inode_sb(sb, hfsplus_inode_cachep, GFP_KERNEL);
return i ? &i->vfs_inode : NULL;
}
static void hfsplus_free_inode(struct inode *inode)
{
kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}
#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}
static struct file_system_type hfsplus_fs_type = {
.owner = THIS_MODULE,
.name = "hfsplus",
.mount = hfsplus_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");
static void hfsplus_init_once(void *p)
{
struct hfsplus_inode_info *i = p;
inode_init_once(&i->vfs_inode);
}
static int __init init_hfsplus_fs(void)
{
int err;
hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
hfsplus_init_once);
if (!hfsplus_inode_cachep)
return -ENOMEM;
err = hfsplus_create_attr_tree_cache();
if (err)
goto destroy_inode_cache;
err = register_filesystem(&hfsplus_fs_type);
if (err)
goto destroy_attr_tree_cache;
return 0;
destroy_attr_tree_cache:
hfsplus_destroy_attr_tree_cache();
destroy_inode_cache:
kmem_cache_destroy(hfsplus_inode_cachep);
return err;
}
static void __exit exit_hfsplus_fs(void)
{
unregister_filesystem(&hfsplus_fs_type);
/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
*/
rcu_barrier();
hfsplus_destroy_attr_tree_cache();
kmem_cache_destroy(hfsplus_inode_cachep);
}
module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)
| linux-master | fs/hfsplus/super.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/extents.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handling of Extents both in catalog and extents overflow trees
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Compare two extent keys; returns 0 if equal, else -1 or 1 by key order */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1id, k2id;
__be32 k1s, k2s;
k1id = k1->ext.cnid;
k2id = k2->ext.cnid;
if (k1id != k2id)
return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;
if (k1->ext.fork_type != k2->ext.fork_type)
return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;
k1s = k1->ext.start_block;
k2s = k2->ext.start_block;
if (k1s == k2s)
return 0;
return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}
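/*
 * Build a search key for the extents overflow tree from the file's
 * CNID, the fork-relative start block, and the fork type (data or
 * resource).
 */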
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
u32 block, u8 type)
{
key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
key->ext.cnid = cpu_to_be32(cnid);
key->ext.start_block = cpu_to_be32(block);
key->ext.fork_type = type;
key->ext.pad = 0;
}
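/*
 * Map a record-relative block offset to an on-disk allocation block by
 * walking the eight extents of a record. Returns 0 if the offset lies
 * beyond the blocks covered by the record.
 */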
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
int i;
u32 count;
for (i = 0; i < 8; ext++, i++) {
count = be32_to_cpu(ext->block_count);
if (off < count)
return be32_to_cpu(ext->start_block) + off;
off -= count;
}
/* panic? */
return 0;
}
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
int i;
u32 count = 0;
for (i = 0; i < 8; ext++, i++)
count += be32_to_cpu(ext->block_count);
return count;
}
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
int i;
ext += 7;
for (i = 0; i < 7; ext--, i++)
if (ext->block_count)
break;
return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}
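/*
 * Write the inode's cached extent record back into the extents
 * overflow tree, inserting a new btree record if the cached extents
 * have never been stored. The caller must hold hip->extents_lock and
 * pass a find_data initialized on the extents tree.
 */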
static int __hfsplus_ext_write_extent(struct inode *inode,
struct hfs_find_data *fd)
{
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res;
WARN_ON(!mutex_is_locked(&hip->extents_lock));
hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
HFSPLUS_IS_RSRC(inode) ?
HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return res;
/* Fail early and avoid ENOSPC during the btree operation */
res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
if (res)
return res;
hfs_brec_insert(fd, hip->cached_extents,
sizeof(hfsplus_extent_rec));
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
} else {
if (res)
return res;
hfs_bnode_write(fd->bnode, hip->cached_extents,
fd->entryoffset, fd->entrylength);
hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
}
/*
* We can't just use hfsplus_mark_inode_dirty here, because we
* also get called from hfsplus_write_inode, which should not
* redirty the inode. Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
*/
set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
return 0;
}
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
int res = 0;
if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
struct hfs_find_data fd;
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
if (res)
return res;
res = __hfsplus_ext_write_extent(inode, &fd);
hfs_find_exit(&fd);
}
return res;
}
int hfsplus_ext_write_extent(struct inode *inode)
{
int res;
mutex_lock(&HFSPLUS_I(inode)->extents_lock);
res = hfsplus_ext_write_extent_locked(inode);
mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
return res;
}
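/*
 * Look up the extent record covering @block of file @cnid in the
 * extents overflow tree and copy it into @extent. Returns -ENOENT if
 * no matching record exists.
 */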
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
struct hfsplus_extent *extent,
u32 cnid, u32 block, u8 type)
{
int res;
hfsplus_ext_build_key(fd->search_key, cnid, block, type);
fd->key->ext.cnid = 0;
res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (res && res != -ENOENT)
return res;
if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
fd->key->ext.fork_type != fd->search_key->ext.fork_type)
return -ENOENT;
if (fd->entrylength != sizeof(hfsplus_extent_rec))
return -EIO;
hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
sizeof(hfsplus_extent_rec));
return 0;
}
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
struct inode *inode, u32 block)
{
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res;
WARN_ON(!mutex_is_locked(&hip->extents_lock));
if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
res = __hfsplus_ext_write_extent(inode, fd);
if (res)
return res;
}
res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
block, HFSPLUS_IS_RSRC(inode) ?
HFSPLUS_TYPE_RSRC :
HFSPLUS_TYPE_DATA);
if (!res) {
hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
hip->cached_blocks =
hfsplus_ext_block_count(hip->cached_extents);
} else {
hip->cached_start = hip->cached_blocks = 0;
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
}
return res;
}
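/*
 * Make sure the extent record covering @block is cached in the inode,
 * reading it from the extents overflow tree if necessary.
 */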
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfs_find_data fd;
int res;
if (block >= hip->cached_start &&
block < hip->cached_start + hip->cached_blocks)
return 0;
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
if (!res) {
res = __hfsplus_ext_cache_extent(&fd, inode, block);
hfs_find_exit(&fd);
}
return res;
}
/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res = -EIO;
u32 ablock, dblock, mask;
sector_t sector;
int was_dirty = 0;
/* Convert inode block to disk allocation block */
ablock = iblock >> sbi->fs_shift;
if (iblock >= hip->fs_blocks) {
if (!create)
return 0;
if (iblock > hip->fs_blocks)
return -EIO;
if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode, false);
if (res)
return res;
}
} else
create = 0;
if (ablock < hip->first_blocks) {
dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
goto done;
}
if (inode->i_ino == HFSPLUS_EXT_CNID)
return -EIO;
mutex_lock(&hip->extents_lock);
/*
* hfsplus_ext_read_extent will write out a cached extent into
* the extents btree. In that case we may have to mark the inode
* dirty even for a pure read of an extent here.
*/
was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
res = hfsplus_ext_read_extent(inode, ablock);
if (res) {
mutex_unlock(&hip->extents_lock);
return -EIO;
}
dblock = hfsplus_ext_find_block(hip->cached_extents,
ablock - hip->cached_start);
mutex_unlock(&hip->extents_lock);
done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (unsigned long long)iblock, dblock);
mask = (1 << sbi->fs_shift) - 1;
sector = ((sector_t)dblock << sbi->fs_shift) +
sbi->blockoffset + (iblock & mask);
map_bh(bh_result, sb, sector);
if (create) {
set_buffer_new(bh_result);
hip->phys_size += sb->s_blocksize;
hip->fs_blocks++;
inode_add_bytes(inode, sb->s_blocksize);
}
if (create || was_dirty)
mark_inode_dirty(inode);
return 0;
}
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
int i;
hfs_dbg(EXTENT, " ");
for (i = 0; i < 8; i++)
hfs_dbg_cont(EXTENT, " %u:%u",
be32_to_cpu(extent[i].start_block),
be32_to_cpu(extent[i].block_count));
hfs_dbg_cont(EXTENT, "\n");
}
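/*
 * Record @block_count newly allocated blocks starting at @alloc_block
 * in an extent record, merging with the preceding extent when the new
 * blocks are contiguous. Returns -ENOSPC once all eight slots are in
 * use.
 */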
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
u32 alloc_block, u32 block_count)
{
u32 count, start;
int i;
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
count = be32_to_cpu(extent->block_count);
if (offset == count) {
start = be32_to_cpu(extent->start_block);
if (alloc_block != start + count) {
if (++i >= 8)
return -ENOSPC;
extent++;
extent->start_block = cpu_to_be32(alloc_block);
} else
block_count += count;
extent->block_count = cpu_to_be32(block_count);
return 0;
} else if (offset < count)
break;
offset -= count;
}
/* panic? */
return -EIO;
}
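/*
 * Free @block_nr allocation blocks from the tail of an extent record,
 * starting at the record-relative end offset @offset and walking the
 * extents backwards, clearing any extent that becomes fully unused.
 */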
static int hfsplus_free_extents(struct super_block *sb,
struct hfsplus_extent *extent,
u32 offset, u32 block_nr)
{
u32 count, start;
int i;
int err = 0;
/* Mapping the allocation file may lock the extent tree */
WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
count = be32_to_cpu(extent->block_count);
if (offset == count)
goto found;
else if (offset < count)
break;
offset -= count;
}
/* panic? */
return -EIO;
found:
for (;;) {
start = be32_to_cpu(extent->start_block);
if (count <= block_nr) {
err = hfsplus_block_free(sb, start, count);
if (err) {
pr_err("can't free extent\n");
hfs_dbg(EXTENT, " start: %u count: %u\n",
start, count);
}
extent->block_count = 0;
extent->start_block = 0;
block_nr -= count;
} else {
count -= block_nr;
err = hfsplus_block_free(sb, start + count, block_nr);
if (err) {
pr_err("can't free extent\n");
hfs_dbg(EXTENT, " start: %u count: %u\n",
start, count);
}
extent->block_count = cpu_to_be32(count);
block_nr = 0;
}
if (!block_nr || !i) {
/*
* Try to free all extents and
* return only last error
*/
return err;
}
i--;
extent--;
count = be32_to_cpu(extent->block_count);
}
}
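/*
 * Free all allocation blocks of a fork: first the extents stored in
 * the catalog record itself, then any overflow records, which are
 * removed from the extents tree as their blocks are freed.
 */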
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
struct hfsplus_fork_raw *fork, int type)
{
struct hfs_find_data fd;
hfsplus_extent_rec ext_entry;
u32 total_blocks, blocks, start;
int res, i;
total_blocks = be32_to_cpu(fork->total_blocks);
if (!total_blocks)
return 0;
blocks = 0;
for (i = 0; i < 8; i++)
blocks += be32_to_cpu(fork->extents[i].block_count);
res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
if (res)
return res;
if (total_blocks == blocks)
return 0;
res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
if (res)
return res;
do {
res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
total_blocks, type);
if (res)
break;
start = be32_to_cpu(fd.key->ext.start_block);
hfs_brec_remove(&fd);
mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, ext_entry, total_blocks - start,
total_blocks);
total_blocks = start;
mutex_lock(&fd.tree->tree_lock);
} while (total_blocks > blocks);
hfs_find_exit(&fd);
return res;
}
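/*
 * Allocate more blocks for an inode, preferring blocks contiguous with
 * its current last extent. The new allocation is recorded in the
 * inode's first extents, merged into the cached extent record, or put
 * into a brand new record in the extents overflow tree.
 */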
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
struct super_block *sb = inode->i_sb;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 start, len, goal;
int res;
if (sbi->alloc_file->i_size * 8 <
sbi->total_blocks - sbi->free_blocks + 8) {
/* extend alloc file */
pr_err_ratelimited("extend alloc file! (%llu,%u,%u)\n",
sbi->alloc_file->i_size * 8,
sbi->total_blocks, sbi->free_blocks);
return -ENOSPC;
}
mutex_lock(&hip->extents_lock);
if (hip->alloc_blocks == hip->first_blocks)
goal = hfsplus_ext_lastblock(hip->first_extents);
else {
res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
if (res)
goto out;
goal = hfsplus_ext_lastblock(hip->cached_extents);
}
len = hip->clump_blocks;
start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
if (start >= sbi->total_blocks) {
start = hfsplus_block_allocate(sb, goal, 0, &len);
if (start >= goal) {
res = -ENOSPC;
goto out;
}
}
if (zeroout) {
res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
if (res)
goto out;
}
hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
if (hip->alloc_blocks <= hip->first_blocks) {
if (!hip->first_blocks) {
hfs_dbg(EXTENT, "first extents\n");
/* no extents yet */
hip->first_extents[0].start_block = cpu_to_be32(start);
hip->first_extents[0].block_count = cpu_to_be32(len);
res = 0;
} else {
/* try to append to extents in inode */
res = hfsplus_add_extent(hip->first_extents,
hip->alloc_blocks,
start, len);
if (res == -ENOSPC)
goto insert_extent;
}
if (!res) {
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks += len;
}
} else {
res = hfsplus_add_extent(hip->cached_extents,
hip->alloc_blocks - hip->cached_start,
start, len);
if (!res) {
hfsplus_dump_extent(hip->cached_extents);
hip->extent_state |= HFSPLUS_EXT_DIRTY;
hip->cached_blocks += len;
} else if (res == -ENOSPC)
goto insert_extent;
}
out:
if (!res) {
hip->alloc_blocks += len;
mutex_unlock(&hip->extents_lock);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
return 0;
}
mutex_unlock(&hip->extents_lock);
return res;
insert_extent:
hfs_dbg(EXTENT, "insert new extent\n");
res = hfsplus_ext_write_extent_locked(inode);
if (res)
goto out;
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->cached_extents[0].start_block = cpu_to_be32(start);
hip->cached_extents[0].block_count = cpu_to_be32(len);
hfsplus_dump_extent(hip->cached_extents);
hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
hip->cached_start = hip->alloc_blocks;
hip->cached_blocks = len;
res = 0;
goto out;
}
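/*
 * Shrink the allocation of an inode to match i_size, freeing whole
 * allocation blocks from the end of the file and trimming or removing
 * extent records as needed. A grown i_size is handled by zero-filling
 * through write_begin/write_end instead.
 */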
void hfsplus_file_truncate(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfs_find_data fd;
u32 alloc_cnt, blk_cnt, start;
int res;
	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (unsigned long long)hip->phys_size,
		(unsigned long long)inode->i_size);
if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata = NULL;
loff_t size = inode->i_size;
res = hfsplus_write_begin(NULL, mapping, size, 0,
&page, &fsdata);
if (res)
return;
res = generic_write_end(NULL, mapping, size, 0, 0,
page, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);
return;
} else if (inode->i_size == hip->phys_size)
return;
blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
HFSPLUS_SB(sb)->alloc_blksz_shift;
mutex_lock(&hip->extents_lock);
alloc_cnt = hip->alloc_blocks;
if (blk_cnt == alloc_cnt)
goto out_unlock;
res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
if (res) {
mutex_unlock(&hip->extents_lock);
/* XXX: We lack error handling of hfsplus_file_truncate() */
return;
}
while (1) {
if (alloc_cnt == hip->first_blocks) {
mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, hip->first_extents,
alloc_cnt, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks = blk_cnt;
mutex_lock(&fd.tree->tree_lock);
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
start = hip->cached_start;
if (blk_cnt <= start)
hfs_brec_remove(&fd);
mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
mutex_lock(&fd.tree->tree_lock);
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;
}
alloc_cnt = start;
hip->cached_start = hip->cached_blocks = 0;
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
}
hfs_find_exit(&fd);
hip->alloc_blocks = blk_cnt;
out_unlock:
mutex_unlock(&hip->extents_lock);
hip->phys_size = inode->i_size;
hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}
| linux-master | fs/hfsplus/extents.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/bnode.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handle basic btree node operations
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
struct page **pagep;
int l;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
memcpy_from_page(buf, *pagep, off, l);
while ((len -= l) != 0) {
buf += l;
l = min_t(int, len, PAGE_SIZE);
memcpy_from_page(buf, *++pagep, 0, l);
}
}
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
__be16 data;
/* TODO: optimize later... */
hfs_bnode_read(node, &data, off, 2);
return be16_to_cpu(data);
}
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
u8 data;
/* TODO: optimize later... */
hfs_bnode_read(node, &data, off, 1);
return data;
}
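/*
 * Read a key from a node. Leaf keys, and index keys in trees with
 * variable-sized index keys (always the case for the attributes tree),
 * are length-prefixed; otherwise the tree's fixed max_key_len is used.
 */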
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
struct hfs_btree *tree;
int key_len;
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
tree->attributes & HFS_TREE_VARIDXKEYS ||
node->tree->cnid == HFSPLUS_ATTR_CNID)
key_len = hfs_bnode_read_u16(node, off) + 2;
else
key_len = tree->max_key_len + 2;
hfs_bnode_read(node, key, off, key_len);
}
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
struct page **pagep;
int l;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
memcpy_to_page(*pagep, off, buf, l);
set_page_dirty(*pagep);
while ((len -= l) != 0) {
buf += l;
l = min_t(int, len, PAGE_SIZE);
memcpy_to_page(*++pagep, 0, buf, l);
set_page_dirty(*pagep);
}
}
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
__be16 v = cpu_to_be16(data);
/* TODO: optimize later... */
hfs_bnode_write(node, &v, off, 2);
}
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
struct page **pagep;
int l;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
memzero_page(*pagep, off, l);
set_page_dirty(*pagep);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
memzero_page(*++pagep, 0, l);
set_page_dirty(*pagep);
}
}
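/*
 * Copy bytes between two nodes, walking the page arrays of both and
 * coping with arbitrary page-relative alignment of source and
 * destination.
 */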
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
struct hfs_bnode *src_node, int src, int len)
{
struct page **src_page, **dst_page;
int l;
hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
if (!len)
return;
src += src_node->page_offset;
dst += dst_node->page_offset;
src_page = src_node->page + (src >> PAGE_SHIFT);
src &= ~PAGE_MASK;
dst_page = dst_node->page + (dst >> PAGE_SHIFT);
dst &= ~PAGE_MASK;
if (src == dst) {
l = min_t(int, len, PAGE_SIZE - src);
memcpy_page(*dst_page, src, *src_page, src, l);
set_page_dirty(*dst_page);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
memcpy_page(*++dst_page, 0, *++src_page, 0, l);
set_page_dirty(*dst_page);
}
} else {
void *src_ptr, *dst_ptr;
do {
dst_ptr = kmap_local_page(*dst_page) + dst;
src_ptr = kmap_local_page(*src_page) + src;
if (PAGE_SIZE - src < PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
l = min(len, l);
memcpy(dst_ptr, src_ptr, l);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
if (!dst)
dst_page++;
else
src_page++;
} while ((len -= l));
}
}
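/*
 * Move bytes within a node, memmove-style: the copy runs backwards
 * when the destination is above the source so overlapping ranges are
 * handled correctly.
 */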
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
struct page **src_page, **dst_page;
void *src_ptr, *dst_ptr;
int l;
hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
if (!len)
return;
src += node->page_offset;
dst += node->page_offset;
if (dst > src) {
src += len - 1;
src_page = node->page + (src >> PAGE_SHIFT);
src = (src & ~PAGE_MASK) + 1;
dst += len - 1;
dst_page = node->page + (dst >> PAGE_SHIFT);
dst = (dst & ~PAGE_MASK) + 1;
if (src == dst) {
while (src < len) {
dst_ptr = kmap_local_page(*dst_page);
src_ptr = kmap_local_page(*src_page);
memmove(dst_ptr, src_ptr, src);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
len -= src;
src = PAGE_SIZE;
src_page--;
dst_page--;
}
src -= len;
dst_ptr = kmap_local_page(*dst_page);
src_ptr = kmap_local_page(*src_page);
memmove(dst_ptr + src, src_ptr + src, len);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
} else {
do {
dst_ptr = kmap_local_page(*dst_page) + dst;
src_ptr = kmap_local_page(*src_page) + src;
if (src < dst) {
l = src;
src = PAGE_SIZE;
dst -= l;
} else {
l = dst;
src -= l;
dst = PAGE_SIZE;
}
l = min(len, l);
memmove(dst_ptr - l, src_ptr - l, l);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
if (dst == PAGE_SIZE)
dst_page--;
else
src_page--;
} while ((len -= l));
}
} else {
src_page = node->page + (src >> PAGE_SHIFT);
src &= ~PAGE_MASK;
dst_page = node->page + (dst >> PAGE_SHIFT);
dst &= ~PAGE_MASK;
if (src == dst) {
l = min_t(int, len, PAGE_SIZE - src);
dst_ptr = kmap_local_page(*dst_page) + src;
src_ptr = kmap_local_page(*src_page) + src;
memmove(dst_ptr, src_ptr, l);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
dst_ptr = kmap_local_page(*++dst_page);
src_ptr = kmap_local_page(*++src_page);
memmove(dst_ptr, src_ptr, l);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
}
} else {
do {
dst_ptr = kmap_local_page(*dst_page) + dst;
src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src < PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
l = min(len, l);
memmove(dst_ptr, src_ptr, l);
kunmap_local(src_ptr);
set_page_dirty(*dst_page);
kunmap_local(dst_ptr);
if (!dst)
dst_page++;
else
src_page++;
} while ((len -= l));
}
}
}
void hfs_bnode_dump(struct hfs_bnode *node)
{
struct hfs_bnode_desc desc;
__be32 cnid;
int i, off, key_off;
hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
hfs_bnode_read(node, &desc, 0, sizeof(desc));
hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
desc.type, desc.height, be16_to_cpu(desc.num_recs));
off = node->tree->node_size - 2;
for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
key_off = hfs_bnode_read_u16(node, off);
hfs_dbg(BNODE_MOD, " %d", key_off);
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
node->tree->cnid == HFSPLUS_ATTR_CNID)
tmp = hfs_bnode_read_u16(node, key_off) + 2;
else
tmp = node->tree->max_key_len + 2;
hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
} else if (i && node->type == HFS_NODE_LEAF) {
int tmp;
tmp = hfs_bnode_read_u16(node, key_off);
hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
}
}
hfs_dbg_cont(BNODE_MOD, "\n");
}
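/*
 * Unlink a node from the doubly linked node chain, updating its
 * neighbours (or the tree's leaf head/tail) and marking the node
 * deleted so that the final put returns it to the node bitmap.
 */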
void hfs_bnode_unlink(struct hfs_bnode *node)
{
struct hfs_btree *tree;
struct hfs_bnode *tmp;
__be32 cnid;
tree = node->tree;
if (node->prev) {
tmp = hfs_bnode_find(tree, node->prev);
if (IS_ERR(tmp))
return;
tmp->next = node->next;
cnid = cpu_to_be32(tmp->next);
hfs_bnode_write(tmp, &cnid,
offsetof(struct hfs_bnode_desc, next), 4);
hfs_bnode_put(tmp);
} else if (node->type == HFS_NODE_LEAF)
tree->leaf_head = node->next;
if (node->next) {
tmp = hfs_bnode_find(tree, node->next);
if (IS_ERR(tmp))
return;
tmp->prev = node->prev;
cnid = cpu_to_be32(tmp->prev);
hfs_bnode_write(tmp, &cnid,
offsetof(struct hfs_bnode_desc, prev), 4);
hfs_bnode_put(tmp);
} else if (node->type == HFS_NODE_LEAF)
tree->leaf_tail = node->prev;
/* move down? */
if (!node->prev && !node->next)
hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
if (!node->parent) {
tree->root = 0;
tree->depth = 0;
}
set_bit(HFS_BNODE_DELETED, &node->flags);
}
static inline int hfs_bnode_hash(u32 num)
{
num = (num >> 16) + num;
num += num >> 8;
return num & (NODE_HASH_SIZE - 1);
}
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
struct hfs_bnode *node;
if (cnid >= tree->node_count) {
pr_err("request for non-existent node %d in B*Tree\n",
cnid);
return NULL;
}
for (node = tree->node_hash[hfs_bnode_hash(cnid)];
node; node = node->next_hash)
if (node->this == cnid)
return node;
return NULL;
}
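/*
 * Allocate an in-memory bnode for @cnid, insert it into the node hash
 * and read in its backing pages. If another task hashed the same node
 * first, the duplicate is dropped and the existing node returned
 * instead.
 */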
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
struct hfs_bnode *node, *node2;
struct address_space *mapping;
struct page *page;
int size, block, i, hash;
loff_t off;
if (cnid >= tree->node_count) {
pr_err("request for non-existent node %d in B*Tree\n",
cnid);
return NULL;
}
size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
sizeof(struct page *);
node = kzalloc(size, GFP_KERNEL);
if (!node)
return NULL;
node->tree = tree;
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
atomic_set(&node->refcnt, 1);
hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
spin_lock(&tree->hash_lock);
node2 = hfs_bnode_findhash(tree, cnid);
if (!node2) {
hash = hfs_bnode_hash(cnid);
node->next_hash = tree->node_hash[hash];
tree->node_hash[hash] = node;
tree->node_hash_cnt++;
} else {
spin_unlock(&tree->hash_lock);
kfree(node);
wait_event(node2->lock_wq,
!test_bit(HFS_BNODE_NEW, &node2->flags));
return node2;
}
spin_unlock(&tree->hash_lock);
mapping = tree->inode->i_mapping;
off = (loff_t)cnid << tree->node_size_shift;
block = off >> PAGE_SHIFT;
node->page_offset = off & ~PAGE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
node->page[i] = page;
}
return node;
fail:
set_bit(HFS_BNODE_ERROR, &node->flags);
return node;
}
void hfs_bnode_unhash(struct hfs_bnode *node)
{
struct hfs_bnode **p;
hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
;
BUG_ON(!*p);
*p = node->next_hash;
node->tree->node_hash_cnt--;
}
/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
struct hfs_bnode *node;
struct hfs_bnode_desc *desc;
int i, rec_off, off, next_off;
int entry_size, key_size;
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, num);
if (node) {
hfs_bnode_get(node);
spin_unlock(&tree->hash_lock);
wait_event(node->lock_wq,
!test_bit(HFS_BNODE_NEW, &node->flags));
if (test_bit(HFS_BNODE_ERROR, &node->flags))
goto node_error;
return node;
}
spin_unlock(&tree->hash_lock);
node = __hfs_bnode_create(tree, num);
if (!node)
return ERR_PTR(-ENOMEM);
if (test_bit(HFS_BNODE_ERROR, &node->flags))
goto node_error;
if (!test_bit(HFS_BNODE_NEW, &node->flags))
return node;
desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
node->page_offset);
node->prev = be32_to_cpu(desc->prev);
node->next = be32_to_cpu(desc->next);
node->num_recs = be16_to_cpu(desc->num_recs);
node->type = desc->type;
node->height = desc->height;
kunmap_local(desc);
switch (node->type) {
case HFS_NODE_HEADER:
case HFS_NODE_MAP:
if (node->height != 0)
goto node_error;
break;
case HFS_NODE_LEAF:
if (node->height != 1)
goto node_error;
break;
case HFS_NODE_INDEX:
if (node->height <= 1 || node->height > tree->depth)
goto node_error;
break;
default:
goto node_error;
}
rec_off = tree->node_size - 2;
off = hfs_bnode_read_u16(node, rec_off);
if (off != sizeof(struct hfs_bnode_desc))
goto node_error;
for (i = 1; i <= node->num_recs; off = next_off, i++) {
rec_off -= 2;
next_off = hfs_bnode_read_u16(node, rec_off);
if (next_off <= off ||
next_off > tree->node_size ||
next_off & 1)
goto node_error;
entry_size = next_off - off;
if (node->type != HFS_NODE_INDEX &&
node->type != HFS_NODE_LEAF)
continue;
key_size = hfs_bnode_read_u16(node, off) + 2;
if (key_size >= entry_size || key_size & 1)
goto node_error;
}
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
return node;
node_error:
set_bit(HFS_BNODE_ERROR, &node->flags);
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
hfs_bnode_put(node);
return ERR_PTR(-EIO);
}
void hfs_bnode_free(struct hfs_bnode *node)
{
int i;
for (i = 0; i < node->tree->pages_per_bnode; i++)
if (node->page[i])
put_page(node->page[i]);
kfree(node);
}
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
struct hfs_bnode *node;
struct page **pagep;
int i;
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, num);
spin_unlock(&tree->hash_lock);
if (node) {
pr_crit("new node %u already hashed?\n", num);
WARN_ON(1);
return node;
}
node = __hfs_bnode_create(tree, num);
if (!node)
return ERR_PTR(-ENOMEM);
if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
hfs_bnode_put(node);
return ERR_PTR(-EIO);
}
pagep = node->page;
memzero_page(*pagep, node->page_offset,
min_t(int, PAGE_SIZE, tree->node_size));
set_page_dirty(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
memzero_page(*++pagep, 0, PAGE_SIZE);
set_page_dirty(*pagep);
}
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
return node;
}
void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
atomic_inc(&node->refcnt);
hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
}
}
/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
if (node) {
struct hfs_btree *tree = node->tree;
int i;
hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
BUG_ON(!atomic_read(&node->refcnt));
if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
return;
for (i = 0; i < tree->pages_per_bnode; i++) {
if (!node->page[i])
continue;
mark_page_accessed(node->page[i]);
}
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
hfs_bnode_unhash(node);
spin_unlock(&tree->hash_lock);
if (hfs_bnode_need_zeroout(tree))
hfs_bnode_clear(node, 0, tree->node_size);
hfs_bmap_free(node);
hfs_bnode_free(node);
return;
}
spin_unlock(&tree->hash_lock);
}
}
/*
* Unused nodes have to be zeroed if this is the catalog tree and
* a corresponding flag in the volume header is set.
*/
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
struct super_block *sb = tree->inode->i_sb;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
return tree->cnid == HFSPLUS_CAT_CNID &&
volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}
| linux-master | fs/hfsplus/bnode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/btree.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handle opening/closing btree
*/
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/*
 * The clump size calculation below was originally taken from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
*/
#define CLUMP_ENTRIES 15
static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
* Volume Attributes Catalog Extents
* Size Clump (MB) Clump (MB) Clump (MB)
*/
/* 1GB */ 4, 4, 4,
/* 2GB */ 6, 6, 4,
/* 4GB */ 8, 8, 4,
/* 8GB */ 11, 11, 5,
/*
* For volumes 16GB and larger, we want to make sure that a full OS
* install won't require fragmentation of the Catalog or Attributes
* B-trees. We do this by making the clump sizes sufficiently large,
* and by leaving a gap after the B-trees for them to grow into.
*
* For SnowLeopard 10A298, a FullNetInstall with all packages selected
* results in:
* Catalog B-tree Header
* nodeSize: 8192
* totalNodes: 31616
* freeNodes: 1978
* (used = 231.55 MB)
* Attributes B-tree Header
* nodeSize: 8192
* totalNodes: 63232
* freeNodes: 958
* (used = 486.52 MB)
*
* We also want Time Machine backup volumes to have a sufficiently
* large clump size to reduce fragmentation.
*
* The series of numbers for Catalog and Attribute form a geometric
* series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
* the previous term. For Attributes (16GB to 512GB), each term is
* 4**(1/5) times the previous term. For 1TB to 16TB, each term is
* 2**(1/5) times the previous term.
*/
/* 16GB */ 64, 32, 5,
/* 32GB */ 84, 49, 6,
/* 64GB */ 111, 74, 7,
/* 128GB */ 147, 111, 8,
/* 256GB */ 194, 169, 9,
/* 512GB */ 256, 256, 11,
/* 1TB */ 294, 294, 14,
/* 2TB */ 338, 338, 16,
/* 4TB */ 388, 388, 20,
/* 8TB */ 446, 446, 25,
/* 16TB */ 512, 512, 32
};
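/*
 * Pick the clump size for a btree file. Small volumes get 0.8% of the
 * volume size; larger ones index the table above by log2 of the volume
 * size. For example, a 64GB volume has 2^27 512-byte sectors, so
 * sectors >> 22 == 32, the shift loop below ends with i == 6, and the
 * 64GB row applies: 111MB attributes, 74MB catalog and 7MB extents
 * clumps before rounding.
 */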
u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
u64 sectors, int file_id)
{
u32 mod = max(node_size, block_size);
u32 clump_size;
int column;
int i;
/* Figure out which column of the above table to use for this file. */
switch (file_id) {
case HFSPLUS_ATTR_CNID:
column = 0;
break;
case HFSPLUS_CAT_CNID:
column = 1;
break;
default:
column = 2;
break;
}
/*
	 * The default clump size is 0.8% of the volume size. It must
	 * also be a multiple of the node and block size.
*/
if (sectors < 0x200000) {
		/* 0.8% of the volume: sectors * 512 * 0.008 ~= sectors * 4 */
		clump_size = sectors << 2;
if (clump_size < (8 * node_size))
clump_size = 8 * node_size;
} else {
/* turn exponent into table index... */
for (i = 0, sectors = sectors >> 22;
sectors && (i < CLUMP_ENTRIES - 1);
++i, sectors = sectors >> 1) {
/* empty body */
}
clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
}
/*
* Round the clump size to a multiple of node and block size.
* NOTE: This rounds down.
*/
clump_size /= mod;
clump_size *= mod;
/*
* Rounding down could have rounded down to 0 if the block size was
* greater than the clump size. If so, just use one block or node.
*/
if (clump_size == 0)
clump_size = mod;
return clump_size;
}
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
struct hfs_btree *tree;
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct inode *inode;
struct page *page;
unsigned int size;
tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
return NULL;
mutex_init(&tree->tree_lock);
spin_lock_init(&tree->hash_lock);
tree->sb = sb;
tree->cnid = id;
inode = hfsplus_iget(sb, id);
if (IS_ERR(inode))
goto free_tree;
tree->inode = inode;
if (!HFSPLUS_I(tree->inode)->first_blocks) {
pr_err("invalid btree extent records (0 size)\n");
goto free_inode;
}
mapping = tree->inode->i_mapping;
page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_inode;
/* Load the header */
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
sizeof(struct hfs_bnode_desc));
tree->root = be32_to_cpu(head->root);
tree->leaf_count = be32_to_cpu(head->leaf_count);
tree->leaf_head = be32_to_cpu(head->leaf_head);
tree->leaf_tail = be32_to_cpu(head->leaf_tail);
tree->node_count = be32_to_cpu(head->node_count);
tree->free_nodes = be32_to_cpu(head->free_nodes);
tree->attributes = be32_to_cpu(head->attributes);
tree->node_size = be16_to_cpu(head->node_size);
tree->max_key_len = be16_to_cpu(head->max_key_len);
tree->depth = be16_to_cpu(head->depth);
/* Verify the tree and set the correct compare function */
switch (id) {
case HFSPLUS_EXT_CNID:
if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
pr_err("invalid extent max_key_len %d\n",
tree->max_key_len);
goto fail_page;
}
if (tree->attributes & HFS_TREE_VARIDXKEYS) {
pr_err("invalid extent btree flag\n");
goto fail_page;
}
tree->keycmp = hfsplus_ext_cmp_key;
break;
case HFSPLUS_CAT_CNID:
if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
pr_err("invalid catalog max_key_len %d\n",
tree->max_key_len);
goto fail_page;
}
if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
pr_err("invalid catalog btree flag\n");
goto fail_page;
}
if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
(head->key_type == HFSPLUS_KEY_BINARY))
tree->keycmp = hfsplus_cat_bin_cmp_key;
else {
tree->keycmp = hfsplus_cat_case_cmp_key;
set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
}
break;
case HFSPLUS_ATTR_CNID:
if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
pr_err("invalid attributes max_key_len %d\n",
tree->max_key_len);
goto fail_page;
}
tree->keycmp = hfsplus_attr_bin_cmp_key;
break;
default:
pr_err("unknown B*Tree requested\n");
goto fail_page;
}
if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
pr_err("invalid btree flag\n");
goto fail_page;
}
size = tree->node_size;
if (!is_power_of_2(size))
goto fail_page;
if (!tree->node_count)
goto fail_page;
tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode =
(tree->node_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
kunmap_local(head);
put_page(page);
return tree;
fail_page:
kunmap_local(head);
put_page(page);
free_inode:
tree->inode->i_mapping->a_ops = &hfsplus_aops;
iput(tree->inode);
free_tree:
kfree(tree);
return NULL;
}
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
struct hfs_bnode *node;
int i;
if (!tree)
return;
for (i = 0; i < NODE_HASH_SIZE; i++) {
while ((node = tree->node_hash[i])) {
tree->node_hash[i] = node->next_hash;
if (atomic_read(&node->refcnt))
pr_crit("node %d:%d "
"still has %d user(s)!\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
hfs_bnode_free(node);
tree->node_hash_cnt--;
}
}
iput(tree->inode);
kfree(tree);
}
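/* Write the in-memory btree header fields back into the header node. */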
int hfs_btree_write(struct hfs_btree *tree)
{
struct hfs_btree_header_rec *head;
struct hfs_bnode *node;
struct page *page;
node = hfs_bnode_find(tree, 0);
if (IS_ERR(node))
/* panic? */
return -EIO;
/* Load the header */
page = node->page[0];
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
sizeof(struct hfs_bnode_desc));
head->root = cpu_to_be32(tree->root);
head->leaf_count = cpu_to_be32(tree->leaf_count);
head->leaf_head = cpu_to_be32(tree->leaf_head);
head->leaf_tail = cpu_to_be32(tree->leaf_tail);
head->node_count = cpu_to_be32(tree->node_count);
head->free_nodes = cpu_to_be32(tree->free_nodes);
head->attributes = cpu_to_be32(tree->attributes);
head->depth = cpu_to_be16(tree->depth);
kunmap_local(head);
set_page_dirty(page);
hfs_bnode_put(node);
return 0;
}
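/*
 * Create a new map node at @idx and chain it after @prev. The fresh
 * node carries a single map record with its first bit set, accounting
 * for the map node itself.
 */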
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
struct hfs_btree *tree = prev->tree;
struct hfs_bnode *node;
struct hfs_bnode_desc desc;
__be32 cnid;
node = hfs_bnode_create(tree, idx);
if (IS_ERR(node))
return node;
tree->free_nodes--;
prev->next = idx;
cnid = cpu_to_be32(idx);
hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
node->type = HFS_NODE_MAP;
node->num_recs = 1;
hfs_bnode_clear(node, 0, tree->node_size);
desc.next = 0;
desc.prev = 0;
desc.type = HFS_NODE_MAP;
desc.height = 0;
desc.num_recs = cpu_to_be16(1);
desc.reserved = 0;
hfs_bnode_write(node, &desc, 0, sizeof(desc));
hfs_bnode_write_u16(node, 14, 0x8000);
hfs_bnode_write_u16(node, tree->node_size - 2, 14);
hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
return node;
}
/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
struct inode *inode = tree->inode;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 count;
int res;
if (rsvd_nodes <= 0)
return 0;
while (tree->free_nodes < rsvd_nodes) {
res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
if (res)
return res;
hip->phys_size = inode->i_size =
(loff_t)hip->alloc_blocks <<
HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
hip->fs_blocks =
hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
return 0;
}
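/*
 * Allocate a node from the btree's node bitmap: scan the chained map
 * records for a clear bit, set it and create the corresponding bnode,
 * growing the bitmap with a new map node when it runs out.
 */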
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
struct hfs_bnode *node, *next_node;
struct page **pagep;
u32 nidx, idx;
unsigned off;
u16 off16;
u16 len;
u8 *data, byte, m;
int i, res;
res = hfs_bmap_reserve(tree, 1);
if (res)
return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
if (IS_ERR(node))
return node;
len = hfs_brec_lenoff(node, 2, &off16);
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
idx = 0;
for (;;) {
while (len) {
byte = data[off];
if (byte != 0xff) {
for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
if (!(byte & m)) {
idx += i;
data[off] |= m;
set_page_dirty(*pagep);
kunmap_local(data);
tree->free_nodes--;
mark_inode_dirty(tree->inode);
hfs_bnode_put(node);
return hfs_bnode_create(tree,
idx);
}
}
}
if (++off >= PAGE_SIZE) {
kunmap_local(data);
data = kmap_local_page(*++pagep);
off = 0;
}
idx += 8;
len--;
}
kunmap_local(data);
nidx = node->next;
if (!nidx) {
hfs_dbg(BNODE_MOD, "create new bmap node\n");
next_node = hfs_bmap_new_bmap(node, idx);
} else
next_node = hfs_bnode_find(tree, nidx);
hfs_bnode_put(node);
if (IS_ERR(next_node))
return next_node;
node = next_node;
len = hfs_brec_lenoff(node, 0, &off16);
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
}
}
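/*
 * Return a node to the btree's node bitmap: walk the chain of map
 * nodes until the one covering the node is found and clear the
 * matching bit.
 */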
void hfs_bmap_free(struct hfs_bnode *node)
{
struct hfs_btree *tree;
struct page *page;
u16 off, len;
u32 nidx;
u8 *data, byte, m;
hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
BUG_ON(!node->this);
tree = node->tree;
nidx = node->this;
node = hfs_bnode_find(tree, 0);
if (IS_ERR(node))
return;
len = hfs_brec_lenoff(node, 2, &off);
while (nidx >= len * 8) {
u32 i;
nidx -= len * 8;
i = node->next;
if (!i) {
/* panic */;
pr_crit("unable to free bnode %u. "
"bmap not found!\n",
node->this);
hfs_bnode_put(node);
return;
}
hfs_bnode_put(node);
node = hfs_bnode_find(tree, i);
if (IS_ERR(node))
return;
if (node->type != HFS_NODE_MAP) {
/* panic */;
pr_crit("invalid bmap found! "
"(%u,%d)\n",
node->this, node->type);
hfs_bnode_put(node);
return;
}
len = hfs_brec_lenoff(node, 0, &off);
}
off += node->page_offset + nidx / 8;
page = node->page[off >> PAGE_SHIFT];
data = kmap_local_page(page);
off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
if (!(byte & m)) {
pr_crit("trying to free free bnode "
"%u(%d)\n",
node->this, node->type);
kunmap_local(data);
hfs_bnode_put(node);
return;
}
data[off] = byte & ~m;
set_page_dirty(page);
kunmap_local(data);
hfs_bnode_put(node);
tree->free_nodes++;
mark_inode_dirty(tree->inode);
}
| linux-master | fs/hfsplus/btree.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/unicode.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handler routines for unicode strings
*/
#include <linux/types.h>
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Fold the case of a unicode char, given the 16 bit value */
/* Returns folded char, or 0 if ignorable */
static inline u16 case_fold(u16 c)
{
u16 tmp;
tmp = hfsplus_case_fold_table[c >> 8];
if (tmp)
tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
else
tmp = c;
return tmp;
}
/* Compare unicode strings, return values like normal strcmp */
int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2)
{
u16 len1, len2, c1, c2;
const hfsplus_unichr *p1, *p2;
len1 = be16_to_cpu(s1->length);
len2 = be16_to_cpu(s2->length);
p1 = s1->unicode;
p2 = s2->unicode;
while (1) {
c1 = c2 = 0;
while (len1 && !c1) {
c1 = case_fold(be16_to_cpu(*p1));
p1++;
len1--;
}
while (len2 && !c2) {
c2 = case_fold(be16_to_cpu(*p2));
p2++;
len2--;
}
if (c1 != c2)
return (c1 < c2) ? -1 : 1;
if (!c1 && !c2)
return 0;
}
}
/* Compare names as a sequence of 16-bit unsigned integers */
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2)
{
u16 len1, len2, c1, c2;
const hfsplus_unichr *p1, *p2;
int len;
len1 = be16_to_cpu(s1->length);
len2 = be16_to_cpu(s2->length);
p1 = s1->unicode;
p2 = s2->unicode;
for (len = min(len1, len2); len > 0; len--) {
c1 = be16_to_cpu(*p1);
c2 = be16_to_cpu(*p2);
if (c1 != c2)
return c1 < c2 ? -1 : 1;
p1++;
p2++;
}
return len1 < len2 ? -1 :
len1 > len2 ? 1 : 0;
}
#define Hangul_SBase 0xac00
#define Hangul_LBase 0x1100
#define Hangul_VBase 0x1161
#define Hangul_TBase 0x11a7
#define Hangul_SCount 11172
#define Hangul_LCount 19
#define Hangul_VCount 21
#define Hangul_TCount 28
#define Hangul_NCount (Hangul_VCount * Hangul_TCount)
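/*
 * Binary-search a composition (sub)table for character @cc. Each entry
 * is a (character, offset) pair, with the entry count at p[1]; on a
 * match, return a pointer into hfsplus_compose_table at the stored
 * offset, otherwise NULL.
 */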
static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
{
int i, s, e;
s = 1;
e = p[1];
if (!e || cc < p[s * 2] || cc > p[e * 2])
return NULL;
do {
i = (s + e) / 2;
if (cc > p[i * 2])
s = i + 1;
else if (cc < p[i * 2])
e = i - 1;
else
return hfsplus_compose_table + p[i * 2 + 1];
} while (s <= e);
return NULL;
}
int hfsplus_uni2asc(struct super_block *sb,
const struct hfsplus_unistr *ustr,
char *astr, int *len_p)
{
const hfsplus_unichr *ip;
struct nls_table *nls = HFSPLUS_SB(sb)->nls;
u8 *op;
u16 cc, c0, c1;
u16 *ce1, *ce2;
int i, len, ustrlen, res, compose;
op = astr;
ip = ustr->unicode;
ustrlen = be16_to_cpu(ustr->length);
len = *len_p;
ce1 = NULL;
compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
while (ustrlen > 0) {
c0 = be16_to_cpu(*ip++);
ustrlen--;
/* search for single decomposed char */
if (likely(compose))
ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
if (ce1)
cc = ce1[0];
else
cc = 0;
if (cc) {
/* start of a possibly decomposed Hangul char */
if (cc != 0xffff)
goto done;
if (!ustrlen)
goto same;
c1 = be16_to_cpu(*ip) - Hangul_VBase;
if (c1 < Hangul_VCount) {
/* compose the Hangul char */
cc = (c0 - Hangul_LBase) * Hangul_VCount;
cc = (cc + c1) * Hangul_TCount;
cc += Hangul_SBase;
ip++;
ustrlen--;
if (!ustrlen)
goto done;
c1 = be16_to_cpu(*ip) - Hangul_TBase;
if (c1 > 0 && c1 < Hangul_TCount) {
cc += c1;
ip++;
ustrlen--;
}
goto done;
}
}
while (1) {
/* main loop for common case of not composed chars */
if (!ustrlen)
goto same;
c1 = be16_to_cpu(*ip);
if (likely(compose))
ce1 = hfsplus_compose_lookup(
hfsplus_compose_table, c1);
if (ce1)
break;
switch (c0) {
case 0:
c0 = 0x2400;
break;
case '/':
c0 = ':';
break;
}
res = nls->uni2char(c0, op, len);
if (res < 0) {
if (res == -ENAMETOOLONG)
goto out;
*op = '?';
res = 1;
}
op += res;
len -= res;
c0 = c1;
ip++;
ustrlen--;
}
ce2 = hfsplus_compose_lookup(ce1, c0);
if (ce2) {
i = 1;
while (i < ustrlen) {
ce1 = hfsplus_compose_lookup(ce2,
be16_to_cpu(ip[i]));
if (!ce1)
break;
i++;
ce2 = ce1;
}
cc = ce2[0];
if (cc) {
ip += i;
ustrlen -= i;
goto done;
}
}
same:
switch (c0) {
case 0:
cc = 0x2400;
break;
case '/':
cc = ':';
break;
default:
cc = c0;
}
done:
res = nls->uni2char(cc, op, len);
if (res < 0) {
if (res == -ENAMETOOLONG)
goto out;
*op = '?';
res = 1;
}
op += res;
len -= res;
}
res = 0;
out:
*len_p = (char *)op - astr;
return res;
}
/*
* Convert one or more ASCII characters into a single unicode character.
* Returns the number of ASCII characters corresponding to the unicode char.
*/
static inline int asc2unichar(struct super_block *sb, const char *astr, int len,
wchar_t *uc)
{
int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc);
if (size <= 0) {
*uc = '?';
size = 1;
}
switch (*uc) {
case 0x2400:
*uc = 0;
break;
case ':':
*uc = '/';
break;
}
return size;
}
/* Decomposes a non-Hangul unicode character. */
static u16 *hfsplus_decompose_nonhangul(wchar_t uc, int *size)
{
int off;
off = hfsplus_decompose_table[(uc >> 12) & 0xf];
if (off == 0 || off == 0xffff)
return NULL;
off = hfsplus_decompose_table[off + ((uc >> 8) & 0xf)];
if (!off)
return NULL;
off = hfsplus_decompose_table[off + ((uc >> 4) & 0xf)];
if (!off)
return NULL;
off = hfsplus_decompose_table[off + (uc & 0xf)];
*size = off & 3;
if (*size == 0)
return NULL;
return hfsplus_decompose_table + (off / 4);
}
/*
* Try to decompose a unicode character as Hangul. Return 0 if @uc is not
* precomposed Hangul, otherwise return the length of the decomposition.
*
* This function was adapted from sample code from the Unicode Standard
* Annex #15: Unicode Normalization Forms, version 3.2.0.
*
* Copyright (C) 1991-2018 Unicode, Inc. All rights reserved. Distributed
* under the Terms of Use in http://www.unicode.org/copyright.html.
*/
static int hfsplus_try_decompose_hangul(wchar_t uc, u16 *result)
{
int index;
int l, v, t;
index = uc - Hangul_SBase;
if (index < 0 || index >= Hangul_SCount)
return 0;
l = Hangul_LBase + index / Hangul_NCount;
v = Hangul_VBase + (index % Hangul_NCount) / Hangul_TCount;
t = Hangul_TBase + index % Hangul_TCount;
result[0] = l;
result[1] = v;
if (t != Hangul_TBase) {
result[2] = t;
return 3;
}
return 2;
}
/* Decomposes a single unicode character. */
static u16 *decompose_unichar(wchar_t uc, int *size, u16 *hangul_buffer)
{
u16 *result;
/* Hangul is handled separately */
result = hangul_buffer;
*size = hfsplus_try_decompose_hangul(uc, result);
if (*size == 0)
result = hfsplus_decompose_nonhangul(uc, size);
return result;
}
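/*
 * Convert an NLS string into an HFS+ unicode string, optionally
 * decomposing each character first. Returns -ENAMETOOLONG if the
 * result does not fit in @max_unistr_len unicode characters.
 */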
int hfsplus_asc2uni(struct super_block *sb,
struct hfsplus_unistr *ustr, int max_unistr_len,
const char *astr, int len)
{
int size, dsize, decompose;
u16 *dstr, outlen = 0;
wchar_t c;
u16 dhangul[3];
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
while (outlen < max_unistr_len && len > 0) {
size = asc2unichar(sb, astr, len, &c);
if (decompose)
dstr = decompose_unichar(c, &dsize, dhangul);
else
dstr = NULL;
if (dstr) {
if (outlen + dsize > max_unistr_len)
break;
do {
ustr->unicode[outlen++] = cpu_to_be16(*dstr++);
} while (--dsize > 0);
} else
ustr->unicode[outlen++] = cpu_to_be16(c);
astr += size;
len -= size;
}
ustr->length = cpu_to_be16(outlen);
if (len > 0)
return -ENAMETOOLONG;
return 0;
}
/*
* Hash a string to an integer as appropriate for the HFS+ filesystem.
* Composed unicode characters are decomposed and case-folding is performed
* if the appropriate bits are (un)set on the superblock.
*/
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
{
struct super_block *sb = dentry->d_sb;
const char *astr;
const u16 *dstr;
int casefold, decompose, size, len;
unsigned long hash;
wchar_t c;
u16 c2;
u16 dhangul[3];
casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
hash = init_name_hash(dentry);
astr = str->name;
len = str->len;
while (len > 0) {
int dsize;
size = asc2unichar(sb, astr, len, &c);
astr += size;
len -= size;
if (decompose)
dstr = decompose_unichar(c, &dsize, dhangul);
else
dstr = NULL;
if (dstr) {
do {
c2 = *dstr++;
if (casefold)
c2 = case_fold(c2);
if (!casefold || c2)
hash = partial_name_hash(c2, hash);
} while (--dsize > 0);
} else {
c2 = c;
if (casefold)
c2 = case_fold(c2);
if (!casefold || c2)
hash = partial_name_hash(c2, hash);
}
}
str->hash = end_name_hash(hash);
return 0;
}
/*
* Compare strings with HFS+ filename ordering.
* Composed unicode characters are decomposed and case-folding is performed
* if the appropriate bits are (un)set on the superblock.
*/
int hfsplus_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct super_block *sb = dentry->d_sb;
int casefold, decompose, size;
int dsize1, dsize2, len1, len2;
const u16 *dstr1, *dstr2;
const char *astr1, *astr2;
u16 c1, c2;
wchar_t c;
u16 dhangul_1[3], dhangul_2[3];
casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
astr1 = str;
len1 = len;
astr2 = name->name;
len2 = name->len;
dsize1 = dsize2 = 0;
dstr1 = dstr2 = NULL;
while (len1 > 0 && len2 > 0) {
if (!dsize1) {
size = asc2unichar(sb, astr1, len1, &c);
astr1 += size;
len1 -= size;
if (decompose)
dstr1 = decompose_unichar(c, &dsize1,
dhangul_1);
if (!decompose || !dstr1) {
c1 = c;
dstr1 = &c1;
dsize1 = 1;
}
}
if (!dsize2) {
size = asc2unichar(sb, astr2, len2, &c);
astr2 += size;
len2 -= size;
if (decompose)
dstr2 = decompose_unichar(c, &dsize2,
dhangul_2);
if (!decompose || !dstr2) {
c2 = c;
dstr2 = &c2;
dsize2 = 1;
}
}
c1 = *dstr1;
c2 = *dstr2;
if (casefold) {
c1 = case_fold(c1);
if (!c1) {
dstr1++;
dsize1--;
continue;
}
c2 = case_fold(c2);
if (!c2) {
dstr2++;
dsize2--;
continue;
}
}
if (c1 < c2)
return -1;
else if (c1 > c2)
return 1;
dstr1++;
dsize1--;
dstr2++;
dsize2--;
}
if (len1 < len2)
return -1;
if (len1 > len2)
return 1;
return 0;
}
| linux-master | fs/hfsplus/unicode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/tables.c
*
* Various data tables
*/
#include "hfsplus_fs.h"
/*
* Unicode case folding table taken from Apple Technote #1150
* (HFS Plus Volume Format)
*/
u16 hfsplus_case_fold_table[] = {
/*
* The lower case table consists of a 256-entry high-byte table followed by
* some number of 256-entry subtables. The high-byte table contains either an
* offset to the subtable for characters with that high byte or zero, which
* means that there are no case mappings or ignored characters in that block.
* Ignored characters are mapped to zero.
*/
// High-byte indices ( == 0 iff no case mapping and no ignorables )
/* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00,
// Table 1 (for high byte 0x00)
/* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
/* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
/* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
/* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
/* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
/* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
/* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
/* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
/* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
/* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
/* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
/* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
/* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7,
0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
/* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF,
/* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
/* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF,
// Table 2 (for high byte 0x01)
/* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107,
0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F,
/* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117,
0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F,
/* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127,
0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F,
/* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137,
0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140,
/* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147,
0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F,
/* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157,
0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F,
/* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167,
0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F,
/* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177,
0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F,
/* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188,
0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259,
/* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268,
0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275,
/* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8,
0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF,
/* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292,
0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF,
/* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9,
0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF,
/* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7,
0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF,
/* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7,
0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF,
/* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7,
0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF,
// Table 3 (for high byte 0x03)
/* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
/* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
/* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
/* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
/* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
/* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
/* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
/* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
/* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F,
/* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
/* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF,
/* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
/* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF,
/* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7,
0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF,
/* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7,
0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF,
/* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF,
// Table 4 (for high byte 0x04)
/* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407,
0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F,
/* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
/* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
/* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
/* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
/* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457,
0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F,
/* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467,
0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F,
/* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477,
0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F,
/* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F,
/* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497,
0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F,
/* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7,
0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF,
/* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7,
0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF,
/* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8,
0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF,
/* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7,
0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF,
/* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7,
0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF,
/* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7,
0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF,
// Table 5 (for high byte 0x05)
/* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
/* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
/* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
/* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
/* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
/* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557,
0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
/* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
/* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
/* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587,
0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
/* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
/* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
/* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
/* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
/* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
/* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
/* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF,
// Table 6 (for high byte 0x10)
/* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F,
/* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017,
0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F,
/* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F,
/* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037,
0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F,
/* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F,
/* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F,
/* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067,
0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F,
/* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077,
0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F,
/* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087,
0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F,
/* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097,
0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F,
/* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
/* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
/* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7,
0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF,
/* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
/* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
/* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7,
0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF,
// Table 7 (for high byte 0x20)
/* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000,
/* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017,
0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F,
/* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F,
/* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037,
0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F,
/* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047,
0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F,
/* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057,
0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F,
/* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067,
0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077,
0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F,
/* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F,
/* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097,
0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F,
/* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7,
0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF,
/* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7,
0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF,
/* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7,
0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF,
/* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7,
0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF,
/* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7,
0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF,
/* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7,
0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF,
// Table 8 (for high byte 0x21)
/* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
/* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
/* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
/* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
/* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
/* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
/* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
/* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
/* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
/* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
/* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
/* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
/* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
/* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
/* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
/* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF,
// Table 9 (for high byte 0xFE)
/* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07,
0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F,
/* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17,
0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F,
/* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27,
0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F,
/* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37,
0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F,
/* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47,
0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F,
/* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57,
0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F,
/* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67,
0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F,
/* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77,
0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F,
/* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87,
0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F,
/* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97,
0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F,
/* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7,
0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF,
/* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7,
0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF,
/* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7,
0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF,
/* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7,
0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF,
/* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7,
0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF,
/* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7,
0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000,
// Table 10 (for high byte 0xFF)
/* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
/* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
/* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
/* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
/* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
/* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
/* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
/* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
/* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
/* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
/* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
/* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
/* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
/* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
/* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
/* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF,
};
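/*
 * Illustrative sketch, not part of the original table file: the case
 * folding table above is two-level. Its first 256 entries are indexed
 * by the high byte of a character and hold either 0 (no mappings for
 * that high byte) or the offset of a 256-entry subtable indexed by the
 * low byte; "Table 1" above thus lives at offset 0x100 and serves high
 * byte 0x00, and the 0x0900/0x0A00 entries at the end of the high-byte
 * index point at Tables 9 and 10 for high bytes 0xFE and 0xFF. A
 * folded value of 0 marks a character that is ignored entirely in name
 * comparisons (e.g. the zero-width and directionality marks in
 * Table 7). The hypothetical helper below mirrors the case_fold()
 * logic in fs/hfsplus/unicode.c as far as this layout dictates; treat
 * it as an illustration, not the canonical implementation.
 */
static inline u16 case_fold_sketch(u16 c)
{
	u16 tmp = hfsplus_case_fold_table[c >> 8];

	if (tmp)	/* subtable present: fold via the low byte */
		tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
	else		/* no subtable: the character folds to itself */
		tmp = c;
	return tmp;	/* 0 means "ignore this character" */
}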
u16 hfsplus_decompose_table[] = {
/* base table */
0x0010, 0x04c0, 0x0000, 0x06f0, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x07b0,
/* char table 0x0___ */
0x0020, 0x0070, 0x0160, 0x0190, 0x0230, 0x0000, 0x0000, 0x0000,
0x0000, 0x02d0, 0x0340, 0x0360, 0x03b0, 0x03e0, 0x0400, 0x0430,
/* char table 0x00__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0030, 0x0040, 0x0050, 0x0060,
/* char values 0x00c_ */
0x2042, 0x204a, 0x2052, 0x205a, 0x2062, 0x206a, 0x0000, 0x2072,
0x207a, 0x2082, 0x208a, 0x2092, 0x209a, 0x20a2, 0x20aa, 0x20b2,
/* char values 0x00d_ */
0x0000, 0x20ba, 0x20c2, 0x20ca, 0x20d2, 0x20da, 0x20e2, 0x0000,
0x0000, 0x20ea, 0x20f2, 0x20fa, 0x2102, 0x210a, 0x0000, 0x0000,
/* char values 0x00e_ */
0x2112, 0x211a, 0x2122, 0x212a, 0x2132, 0x213a, 0x0000, 0x2142,
0x214a, 0x2152, 0x215a, 0x2162, 0x216a, 0x2172, 0x217a, 0x2182,
/* char values 0x00f_ */
0x0000, 0x218a, 0x2192, 0x219a, 0x21a2, 0x21aa, 0x21b2, 0x0000,
0x0000, 0x21ba, 0x21c2, 0x21ca, 0x21d2, 0x21da, 0x0000, 0x21e2,
/* char table 0x01__ */
0x0080, 0x0090, 0x00a0, 0x00b0, 0x00c0, 0x00d0, 0x00e0, 0x00f0,
0x0000, 0x0000, 0x0100, 0x0110, 0x0120, 0x0130, 0x0140, 0x0150,
/* char values 0x010_ */
0x21ea, 0x21f2, 0x21fa, 0x2202, 0x220a, 0x2212, 0x221a, 0x2222,
0x222a, 0x2232, 0x223a, 0x2242, 0x224a, 0x2252, 0x225a, 0x2262,
/* char values 0x011_ */
0x0000, 0x0000, 0x226a, 0x2272, 0x227a, 0x2282, 0x228a, 0x2292,
0x229a, 0x22a2, 0x22aa, 0x22b2, 0x22ba, 0x22c2, 0x22ca, 0x22d2,
/* char values 0x012_ */
0x22da, 0x22e2, 0x22ea, 0x22f2, 0x22fa, 0x2302, 0x0000, 0x0000,
0x230a, 0x2312, 0x231a, 0x2322, 0x232a, 0x2332, 0x233a, 0x2342,
/* char values 0x013_ */
0x234a, 0x0000, 0x0000, 0x0000, 0x2352, 0x235a, 0x2362, 0x236a,
0x0000, 0x2372, 0x237a, 0x2382, 0x238a, 0x2392, 0x239a, 0x0000,
/* char values 0x014_ */
0x0000, 0x0000, 0x0000, 0x23a2, 0x23aa, 0x23b2, 0x23ba, 0x23c2,
0x23ca, 0x0000, 0x0000, 0x0000, 0x23d2, 0x23da, 0x23e2, 0x23ea,
/* char values 0x015_ */
0x23f2, 0x23fa, 0x0000, 0x0000, 0x2402, 0x240a, 0x2412, 0x241a,
0x2422, 0x242a, 0x2432, 0x243a, 0x2442, 0x244a, 0x2452, 0x245a,
/* char values 0x016_ */
0x2462, 0x246a, 0x2472, 0x247a, 0x2482, 0x248a, 0x0000, 0x0000,
0x2492, 0x249a, 0x24a2, 0x24aa, 0x24b2, 0x24ba, 0x24c2, 0x24ca,
/* char values 0x017_ */
0x24d2, 0x24da, 0x24e2, 0x24ea, 0x24f2, 0x24fa, 0x2502, 0x250a,
0x2512, 0x251a, 0x2522, 0x252a, 0x2532, 0x253a, 0x2542, 0x0000,
/* char values 0x01a_ */
0x254a, 0x2552, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x255a,
/* char values 0x01b_ */
0x2562, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x01c_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x256a, 0x2572, 0x257a,
/* char values 0x01d_ */
0x2582, 0x258a, 0x2592, 0x259a, 0x25a2, 0x25ab, 0x25b7, 0x25c3,
0x25cf, 0x25db, 0x25e7, 0x25f3, 0x25ff, 0x0000, 0x260b, 0x2617,
/* char values 0x01e_ */
0x2623, 0x262f, 0x263a, 0x2642, 0x0000, 0x0000, 0x264a, 0x2652,
0x265a, 0x2662, 0x266a, 0x2672, 0x267b, 0x2687, 0x2692, 0x269a,
/* char values 0x01f_ */
0x26a2, 0x0000, 0x0000, 0x0000, 0x26aa, 0x26b2, 0x0000, 0x0000,
0x0000, 0x0000, 0x26bb, 0x26c7, 0x26d2, 0x26da, 0x26e2, 0x26ea,
/* char table 0x02__ */
0x0170, 0x0180, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x020_ */
0x26f2, 0x26fa, 0x2702, 0x270a, 0x2712, 0x271a, 0x2722, 0x272a,
0x2732, 0x273a, 0x2742, 0x274a, 0x2752, 0x275a, 0x2762, 0x276a,
/* char values 0x021_ */
0x2772, 0x277a, 0x2782, 0x278a, 0x2792, 0x279a, 0x27a2, 0x27aa,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x03__ */
0x0000, 0x01a0, 0x0000, 0x0000, 0x01b0, 0x0000, 0x0000, 0x01c0,
0x01d0, 0x01e0, 0x01f0, 0x0200, 0x0210, 0x0220, 0x0000, 0x0000,
/* char values 0x031_ */
0x27b2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x034_ */
0x27b9, 0x27bd, 0x0000, 0x27c1, 0x27c6, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x037_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x27cd, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x27d1, 0x0000,
/* char values 0x038_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x27d6, 0x27de, 0x27e5,
0x27ea, 0x27f2, 0x27fa, 0x0000, 0x2802, 0x0000, 0x280a, 0x2812,
/* char values 0x039_ */
0x281b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x03a_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x2826, 0x282e, 0x2836, 0x283e, 0x2846, 0x284e,
/* char values 0x03b_ */
0x2857, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x03c_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x2862, 0x286a, 0x2872, 0x287a, 0x2882, 0x0000,
/* char values 0x03d_ */
0x0000, 0x0000, 0x0000, 0x288a, 0x2892, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x04__ */
0x0240, 0x0250, 0x0000, 0x0260, 0x0000, 0x0270, 0x0000, 0x0280,
0x0000, 0x0000, 0x0000, 0x0000, 0x0290, 0x02a0, 0x02b0, 0x02c0,
/* char values 0x040_ */
0x0000, 0x289a, 0x0000, 0x28a2, 0x0000, 0x0000, 0x0000, 0x28aa,
0x0000, 0x0000, 0x0000, 0x0000, 0x28b2, 0x0000, 0x28ba, 0x0000,
/* char values 0x041_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x28c2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x043_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x28ca, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x045_ */
0x0000, 0x28d2, 0x0000, 0x28da, 0x0000, 0x0000, 0x0000, 0x28e2,
0x0000, 0x0000, 0x0000, 0x0000, 0x28ea, 0x0000, 0x28f2, 0x0000,
/* char values 0x047_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x28fa, 0x2902,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x04c_ */
0x0000, 0x290a, 0x2912, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x04d_ */
0x291a, 0x2922, 0x292a, 0x2932, 0x2939, 0x293d, 0x2942, 0x294a,
0x2951, 0x2955, 0x295a, 0x2962, 0x296a, 0x2972, 0x297a, 0x2982,
/* char values 0x04e_ */
0x2989, 0x298d, 0x2992, 0x299a, 0x29a2, 0x29aa, 0x29b2, 0x29ba,
0x29c1, 0x29c5, 0x29ca, 0x29d2, 0x0000, 0x0000, 0x29da, 0x29e2,
/* char values 0x04f_ */
0x29ea, 0x29f2, 0x29fa, 0x2a02, 0x2a0a, 0x2a12, 0x0000, 0x0000,
0x2a1a, 0x2a22, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x09__ */
0x0000, 0x0000, 0x02e0, 0x02f0, 0x0000, 0x0300, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0310, 0x0320, 0x0330, 0x0000, 0x0000,
/* char values 0x092_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x2a2a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x093_ */
0x0000, 0x2a32, 0x0000, 0x0000, 0x2a3a, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x095_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x2a42, 0x2a4a, 0x2a52, 0x2a5a, 0x2a62, 0x2a6a, 0x2a72, 0x2a7a,
/* char values 0x09b_ */
0x2a82, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x09c_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x2a8a, 0x2a92, 0x0000, 0x0000, 0x0000,
/* char values 0x09d_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x2a9a, 0x2aa2, 0x0000, 0x2aaa,
/* char table 0x0a__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0350, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0a5_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x2ab2, 0x2aba, 0x2ac2, 0x2aca, 0x0000, 0x2ad2, 0x0000,
/* char table 0x0b__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0370, 0x0380, 0x0000, 0x0000,
0x0000, 0x0390, 0x0000, 0x0000, 0x03a0, 0x0000, 0x0000, 0x0000,
/* char values 0x0b4_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x2ada, 0x0000, 0x0000, 0x2ae2, 0x2aea, 0x0000, 0x0000, 0x0000,
/* char values 0x0b5_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x2af2, 0x2afa, 0x0000, 0x2b02,
/* char values 0x0b9_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x2b0a, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0bc_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x2b12, 0x2b1a, 0x2b22, 0x0000, 0x0000, 0x0000,
/* char table 0x0c__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x03c0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x03d0, 0x0000, 0x0000, 0x0000,
/* char values 0x0c4_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x2b2a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0cc_ */
0x2b32, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b3a,
0x2b42, 0x0000, 0x2b4a, 0x2b53, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x0d__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x03f0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0d4_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x2b5e, 0x2b66, 0x2b6e, 0x0000, 0x0000, 0x0000,
/* char table 0x0e__ */
0x0000, 0x0000, 0x0000, 0x0410, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0420, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0e3_ */
0x0000, 0x0000, 0x0000, 0x2b76, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0eb_ */
0x0000, 0x0000, 0x0000, 0x2b7e, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x0f__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0440, 0x0450, 0x0460, 0x0470,
0x0480, 0x0490, 0x04a0, 0x04b0, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0f4_ */
0x0000, 0x0000, 0x0000, 0x2b86, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b8e, 0x0000, 0x0000,
/* char values 0x0f5_ */
0x0000, 0x0000, 0x2b96, 0x0000, 0x0000, 0x0000, 0x0000, 0x2b9e,
0x0000, 0x0000, 0x0000, 0x0000, 0x2ba6, 0x0000, 0x0000, 0x0000,
/* char values 0x0f6_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x2bae, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0f7_ */
0x0000, 0x0000, 0x0000, 0x2bb6, 0x0000, 0x2bbe, 0x2bc6, 0x2bcf,
0x2bda, 0x2be3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0f8_ */
0x0000, 0x2bee, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x0f9_ */
0x0000, 0x0000, 0x0000, 0x2bf6, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2bfe, 0x0000, 0x0000,
/* char values 0x0fa_ */
0x0000, 0x0000, 0x2c06, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c0e,
0x0000, 0x0000, 0x0000, 0x0000, 0x2c16, 0x0000, 0x0000, 0x0000,
/* char values 0x0fb_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x2c1e, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x1___ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x04d0, 0x05e0,
/* char table 0x1e__ */
0x04e0, 0x04f0, 0x0500, 0x0510, 0x0520, 0x0530, 0x0540, 0x0550,
0x0560, 0x0570, 0x0580, 0x0590, 0x05a0, 0x05b0, 0x05c0, 0x05d0,
/* char values 0x1e0_ */
0x2c26, 0x2c2e, 0x2c36, 0x2c3e, 0x2c46, 0x2c4e, 0x2c56, 0x2c5e,
0x2c67, 0x2c73, 0x2c7e, 0x2c86, 0x2c8e, 0x2c96, 0x2c9e, 0x2ca6,
/* char values 0x1e1_ */
0x2cae, 0x2cb6, 0x2cbe, 0x2cc6, 0x2ccf, 0x2cdb, 0x2ce7, 0x2cf3,
0x2cfe, 0x2d06, 0x2d0e, 0x2d16, 0x2d1f, 0x2d2b, 0x2d36, 0x2d3e,
/* char values 0x1e2_ */
0x2d46, 0x2d4e, 0x2d56, 0x2d5e, 0x2d66, 0x2d6e, 0x2d76, 0x2d7e,
0x2d86, 0x2d8e, 0x2d96, 0x2d9e, 0x2da6, 0x2dae, 0x2db7, 0x2dc3,
/* char values 0x1e3_ */
0x2dce, 0x2dd6, 0x2dde, 0x2de6, 0x2dee, 0x2df6, 0x2dfe, 0x2e06,
0x2e0f, 0x2e1b, 0x2e26, 0x2e2e, 0x2e36, 0x2e3e, 0x2e46, 0x2e4e,
/* char values 0x1e4_ */
0x2e56, 0x2e5e, 0x2e66, 0x2e6e, 0x2e76, 0x2e7e, 0x2e86, 0x2e8e,
0x2e96, 0x2e9e, 0x2ea6, 0x2eae, 0x2eb7, 0x2ec3, 0x2ecf, 0x2edb,
/* char values 0x1e5_ */
0x2ee7, 0x2ef3, 0x2eff, 0x2f0b, 0x2f16, 0x2f1e, 0x2f26, 0x2f2e,
0x2f36, 0x2f3e, 0x2f46, 0x2f4e, 0x2f57, 0x2f63, 0x2f6e, 0x2f76,
/* char values 0x1e6_ */
0x2f7e, 0x2f86, 0x2f8e, 0x2f96, 0x2f9f, 0x2fab, 0x2fb7, 0x2fc3,
0x2fcf, 0x2fdb, 0x2fe6, 0x2fee, 0x2ff6, 0x2ffe, 0x3006, 0x300e,
/* char values 0x1e7_ */
0x3016, 0x301e, 0x3026, 0x302e, 0x3036, 0x303e, 0x3046, 0x304e,
0x3057, 0x3063, 0x306f, 0x307b, 0x3086, 0x308e, 0x3096, 0x309e,
/* char values 0x1e8_ */
0x30a6, 0x30ae, 0x30b6, 0x30be, 0x30c6, 0x30ce, 0x30d6, 0x30de,
0x30e6, 0x30ee, 0x30f6, 0x30fe, 0x3106, 0x310e, 0x3116, 0x311e,
/* char values 0x1e9_ */
0x3126, 0x312e, 0x3136, 0x313e, 0x3146, 0x314e, 0x3156, 0x315e,
0x3166, 0x316e, 0x0000, 0x3176, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x1ea_ */
0x317e, 0x3186, 0x318e, 0x3196, 0x319f, 0x31ab, 0x31b7, 0x31c3,
0x31cf, 0x31db, 0x31e7, 0x31f3, 0x31ff, 0x320b, 0x3217, 0x3223,
/* char values 0x1eb_ */
0x322f, 0x323b, 0x3247, 0x3253, 0x325f, 0x326b, 0x3277, 0x3283,
0x328e, 0x3296, 0x329e, 0x32a6, 0x32ae, 0x32b6, 0x32bf, 0x32cb,
/* char values 0x1ec_ */
0x32d7, 0x32e3, 0x32ef, 0x32fb, 0x3307, 0x3313, 0x331f, 0x332b,
0x3336, 0x333e, 0x3346, 0x334e, 0x3356, 0x335e, 0x3366, 0x336e,
/* char values 0x1ed_ */
0x3377, 0x3383, 0x338f, 0x339b, 0x33a7, 0x33b3, 0x33bf, 0x33cb,
0x33d7, 0x33e3, 0x33ef, 0x33fb, 0x3407, 0x3413, 0x341f, 0x342b,
/* char values 0x1ee_ */
0x3437, 0x3443, 0x344f, 0x345b, 0x3466, 0x346e, 0x3476, 0x347e,
0x3487, 0x3493, 0x349f, 0x34ab, 0x34b7, 0x34c3, 0x34cf, 0x34db,
/* char values 0x1ef_ */
0x34e7, 0x34f3, 0x34fe, 0x3506, 0x350e, 0x3516, 0x351e, 0x3526,
0x352e, 0x3536, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x1f__ */
0x05f0, 0x0600, 0x0610, 0x0620, 0x0630, 0x0640, 0x0650, 0x0660,
0x0670, 0x0680, 0x0690, 0x06a0, 0x06b0, 0x06c0, 0x06d0, 0x06e0,
/* char values 0x1f0_ */
0x353e, 0x3546, 0x354f, 0x355b, 0x3567, 0x3573, 0x357f, 0x358b,
0x3596, 0x359e, 0x35a7, 0x35b3, 0x35bf, 0x35cb, 0x35d7, 0x35e3,
/* char values 0x1f1_ */
0x35ee, 0x35f6, 0x35ff, 0x360b, 0x3617, 0x3623, 0x0000, 0x0000,
0x362e, 0x3636, 0x363f, 0x364b, 0x3657, 0x3663, 0x0000, 0x0000,
/* char values 0x1f2_ */
0x366e, 0x3676, 0x367f, 0x368b, 0x3697, 0x36a3, 0x36af, 0x36bb,
0x36c6, 0x36ce, 0x36d7, 0x36e3, 0x36ef, 0x36fb, 0x3707, 0x3713,
/* char values 0x1f3_ */
0x371e, 0x3726, 0x372f, 0x373b, 0x3747, 0x3753, 0x375f, 0x376b,
0x3776, 0x377e, 0x3787, 0x3793, 0x379f, 0x37ab, 0x37b7, 0x37c3,
/* char values 0x1f4_ */
0x37ce, 0x37d6, 0x37df, 0x37eb, 0x37f7, 0x3803, 0x0000, 0x0000,
0x380e, 0x3816, 0x381f, 0x382b, 0x3837, 0x3843, 0x0000, 0x0000,
/* char values 0x1f5_ */
0x384e, 0x3856, 0x385f, 0x386b, 0x3877, 0x3883, 0x388f, 0x389b,
0x0000, 0x38a6, 0x0000, 0x38af, 0x0000, 0x38bb, 0x0000, 0x38c7,
/* char values 0x1f6_ */
0x38d2, 0x38da, 0x38e3, 0x38ef, 0x38fb, 0x3907, 0x3913, 0x391f,
0x392a, 0x3932, 0x393b, 0x3947, 0x3953, 0x395f, 0x396b, 0x3977,
/* char values 0x1f7_ */
0x3982, 0x398a, 0x3992, 0x399a, 0x39a2, 0x39aa, 0x39b2, 0x39ba,
0x39c2, 0x39ca, 0x39d2, 0x39da, 0x39e2, 0x39ea, 0x0000, 0x0000,
/* char values 0x1f8_ */
0x39f3, 0x39ff, 0x3a0c, 0x3a1c, 0x3a2c, 0x3a3c, 0x3a4c, 0x3a5c,
0x3a6b, 0x3a77, 0x3a84, 0x3a94, 0x3aa4, 0x3ab4, 0x3ac4, 0x3ad4,
/* char values 0x1f9_ */
0x3ae3, 0x3aef, 0x3afc, 0x3b0c, 0x3b1c, 0x3b2c, 0x3b3c, 0x3b4c,
0x3b5b, 0x3b67, 0x3b74, 0x3b84, 0x3b94, 0x3ba4, 0x3bb4, 0x3bc4,
/* char values 0x1fa_ */
0x3bd3, 0x3bdf, 0x3bec, 0x3bfc, 0x3c0c, 0x3c1c, 0x3c2c, 0x3c3c,
0x3c4b, 0x3c57, 0x3c64, 0x3c74, 0x3c84, 0x3c94, 0x3ca4, 0x3cb4,
/* char values 0x1fb_ */
0x3cc2, 0x3cca, 0x3cd3, 0x3cde, 0x3ce7, 0x0000, 0x3cf2, 0x3cfb,
0x3d06, 0x3d0e, 0x3d16, 0x3d1e, 0x3d26, 0x0000, 0x3d2d, 0x0000,
/* char values 0x1fc_ */
0x0000, 0x3d32, 0x3d3b, 0x3d46, 0x3d4f, 0x0000, 0x3d5a, 0x3d63,
0x3d6e, 0x3d76, 0x3d7e, 0x3d86, 0x3d8e, 0x3d96, 0x3d9e, 0x3da6,
/* char values 0x1fd_ */
0x3dae, 0x3db6, 0x3dbf, 0x3dcb, 0x0000, 0x0000, 0x3dd6, 0x3ddf,
0x3dea, 0x3df2, 0x3dfa, 0x3e02, 0x0000, 0x3e0a, 0x3e12, 0x3e1a,
/* char values 0x1fe_ */
0x3e22, 0x3e2a, 0x3e33, 0x3e3f, 0x3e4a, 0x3e52, 0x3e5a, 0x3e63,
0x3e6e, 0x3e76, 0x3e7e, 0x3e86, 0x3e8e, 0x3e96, 0x3e9e, 0x3ea5,
/* char values 0x1ff_ */
0x0000, 0x0000, 0x3eab, 0x3eb6, 0x3ebf, 0x0000, 0x3eca, 0x3ed3,
0x3ede, 0x3ee6, 0x3eee, 0x3ef6, 0x3efe, 0x3f05, 0x0000, 0x0000,
/* char table 0x3___ */
0x0700, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0x30__ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0710, 0x0720, 0x0730, 0x0740,
0x0000, 0x0750, 0x0760, 0x0770, 0x0780, 0x0790, 0x0000, 0x07a0,
/* char values 0x304_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x3f0a, 0x0000, 0x3f12, 0x0000,
/* char values 0x305_ */
0x3f1a, 0x0000, 0x3f22, 0x0000, 0x3f2a, 0x0000, 0x3f32, 0x0000,
0x3f3a, 0x0000, 0x3f42, 0x0000, 0x3f4a, 0x0000, 0x3f52, 0x0000,
/* char values 0x306_ */
0x3f5a, 0x0000, 0x3f62, 0x0000, 0x0000, 0x3f6a, 0x0000, 0x3f72,
0x0000, 0x3f7a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x307_ */
0x3f82, 0x3f8a, 0x0000, 0x3f92, 0x3f9a, 0x0000, 0x3fa2, 0x3faa,
0x0000, 0x3fb2, 0x3fba, 0x0000, 0x3fc2, 0x3fca, 0x0000, 0x0000,
/* char values 0x309_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x3fd2, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3fda, 0x0000,
/* char values 0x30a_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x3fe2, 0x0000, 0x3fea, 0x0000,
/* char values 0x30b_ */
0x3ff2, 0x0000, 0x3ffa, 0x0000, 0x4002, 0x0000, 0x400a, 0x0000,
0x4012, 0x0000, 0x401a, 0x0000, 0x4022, 0x0000, 0x402a, 0x0000,
/* char values 0x30c_ */
0x4032, 0x0000, 0x403a, 0x0000, 0x0000, 0x4042, 0x0000, 0x404a,
0x0000, 0x4052, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0x30d_ */
0x405a, 0x4062, 0x0000, 0x406a, 0x4072, 0x0000, 0x407a, 0x4082,
0x0000, 0x408a, 0x4092, 0x0000, 0x409a, 0x40a2, 0x0000, 0x0000,
/* char values 0x30f_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x40aa, 0x0000, 0x0000, 0x40b2,
0x40ba, 0x40c2, 0x40ca, 0x0000, 0x0000, 0x0000, 0x40d2, 0x0000,
/* char table 0xf___ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x07c0, 0x0000, 0x0000, 0x0000, 0x0000,
/* char table 0xfb__ */
0x0000, 0x07d0, 0x07e0, 0x07f0, 0x0800, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
/* char values 0xfb1_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x40da,
/* char values 0xfb2_ */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x40e2, 0x40ea, 0x40f3, 0x40ff, 0x410a, 0x4112,
/* char values 0xfb3_ */
0x411a, 0x4122, 0x412a, 0x4132, 0x413a, 0x4142, 0x414a, 0x0000,
0x4152, 0x415a, 0x4162, 0x416a, 0x4172, 0x0000, 0x417a, 0x0000,
/* char values 0xfb4_ */
0x4182, 0x418a, 0x0000, 0x4192, 0x419a, 0x0000, 0x41a2, 0x41aa,
0x41b2, 0x41ba, 0x41c2, 0x41ca, 0x41d2, 0x41da, 0x41e2, 0x0000,
/* decomposed characters */
0x0041, 0x0300, 0x0041, 0x0301, 0x0041, 0x0302, 0x0041, 0x0303,
0x0041, 0x0308, 0x0041, 0x030a, 0x0043, 0x0327, 0x0045, 0x0300,
0x0045, 0x0301, 0x0045, 0x0302, 0x0045, 0x0308, 0x0049, 0x0300,
0x0049, 0x0301, 0x0049, 0x0302, 0x0049, 0x0308, 0x004e, 0x0303,
0x004f, 0x0300, 0x004f, 0x0301, 0x004f, 0x0302, 0x004f, 0x0303,
0x004f, 0x0308, 0x0055, 0x0300, 0x0055, 0x0301, 0x0055, 0x0302,
0x0055, 0x0308, 0x0059, 0x0301, 0x0061, 0x0300, 0x0061, 0x0301,
0x0061, 0x0302, 0x0061, 0x0303, 0x0061, 0x0308, 0x0061, 0x030a,
0x0063, 0x0327, 0x0065, 0x0300, 0x0065, 0x0301, 0x0065, 0x0302,
0x0065, 0x0308, 0x0069, 0x0300, 0x0069, 0x0301, 0x0069, 0x0302,
0x0069, 0x0308, 0x006e, 0x0303, 0x006f, 0x0300, 0x006f, 0x0301,
0x006f, 0x0302, 0x006f, 0x0303, 0x006f, 0x0308, 0x0075, 0x0300,
0x0075, 0x0301, 0x0075, 0x0302, 0x0075, 0x0308, 0x0079, 0x0301,
0x0079, 0x0308, 0x0041, 0x0304, 0x0061, 0x0304, 0x0041, 0x0306,
0x0061, 0x0306, 0x0041, 0x0328, 0x0061, 0x0328, 0x0043, 0x0301,
0x0063, 0x0301, 0x0043, 0x0302, 0x0063, 0x0302, 0x0043, 0x0307,
0x0063, 0x0307, 0x0043, 0x030c, 0x0063, 0x030c, 0x0044, 0x030c,
0x0064, 0x030c, 0x0045, 0x0304, 0x0065, 0x0304, 0x0045, 0x0306,
0x0065, 0x0306, 0x0045, 0x0307, 0x0065, 0x0307, 0x0045, 0x0328,
0x0065, 0x0328, 0x0045, 0x030c, 0x0065, 0x030c, 0x0047, 0x0302,
0x0067, 0x0302, 0x0047, 0x0306, 0x0067, 0x0306, 0x0047, 0x0307,
0x0067, 0x0307, 0x0047, 0x0327, 0x0067, 0x0327, 0x0048, 0x0302,
0x0068, 0x0302, 0x0049, 0x0303, 0x0069, 0x0303, 0x0049, 0x0304,
0x0069, 0x0304, 0x0049, 0x0306, 0x0069, 0x0306, 0x0049, 0x0328,
0x0069, 0x0328, 0x0049, 0x0307, 0x004a, 0x0302, 0x006a, 0x0302,
0x004b, 0x0327, 0x006b, 0x0327, 0x004c, 0x0301, 0x006c, 0x0301,
0x004c, 0x0327, 0x006c, 0x0327, 0x004c, 0x030c, 0x006c, 0x030c,
0x004e, 0x0301, 0x006e, 0x0301, 0x004e, 0x0327, 0x006e, 0x0327,
0x004e, 0x030c, 0x006e, 0x030c, 0x004f, 0x0304, 0x006f, 0x0304,
0x004f, 0x0306, 0x006f, 0x0306, 0x004f, 0x030b, 0x006f, 0x030b,
0x0052, 0x0301, 0x0072, 0x0301, 0x0052, 0x0327, 0x0072, 0x0327,
0x0052, 0x030c, 0x0072, 0x030c, 0x0053, 0x0301, 0x0073, 0x0301,
0x0053, 0x0302, 0x0073, 0x0302, 0x0053, 0x0327, 0x0073, 0x0327,
0x0053, 0x030c, 0x0073, 0x030c, 0x0054, 0x0327, 0x0074, 0x0327,
0x0054, 0x030c, 0x0074, 0x030c, 0x0055, 0x0303, 0x0075, 0x0303,
0x0055, 0x0304, 0x0075, 0x0304, 0x0055, 0x0306, 0x0075, 0x0306,
0x0055, 0x030a, 0x0075, 0x030a, 0x0055, 0x030b, 0x0075, 0x030b,
0x0055, 0x0328, 0x0075, 0x0328, 0x0057, 0x0302, 0x0077, 0x0302,
0x0059, 0x0302, 0x0079, 0x0302, 0x0059, 0x0308, 0x005a, 0x0301,
0x007a, 0x0301, 0x005a, 0x0307, 0x007a, 0x0307, 0x005a, 0x030c,
0x007a, 0x030c, 0x004f, 0x031b, 0x006f, 0x031b, 0x0055, 0x031b,
0x0075, 0x031b, 0x0041, 0x030c, 0x0061, 0x030c, 0x0049, 0x030c,
0x0069, 0x030c, 0x004f, 0x030c, 0x006f, 0x030c, 0x0055, 0x030c,
0x0075, 0x030c, 0x0055, 0x0308, 0x0304, 0x0075, 0x0308, 0x0304,
0x0055, 0x0308, 0x0301, 0x0075, 0x0308, 0x0301, 0x0055, 0x0308,
0x030c, 0x0075, 0x0308, 0x030c, 0x0055, 0x0308, 0x0300, 0x0075,
0x0308, 0x0300, 0x0041, 0x0308, 0x0304, 0x0061, 0x0308, 0x0304,
0x0041, 0x0307, 0x0304, 0x0061, 0x0307, 0x0304, 0x00c6, 0x0304,
0x00e6, 0x0304, 0x0047, 0x030c, 0x0067, 0x030c, 0x004b, 0x030c,
0x006b, 0x030c, 0x004f, 0x0328, 0x006f, 0x0328, 0x004f, 0x0328,
0x0304, 0x006f, 0x0328, 0x0304, 0x01b7, 0x030c, 0x0292, 0x030c,
0x006a, 0x030c, 0x0047, 0x0301, 0x0067, 0x0301, 0x0041, 0x030a,
0x0301, 0x0061, 0x030a, 0x0301, 0x00c6, 0x0301, 0x00e6, 0x0301,
0x00d8, 0x0301, 0x00f8, 0x0301, 0x0041, 0x030f, 0x0061, 0x030f,
0x0041, 0x0311, 0x0061, 0x0311, 0x0045, 0x030f, 0x0065, 0x030f,
0x0045, 0x0311, 0x0065, 0x0311, 0x0049, 0x030f, 0x0069, 0x030f,
0x0049, 0x0311, 0x0069, 0x0311, 0x004f, 0x030f, 0x006f, 0x030f,
0x004f, 0x0311, 0x006f, 0x0311, 0x0052, 0x030f, 0x0072, 0x030f,
0x0052, 0x0311, 0x0072, 0x0311, 0x0055, 0x030f, 0x0075, 0x030f,
0x0055, 0x0311, 0x0075, 0x0311, 0x0306, 0x0307, 0x0300, 0x0301,
0x0313, 0x0308, 0x030d, 0x02b9, 0x003b, 0x00a8, 0x030d, 0x0391,
0x030d, 0x00b7, 0x0395, 0x030d, 0x0397, 0x030d, 0x0399, 0x030d,
0x039f, 0x030d, 0x03a5, 0x030d, 0x03a9, 0x030d, 0x03b9, 0x0308,
0x030d, 0x0399, 0x0308, 0x03a5, 0x0308, 0x03b1, 0x030d, 0x03b5,
0x030d, 0x03b7, 0x030d, 0x03b9, 0x030d, 0x03c5, 0x0308, 0x030d,
0x03b9, 0x0308, 0x03c5, 0x0308, 0x03bf, 0x030d, 0x03c5, 0x030d,
0x03c9, 0x030d, 0x03d2, 0x030d, 0x03d2, 0x0308, 0x0415, 0x0308,
0x0413, 0x0301, 0x0406, 0x0308, 0x041a, 0x0301, 0x0423, 0x0306,
0x0418, 0x0306, 0x0438, 0x0306, 0x0435, 0x0308, 0x0433, 0x0301,
0x0456, 0x0308, 0x043a, 0x0301, 0x0443, 0x0306, 0x0474, 0x030f,
0x0475, 0x030f, 0x0416, 0x0306, 0x0436, 0x0306, 0x0410, 0x0306,
0x0430, 0x0306, 0x0410, 0x0308, 0x0430, 0x0308, 0x00c6, 0x00e6,
0x0415, 0x0306, 0x0435, 0x0306, 0x018f, 0x0259, 0x018f, 0x0308,
0x0259, 0x0308, 0x0416, 0x0308, 0x0436, 0x0308, 0x0417, 0x0308,
0x0437, 0x0308, 0x01b7, 0x0292, 0x0418, 0x0304, 0x0438, 0x0304,
0x0418, 0x0308, 0x0438, 0x0308, 0x041e, 0x0308, 0x043e, 0x0308,
0x019f, 0x0275, 0x019f, 0x0308, 0x0275, 0x0308, 0x0423, 0x0304,
0x0443, 0x0304, 0x0423, 0x0308, 0x0443, 0x0308, 0x0423, 0x030b,
0x0443, 0x030b, 0x0427, 0x0308, 0x0447, 0x0308, 0x042b, 0x0308,
0x044b, 0x0308, 0x0928, 0x093c, 0x0930, 0x093c, 0x0933, 0x093c,
0x0915, 0x093c, 0x0916, 0x093c, 0x0917, 0x093c, 0x091c, 0x093c,
0x0921, 0x093c, 0x0922, 0x093c, 0x092b, 0x093c, 0x092f, 0x093c,
0x09ac, 0x09bc, 0x09c7, 0x09be, 0x09c7, 0x09d7, 0x09a1, 0x09bc,
0x09a2, 0x09bc, 0x09af, 0x09bc, 0x0a16, 0x0a3c, 0x0a17, 0x0a3c,
0x0a1c, 0x0a3c, 0x0a21, 0x0a3c, 0x0a2b, 0x0a3c, 0x0b47, 0x0b56,
0x0b47, 0x0b3e, 0x0b47, 0x0b57, 0x0b21, 0x0b3c, 0x0b22, 0x0b3c,
0x0b2f, 0x0b3c, 0x0b92, 0x0bd7, 0x0bc6, 0x0bbe, 0x0bc7, 0x0bbe,
0x0bc6, 0x0bd7, 0x0c46, 0x0c56, 0x0cbf, 0x0cd5, 0x0cc6, 0x0cd5,
0x0cc6, 0x0cd6, 0x0cc6, 0x0cc2, 0x0cc6, 0x0cc2, 0x0cd5, 0x0d46,
0x0d3e, 0x0d47, 0x0d3e, 0x0d46, 0x0d57, 0x0e4d, 0x0e32, 0x0ecd,
0x0eb2, 0x0f42, 0x0fb7, 0x0f4c, 0x0fb7, 0x0f51, 0x0fb7, 0x0f56,
0x0fb7, 0x0f5b, 0x0fb7, 0x0f40, 0x0fb5, 0x0f72, 0x0f71, 0x0f74,
0x0f71, 0x0fb2, 0x0f80, 0x0fb2, 0x0f80, 0x0f71, 0x0fb3, 0x0f80,
0x0fb3, 0x0f80, 0x0f71, 0x0f80, 0x0f71, 0x0f92, 0x0fb7, 0x0f9c,
0x0fb7, 0x0fa1, 0x0fb7, 0x0fa6, 0x0fb7, 0x0fab, 0x0fb7, 0x0f90,
0x0fb5, 0x0041, 0x0325, 0x0061, 0x0325, 0x0042, 0x0307, 0x0062,
0x0307, 0x0042, 0x0323, 0x0062, 0x0323, 0x0042, 0x0331, 0x0062,
0x0331, 0x0043, 0x0327, 0x0301, 0x0063, 0x0327, 0x0301, 0x0044,
0x0307, 0x0064, 0x0307, 0x0044, 0x0323, 0x0064, 0x0323, 0x0044,
0x0331, 0x0064, 0x0331, 0x0044, 0x0327, 0x0064, 0x0327, 0x0044,
0x032d, 0x0064, 0x032d, 0x0045, 0x0304, 0x0300, 0x0065, 0x0304,
0x0300, 0x0045, 0x0304, 0x0301, 0x0065, 0x0304, 0x0301, 0x0045,
0x032d, 0x0065, 0x032d, 0x0045, 0x0330, 0x0065, 0x0330, 0x0045,
0x0327, 0x0306, 0x0065, 0x0327, 0x0306, 0x0046, 0x0307, 0x0066,
0x0307, 0x0047, 0x0304, 0x0067, 0x0304, 0x0048, 0x0307, 0x0068,
0x0307, 0x0048, 0x0323, 0x0068, 0x0323, 0x0048, 0x0308, 0x0068,
0x0308, 0x0048, 0x0327, 0x0068, 0x0327, 0x0048, 0x032e, 0x0068,
0x032e, 0x0049, 0x0330, 0x0069, 0x0330, 0x0049, 0x0308, 0x0301,
0x0069, 0x0308, 0x0301, 0x004b, 0x0301, 0x006b, 0x0301, 0x004b,
0x0323, 0x006b, 0x0323, 0x004b, 0x0331, 0x006b, 0x0331, 0x004c,
0x0323, 0x006c, 0x0323, 0x004c, 0x0323, 0x0304, 0x006c, 0x0323,
0x0304, 0x004c, 0x0331, 0x006c, 0x0331, 0x004c, 0x032d, 0x006c,
0x032d, 0x004d, 0x0301, 0x006d, 0x0301, 0x004d, 0x0307, 0x006d,
0x0307, 0x004d, 0x0323, 0x006d, 0x0323, 0x004e, 0x0307, 0x006e,
0x0307, 0x004e, 0x0323, 0x006e, 0x0323, 0x004e, 0x0331, 0x006e,
0x0331, 0x004e, 0x032d, 0x006e, 0x032d, 0x004f, 0x0303, 0x0301,
0x006f, 0x0303, 0x0301, 0x004f, 0x0303, 0x0308, 0x006f, 0x0303,
0x0308, 0x004f, 0x0304, 0x0300, 0x006f, 0x0304, 0x0300, 0x004f,
0x0304, 0x0301, 0x006f, 0x0304, 0x0301, 0x0050, 0x0301, 0x0070,
0x0301, 0x0050, 0x0307, 0x0070, 0x0307, 0x0052, 0x0307, 0x0072,
0x0307, 0x0052, 0x0323, 0x0072, 0x0323, 0x0052, 0x0323, 0x0304,
0x0072, 0x0323, 0x0304, 0x0052, 0x0331, 0x0072, 0x0331, 0x0053,
0x0307, 0x0073, 0x0307, 0x0053, 0x0323, 0x0073, 0x0323, 0x0053,
0x0301, 0x0307, 0x0073, 0x0301, 0x0307, 0x0053, 0x030c, 0x0307,
0x0073, 0x030c, 0x0307, 0x0053, 0x0323, 0x0307, 0x0073, 0x0323,
0x0307, 0x0054, 0x0307, 0x0074, 0x0307, 0x0054, 0x0323, 0x0074,
0x0323, 0x0054, 0x0331, 0x0074, 0x0331, 0x0054, 0x032d, 0x0074,
0x032d, 0x0055, 0x0324, 0x0075, 0x0324, 0x0055, 0x0330, 0x0075,
0x0330, 0x0055, 0x032d, 0x0075, 0x032d, 0x0055, 0x0303, 0x0301,
0x0075, 0x0303, 0x0301, 0x0055, 0x0304, 0x0308, 0x0075, 0x0304,
0x0308, 0x0056, 0x0303, 0x0076, 0x0303, 0x0056, 0x0323, 0x0076,
0x0323, 0x0057, 0x0300, 0x0077, 0x0300, 0x0057, 0x0301, 0x0077,
0x0301, 0x0057, 0x0308, 0x0077, 0x0308, 0x0057, 0x0307, 0x0077,
0x0307, 0x0057, 0x0323, 0x0077, 0x0323, 0x0058, 0x0307, 0x0078,
0x0307, 0x0058, 0x0308, 0x0078, 0x0308, 0x0059, 0x0307, 0x0079,
0x0307, 0x005a, 0x0302, 0x007a, 0x0302, 0x005a, 0x0323, 0x007a,
0x0323, 0x005a, 0x0331, 0x007a, 0x0331, 0x0068, 0x0331, 0x0074,
0x0308, 0x0077, 0x030a, 0x0079, 0x030a, 0x017f, 0x0307, 0x0041,
0x0323, 0x0061, 0x0323, 0x0041, 0x0309, 0x0061, 0x0309, 0x0041,
0x0302, 0x0301, 0x0061, 0x0302, 0x0301, 0x0041, 0x0302, 0x0300,
0x0061, 0x0302, 0x0300, 0x0041, 0x0302, 0x0309, 0x0061, 0x0302,
0x0309, 0x0041, 0x0302, 0x0303, 0x0061, 0x0302, 0x0303, 0x0041,
0x0323, 0x0302, 0x0061, 0x0323, 0x0302, 0x0041, 0x0306, 0x0301,
0x0061, 0x0306, 0x0301, 0x0041, 0x0306, 0x0300, 0x0061, 0x0306,
0x0300, 0x0041, 0x0306, 0x0309, 0x0061, 0x0306, 0x0309, 0x0041,
0x0306, 0x0303, 0x0061, 0x0306, 0x0303, 0x0041, 0x0323, 0x0306,
0x0061, 0x0323, 0x0306, 0x0045, 0x0323, 0x0065, 0x0323, 0x0045,
0x0309, 0x0065, 0x0309, 0x0045, 0x0303, 0x0065, 0x0303, 0x0045,
0x0302, 0x0301, 0x0065, 0x0302, 0x0301, 0x0045, 0x0302, 0x0300,
0x0065, 0x0302, 0x0300, 0x0045, 0x0302, 0x0309, 0x0065, 0x0302,
0x0309, 0x0045, 0x0302, 0x0303, 0x0065, 0x0302, 0x0303, 0x0045,
0x0323, 0x0302, 0x0065, 0x0323, 0x0302, 0x0049, 0x0309, 0x0069,
0x0309, 0x0049, 0x0323, 0x0069, 0x0323, 0x004f, 0x0323, 0x006f,
0x0323, 0x004f, 0x0309, 0x006f, 0x0309, 0x004f, 0x0302, 0x0301,
0x006f, 0x0302, 0x0301, 0x004f, 0x0302, 0x0300, 0x006f, 0x0302,
0x0300, 0x004f, 0x0302, 0x0309, 0x006f, 0x0302, 0x0309, 0x004f,
0x0302, 0x0303, 0x006f, 0x0302, 0x0303, 0x004f, 0x0323, 0x0302,
0x006f, 0x0323, 0x0302, 0x004f, 0x031b, 0x0301, 0x006f, 0x031b,
0x0301, 0x004f, 0x031b, 0x0300, 0x006f, 0x031b, 0x0300, 0x004f,
0x031b, 0x0309, 0x006f, 0x031b, 0x0309, 0x004f, 0x031b, 0x0303,
0x006f, 0x031b, 0x0303, 0x004f, 0x031b, 0x0323, 0x006f, 0x031b,
0x0323, 0x0055, 0x0323, 0x0075, 0x0323, 0x0055, 0x0309, 0x0075,
0x0309, 0x0055, 0x031b, 0x0301, 0x0075, 0x031b, 0x0301, 0x0055,
0x031b, 0x0300, 0x0075, 0x031b, 0x0300, 0x0055, 0x031b, 0x0309,
0x0075, 0x031b, 0x0309, 0x0055, 0x031b, 0x0303, 0x0075, 0x031b,
0x0303, 0x0055, 0x031b, 0x0323, 0x0075, 0x031b, 0x0323, 0x0059,
0x0300, 0x0079, 0x0300, 0x0059, 0x0323, 0x0079, 0x0323, 0x0059,
0x0309, 0x0079, 0x0309, 0x0059, 0x0303, 0x0079, 0x0303, 0x03b1,
0x0313, 0x03b1, 0x0314, 0x03b1, 0x0313, 0x0300, 0x03b1, 0x0314,
0x0300, 0x03b1, 0x0313, 0x0301, 0x03b1, 0x0314, 0x0301, 0x03b1,
0x0313, 0x0342, 0x03b1, 0x0314, 0x0342, 0x0391, 0x0313, 0x0391,
0x0314, 0x0391, 0x0313, 0x0300, 0x0391, 0x0314, 0x0300, 0x0391,
0x0313, 0x0301, 0x0391, 0x0314, 0x0301, 0x0391, 0x0313, 0x0342,
0x0391, 0x0314, 0x0342, 0x03b5, 0x0313, 0x03b5, 0x0314, 0x03b5,
0x0313, 0x0300, 0x03b5, 0x0314, 0x0300, 0x03b5, 0x0313, 0x0301,
0x03b5, 0x0314, 0x0301, 0x0395, 0x0313, 0x0395, 0x0314, 0x0395,
0x0313, 0x0300, 0x0395, 0x0314, 0x0300, 0x0395, 0x0313, 0x0301,
0x0395, 0x0314, 0x0301, 0x03b7, 0x0313, 0x03b7, 0x0314, 0x03b7,
0x0313, 0x0300, 0x03b7, 0x0314, 0x0300, 0x03b7, 0x0313, 0x0301,
0x03b7, 0x0314, 0x0301, 0x03b7, 0x0313, 0x0342, 0x03b7, 0x0314,
0x0342, 0x0397, 0x0313, 0x0397, 0x0314, 0x0397, 0x0313, 0x0300,
0x0397, 0x0314, 0x0300, 0x0397, 0x0313, 0x0301, 0x0397, 0x0314,
0x0301, 0x0397, 0x0313, 0x0342, 0x0397, 0x0314, 0x0342, 0x03b9,
0x0313, 0x03b9, 0x0314, 0x03b9, 0x0313, 0x0300, 0x03b9, 0x0314,
0x0300, 0x03b9, 0x0313, 0x0301, 0x03b9, 0x0314, 0x0301, 0x03b9,
0x0313, 0x0342, 0x03b9, 0x0314, 0x0342, 0x0399, 0x0313, 0x0399,
0x0314, 0x0399, 0x0313, 0x0300, 0x0399, 0x0314, 0x0300, 0x0399,
0x0313, 0x0301, 0x0399, 0x0314, 0x0301, 0x0399, 0x0313, 0x0342,
0x0399, 0x0314, 0x0342, 0x03bf, 0x0313, 0x03bf, 0x0314, 0x03bf,
0x0313, 0x0300, 0x03bf, 0x0314, 0x0300, 0x03bf, 0x0313, 0x0301,
0x03bf, 0x0314, 0x0301, 0x039f, 0x0313, 0x039f, 0x0314, 0x039f,
0x0313, 0x0300, 0x039f, 0x0314, 0x0300, 0x039f, 0x0313, 0x0301,
0x039f, 0x0314, 0x0301, 0x03c5, 0x0313, 0x03c5, 0x0314, 0x03c5,
0x0313, 0x0300, 0x03c5, 0x0314, 0x0300, 0x03c5, 0x0313, 0x0301,
0x03c5, 0x0314, 0x0301, 0x03c5, 0x0313, 0x0342, 0x03c5, 0x0314,
0x0342, 0x03a5, 0x0314, 0x03a5, 0x0314, 0x0300, 0x03a5, 0x0314,
0x0301, 0x03a5, 0x0314, 0x0342, 0x03c9, 0x0313, 0x03c9, 0x0314,
0x03c9, 0x0313, 0x0300, 0x03c9, 0x0314, 0x0300, 0x03c9, 0x0313,
0x0301, 0x03c9, 0x0314, 0x0301, 0x03c9, 0x0313, 0x0342, 0x03c9,
0x0314, 0x0342, 0x03a9, 0x0313, 0x03a9, 0x0314, 0x03a9, 0x0313,
0x0300, 0x03a9, 0x0314, 0x0300, 0x03a9, 0x0313, 0x0301, 0x03a9,
0x0314, 0x0301, 0x03a9, 0x0313, 0x0342, 0x03a9, 0x0314, 0x0342,
0x03b1, 0x0300, 0x03b1, 0x0301, 0x03b5, 0x0300, 0x03b5, 0x0301,
0x03b7, 0x0300, 0x03b7, 0x0301, 0x03b9, 0x0300, 0x03b9, 0x0301,
0x03bf, 0x0300, 0x03bf, 0x0301, 0x03c5, 0x0300, 0x03c5, 0x0301,
0x03c9, 0x0300, 0x03c9, 0x0301, 0x03b1, 0x0345, 0x0313, 0x03b1,
0x0345, 0x0314, 0x03b1, 0x0345, 0x0313, 0x0300, 0x03b1, 0x0345,
0x0314, 0x0300, 0x03b1, 0x0345, 0x0313, 0x0301, 0x03b1, 0x0345,
0x0314, 0x0301, 0x03b1, 0x0345, 0x0313, 0x0342, 0x03b1, 0x0345,
0x0314, 0x0342, 0x0391, 0x0345, 0x0313, 0x0391, 0x0345, 0x0314,
0x0391, 0x0345, 0x0313, 0x0300, 0x0391, 0x0345, 0x0314, 0x0300,
0x0391, 0x0345, 0x0313, 0x0301, 0x0391, 0x0345, 0x0314, 0x0301,
0x0391, 0x0345, 0x0313, 0x0342, 0x0391, 0x0345, 0x0314, 0x0342,
0x03b7, 0x0345, 0x0313, 0x03b7, 0x0345, 0x0314, 0x03b7, 0x0345,
0x0313, 0x0300, 0x03b7, 0x0345, 0x0314, 0x0300, 0x03b7, 0x0345,
0x0313, 0x0301, 0x03b7, 0x0345, 0x0314, 0x0301, 0x03b7, 0x0345,
0x0313, 0x0342, 0x03b7, 0x0345, 0x0314, 0x0342, 0x0397, 0x0345,
0x0313, 0x0397, 0x0345, 0x0314, 0x0397, 0x0345, 0x0313, 0x0300,
0x0397, 0x0345, 0x0314, 0x0300, 0x0397, 0x0345, 0x0313, 0x0301,
0x0397, 0x0345, 0x0314, 0x0301, 0x0397, 0x0345, 0x0313, 0x0342,
0x0397, 0x0345, 0x0314, 0x0342, 0x03c9, 0x0345, 0x0313, 0x03c9,
0x0345, 0x0314, 0x03c9, 0x0345, 0x0313, 0x0300, 0x03c9, 0x0345,
0x0314, 0x0300, 0x03c9, 0x0345, 0x0313, 0x0301, 0x03c9, 0x0345,
0x0314, 0x0301, 0x03c9, 0x0345, 0x0313, 0x0342, 0x03c9, 0x0345,
0x0314, 0x0342, 0x03a9, 0x0345, 0x0313, 0x03a9, 0x0345, 0x0314,
0x03a9, 0x0345, 0x0313, 0x0300, 0x03a9, 0x0345, 0x0314, 0x0300,
0x03a9, 0x0345, 0x0313, 0x0301, 0x03a9, 0x0345, 0x0314, 0x0301,
0x03a9, 0x0345, 0x0313, 0x0342, 0x03a9, 0x0345, 0x0314, 0x0342,
0x03b1, 0x0306, 0x03b1, 0x0304, 0x03b1, 0x0345, 0x0300, 0x03b1,
0x0345, 0x03b1, 0x0345, 0x0301, 0x03b1, 0x0342, 0x03b1, 0x0345,
0x0342, 0x0391, 0x0306, 0x0391, 0x0304, 0x0391, 0x0300, 0x0391,
0x0301, 0x0391, 0x0345, 0x03b9, 0x00a8, 0x0342, 0x03b7, 0x0345,
0x0300, 0x03b7, 0x0345, 0x03b7, 0x0345, 0x0301, 0x03b7, 0x0342,
0x03b7, 0x0345, 0x0342, 0x0395, 0x0300, 0x0395, 0x0301, 0x0397,
0x0300, 0x0397, 0x0301, 0x0397, 0x0345, 0x1fbf, 0x0300, 0x1fbf,
0x0301, 0x1fbf, 0x0342, 0x03b9, 0x0306, 0x03b9, 0x0304, 0x03b9,
0x0308, 0x0300, 0x03b9, 0x0308, 0x0301, 0x03b9, 0x0342, 0x03b9,
0x0308, 0x0342, 0x0399, 0x0306, 0x0399, 0x0304, 0x0399, 0x0300,
0x0399, 0x0301, 0x1ffe, 0x0300, 0x1ffe, 0x0301, 0x1ffe, 0x0342,
0x03c5, 0x0306, 0x03c5, 0x0304, 0x03c5, 0x0308, 0x0300, 0x03c5,
0x0308, 0x0301, 0x03c1, 0x0313, 0x03c1, 0x0314, 0x03c5, 0x0342,
0x03c5, 0x0308, 0x0342, 0x03a5, 0x0306, 0x03a5, 0x0304, 0x03a5,
0x0300, 0x03a5, 0x0301, 0x03a1, 0x0314, 0x00a8, 0x0300, 0x00a8,
0x0301, 0x0060, 0x03c9, 0x0345, 0x0300, 0x03c9, 0x0345, 0x03bf,
0x0345, 0x0301, 0x03c9, 0x0342, 0x03c9, 0x0345, 0x0342, 0x039f,
0x0300, 0x039f, 0x0301, 0x03a9, 0x0300, 0x03a9, 0x0301, 0x03a9,
0x0345, 0x00b4, 0x304b, 0x3099, 0x304d, 0x3099, 0x304f, 0x3099,
0x3051, 0x3099, 0x3053, 0x3099, 0x3055, 0x3099, 0x3057, 0x3099,
0x3059, 0x3099, 0x305b, 0x3099, 0x305d, 0x3099, 0x305f, 0x3099,
0x3061, 0x3099, 0x3064, 0x3099, 0x3066, 0x3099, 0x3068, 0x3099,
0x306f, 0x3099, 0x306f, 0x309a, 0x3072, 0x3099, 0x3072, 0x309a,
0x3075, 0x3099, 0x3075, 0x309a, 0x3078, 0x3099, 0x3078, 0x309a,
0x307b, 0x3099, 0x307b, 0x309a, 0x3046, 0x3099, 0x309d, 0x3099,
0x30ab, 0x3099, 0x30ad, 0x3099, 0x30af, 0x3099, 0x30b1, 0x3099,
0x30b3, 0x3099, 0x30b5, 0x3099, 0x30b7, 0x3099, 0x30b9, 0x3099,
0x30bb, 0x3099, 0x30bd, 0x3099, 0x30bf, 0x3099, 0x30c1, 0x3099,
0x30c4, 0x3099, 0x30c6, 0x3099, 0x30c8, 0x3099, 0x30cf, 0x3099,
0x30cf, 0x309a, 0x30d2, 0x3099, 0x30d2, 0x309a, 0x30d5, 0x3099,
0x30d5, 0x309a, 0x30d8, 0x3099, 0x30d8, 0x309a, 0x30db, 0x3099,
0x30db, 0x309a, 0x30a6, 0x3099, 0x30ef, 0x3099, 0x30f0, 0x3099,
0x30f1, 0x3099, 0x30f2, 0x3099, 0x30fd, 0x3099, 0x05f2, 0x05b7,
0x05e9, 0x05c1, 0x05e9, 0x05c2, 0x05e9, 0x05bc, 0x05c1, 0x05e9,
0x05bc, 0x05c2, 0x05d0, 0x05b7, 0x05d0, 0x05b8, 0x05d0, 0x05bc,
0x05d1, 0x05bc, 0x05d2, 0x05bc, 0x05d3, 0x05bc, 0x05d4, 0x05bc,
0x05d5, 0x05bc, 0x05d6, 0x05bc, 0x05d8, 0x05bc, 0x05d9, 0x05bc,
0x05da, 0x05bc, 0x05db, 0x05bc, 0x05dc, 0x05bc, 0x05de, 0x05bc,
0x05e0, 0x05bc, 0x05e1, 0x05bc, 0x05e3, 0x05bc, 0x05e4, 0x05bc,
0x05e6, 0x05bc, 0x05e7, 0x05bc, 0x05e8, 0x05bc, 0x05e9, 0x05bc,
0x05ea, 0x05bc, 0x05d5, 0x05b9, 0x05d1, 0x05bf, 0x05db, 0x05bf,
0x05e4, 0x05bf
};
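/*
 * Layout notes, inferred from the data below rather than from an external
 * spec: every node of hfsplus_compose_table is a (value, count) pair of
 * u16s followed by `count` (key, offset) pairs. The value slot holds the
 * character this prefix composes to (0x0000 when none exists), the keys
 * are sorted code points (base characters or further combining marks,
 * depending on depth) suitable for binary search, and each offset is a
 * u16 index back into hfsplus_compose_table. The (0xffff, 0x0000) pair
 * below marks the hangul range, which is handled specially.
 *
 * A minimal lookup sketch under these assumptions (the real helper lives
 * in this filesystem's unicode handling; the name here is illustrative):
 *
 *	static u16 *compose_lookup(u16 *p, u16 cc)
 *	{
 *		int i, s = 1, e = p[1];	// p[1] = number of key/offset pairs
 *
 *		if (!e || cc < p[s * 2] || cc > p[e * 2])
 *			return NULL;	// cc outside this node's key range
 *		while (s <= e) {
 *			i = (s + e) / 2;
 *			if (cc > p[i * 2])
 *				s = i + 1;
 *			else if (cc < p[i * 2])
 *				e = i - 1;
 *			else	// descend to the child node
 *				return hfsplus_compose_table + p[i * 2 + 1];
 *		}
 *		return NULL;
 *	}
 */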
u16 hfsplus_compose_table[] = {
/* base */
0x0000, 0x0050, 0x0300, 0x00a4, 0x0301, 0x00e4, 0x0302, 0x015c,
0x0303, 0x0192, 0x0304, 0x01b4, 0x0306, 0x01e6, 0x0307, 0x0220,
0x0308, 0x0270, 0x0309, 0x02d2, 0x030a, 0x02ec, 0x030b, 0x02fa,
0x030c, 0x0308, 0x030d, 0x034c, 0x030f, 0x0370, 0x0311, 0x038e,
0x0313, 0x03a8, 0x0314, 0x03c6, 0x031b, 0x03e8, 0x0323, 0x03f2,
0x0324, 0x0440, 0x0325, 0x0446, 0x0327, 0x044c, 0x0328, 0x047a,
0x032d, 0x0490, 0x032e, 0x04aa, 0x0330, 0x04b0, 0x0331, 0x04be,
0x0342, 0x04e2, 0x0345, 0x04f4, 0x05b7, 0x0504, 0x05b8, 0x050a,
0x05b9, 0x050e, 0x05bc, 0x0512, 0x05bf, 0x0540, 0x05c1, 0x0548,
0x05c2, 0x054c, 0x093c, 0x0550, 0x09bc, 0x0568, 0x09be, 0x0572,
0x09d7, 0x0576, 0x0a3c, 0x057a, 0x0b3c, 0x0586, 0x0b3e, 0x058e,
0x0b56, 0x0592, 0x0b57, 0x0596, 0x0bbe, 0x059a, 0x0bd7, 0x05a0,
0x0c56, 0x05a6, 0x0cc2, 0x05aa, 0x0cd5, 0x05ae, 0x0cd6, 0x05b4,
0x0d3e, 0x05b8, 0x0d57, 0x05be, 0x0e32, 0x05c2, 0x0eb2, 0x05c6,
0x0f71, 0x05ca, 0x0f80, 0x05d2, 0x0fb5, 0x05d8, 0x0fb7, 0x05de,
0x1100, 0x00a2, 0x1101, 0x00a2, 0x1102, 0x00a2, 0x1103, 0x00a2,
0x1104, 0x00a2, 0x1105, 0x00a2, 0x1106, 0x00a2, 0x1107, 0x00a2,
0x1108, 0x00a2, 0x1109, 0x00a2, 0x110a, 0x00a2, 0x110b, 0x00a2,
0x110c, 0x00a2, 0x110d, 0x00a2, 0x110e, 0x00a2, 0x110f, 0x00a2,
0x1110, 0x00a2, 0x1111, 0x00a2, 0x1112, 0x00a2, 0x3099, 0x05f4,
0x309a, 0x0656,
/* hangul marker */
0xffff, 0x0000,
/* 0x0300 */
0x0340, 0x001f, 0x0041, 0x066c, 0x0045, 0x066e, 0x0049, 0x0670,
0x004f, 0x0672, 0x0055, 0x0674, 0x0057, 0x0676, 0x0059, 0x0678,
0x0061, 0x067a, 0x0065, 0x067c, 0x0069, 0x067e, 0x006f, 0x0680,
0x0075, 0x0682, 0x0077, 0x0684, 0x0079, 0x0686, 0x00a8, 0x0688,
0x0391, 0x068a, 0x0395, 0x068c, 0x0397, 0x068e, 0x0399, 0x0690,
0x039f, 0x0692, 0x03a5, 0x0694, 0x03a9, 0x0696, 0x03b1, 0x0698,
0x03b5, 0x069a, 0x03b7, 0x069c, 0x03b9, 0x069e, 0x03bf, 0x06a0,
0x03c5, 0x06a2, 0x03c9, 0x06a4, 0x1fbf, 0x06a6, 0x1ffe, 0x06a8,
/* 0x0301 */
0x0341, 0x003b, 0x0041, 0x06aa, 0x0043, 0x06ac, 0x0045, 0x06ae,
0x0047, 0x06b0, 0x0049, 0x06b2, 0x004b, 0x06b4, 0x004c, 0x06b6,
0x004d, 0x06b8, 0x004e, 0x06ba, 0x004f, 0x06bc, 0x0050, 0x06be,
0x0052, 0x06c0, 0x0053, 0x06c2, 0x0055, 0x06c6, 0x0057, 0x06c8,
0x0059, 0x06ca, 0x005a, 0x06cc, 0x0061, 0x06ce, 0x0063, 0x06d0,
0x0065, 0x06d2, 0x0067, 0x06d4, 0x0069, 0x06d6, 0x006b, 0x06d8,
0x006c, 0x06da, 0x006d, 0x06dc, 0x006e, 0x06de, 0x006f, 0x06e0,
0x0070, 0x06e2, 0x0072, 0x06e4, 0x0073, 0x06e6, 0x0075, 0x06ea,
0x0077, 0x06ec, 0x0079, 0x06ee, 0x007a, 0x06f0, 0x00a8, 0x06f2,
0x00c6, 0x06f4, 0x00d8, 0x06f6, 0x00e6, 0x06f8, 0x00f8, 0x06fa,
0x0391, 0x06fc, 0x0395, 0x06fe, 0x0397, 0x0700, 0x0399, 0x0702,
0x039f, 0x0704, 0x03a5, 0x0706, 0x03a9, 0x0708, 0x03b1, 0x070a,
0x03b5, 0x070c, 0x03b7, 0x070e, 0x03b9, 0x0710, 0x03bf, 0x0712,
0x03c5, 0x0714, 0x03c9, 0x0716, 0x0413, 0x0718, 0x041a, 0x071a,
0x0433, 0x071c, 0x043a, 0x071e, 0x1fbf, 0x0720, 0x1ffe, 0x0722,
/* 0x0302 */
0x0000, 0x001a, 0x0041, 0x0724, 0x0043, 0x072e, 0x0045, 0x0730,
0x0047, 0x073a, 0x0048, 0x073c, 0x0049, 0x073e, 0x004a, 0x0740,
0x004f, 0x0742, 0x0053, 0x074c, 0x0055, 0x074e, 0x0057, 0x0750,
0x0059, 0x0752, 0x005a, 0x0754, 0x0061, 0x0756, 0x0063, 0x0760,
0x0065, 0x0762, 0x0067, 0x076c, 0x0068, 0x076e, 0x0069, 0x0770,
0x006a, 0x0772, 0x006f, 0x0774, 0x0073, 0x077e, 0x0075, 0x0780,
0x0077, 0x0782, 0x0079, 0x0784, 0x007a, 0x0786,
/* 0x0303 */
0x0000, 0x0010, 0x0041, 0x0788, 0x0045, 0x078a, 0x0049, 0x078c,
0x004e, 0x078e, 0x004f, 0x0790, 0x0055, 0x0796, 0x0056, 0x079a,
0x0059, 0x079c, 0x0061, 0x079e, 0x0065, 0x07a0, 0x0069, 0x07a2,
0x006e, 0x07a4, 0x006f, 0x07a6, 0x0075, 0x07ac, 0x0076, 0x07b0,
0x0079, 0x07b2,
/* 0x0304 */
0x0000, 0x0018, 0x0041, 0x07b4, 0x0045, 0x07b6, 0x0047, 0x07bc,
0x0049, 0x07be, 0x004f, 0x07c0, 0x0055, 0x07c6, 0x0061, 0x07ca,
0x0065, 0x07cc, 0x0067, 0x07d2, 0x0069, 0x07d4, 0x006f, 0x07d6,
0x0075, 0x07dc, 0x00c6, 0x07e0, 0x00e6, 0x07e2, 0x0391, 0x07e4,
0x0399, 0x07e6, 0x03a5, 0x07e8, 0x03b1, 0x07ea, 0x03b9, 0x07ec,
0x03c5, 0x07ee, 0x0418, 0x07f0, 0x0423, 0x07f2, 0x0438, 0x07f4,
0x0443, 0x07f6,
/* 0x0306 */
0x0000, 0x001c, 0x0041, 0x07f8, 0x0045, 0x0802, 0x0047, 0x0804,
0x0049, 0x0806, 0x004f, 0x0808, 0x0055, 0x080a, 0x0061, 0x080c,
0x0065, 0x0816, 0x0067, 0x0818, 0x0069, 0x081a, 0x006f, 0x081c,
0x0075, 0x081e, 0x0391, 0x0820, 0x0399, 0x0822, 0x03a5, 0x0824,
0x03b1, 0x0826, 0x03b9, 0x0828, 0x03c5, 0x082a, 0x0410, 0x082c,
0x0415, 0x082e, 0x0416, 0x0830, 0x0418, 0x0832, 0x0423, 0x0834,
0x0430, 0x0836, 0x0435, 0x0838, 0x0436, 0x083a, 0x0438, 0x083c,
0x0443, 0x083e,
/* 0x0307 */
0x0000, 0x0027, 0x0041, 0x0840, 0x0042, 0x0844, 0x0043, 0x0846,
0x0044, 0x0848, 0x0045, 0x084a, 0x0046, 0x084c, 0x0047, 0x084e,
0x0048, 0x0850, 0x0049, 0x0852, 0x004d, 0x0854, 0x004e, 0x0856,
0x0050, 0x0858, 0x0052, 0x085a, 0x0053, 0x085c, 0x0054, 0x085e,
0x0057, 0x0860, 0x0058, 0x0862, 0x0059, 0x0864, 0x005a, 0x0866,
0x0061, 0x0868, 0x0062, 0x086c, 0x0063, 0x086e, 0x0064, 0x0870,
0x0065, 0x0872, 0x0066, 0x0874, 0x0067, 0x0876, 0x0068, 0x0878,
0x006d, 0x087a, 0x006e, 0x087c, 0x0070, 0x087e, 0x0072, 0x0880,
0x0073, 0x0882, 0x0074, 0x0884, 0x0077, 0x0886, 0x0078, 0x0888,
0x0079, 0x088a, 0x007a, 0x088c, 0x017f, 0x088e, 0x0306, 0x0890,
/* 0x0308 */
0x0000, 0x0030, 0x0041, 0x0892, 0x0045, 0x0896, 0x0048, 0x0898,
0x0049, 0x089a, 0x004f, 0x089e, 0x0055, 0x08a0, 0x0057, 0x08aa,
0x0058, 0x08ac, 0x0059, 0x08ae, 0x0061, 0x08b0, 0x0065, 0x08b4,
0x0068, 0x08b6, 0x0069, 0x08b8, 0x006f, 0x08bc, 0x0074, 0x08be,
0x0075, 0x08c0, 0x0077, 0x08ca, 0x0078, 0x08cc, 0x0079, 0x08ce,
0x018f, 0x08d0, 0x019f, 0x08d2, 0x0259, 0x08d4, 0x0275, 0x08d6,
0x0399, 0x08d8, 0x03a5, 0x08da, 0x03b9, 0x08dc, 0x03c5, 0x08e6,
0x03d2, 0x08f0, 0x0406, 0x08f2, 0x0410, 0x08f4, 0x0415, 0x08f6,
0x0416, 0x08f8, 0x0417, 0x08fa, 0x0418, 0x08fc, 0x041e, 0x08fe,
0x0423, 0x0900, 0x0427, 0x0902, 0x042b, 0x0904, 0x0430, 0x0906,
0x0435, 0x0908, 0x0436, 0x090a, 0x0437, 0x090c, 0x0438, 0x090e,
0x043e, 0x0910, 0x0443, 0x0912, 0x0447, 0x0914, 0x044b, 0x0916,
0x0456, 0x0918,
/* 0x0309 */
0x0000, 0x000c, 0x0041, 0x091a, 0x0045, 0x091c, 0x0049, 0x091e,
0x004f, 0x0920, 0x0055, 0x0922, 0x0059, 0x0924, 0x0061, 0x0926,
0x0065, 0x0928, 0x0069, 0x092a, 0x006f, 0x092c, 0x0075, 0x092e,
0x0079, 0x0930,
/* 0x030a */
0x0000, 0x0006, 0x0041, 0x0932, 0x0055, 0x0936, 0x0061, 0x0938,
0x0075, 0x093c, 0x0077, 0x093e, 0x0079, 0x0940,
/* 0x030b */
0x0000, 0x0006, 0x004f, 0x0942, 0x0055, 0x0944, 0x006f, 0x0946,
0x0075, 0x0948, 0x0423, 0x094a, 0x0443, 0x094c,
/* 0x030c */
0x0000, 0x0021, 0x0041, 0x094e, 0x0043, 0x0950, 0x0044, 0x0952,
0x0045, 0x0954, 0x0047, 0x0956, 0x0049, 0x0958, 0x004b, 0x095a,
0x004c, 0x095c, 0x004e, 0x095e, 0x004f, 0x0960, 0x0052, 0x0962,
0x0053, 0x0964, 0x0054, 0x0968, 0x0055, 0x096a, 0x005a, 0x096c,
0x0061, 0x096e, 0x0063, 0x0970, 0x0064, 0x0972, 0x0065, 0x0974,
0x0067, 0x0976, 0x0069, 0x0978, 0x006a, 0x097a, 0x006b, 0x097c,
0x006c, 0x097e, 0x006e, 0x0980, 0x006f, 0x0982, 0x0072, 0x0984,
0x0073, 0x0986, 0x0074, 0x098a, 0x0075, 0x098c, 0x007a, 0x098e,
0x01b7, 0x0990, 0x0292, 0x0992,
/* 0x030d */
0x0000, 0x0011, 0x00a8, 0x0994, 0x0308, 0x0996, 0x0391, 0x0998,
0x0395, 0x099a, 0x0397, 0x099c, 0x0399, 0x099e, 0x039f, 0x09a0,
0x03a5, 0x09a2, 0x03a9, 0x09a4, 0x03b1, 0x09a6, 0x03b5, 0x09a8,
0x03b7, 0x09aa, 0x03b9, 0x09ac, 0x03bf, 0x09ae, 0x03c5, 0x09b0,
0x03c9, 0x09b2, 0x03d2, 0x09b4,
/* 0x030f */
0x0000, 0x000e, 0x0041, 0x09b6, 0x0045, 0x09b8, 0x0049, 0x09ba,
0x004f, 0x09bc, 0x0052, 0x09be, 0x0055, 0x09c0, 0x0061, 0x09c2,
0x0065, 0x09c4, 0x0069, 0x09c6, 0x006f, 0x09c8, 0x0072, 0x09ca,
0x0075, 0x09cc, 0x0474, 0x09ce, 0x0475, 0x09d0,
/* 0x0311 */
0x0000, 0x000c, 0x0041, 0x09d2, 0x0045, 0x09d4, 0x0049, 0x09d6,
0x004f, 0x09d8, 0x0052, 0x09da, 0x0055, 0x09dc, 0x0061, 0x09de,
0x0065, 0x09e0, 0x0069, 0x09e2, 0x006f, 0x09e4, 0x0072, 0x09e6,
0x0075, 0x09e8,
/* 0x0313 */
0x0343, 0x000e, 0x0391, 0x09ea, 0x0395, 0x09f2, 0x0397, 0x09f8,
0x0399, 0x0a00, 0x039f, 0x0a08, 0x03a9, 0x0a0e, 0x03b1, 0x0a16,
0x03b5, 0x0a1e, 0x03b7, 0x0a24, 0x03b9, 0x0a2c, 0x03bf, 0x0a34,
0x03c1, 0x0a3a, 0x03c5, 0x0a3c, 0x03c9, 0x0a44,
/* 0x0314 */
0x0000, 0x0010, 0x0391, 0x0a4c, 0x0395, 0x0a54, 0x0397, 0x0a5a,
0x0399, 0x0a62, 0x039f, 0x0a6a, 0x03a1, 0x0a70, 0x03a5, 0x0a72,
0x03a9, 0x0a7a, 0x03b1, 0x0a82, 0x03b5, 0x0a8a, 0x03b7, 0x0a90,
0x03b9, 0x0a98, 0x03bf, 0x0aa0, 0x03c1, 0x0aa6, 0x03c5, 0x0aa8,
0x03c9, 0x0ab0,
/* 0x031b */
0x0000, 0x0004, 0x004f, 0x0ab8, 0x0055, 0x0ac4, 0x006f, 0x0ad0,
0x0075, 0x0adc,
/* 0x0323 */
0x0000, 0x0026, 0x0041, 0x0ae8, 0x0042, 0x0aee, 0x0044, 0x0af0,
0x0045, 0x0af2, 0x0048, 0x0af6, 0x0049, 0x0af8, 0x004b, 0x0afa,
0x004c, 0x0afc, 0x004d, 0x0b00, 0x004e, 0x0b02, 0x004f, 0x0b04,
0x0052, 0x0b08, 0x0053, 0x0b0c, 0x0054, 0x0b10, 0x0055, 0x0b12,
0x0056, 0x0b14, 0x0057, 0x0b16, 0x0059, 0x0b18, 0x005a, 0x0b1a,
0x0061, 0x0b1c, 0x0062, 0x0b22, 0x0064, 0x0b24, 0x0065, 0x0b26,
0x0068, 0x0b2a, 0x0069, 0x0b2c, 0x006b, 0x0b2e, 0x006c, 0x0b30,
0x006d, 0x0b34, 0x006e, 0x0b36, 0x006f, 0x0b38, 0x0072, 0x0b3c,
0x0073, 0x0b40, 0x0074, 0x0b44, 0x0075, 0x0b46, 0x0076, 0x0b48,
0x0077, 0x0b4a, 0x0079, 0x0b4c, 0x007a, 0x0b4e,
/* 0x0324 */
0x0000, 0x0002, 0x0055, 0x0b50, 0x0075, 0x0b52,
/* 0x0325 */
0x0000, 0x0002, 0x0041, 0x0b54, 0x0061, 0x0b56,
/* 0x0327 */
0x0000, 0x0016, 0x0043, 0x0b58, 0x0044, 0x0b5c, 0x0045, 0x0b5e,
0x0047, 0x0b62, 0x0048, 0x0b64, 0x004b, 0x0b66, 0x004c, 0x0b68,
0x004e, 0x0b6a, 0x0052, 0x0b6c, 0x0053, 0x0b6e, 0x0054, 0x0b70,
0x0063, 0x0b72, 0x0064, 0x0b76, 0x0065, 0x0b78, 0x0067, 0x0b7c,
0x0068, 0x0b7e, 0x006b, 0x0b80, 0x006c, 0x0b82, 0x006e, 0x0b84,
0x0072, 0x0b86, 0x0073, 0x0b88, 0x0074, 0x0b8a,
/* 0x0328 */
0x0000, 0x000a, 0x0041, 0x0b8c, 0x0045, 0x0b8e, 0x0049, 0x0b90,
0x004f, 0x0b92, 0x0055, 0x0b96, 0x0061, 0x0b98, 0x0065, 0x0b9a,
0x0069, 0x0b9c, 0x006f, 0x0b9e, 0x0075, 0x0ba2,
/* 0x032d */
0x0000, 0x000c, 0x0044, 0x0ba4, 0x0045, 0x0ba6, 0x004c, 0x0ba8,
0x004e, 0x0baa, 0x0054, 0x0bac, 0x0055, 0x0bae, 0x0064, 0x0bb0,
0x0065, 0x0bb2, 0x006c, 0x0bb4, 0x006e, 0x0bb6, 0x0074, 0x0bb8,
0x0075, 0x0bba,
/* 0x032e */
0x0000, 0x0002, 0x0048, 0x0bbc, 0x0068, 0x0bbe,
/* 0x0330 */
0x0000, 0x0006, 0x0045, 0x0bc0, 0x0049, 0x0bc2, 0x0055, 0x0bc4,
0x0065, 0x0bc6, 0x0069, 0x0bc8, 0x0075, 0x0bca,
/* 0x0331 */
0x0000, 0x0011, 0x0042, 0x0bcc, 0x0044, 0x0bce, 0x004b, 0x0bd0,
0x004c, 0x0bd2, 0x004e, 0x0bd4, 0x0052, 0x0bd6, 0x0054, 0x0bd8,
0x005a, 0x0bda, 0x0062, 0x0bdc, 0x0064, 0x0bde, 0x0068, 0x0be0,
0x006b, 0x0be2, 0x006c, 0x0be4, 0x006e, 0x0be6, 0x0072, 0x0be8,
0x0074, 0x0bea, 0x007a, 0x0bec,
/* 0x0342 */
0x0000, 0x0008, 0x00a8, 0x0bee, 0x03b1, 0x0bf0, 0x03b7, 0x0bf2,
0x03b9, 0x0bf4, 0x03c5, 0x0bf6, 0x03c9, 0x0bf8, 0x1fbf, 0x0bfa,
0x1ffe, 0x0bfc,
/* 0x0345 */
0x0000, 0x0007, 0x0391, 0x0bfe, 0x0397, 0x0c04, 0x03a9, 0x0c0a,
0x03b1, 0x0c10, 0x03b7, 0x0c1c, 0x03bf, 0x0c28, 0x03c9, 0x0c2c,
/* 0x05b7 */
0x0000, 0x0002, 0x05d0, 0x0c36, 0x05f2, 0x0c38,
/* 0x05b8 */
0x0000, 0x0001, 0x05d0, 0x0c3a,
/* 0x05b9 */
0x0000, 0x0001, 0x05d5, 0x0c3c,
/* 0x05bc */
0x0000, 0x0016, 0x05d0, 0x0c3e, 0x05d1, 0x0c40, 0x05d2, 0x0c42,
0x05d3, 0x0c44, 0x05d4, 0x0c46, 0x05d5, 0x0c48, 0x05d6, 0x0c4a,
0x05d8, 0x0c4c, 0x05d9, 0x0c4e, 0x05da, 0x0c50, 0x05db, 0x0c52,
0x05dc, 0x0c54, 0x05de, 0x0c56, 0x05e0, 0x0c58, 0x05e1, 0x0c5a,
0x05e3, 0x0c5c, 0x05e4, 0x0c5e, 0x05e6, 0x0c60, 0x05e7, 0x0c62,
0x05e8, 0x0c64, 0x05e9, 0x0c66, 0x05ea, 0x0c6c,
/* 0x05bf */
0x0000, 0x0003, 0x05d1, 0x0c6e, 0x05db, 0x0c70, 0x05e4, 0x0c72,
/* 0x05c1 */
0x0000, 0x0001, 0x05e9, 0x0c74,
/* 0x05c2 */
0x0000, 0x0001, 0x05e9, 0x0c76,
/* 0x093c */
0x0000, 0x000b, 0x0915, 0x0c78, 0x0916, 0x0c7a, 0x0917, 0x0c7c,
0x091c, 0x0c7e, 0x0921, 0x0c80, 0x0922, 0x0c82, 0x0928, 0x0c84,
0x092b, 0x0c86, 0x092f, 0x0c88, 0x0930, 0x0c8a, 0x0933, 0x0c8c,
/* 0x09bc */
0x0000, 0x0004, 0x09a1, 0x0c8e, 0x09a2, 0x0c90, 0x09ac, 0x0c92,
0x09af, 0x0c94,
/* 0x09be */
0x0000, 0x0001, 0x09c7, 0x0c96,
/* 0x09d7 */
0x0000, 0x0001, 0x09c7, 0x0c98,
/* 0x0a3c */
0x0000, 0x0005, 0x0a16, 0x0c9a, 0x0a17, 0x0c9c, 0x0a1c, 0x0c9e,
0x0a21, 0x0ca0, 0x0a2b, 0x0ca2,
/* 0x0b3c */
0x0000, 0x0003, 0x0b21, 0x0ca4, 0x0b22, 0x0ca6, 0x0b2f, 0x0ca8,
/* 0x0b3e */
0x0000, 0x0001, 0x0b47, 0x0caa,
/* 0x0b56 */
0x0000, 0x0001, 0x0b47, 0x0cac,
/* 0x0b57 */
0x0000, 0x0001, 0x0b47, 0x0cae,
/* 0x0bbe */
0x0000, 0x0002, 0x0bc6, 0x0cb0, 0x0bc7, 0x0cb2,
/* 0x0bd7 */
0x0000, 0x0002, 0x0b92, 0x0cb4, 0x0bc6, 0x0cb6,
/* 0x0c56 */
0x0000, 0x0001, 0x0c46, 0x0cb8,
/* 0x0cc2 */
0x0000, 0x0001, 0x0cc6, 0x0cba,
/* 0x0cd5 */
0x0000, 0x0002, 0x0cbf, 0x0cbe, 0x0cc6, 0x0cc0,
/* 0x0cd6 */
0x0000, 0x0001, 0x0cc6, 0x0cc2,
/* 0x0d3e */
0x0000, 0x0002, 0x0d46, 0x0cc4, 0x0d47, 0x0cc6,
/* 0x0d57 */
0x0000, 0x0001, 0x0d46, 0x0cc8,
/* 0x0e32 */
0x0000, 0x0001, 0x0e4d, 0x0cca,
/* 0x0eb2 */
0x0000, 0x0001, 0x0ecd, 0x0ccc,
/* 0x0f71 */
0x0000, 0x0003, 0x0f72, 0x0cce, 0x0f74, 0x0cd0, 0x0f80, 0x0cd2,
/* 0x0f80 */
0x0000, 0x0002, 0x0fb2, 0x0cd4, 0x0fb3, 0x0cd8,
/* 0x0fb5 */
0x0000, 0x0002, 0x0f40, 0x0cdc, 0x0f90, 0x0cde,
/* 0x0fb7 */
0x0000, 0x000a, 0x0f42, 0x0ce0, 0x0f4c, 0x0ce2, 0x0f51, 0x0ce4,
0x0f56, 0x0ce6, 0x0f5b, 0x0ce8, 0x0f92, 0x0cea, 0x0f9c, 0x0cec,
0x0fa1, 0x0cee, 0x0fa6, 0x0cf0, 0x0fab, 0x0cf2,
/* 0x3099 */
0x0000, 0x0030, 0x3046, 0x0cf4, 0x304b, 0x0cf6, 0x304d, 0x0cf8,
0x304f, 0x0cfa, 0x3051, 0x0cfc, 0x3053, 0x0cfe, 0x3055, 0x0d00,
0x3057, 0x0d02, 0x3059, 0x0d04, 0x305b, 0x0d06, 0x305d, 0x0d08,
0x305f, 0x0d0a, 0x3061, 0x0d0c, 0x3064, 0x0d0e, 0x3066, 0x0d10,
0x3068, 0x0d12, 0x306f, 0x0d14, 0x3072, 0x0d16, 0x3075, 0x0d18,
0x3078, 0x0d1a, 0x307b, 0x0d1c, 0x309d, 0x0d1e, 0x30a6, 0x0d20,
0x30ab, 0x0d22, 0x30ad, 0x0d24, 0x30af, 0x0d26, 0x30b1, 0x0d28,
0x30b3, 0x0d2a, 0x30b5, 0x0d2c, 0x30b7, 0x0d2e, 0x30b9, 0x0d30,
0x30bb, 0x0d32, 0x30bd, 0x0d34, 0x30bf, 0x0d36, 0x30c1, 0x0d38,
0x30c4, 0x0d3a, 0x30c6, 0x0d3c, 0x30c8, 0x0d3e, 0x30cf, 0x0d40,
0x30d2, 0x0d42, 0x30d5, 0x0d44, 0x30d8, 0x0d46, 0x30db, 0x0d48,
0x30ef, 0x0d4a, 0x30f0, 0x0d4c, 0x30f1, 0x0d4e, 0x30f2, 0x0d50,
0x30fd, 0x0d52,
/* 0x309a */
0x0000, 0x000a, 0x306f, 0x0d54, 0x3072, 0x0d56, 0x3075, 0x0d58,
0x3078, 0x0d5a, 0x307b, 0x0d5c, 0x30cf, 0x0d5e, 0x30d2, 0x0d60,
0x30d5, 0x0d62, 0x30d8, 0x0d64, 0x30db, 0x0d66,
/* 0x0041 0x0300 */
0x00c0, 0x0000,
/* 0x0045 0x0300 */
0x00c8, 0x0000,
/* 0x0049 0x0300 */
0x00cc, 0x0000,
/* 0x004f 0x0300 */
0x00d2, 0x0000,
/* 0x0055 0x0300 */
0x00d9, 0x0000,
/* 0x0057 0x0300 */
0x1e80, 0x0000,
/* 0x0059 0x0300 */
0x1ef2, 0x0000,
/* 0x0061 0x0300 */
0x00e0, 0x0000,
/* 0x0065 0x0300 */
0x00e8, 0x0000,
/* 0x0069 0x0300 */
0x00ec, 0x0000,
/* 0x006f 0x0300 */
0x00f2, 0x0000,
/* 0x0075 0x0300 */
0x00f9, 0x0000,
/* 0x0077 0x0300 */
0x1e81, 0x0000,
/* 0x0079 0x0300 */
0x1ef3, 0x0000,
/* 0x00a8 0x0300 */
0x1fed, 0x0000,
/* 0x0391 0x0300 */
0x1fba, 0x0000,
/* 0x0395 0x0300 */
0x1fc8, 0x0000,
/* 0x0397 0x0300 */
0x1fca, 0x0000,
/* 0x0399 0x0300 */
0x1fda, 0x0000,
/* 0x039f 0x0300 */
0x1ff8, 0x0000,
/* 0x03a5 0x0300 */
0x1fea, 0x0000,
/* 0x03a9 0x0300 */
0x1ffa, 0x0000,
/* 0x03b1 0x0300 */
0x1f70, 0x0000,
/* 0x03b5 0x0300 */
0x1f72, 0x0000,
/* 0x03b7 0x0300 */
0x1f74, 0x0000,
/* 0x03b9 0x0300 */
0x1f76, 0x0000,
/* 0x03bf 0x0300 */
0x1f78, 0x0000,
/* 0x03c5 0x0300 */
0x1f7a, 0x0000,
/* 0x03c9 0x0300 */
0x1f7c, 0x0000,
/* 0x1fbf 0x0300 */
0x1fcd, 0x0000,
/* 0x1ffe 0x0300 */
0x1fdd, 0x0000,
/* 0x0041 0x0301 */
0x00c1, 0x0000,
/* 0x0043 0x0301 */
0x0106, 0x0000,
/* 0x0045 0x0301 */
0x00c9, 0x0000,
/* 0x0047 0x0301 */
0x01f4, 0x0000,
/* 0x0049 0x0301 */
0x00cd, 0x0000,
/* 0x004b 0x0301 */
0x1e30, 0x0000,
/* 0x004c 0x0301 */
0x0139, 0x0000,
/* 0x004d 0x0301 */
0x1e3e, 0x0000,
/* 0x004e 0x0301 */
0x0143, 0x0000,
/* 0x004f 0x0301 */
0x00d3, 0x0000,
/* 0x0050 0x0301 */
0x1e54, 0x0000,
/* 0x0052 0x0301 */
0x0154, 0x0000,
/* 0x0053 0x0301 */
0x015a, 0x0001, 0x0307, 0x0d68,
/* 0x0055 0x0301 */
0x00da, 0x0000,
/* 0x0057 0x0301 */
0x1e82, 0x0000,
/* 0x0059 0x0301 */
0x00dd, 0x0000,
/* 0x005a 0x0301 */
0x0179, 0x0000,
/* 0x0061 0x0301 */
0x00e1, 0x0000,
/* 0x0063 0x0301 */
0x0107, 0x0000,
/* 0x0065 0x0301 */
0x00e9, 0x0000,
/* 0x0067 0x0301 */
0x01f5, 0x0000,
/* 0x0069 0x0301 */
0x00ed, 0x0000,
/* 0x006b 0x0301 */
0x1e31, 0x0000,
/* 0x006c 0x0301 */
0x013a, 0x0000,
/* 0x006d 0x0301 */
0x1e3f, 0x0000,
/* 0x006e 0x0301 */
0x0144, 0x0000,
/* 0x006f 0x0301 */
0x00f3, 0x0000,
/* 0x0070 0x0301 */
0x1e55, 0x0000,
/* 0x0072 0x0301 */
0x0155, 0x0000,
/* 0x0073 0x0301 */
0x015b, 0x0001, 0x0307, 0x0d6a,
/* 0x0075 0x0301 */
0x00fa, 0x0000,
/* 0x0077 0x0301 */
0x1e83, 0x0000,
/* 0x0079 0x0301 */
0x00fd, 0x0000,
/* 0x007a 0x0301 */
0x017a, 0x0000,
/* 0x00a8 0x0301 */
0x1fee, 0x0000,
/* 0x00c6 0x0301 */
0x01fc, 0x0000,
/* 0x00d8 0x0301 */
0x01fe, 0x0000,
/* 0x00e6 0x0301 */
0x01fd, 0x0000,
/* 0x00f8 0x0301 */
0x01ff, 0x0000,
/* 0x0391 0x0301 */
0x1fbb, 0x0000,
/* 0x0395 0x0301 */
0x1fc9, 0x0000,
/* 0x0397 0x0301 */
0x1fcb, 0x0000,
/* 0x0399 0x0301 */
0x1fdb, 0x0000,
/* 0x039f 0x0301 */
0x1ff9, 0x0000,
/* 0x03a5 0x0301 */
0x1feb, 0x0000,
/* 0x03a9 0x0301 */
0x1ffb, 0x0000,
/* 0x03b1 0x0301 */
0x1f71, 0x0000,
/* 0x03b5 0x0301 */
0x1f73, 0x0000,
/* 0x03b7 0x0301 */
0x1f75, 0x0000,
/* 0x03b9 0x0301 */
0x1f77, 0x0000,
/* 0x03bf 0x0301 */
0x1f79, 0x0000,
/* 0x03c5 0x0301 */
0x1f7b, 0x0000,
/* 0x03c9 0x0301 */
0x1f7d, 0x0000,
/* 0x0413 0x0301 */
0x0403, 0x0000,
/* 0x041a 0x0301 */
0x040c, 0x0000,
/* 0x0433 0x0301 */
0x0453, 0x0000,
/* 0x043a 0x0301 */
0x045c, 0x0000,
/* 0x1fbf 0x0301 */
0x1fce, 0x0000,
/* 0x1ffe 0x0301 */
0x1fde, 0x0000,
/* 0x0041 0x0302 */
0x00c2, 0x0004, 0x0300, 0x0d6c, 0x0301, 0x0d6e, 0x0303, 0x0d70,
0x0309, 0x0d72,
/* 0x0043 0x0302 */
0x0108, 0x0000,
/* 0x0045 0x0302 */
0x00ca, 0x0004, 0x0300, 0x0d74, 0x0301, 0x0d76, 0x0303, 0x0d78,
0x0309, 0x0d7a,
/* 0x0047 0x0302 */
0x011c, 0x0000,
/* 0x0048 0x0302 */
0x0124, 0x0000,
/* 0x0049 0x0302 */
0x00ce, 0x0000,
/* 0x004a 0x0302 */
0x0134, 0x0000,
/* 0x004f 0x0302 */
0x00d4, 0x0004, 0x0300, 0x0d7c, 0x0301, 0x0d7e, 0x0303, 0x0d80,
0x0309, 0x0d82,
/* 0x0053 0x0302 */
0x015c, 0x0000,
/* 0x0055 0x0302 */
0x00db, 0x0000,
/* 0x0057 0x0302 */
0x0174, 0x0000,
/* 0x0059 0x0302 */
0x0176, 0x0000,
/* 0x005a 0x0302 */
0x1e90, 0x0000,
/* 0x0061 0x0302 */
0x00e2, 0x0004, 0x0300, 0x0d84, 0x0301, 0x0d86, 0x0303, 0x0d88,
0x0309, 0x0d8a,
/* 0x0063 0x0302 */
0x0109, 0x0000,
/* 0x0065 0x0302 */
0x00ea, 0x0004, 0x0300, 0x0d8c, 0x0301, 0x0d8e, 0x0303, 0x0d90,
0x0309, 0x0d92,
/* 0x0067 0x0302 */
0x011d, 0x0000,
/* 0x0068 0x0302 */
0x0125, 0x0000,
/* 0x0069 0x0302 */
0x00ee, 0x0000,
/* 0x006a 0x0302 */
0x0135, 0x0000,
/* 0x006f 0x0302 */
0x00f4, 0x0004, 0x0300, 0x0d94, 0x0301, 0x0d96, 0x0303, 0x0d98,
0x0309, 0x0d9a,
/* 0x0073 0x0302 */
0x015d, 0x0000,
/* 0x0075 0x0302 */
0x00fb, 0x0000,
/* 0x0077 0x0302 */
0x0175, 0x0000,
/* 0x0079 0x0302 */
0x0177, 0x0000,
/* 0x007a 0x0302 */
0x1e91, 0x0000,
/* 0x0041 0x0303 */
0x00c3, 0x0000,
/* 0x0045 0x0303 */
0x1ebc, 0x0000,
/* 0x0049 0x0303 */
0x0128, 0x0000,
/* 0x004e 0x0303 */
0x00d1, 0x0000,
/* 0x004f 0x0303 */
0x00d5, 0x0002, 0x0301, 0x0d9c, 0x0308, 0x0d9e,
/* 0x0055 0x0303 */
0x0168, 0x0001, 0x0301, 0x0da0,
/* 0x0056 0x0303 */
0x1e7c, 0x0000,
/* 0x0059 0x0303 */
0x1ef8, 0x0000,
/* 0x0061 0x0303 */
0x00e3, 0x0000,
/* 0x0065 0x0303 */
0x1ebd, 0x0000,
/* 0x0069 0x0303 */
0x0129, 0x0000,
/* 0x006e 0x0303 */
0x00f1, 0x0000,
/* 0x006f 0x0303 */
0x00f5, 0x0002, 0x0301, 0x0da2, 0x0308, 0x0da4,
/* 0x0075 0x0303 */
0x0169, 0x0001, 0x0301, 0x0da6,
/* 0x0076 0x0303 */
0x1e7d, 0x0000,
/* 0x0079 0x0303 */
0x1ef9, 0x0000,
/* 0x0041 0x0304 */
0x0100, 0x0000,
/* 0x0045 0x0304 */
0x0112, 0x0002, 0x0300, 0x0da8, 0x0301, 0x0daa,
/* 0x0047 0x0304 */
0x1e20, 0x0000,
/* 0x0049 0x0304 */
0x012a, 0x0000,
/* 0x004f 0x0304 */
0x014c, 0x0002, 0x0300, 0x0dac, 0x0301, 0x0dae,
/* 0x0055 0x0304 */
0x016a, 0x0001, 0x0308, 0x0db0,
/* 0x0061 0x0304 */
0x0101, 0x0000,
/* 0x0065 0x0304 */
0x0113, 0x0002, 0x0300, 0x0db2, 0x0301, 0x0db4,
/* 0x0067 0x0304 */
0x1e21, 0x0000,
/* 0x0069 0x0304 */
0x012b, 0x0000,
/* 0x006f 0x0304 */
0x014d, 0x0002, 0x0300, 0x0db6, 0x0301, 0x0db8,
/* 0x0075 0x0304 */
0x016b, 0x0001, 0x0308, 0x0dba,
/* 0x00c6 0x0304 */
0x01e2, 0x0000,
/* 0x00e6 0x0304 */
0x01e3, 0x0000,
/* 0x0391 0x0304 */
0x1fb9, 0x0000,
/* 0x0399 0x0304 */
0x1fd9, 0x0000,
/* 0x03a5 0x0304 */
0x1fe9, 0x0000,
/* 0x03b1 0x0304 */
0x1fb1, 0x0000,
/* 0x03b9 0x0304 */
0x1fd1, 0x0000,
/* 0x03c5 0x0304 */
0x1fe1, 0x0000,
/* 0x0418 0x0304 */
0x04e2, 0x0000,
/* 0x0423 0x0304 */
0x04ee, 0x0000,
/* 0x0438 0x0304 */
0x04e3, 0x0000,
/* 0x0443 0x0304 */
0x04ef, 0x0000,
/* 0x0041 0x0306 */
0x0102, 0x0004, 0x0300, 0x0dbc, 0x0301, 0x0dbe, 0x0303, 0x0dc0,
0x0309, 0x0dc2,
/* 0x0045 0x0306 */
0x0114, 0x0000,
/* 0x0047 0x0306 */
0x011e, 0x0000,
/* 0x0049 0x0306 */
0x012c, 0x0000,
/* 0x004f 0x0306 */
0x014e, 0x0000,
/* 0x0055 0x0306 */
0x016c, 0x0000,
/* 0x0061 0x0306 */
0x0103, 0x0004, 0x0300, 0x0dc4, 0x0301, 0x0dc6, 0x0303, 0x0dc8,
0x0309, 0x0dca,
/* 0x0065 0x0306 */
0x0115, 0x0000,
/* 0x0067 0x0306 */
0x011f, 0x0000,
/* 0x0069 0x0306 */
0x012d, 0x0000,
/* 0x006f 0x0306 */
0x014f, 0x0000,
/* 0x0075 0x0306 */
0x016d, 0x0000,
/* 0x0391 0x0306 */
0x1fb8, 0x0000,
/* 0x0399 0x0306 */
0x1fd8, 0x0000,
/* 0x03a5 0x0306 */
0x1fe8, 0x0000,
/* 0x03b1 0x0306 */
0x1fb0, 0x0000,
/* 0x03b9 0x0306 */
0x1fd0, 0x0000,
/* 0x03c5 0x0306 */
0x1fe0, 0x0000,
/* 0x0410 0x0306 */
0x04d0, 0x0000,
/* 0x0415 0x0306 */
0x04d6, 0x0000,
/* 0x0416 0x0306 */
0x04c1, 0x0000,
/* 0x0418 0x0306 */
0x0419, 0x0000,
/* 0x0423 0x0306 */
0x040e, 0x0000,
/* 0x0430 0x0306 */
0x04d1, 0x0000,
/* 0x0435 0x0306 */
0x04d7, 0x0000,
/* 0x0436 0x0306 */
0x04c2, 0x0000,
/* 0x0438 0x0306 */
0x0439, 0x0000,
/* 0x0443 0x0306 */
0x045e, 0x0000,
/* 0x0041 0x0307 */
0x0000, 0x0001, 0x0304, 0x0dcc,
/* 0x0042 0x0307 */
0x1e02, 0x0000,
/* 0x0043 0x0307 */
0x010a, 0x0000,
/* 0x0044 0x0307 */
0x1e0a, 0x0000,
/* 0x0045 0x0307 */
0x0116, 0x0000,
/* 0x0046 0x0307 */
0x1e1e, 0x0000,
/* 0x0047 0x0307 */
0x0120, 0x0000,
/* 0x0048 0x0307 */
0x1e22, 0x0000,
/* 0x0049 0x0307 */
0x0130, 0x0000,
/* 0x004d 0x0307 */
0x1e40, 0x0000,
/* 0x004e 0x0307 */
0x1e44, 0x0000,
/* 0x0050 0x0307 */
0x1e56, 0x0000,
/* 0x0052 0x0307 */
0x1e58, 0x0000,
/* 0x0053 0x0307 */
0x1e60, 0x0000,
/* 0x0054 0x0307 */
0x1e6a, 0x0000,
/* 0x0057 0x0307 */
0x1e86, 0x0000,
/* 0x0058 0x0307 */
0x1e8a, 0x0000,
/* 0x0059 0x0307 */
0x1e8e, 0x0000,
/* 0x005a 0x0307 */
0x017b, 0x0000,
/* 0x0061 0x0307 */
0x0000, 0x0001, 0x0304, 0x0dce,
/* 0x0062 0x0307 */
0x1e03, 0x0000,
/* 0x0063 0x0307 */
0x010b, 0x0000,
/* 0x0064 0x0307 */
0x1e0b, 0x0000,
/* 0x0065 0x0307 */
0x0117, 0x0000,
/* 0x0066 0x0307 */
0x1e1f, 0x0000,
/* 0x0067 0x0307 */
0x0121, 0x0000,
/* 0x0068 0x0307 */
0x1e23, 0x0000,
/* 0x006d 0x0307 */
0x1e41, 0x0000,
/* 0x006e 0x0307 */
0x1e45, 0x0000,
/* 0x0070 0x0307 */
0x1e57, 0x0000,
/* 0x0072 0x0307 */
0x1e59, 0x0000,
/* 0x0073 0x0307 */
0x1e61, 0x0000,
/* 0x0074 0x0307 */
0x1e6b, 0x0000,
/* 0x0077 0x0307 */
0x1e87, 0x0000,
/* 0x0078 0x0307 */
0x1e8b, 0x0000,
/* 0x0079 0x0307 */
0x1e8f, 0x0000,
/* 0x007a 0x0307 */
0x017c, 0x0000,
/* 0x017f 0x0307 */
0x1e9b, 0x0000,
/* 0x0306 0x0307 */
0x0310, 0x0000,
/* 0x0041 0x0308 */
0x00c4, 0x0001, 0x0304, 0x0dd0,
/* 0x0045 0x0308 */
0x00cb, 0x0000,
/* 0x0048 0x0308 */
0x1e26, 0x0000,
/* 0x0049 0x0308 */
0x00cf, 0x0001, 0x0301, 0x0dd2,
/* 0x004f 0x0308 */
0x00d6, 0x0000,
/* 0x0055 0x0308 */
0x00dc, 0x0004, 0x0300, 0x0dd4, 0x0301, 0x0dd6, 0x0304, 0x0dd8,
0x030c, 0x0dda,
/* 0x0057 0x0308 */
0x1e84, 0x0000,
/* 0x0058 0x0308 */
0x1e8c, 0x0000,
/* 0x0059 0x0308 */
0x0178, 0x0000,
/* 0x0061 0x0308 */
0x00e4, 0x0001, 0x0304, 0x0ddc,
/* 0x0065 0x0308 */
0x00eb, 0x0000,
/* 0x0068 0x0308 */
0x1e27, 0x0000,
/* 0x0069 0x0308 */
0x00ef, 0x0001, 0x0301, 0x0dde,
/* 0x006f 0x0308 */
0x00f6, 0x0000,
/* 0x0074 0x0308 */
0x1e97, 0x0000,
/* 0x0075 0x0308 */
0x00fc, 0x0004, 0x0300, 0x0de0, 0x0301, 0x0de2, 0x0304, 0x0de4,
0x030c, 0x0de6,
/* 0x0077 0x0308 */
0x1e85, 0x0000,
/* 0x0078 0x0308 */
0x1e8d, 0x0000,
/* 0x0079 0x0308 */
0x00ff, 0x0000,
/* 0x018f 0x0308 */
0x04da, 0x0000,
/* 0x019f 0x0308 */
0x04ea, 0x0000,
/* 0x0259 0x0308 */
0x04db, 0x0000,
/* 0x0275 0x0308 */
0x04eb, 0x0000,
/* 0x0399 0x0308 */
0x03aa, 0x0000,
/* 0x03a5 0x0308 */
0x03ab, 0x0000,
/* 0x03b9 0x0308 */
0x03ca, 0x0004, 0x0300, 0x0de8, 0x0301, 0x0dea, 0x030d, 0x0dec,
0x0342, 0x0dee,
/* 0x03c5 0x0308 */
0x03cb, 0x0004, 0x0300, 0x0df0, 0x0301, 0x0df2, 0x030d, 0x0df4,
0x0342, 0x0df6,
/* 0x03d2 0x0308 */
0x03d4, 0x0000,
/* 0x0406 0x0308 */
0x0407, 0x0000,
/* 0x0410 0x0308 */
0x04d2, 0x0000,
/* 0x0415 0x0308 */
0x0401, 0x0000,
/* 0x0416 0x0308 */
0x04dc, 0x0000,
/* 0x0417 0x0308 */
0x04de, 0x0000,
/* 0x0418 0x0308 */
0x04e4, 0x0000,
/* 0x041e 0x0308 */
0x04e6, 0x0000,
/* 0x0423 0x0308 */
0x04f0, 0x0000,
/* 0x0427 0x0308 */
0x04f4, 0x0000,
/* 0x042b 0x0308 */
0x04f8, 0x0000,
/* 0x0430 0x0308 */
0x04d3, 0x0000,
/* 0x0435 0x0308 */
0x0451, 0x0000,
/* 0x0436 0x0308 */
0x04dd, 0x0000,
/* 0x0437 0x0308 */
0x04df, 0x0000,
/* 0x0438 0x0308 */
0x04e5, 0x0000,
/* 0x043e 0x0308 */
0x04e7, 0x0000,
/* 0x0443 0x0308 */
0x04f1, 0x0000,
/* 0x0447 0x0308 */
0x04f5, 0x0000,
/* 0x044b 0x0308 */
0x04f9, 0x0000,
/* 0x0456 0x0308 */
0x0457, 0x0000,
/* 0x0041 0x0309 */
0x1ea2, 0x0000,
/* 0x0045 0x0309 */
0x1eba, 0x0000,
/* 0x0049 0x0309 */
0x1ec8, 0x0000,
/* 0x004f 0x0309 */
0x1ece, 0x0000,
/* 0x0055 0x0309 */
0x1ee6, 0x0000,
/* 0x0059 0x0309 */
0x1ef6, 0x0000,
/* 0x0061 0x0309 */
0x1ea3, 0x0000,
/* 0x0065 0x0309 */
0x1ebb, 0x0000,
/* 0x0069 0x0309 */
0x1ec9, 0x0000,
/* 0x006f 0x0309 */
0x1ecf, 0x0000,
/* 0x0075 0x0309 */
0x1ee7, 0x0000,
/* 0x0079 0x0309 */
0x1ef7, 0x0000,
/* 0x0041 0x030a */
0x00c5, 0x0001, 0x0301, 0x0df8,
/* 0x0055 0x030a */
0x016e, 0x0000,
/* 0x0061 0x030a */
0x00e5, 0x0001, 0x0301, 0x0dfa,
/* 0x0075 0x030a */
0x016f, 0x0000,
/* 0x0077 0x030a */
0x1e98, 0x0000,
/* 0x0079 0x030a */
0x1e99, 0x0000,
/* 0x004f 0x030b */
0x0150, 0x0000,
/* 0x0055 0x030b */
0x0170, 0x0000,
/* 0x006f 0x030b */
0x0151, 0x0000,
/* 0x0075 0x030b */
0x0171, 0x0000,
/* 0x0423 0x030b */
0x04f2, 0x0000,
/* 0x0443 0x030b */
0x04f3, 0x0000,
/* 0x0041 0x030c */
0x01cd, 0x0000,
/* 0x0043 0x030c */
0x010c, 0x0000,
/* 0x0044 0x030c */
0x010e, 0x0000,
/* 0x0045 0x030c */
0x011a, 0x0000,
/* 0x0047 0x030c */
0x01e6, 0x0000,
/* 0x0049 0x030c */
0x01cf, 0x0000,
/* 0x004b 0x030c */
0x01e8, 0x0000,
/* 0x004c 0x030c */
0x013d, 0x0000,
/* 0x004e 0x030c */
0x0147, 0x0000,
/* 0x004f 0x030c */
0x01d1, 0x0000,
/* 0x0052 0x030c */
0x0158, 0x0000,
/* 0x0053 0x030c */
0x0160, 0x0001, 0x0307, 0x0dfc,
/* 0x0054 0x030c */
0x0164, 0x0000,
/* 0x0055 0x030c */
0x01d3, 0x0000,
/* 0x005a 0x030c */
0x017d, 0x0000,
/* 0x0061 0x030c */
0x01ce, 0x0000,
/* 0x0063 0x030c */
0x010d, 0x0000,
/* 0x0064 0x030c */
0x010f, 0x0000,
/* 0x0065 0x030c */
0x011b, 0x0000,
/* 0x0067 0x030c */
0x01e7, 0x0000,
/* 0x0069 0x030c */
0x01d0, 0x0000,
/* 0x006a 0x030c */
0x01f0, 0x0000,
/* 0x006b 0x030c */
0x01e9, 0x0000,
/* 0x006c 0x030c */
0x013e, 0x0000,
/* 0x006e 0x030c */
0x0148, 0x0000,
/* 0x006f 0x030c */
0x01d2, 0x0000,
/* 0x0072 0x030c */
0x0159, 0x0000,
/* 0x0073 0x030c */
0x0161, 0x0001, 0x0307, 0x0dfe,
/* 0x0074 0x030c */
0x0165, 0x0000,
/* 0x0075 0x030c */
0x01d4, 0x0000,
/* 0x007a 0x030c */
0x017e, 0x0000,
/* 0x01b7 0x030c */
0x01ee, 0x0000,
/* 0x0292 0x030c */
0x01ef, 0x0000,
/* 0x00a8 0x030d */
0x0385, 0x0000,
/* 0x0308 0x030d */
0x0344, 0x0000,
/* 0x0391 0x030d */
0x0386, 0x0000,
/* 0x0395 0x030d */
0x0388, 0x0000,
/* 0x0397 0x030d */
0x0389, 0x0000,
/* 0x0399 0x030d */
0x038a, 0x0000,
/* 0x039f 0x030d */
0x038c, 0x0000,
/* 0x03a5 0x030d */
0x038e, 0x0000,
/* 0x03a9 0x030d */
0x038f, 0x0000,
/* 0x03b1 0x030d */
0x03ac, 0x0000,
/* 0x03b5 0x030d */
0x03ad, 0x0000,
/* 0x03b7 0x030d */
0x03ae, 0x0000,
/* 0x03b9 0x030d */
0x03af, 0x0000,
/* 0x03bf 0x030d */
0x03cc, 0x0000,
/* 0x03c5 0x030d */
0x03cd, 0x0000,
/* 0x03c9 0x030d */
0x03ce, 0x0000,
/* 0x03d2 0x030d */
0x03d3, 0x0000,
/* 0x0041 0x030f */
0x0200, 0x0000,
/* 0x0045 0x030f */
0x0204, 0x0000,
/* 0x0049 0x030f */
0x0208, 0x0000,
/* 0x004f 0x030f */
0x020c, 0x0000,
/* 0x0052 0x030f */
0x0210, 0x0000,
/* 0x0055 0x030f */
0x0214, 0x0000,
/* 0x0061 0x030f */
0x0201, 0x0000,
/* 0x0065 0x030f */
0x0205, 0x0000,
/* 0x0069 0x030f */
0x0209, 0x0000,
/* 0x006f 0x030f */
0x020d, 0x0000,
/* 0x0072 0x030f */
0x0211, 0x0000,
/* 0x0075 0x030f */
0x0215, 0x0000,
/* 0x0474 0x030f */
0x0476, 0x0000,
/* 0x0475 0x030f */
0x0477, 0x0000,
/* 0x0041 0x0311 */
0x0202, 0x0000,
/* 0x0045 0x0311 */
0x0206, 0x0000,
/* 0x0049 0x0311 */
0x020a, 0x0000,
/* 0x004f 0x0311 */
0x020e, 0x0000,
/* 0x0052 0x0311 */
0x0212, 0x0000,
/* 0x0055 0x0311 */
0x0216, 0x0000,
/* 0x0061 0x0311 */
0x0203, 0x0000,
/* 0x0065 0x0311 */
0x0207, 0x0000,
/* 0x0069 0x0311 */
0x020b, 0x0000,
/* 0x006f 0x0311 */
0x020f, 0x0000,
/* 0x0072 0x0311 */
0x0213, 0x0000,
/* 0x0075 0x0311 */
0x0217, 0x0000,
/* 0x0391 0x0313 */
0x1f08, 0x0003, 0x0300, 0x0e00, 0x0301, 0x0e02, 0x0342, 0x0e04,
/* 0x0395 0x0313 */
0x1f18, 0x0002, 0x0300, 0x0e06, 0x0301, 0x0e08,
/* 0x0397 0x0313 */
0x1f28, 0x0003, 0x0300, 0x0e0a, 0x0301, 0x0e0c, 0x0342, 0x0e0e,
/* 0x0399 0x0313 */
0x1f38, 0x0003, 0x0300, 0x0e10, 0x0301, 0x0e12, 0x0342, 0x0e14,
/* 0x039f 0x0313 */
0x1f48, 0x0002, 0x0300, 0x0e16, 0x0301, 0x0e18,
/* 0x03a9 0x0313 */
0x1f68, 0x0003, 0x0300, 0x0e1a, 0x0301, 0x0e1c, 0x0342, 0x0e1e,
/* 0x03b1 0x0313 */
0x1f00, 0x0003, 0x0300, 0x0e20, 0x0301, 0x0e22, 0x0342, 0x0e24,
/* 0x03b5 0x0313 */
0x1f10, 0x0002, 0x0300, 0x0e26, 0x0301, 0x0e28,
/* 0x03b7 0x0313 */
0x1f20, 0x0003, 0x0300, 0x0e2a, 0x0301, 0x0e2c, 0x0342, 0x0e2e,
/* 0x03b9 0x0313 */
0x1f30, 0x0003, 0x0300, 0x0e30, 0x0301, 0x0e32, 0x0342, 0x0e34,
/* 0x03bf 0x0313 */
0x1f40, 0x0002, 0x0300, 0x0e36, 0x0301, 0x0e38,
/* 0x03c1 0x0313 */
0x1fe4, 0x0000,
/* 0x03c5 0x0313 */
0x1f50, 0x0003, 0x0300, 0x0e3a, 0x0301, 0x0e3c, 0x0342, 0x0e3e,
/* 0x03c9 0x0313 */
0x1f60, 0x0003, 0x0300, 0x0e40, 0x0301, 0x0e42, 0x0342, 0x0e44,
/* 0x0391 0x0314 */
0x1f09, 0x0003, 0x0300, 0x0e46, 0x0301, 0x0e48, 0x0342, 0x0e4a,
/* 0x0395 0x0314 */
0x1f19, 0x0002, 0x0300, 0x0e4c, 0x0301, 0x0e4e,
/* 0x0397 0x0314 */
0x1f29, 0x0003, 0x0300, 0x0e50, 0x0301, 0x0e52, 0x0342, 0x0e54,
/* 0x0399 0x0314 */
0x1f39, 0x0003, 0x0300, 0x0e56, 0x0301, 0x0e58, 0x0342, 0x0e5a,
/* 0x039f 0x0314 */
0x1f49, 0x0002, 0x0300, 0x0e5c, 0x0301, 0x0e5e,
/* 0x03a1 0x0314 */
0x1fec, 0x0000,
/* 0x03a5 0x0314 */
0x1f59, 0x0003, 0x0300, 0x0e60, 0x0301, 0x0e62, 0x0342, 0x0e64,
/* 0x03a9 0x0314 */
0x1f69, 0x0003, 0x0300, 0x0e66, 0x0301, 0x0e68, 0x0342, 0x0e6a,
/* 0x03b1 0x0314 */
0x1f01, 0x0003, 0x0300, 0x0e6c, 0x0301, 0x0e6e, 0x0342, 0x0e70,
/* 0x03b5 0x0314 */
0x1f11, 0x0002, 0x0300, 0x0e72, 0x0301, 0x0e74,
/* 0x03b7 0x0314 */
0x1f21, 0x0003, 0x0300, 0x0e76, 0x0301, 0x0e78, 0x0342, 0x0e7a,
/* 0x03b9 0x0314 */
0x1f31, 0x0003, 0x0300, 0x0e7c, 0x0301, 0x0e7e, 0x0342, 0x0e80,
/* 0x03bf 0x0314 */
0x1f41, 0x0002, 0x0300, 0x0e82, 0x0301, 0x0e84,
/* 0x03c1 0x0314 */
0x1fe5, 0x0000,
/* 0x03c5 0x0314 */
0x1f51, 0x0003, 0x0300, 0x0e86, 0x0301, 0x0e88, 0x0342, 0x0e8a,
/* 0x03c9 0x0314 */
0x1f61, 0x0003, 0x0300, 0x0e8c, 0x0301, 0x0e8e, 0x0342, 0x0e90,
/* 0x004f 0x031b */
0x01a0, 0x0005, 0x0300, 0x0e92, 0x0301, 0x0e94, 0x0303, 0x0e96,
0x0309, 0x0e98, 0x0323, 0x0e9a,
/* 0x0055 0x031b */
0x01af, 0x0005, 0x0300, 0x0e9c, 0x0301, 0x0e9e, 0x0303, 0x0ea0,
0x0309, 0x0ea2, 0x0323, 0x0ea4,
/* 0x006f 0x031b */
0x01a1, 0x0005, 0x0300, 0x0ea6, 0x0301, 0x0ea8, 0x0303, 0x0eaa,
0x0309, 0x0eac, 0x0323, 0x0eae,
/* 0x0075 0x031b */
0x01b0, 0x0005, 0x0300, 0x0eb0, 0x0301, 0x0eb2, 0x0303, 0x0eb4,
0x0309, 0x0eb6, 0x0323, 0x0eb8,
/* 0x0041 0x0323 */
0x1ea0, 0x0002, 0x0302, 0x0eba, 0x0306, 0x0ebc,
/* 0x0042 0x0323 */
0x1e04, 0x0000,
/* 0x0044 0x0323 */
0x1e0c, 0x0000,
/* 0x0045 0x0323 */
0x1eb8, 0x0001, 0x0302, 0x0ebe,
/* 0x0048 0x0323 */
0x1e24, 0x0000,
/* 0x0049 0x0323 */
0x1eca, 0x0000,
/* 0x004b 0x0323 */
0x1e32, 0x0000,
/* 0x004c 0x0323 */
0x1e36, 0x0001, 0x0304, 0x0ec0,
/* 0x004d 0x0323 */
0x1e42, 0x0000,
/* 0x004e 0x0323 */
0x1e46, 0x0000,
/* 0x004f 0x0323 */
0x1ecc, 0x0001, 0x0302, 0x0ec2,
/* 0x0052 0x0323 */
0x1e5a, 0x0001, 0x0304, 0x0ec4,
/* 0x0053 0x0323 */
0x1e62, 0x0001, 0x0307, 0x0ec6,
/* 0x0054 0x0323 */
0x1e6c, 0x0000,
/* 0x0055 0x0323 */
0x1ee4, 0x0000,
/* 0x0056 0x0323 */
0x1e7e, 0x0000,
/* 0x0057 0x0323 */
0x1e88, 0x0000,
/* 0x0059 0x0323 */
0x1ef4, 0x0000,
/* 0x005a 0x0323 */
0x1e92, 0x0000,
/* 0x0061 0x0323 */
0x1ea1, 0x0002, 0x0302, 0x0ec8, 0x0306, 0x0eca,
/* 0x0062 0x0323 */
0x1e05, 0x0000,
/* 0x0064 0x0323 */
0x1e0d, 0x0000,
/* 0x0065 0x0323 */
0x1eb9, 0x0001, 0x0302, 0x0ecc,
/* 0x0068 0x0323 */
0x1e25, 0x0000,
/* 0x0069 0x0323 */
0x1ecb, 0x0000,
/* 0x006b 0x0323 */
0x1e33, 0x0000,
/* 0x006c 0x0323 */
0x1e37, 0x0001, 0x0304, 0x0ece,
/* 0x006d 0x0323 */
0x1e43, 0x0000,
/* 0x006e 0x0323 */
0x1e47, 0x0000,
/* 0x006f 0x0323 */
0x1ecd, 0x0001, 0x0302, 0x0ed0,
/* 0x0072 0x0323 */
0x1e5b, 0x0001, 0x0304, 0x0ed2,
/* 0x0073 0x0323 */
0x1e63, 0x0001, 0x0307, 0x0ed4,
/* 0x0074 0x0323 */
0x1e6d, 0x0000,
/* 0x0075 0x0323 */
0x1ee5, 0x0000,
/* 0x0076 0x0323 */
0x1e7f, 0x0000,
/* 0x0077 0x0323 */
0x1e89, 0x0000,
/* 0x0079 0x0323 */
0x1ef5, 0x0000,
/* 0x007a 0x0323 */
0x1e93, 0x0000,
/* 0x0055 0x0324 */
0x1e72, 0x0000,
/* 0x0075 0x0324 */
0x1e73, 0x0000,
/* 0x0041 0x0325 */
0x1e00, 0x0000,
/* 0x0061 0x0325 */
0x1e01, 0x0000,
/* 0x0043 0x0327 */
0x00c7, 0x0001, 0x0301, 0x0ed6,
/* 0x0044 0x0327 */
0x1e10, 0x0000,
/* 0x0045 0x0327 */
0x0000, 0x0001, 0x0306, 0x0ed8,
/* 0x0047 0x0327 */
0x0122, 0x0000,
/* 0x0048 0x0327 */
0x1e28, 0x0000,
/* 0x004b 0x0327 */
0x0136, 0x0000,
/* 0x004c 0x0327 */
0x013b, 0x0000,
/* 0x004e 0x0327 */
0x0145, 0x0000,
/* 0x0052 0x0327 */
0x0156, 0x0000,
/* 0x0053 0x0327 */
0x015e, 0x0000,
/* 0x0054 0x0327 */
0x0162, 0x0000,
/* 0x0063 0x0327 */
0x00e7, 0x0001, 0x0301, 0x0eda,
/* 0x0064 0x0327 */
0x1e11, 0x0000,
/* 0x0065 0x0327 */
0x0000, 0x0001, 0x0306, 0x0edc,
/* 0x0067 0x0327 */
0x0123, 0x0000,
/* 0x0068 0x0327 */
0x1e29, 0x0000,
/* 0x006b 0x0327 */
0x0137, 0x0000,
/* 0x006c 0x0327 */
0x013c, 0x0000,
/* 0x006e 0x0327 */
0x0146, 0x0000,
/* 0x0072 0x0327 */
0x0157, 0x0000,
/* 0x0073 0x0327 */
0x015f, 0x0000,
/* 0x0074 0x0327 */
0x0163, 0x0000,
/* 0x0041 0x0328 */
0x0104, 0x0000,
/* 0x0045 0x0328 */
0x0118, 0x0000,
/* 0x0049 0x0328 */
0x012e, 0x0000,
/* 0x004f 0x0328 */
0x01ea, 0x0001, 0x0304, 0x0ede,
/* 0x0055 0x0328 */
0x0172, 0x0000,
/* 0x0061 0x0328 */
0x0105, 0x0000,
/* 0x0065 0x0328 */
0x0119, 0x0000,
/* 0x0069 0x0328 */
0x012f, 0x0000,
/* 0x006f 0x0328 */
0x01eb, 0x0001, 0x0304, 0x0ee0,
/* 0x0075 0x0328 */
0x0173, 0x0000,
/* 0x0044 0x032d */
0x1e12, 0x0000,
/* 0x0045 0x032d */
0x1e18, 0x0000,
/* 0x004c 0x032d */
0x1e3c, 0x0000,
/* 0x004e 0x032d */
0x1e4a, 0x0000,
/* 0x0054 0x032d */
0x1e70, 0x0000,
/* 0x0055 0x032d */
0x1e76, 0x0000,
/* 0x0064 0x032d */
0x1e13, 0x0000,
/* 0x0065 0x032d */
0x1e19, 0x0000,
/* 0x006c 0x032d */
0x1e3d, 0x0000,
/* 0x006e 0x032d */
0x1e4b, 0x0000,
/* 0x0074 0x032d */
0x1e71, 0x0000,
/* 0x0075 0x032d */
0x1e77, 0x0000,
/* 0x0048 0x032e */
0x1e2a, 0x0000,
/* 0x0068 0x032e */
0x1e2b, 0x0000,
/* 0x0045 0x0330 */
0x1e1a, 0x0000,
/* 0x0049 0x0330 */
0x1e2c, 0x0000,
/* 0x0055 0x0330 */
0x1e74, 0x0000,
/* 0x0065 0x0330 */
0x1e1b, 0x0000,
/* 0x0069 0x0330 */
0x1e2d, 0x0000,
/* 0x0075 0x0330 */
0x1e75, 0x0000,
/* 0x0042 0x0331 */
0x1e06, 0x0000,
/* 0x0044 0x0331 */
0x1e0e, 0x0000,
/* 0x004b 0x0331 */
0x1e34, 0x0000,
/* 0x004c 0x0331 */
0x1e3a, 0x0000,
/* 0x004e 0x0331 */
0x1e48, 0x0000,
/* 0x0052 0x0331 */
0x1e5e, 0x0000,
/* 0x0054 0x0331 */
0x1e6e, 0x0000,
/* 0x005a 0x0331 */
0x1e94, 0x0000,
/* 0x0062 0x0331 */
0x1e07, 0x0000,
/* 0x0064 0x0331 */
0x1e0f, 0x0000,
/* 0x0068 0x0331 */
0x1e96, 0x0000,
/* 0x006b 0x0331 */
0x1e35, 0x0000,
/* 0x006c 0x0331 */
0x1e3b, 0x0000,
/* 0x006e 0x0331 */
0x1e49, 0x0000,
/* 0x0072 0x0331 */
0x1e5f, 0x0000,
/* 0x0074 0x0331 */
0x1e6f, 0x0000,
/* 0x007a 0x0331 */
0x1e95, 0x0000,
/* 0x00a8 0x0342 */
0x1fc1, 0x0000,
/* 0x03b1 0x0342 */
0x1fb6, 0x0000,
/* 0x03b7 0x0342 */
0x1fc6, 0x0000,
/* 0x03b9 0x0342 */
0x1fd6, 0x0000,
/* 0x03c5 0x0342 */
0x1fe6, 0x0000,
/* 0x03c9 0x0342 */
0x1ff6, 0x0000,
/* 0x1fbf 0x0342 */
0x1fcf, 0x0000,
/* 0x1ffe 0x0342 */
0x1fdf, 0x0000,
/* 0x0391 0x0345 */
0x1fbc, 0x0002, 0x0313, 0x0ee2, 0x0314, 0x0eea,
/* 0x0397 0x0345 */
0x1fcc, 0x0002, 0x0313, 0x0ef2, 0x0314, 0x0efa,
/* 0x03a9 0x0345 */
0x1ffc, 0x0002, 0x0313, 0x0f02, 0x0314, 0x0f0a,
/* 0x03b1 0x0345 */
0x1fb3, 0x0005, 0x0300, 0x0f12, 0x0301, 0x0f14, 0x0313, 0x0f16,
0x0314, 0x0f1e, 0x0342, 0x0f26,
/* 0x03b7 0x0345 */
0x1fc3, 0x0005, 0x0300, 0x0f28, 0x0301, 0x0f2a, 0x0313, 0x0f2c,
0x0314, 0x0f34, 0x0342, 0x0f3c,
/* 0x03bf 0x0345 */
0x0000, 0x0001, 0x0301, 0x0f3e,
/* 0x03c9 0x0345 */
0x1ff3, 0x0004, 0x0300, 0x0f40, 0x0313, 0x0f42, 0x0314, 0x0f4a,
0x0342, 0x0f52,
/* 0x05d0 0x05b7 */
0xfb2e, 0x0000,
/* 0x05f2 0x05b7 */
0xfb1f, 0x0000,
/* 0x05d0 0x05b8 */
0xfb2f, 0x0000,
/* 0x05d5 0x05b9 */
0xfb4b, 0x0000,
/* 0x05d0 0x05bc */
0xfb30, 0x0000,
/* 0x05d1 0x05bc */
0xfb31, 0x0000,
/* 0x05d2 0x05bc */
0xfb32, 0x0000,
/* 0x05d3 0x05bc */
0xfb33, 0x0000,
/* 0x05d4 0x05bc */
0xfb34, 0x0000,
/* 0x05d5 0x05bc */
0xfb35, 0x0000,
/* 0x05d6 0x05bc */
0xfb36, 0x0000,
/* 0x05d8 0x05bc */
0xfb38, 0x0000,
/* 0x05d9 0x05bc */
0xfb39, 0x0000,
/* 0x05da 0x05bc */
0xfb3a, 0x0000,
/* 0x05db 0x05bc */
0xfb3b, 0x0000,
/* 0x05dc 0x05bc */
0xfb3c, 0x0000,
/* 0x05de 0x05bc */
0xfb3e, 0x0000,
/* 0x05e0 0x05bc */
0xfb40, 0x0000,
/* 0x05e1 0x05bc */
0xfb41, 0x0000,
/* 0x05e3 0x05bc */
0xfb43, 0x0000,
/* 0x05e4 0x05bc */
0xfb44, 0x0000,
/* 0x05e6 0x05bc */
0xfb46, 0x0000,
/* 0x05e7 0x05bc */
0xfb47, 0x0000,
/* 0x05e8 0x05bc */
0xfb48, 0x0000,
/* 0x05e9 0x05bc */
0xfb49, 0x0002, 0x05c1, 0x0f54, 0x05c2, 0x0f56,
/* 0x05ea 0x05bc */
0xfb4a, 0x0000,
/* 0x05d1 0x05bf */
0xfb4c, 0x0000,
/* 0x05db 0x05bf */
0xfb4d, 0x0000,
/* 0x05e4 0x05bf */
0xfb4e, 0x0000,
/* 0x05e9 0x05c1 */
0xfb2a, 0x0000,
/* 0x05e9 0x05c2 */
0xfb2b, 0x0000,
/* 0x0915 0x093c */
0x0958, 0x0000,
/* 0x0916 0x093c */
0x0959, 0x0000,
/* 0x0917 0x093c */
0x095a, 0x0000,
/* 0x091c 0x093c */
0x095b, 0x0000,
/* 0x0921 0x093c */
0x095c, 0x0000,
/* 0x0922 0x093c */
0x095d, 0x0000,
/* 0x0928 0x093c */
0x0929, 0x0000,
/* 0x092b 0x093c */
0x095e, 0x0000,
/* 0x092f 0x093c */
0x095f, 0x0000,
/* 0x0930 0x093c */
0x0931, 0x0000,
/* 0x0933 0x093c */
0x0934, 0x0000,
/* 0x09a1 0x09bc */
0x09dc, 0x0000,
/* 0x09a2 0x09bc */
0x09dd, 0x0000,
/* 0x09ac 0x09bc */
0x09b0, 0x0000,
/* 0x09af 0x09bc */
0x09df, 0x0000,
/* 0x09c7 0x09be */
0x09cb, 0x0000,
/* 0x09c7 0x09d7 */
0x09cc, 0x0000,
/* 0x0a16 0x0a3c */
0x0a59, 0x0000,
/* 0x0a17 0x0a3c */
0x0a5a, 0x0000,
/* 0x0a1c 0x0a3c */
0x0a5b, 0x0000,
/* 0x0a21 0x0a3c */
0x0a5c, 0x0000,
/* 0x0a2b 0x0a3c */
0x0a5e, 0x0000,
/* 0x0b21 0x0b3c */
0x0b5c, 0x0000,
/* 0x0b22 0x0b3c */
0x0b5d, 0x0000,
/* 0x0b2f 0x0b3c */
0x0b5f, 0x0000,
/* 0x0b47 0x0b3e */
0x0b4b, 0x0000,
/* 0x0b47 0x0b56 */
0x0b48, 0x0000,
/* 0x0b47 0x0b57 */
0x0b4c, 0x0000,
/* 0x0bc6 0x0bbe */
0x0bca, 0x0000,
/* 0x0bc7 0x0bbe */
0x0bcb, 0x0000,
/* 0x0b92 0x0bd7 */
0x0b94, 0x0000,
/* 0x0bc6 0x0bd7 */
0x0bcc, 0x0000,
/* 0x0c46 0x0c56 */
0x0c48, 0x0000,
/* 0x0cc6 0x0cc2 */
0x0cca, 0x0001, 0x0cd5, 0x0f58,
/* 0x0cbf 0x0cd5 */
0x0cc0, 0x0000,
/* 0x0cc6 0x0cd5 */
0x0cc7, 0x0000,
/* 0x0cc6 0x0cd6 */
0x0cc8, 0x0000,
/* 0x0d46 0x0d3e */
0x0d4a, 0x0000,
/* 0x0d47 0x0d3e */
0x0d4b, 0x0000,
/* 0x0d46 0x0d57 */
0x0d4c, 0x0000,
/* 0x0e4d 0x0e32 */
0x0e33, 0x0000,
/* 0x0ecd 0x0eb2 */
0x0eb3, 0x0000,
/* 0x0f72 0x0f71 */
0x0f73, 0x0000,
/* 0x0f74 0x0f71 */
0x0f75, 0x0000,
/* 0x0f80 0x0f71 */
0x0f81, 0x0000,
/* 0x0fb2 0x0f80 */
0x0f76, 0x0001, 0x0f71, 0x0f5a,
/* 0x0fb3 0x0f80 */
0x0f78, 0x0001, 0x0f71, 0x0f5c,
/* 0x0f40 0x0fb5 */
0x0f69, 0x0000,
/* 0x0f90 0x0fb5 */
0x0fb9, 0x0000,
/* 0x0f42 0x0fb7 */
0x0f43, 0x0000,
/* 0x0f4c 0x0fb7 */
0x0f4d, 0x0000,
/* 0x0f51 0x0fb7 */
0x0f52, 0x0000,
/* 0x0f56 0x0fb7 */
0x0f57, 0x0000,
/* 0x0f5b 0x0fb7 */
0x0f5c, 0x0000,
/* 0x0f92 0x0fb7 */
0x0f93, 0x0000,
/* 0x0f9c 0x0fb7 */
0x0f9d, 0x0000,
/* 0x0fa1 0x0fb7 */
0x0fa2, 0x0000,
/* 0x0fa6 0x0fb7 */
0x0fa7, 0x0000,
/* 0x0fab 0x0fb7 */
0x0fac, 0x0000,
/* 0x3046 0x3099 */
0x3094, 0x0000,
/* 0x304b 0x3099 */
0x304c, 0x0000,
/* 0x304d 0x3099 */
0x304e, 0x0000,
/* 0x304f 0x3099 */
0x3050, 0x0000,
/* 0x3051 0x3099 */
0x3052, 0x0000,
/* 0x3053 0x3099 */
0x3054, 0x0000,
/* 0x3055 0x3099 */
0x3056, 0x0000,
/* 0x3057 0x3099 */
0x3058, 0x0000,
/* 0x3059 0x3099 */
0x305a, 0x0000,
/* 0x305b 0x3099 */
0x305c, 0x0000,
/* 0x305d 0x3099 */
0x305e, 0x0000,
/* 0x305f 0x3099 */
0x3060, 0x0000,
/* 0x3061 0x3099 */
0x3062, 0x0000,
/* 0x3064 0x3099 */
0x3065, 0x0000,
/* 0x3066 0x3099 */
0x3067, 0x0000,
/* 0x3068 0x3099 */
0x3069, 0x0000,
/* 0x306f 0x3099 */
0x3070, 0x0000,
/* 0x3072 0x3099 */
0x3073, 0x0000,
/* 0x3075 0x3099 */
0x3076, 0x0000,
/* 0x3078 0x3099 */
0x3079, 0x0000,
/* 0x307b 0x3099 */
0x307c, 0x0000,
/* 0x309d 0x3099 */
0x309e, 0x0000,
/* 0x30a6 0x3099 */
0x30f4, 0x0000,
/* 0x30ab 0x3099 */
0x30ac, 0x0000,
/* 0x30ad 0x3099 */
0x30ae, 0x0000,
/* 0x30af 0x3099 */
0x30b0, 0x0000,
/* 0x30b1 0x3099 */
0x30b2, 0x0000,
/* 0x30b3 0x3099 */
0x30b4, 0x0000,
/* 0x30b5 0x3099 */
0x30b6, 0x0000,
/* 0x30b7 0x3099 */
0x30b8, 0x0000,
/* 0x30b9 0x3099 */
0x30ba, 0x0000,
/* 0x30bb 0x3099 */
0x30bc, 0x0000,
/* 0x30bd 0x3099 */
0x30be, 0x0000,
/* 0x30bf 0x3099 */
0x30c0, 0x0000,
/* 0x30c1 0x3099 */
0x30c2, 0x0000,
/* 0x30c4 0x3099 */
0x30c5, 0x0000,
/* 0x30c6 0x3099 */
0x30c7, 0x0000,
/* 0x30c8 0x3099 */
0x30c9, 0x0000,
/* 0x30cf 0x3099 */
0x30d0, 0x0000,
/* 0x30d2 0x3099 */
0x30d3, 0x0000,
/* 0x30d5 0x3099 */
0x30d6, 0x0000,
/* 0x30d8 0x3099 */
0x30d9, 0x0000,
/* 0x30db 0x3099 */
0x30dc, 0x0000,
/* 0x30ef 0x3099 */
0x30f7, 0x0000,
/* 0x30f0 0x3099 */
0x30f8, 0x0000,
/* 0x30f1 0x3099 */
0x30f9, 0x0000,
/* 0x30f2 0x3099 */
0x30fa, 0x0000,
/* 0x30fd 0x3099 */
0x30fe, 0x0000,
/* 0x306f 0x309a */
0x3071, 0x0000,
/* 0x3072 0x309a */
0x3074, 0x0000,
/* 0x3075 0x309a */
0x3077, 0x0000,
/* 0x3078 0x309a */
0x307a, 0x0000,
/* 0x307b 0x309a */
0x307d, 0x0000,
/* 0x30cf 0x309a */
0x30d1, 0x0000,
/* 0x30d2 0x309a */
0x30d4, 0x0000,
/* 0x30d5 0x309a */
0x30d7, 0x0000,
/* 0x30d8 0x309a */
0x30da, 0x0000,
/* 0x30db 0x309a */
0x30dd, 0x0000,
/* 0x0307 0x0053 0x0301 */
0x1e64, 0x0000,
/* 0x0307 0x0073 0x0301 */
0x1e65, 0x0000,
/* 0x0300 0x0041 0x0302 */
0x1ea6, 0x0000,
/* 0x0301 0x0041 0x0302 */
0x1ea4, 0x0000,
/* 0x0303 0x0041 0x0302 */
0x1eaa, 0x0000,
/* 0x0309 0x0041 0x0302 */
0x1ea8, 0x0000,
/* 0x0300 0x0045 0x0302 */
0x1ec0, 0x0000,
/* 0x0301 0x0045 0x0302 */
0x1ebe, 0x0000,
/* 0x0303 0x0045 0x0302 */
0x1ec4, 0x0000,
/* 0x0309 0x0045 0x0302 */
0x1ec2, 0x0000,
/* 0x0300 0x004f 0x0302 */
0x1ed2, 0x0000,
/* 0x0301 0x004f 0x0302 */
0x1ed0, 0x0000,
/* 0x0303 0x004f 0x0302 */
0x1ed6, 0x0000,
/* 0x0309 0x004f 0x0302 */
0x1ed4, 0x0000,
/* 0x0300 0x0061 0x0302 */
0x1ea7, 0x0000,
/* 0x0301 0x0061 0x0302 */
0x1ea5, 0x0000,
/* 0x0303 0x0061 0x0302 */
0x1eab, 0x0000,
/* 0x0309 0x0061 0x0302 */
0x1ea9, 0x0000,
/* 0x0300 0x0065 0x0302 */
0x1ec1, 0x0000,
/* 0x0301 0x0065 0x0302 */
0x1ebf, 0x0000,
/* 0x0303 0x0065 0x0302 */
0x1ec5, 0x0000,
/* 0x0309 0x0065 0x0302 */
0x1ec3, 0x0000,
/* 0x0300 0x006f 0x0302 */
0x1ed3, 0x0000,
/* 0x0301 0x006f 0x0302 */
0x1ed1, 0x0000,
/* 0x0303 0x006f 0x0302 */
0x1ed7, 0x0000,
/* 0x0309 0x006f 0x0302 */
0x1ed5, 0x0000,
/* 0x0301 0x004f 0x0303 */
0x1e4c, 0x0000,
/* 0x0308 0x004f 0x0303 */
0x1e4e, 0x0000,
/* 0x0301 0x0055 0x0303 */
0x1e78, 0x0000,
/* 0x0301 0x006f 0x0303 */
0x1e4d, 0x0000,
/* 0x0308 0x006f 0x0303 */
0x1e4f, 0x0000,
/* 0x0301 0x0075 0x0303 */
0x1e79, 0x0000,
/* 0x0300 0x0045 0x0304 */
0x1e14, 0x0000,
/* 0x0301 0x0045 0x0304 */
0x1e16, 0x0000,
/* 0x0300 0x004f 0x0304 */
0x1e50, 0x0000,
/* 0x0301 0x004f 0x0304 */
0x1e52, 0x0000,
/* 0x0308 0x0055 0x0304 */
0x1e7a, 0x0000,
/* 0x0300 0x0065 0x0304 */
0x1e15, 0x0000,
/* 0x0301 0x0065 0x0304 */
0x1e17, 0x0000,
/* 0x0300 0x006f 0x0304 */
0x1e51, 0x0000,
/* 0x0301 0x006f 0x0304 */
0x1e53, 0x0000,
/* 0x0308 0x0075 0x0304 */
0x1e7b, 0x0000,
/* 0x0300 0x0041 0x0306 */
0x1eb0, 0x0000,
/* 0x0301 0x0041 0x0306 */
0x1eae, 0x0000,
/* 0x0303 0x0041 0x0306 */
0x1eb4, 0x0000,
/* 0x0309 0x0041 0x0306 */
0x1eb2, 0x0000,
/* 0x0300 0x0061 0x0306 */
0x1eb1, 0x0000,
/* 0x0301 0x0061 0x0306 */
0x1eaf, 0x0000,
/* 0x0303 0x0061 0x0306 */
0x1eb5, 0x0000,
/* 0x0309 0x0061 0x0306 */
0x1eb3, 0x0000,
/* 0x0304 0x0041 0x0307 */
0x01e0, 0x0000,
/* 0x0304 0x0061 0x0307 */
0x01e1, 0x0000,
/* 0x0304 0x0041 0x0308 */
0x01de, 0x0000,
/* 0x0301 0x0049 0x0308 */
0x1e2e, 0x0000,
/* 0x0300 0x0055 0x0308 */
0x01db, 0x0000,
/* 0x0301 0x0055 0x0308 */
0x01d7, 0x0000,
/* 0x0304 0x0055 0x0308 */
0x01d5, 0x0000,
/* 0x030c 0x0055 0x0308 */
0x01d9, 0x0000,
/* 0x0304 0x0061 0x0308 */
0x01df, 0x0000,
/* 0x0301 0x0069 0x0308 */
0x1e2f, 0x0000,
/* 0x0300 0x0075 0x0308 */
0x01dc, 0x0000,
/* 0x0301 0x0075 0x0308 */
0x01d8, 0x0000,
/* 0x0304 0x0075 0x0308 */
0x01d6, 0x0000,
/* 0x030c 0x0075 0x0308 */
0x01da, 0x0000,
/* 0x0300 0x03b9 0x0308 */
0x1fd2, 0x0000,
/* 0x0301 0x03b9 0x0308 */
0x1fd3, 0x0000,
/* 0x030d 0x03b9 0x0308 */
0x0390, 0x0000,
/* 0x0342 0x03b9 0x0308 */
0x1fd7, 0x0000,
/* 0x0300 0x03c5 0x0308 */
0x1fe2, 0x0000,
/* 0x0301 0x03c5 0x0308 */
0x1fe3, 0x0000,
/* 0x030d 0x03c5 0x0308 */
0x03b0, 0x0000,
/* 0x0342 0x03c5 0x0308 */
0x1fe7, 0x0000,
/* 0x0301 0x0041 0x030a */
0x01fa, 0x0000,
/* 0x0301 0x0061 0x030a */
0x01fb, 0x0000,
/* 0x0307 0x0053 0x030c */
0x1e66, 0x0000,
/* 0x0307 0x0073 0x030c */
0x1e67, 0x0000,
/* 0x0300 0x0391 0x0313 */
0x1f0a, 0x0000,
/* 0x0301 0x0391 0x0313 */
0x1f0c, 0x0000,
/* 0x0342 0x0391 0x0313 */
0x1f0e, 0x0000,
/* 0x0300 0x0395 0x0313 */
0x1f1a, 0x0000,
/* 0x0301 0x0395 0x0313 */
0x1f1c, 0x0000,
/* 0x0300 0x0397 0x0313 */
0x1f2a, 0x0000,
/* 0x0301 0x0397 0x0313 */
0x1f2c, 0x0000,
/* 0x0342 0x0397 0x0313 */
0x1f2e, 0x0000,
/* 0x0300 0x0399 0x0313 */
0x1f3a, 0x0000,
/* 0x0301 0x0399 0x0313 */
0x1f3c, 0x0000,
/* 0x0342 0x0399 0x0313 */
0x1f3e, 0x0000,
/* 0x0300 0x039f 0x0313 */
0x1f4a, 0x0000,
/* 0x0301 0x039f 0x0313 */
0x1f4c, 0x0000,
/* 0x0300 0x03a9 0x0313 */
0x1f6a, 0x0000,
/* 0x0301 0x03a9 0x0313 */
0x1f6c, 0x0000,
/* 0x0342 0x03a9 0x0313 */
0x1f6e, 0x0000,
/* 0x0300 0x03b1 0x0313 */
0x1f02, 0x0000,
/* 0x0301 0x03b1 0x0313 */
0x1f04, 0x0000,
/* 0x0342 0x03b1 0x0313 */
0x1f06, 0x0000,
/* 0x0300 0x03b5 0x0313 */
0x1f12, 0x0000,
/* 0x0301 0x03b5 0x0313 */
0x1f14, 0x0000,
/* 0x0300 0x03b7 0x0313 */
0x1f22, 0x0000,
/* 0x0301 0x03b7 0x0313 */
0x1f24, 0x0000,
/* 0x0342 0x03b7 0x0313 */
0x1f26, 0x0000,
/* 0x0300 0x03b9 0x0313 */
0x1f32, 0x0000,
/* 0x0301 0x03b9 0x0313 */
0x1f34, 0x0000,
/* 0x0342 0x03b9 0x0313 */
0x1f36, 0x0000,
/* 0x0300 0x03bf 0x0313 */
0x1f42, 0x0000,
/* 0x0301 0x03bf 0x0313 */
0x1f44, 0x0000,
/* 0x0300 0x03c5 0x0313 */
0x1f52, 0x0000,
/* 0x0301 0x03c5 0x0313 */
0x1f54, 0x0000,
/* 0x0342 0x03c5 0x0313 */
0x1f56, 0x0000,
/* 0x0300 0x03c9 0x0313 */
0x1f62, 0x0000,
/* 0x0301 0x03c9 0x0313 */
0x1f64, 0x0000,
/* 0x0342 0x03c9 0x0313 */
0x1f66, 0x0000,
/* 0x0300 0x0391 0x0314 */
0x1f0b, 0x0000,
/* 0x0301 0x0391 0x0314 */
0x1f0d, 0x0000,
/* 0x0342 0x0391 0x0314 */
0x1f0f, 0x0000,
/* 0x0300 0x0395 0x0314 */
0x1f1b, 0x0000,
/* 0x0301 0x0395 0x0314 */
0x1f1d, 0x0000,
/* 0x0300 0x0397 0x0314 */
0x1f2b, 0x0000,
/* 0x0301 0x0397 0x0314 */
0x1f2d, 0x0000,
/* 0x0342 0x0397 0x0314 */
0x1f2f, 0x0000,
/* 0x0300 0x0399 0x0314 */
0x1f3b, 0x0000,
/* 0x0301 0x0399 0x0314 */
0x1f3d, 0x0000,
/* 0x0342 0x0399 0x0314 */
0x1f3f, 0x0000,
/* 0x0300 0x039f 0x0314 */
0x1f4b, 0x0000,
/* 0x0301 0x039f 0x0314 */
0x1f4d, 0x0000,
/* 0x0300 0x03a5 0x0314 */
0x1f5b, 0x0000,
/* 0x0301 0x03a5 0x0314 */
0x1f5d, 0x0000,
/* 0x0342 0x03a5 0x0314 */
0x1f5f, 0x0000,
/* 0x0300 0x03a9 0x0314 */
0x1f6b, 0x0000,
/* 0x0301 0x03a9 0x0314 */
0x1f6d, 0x0000,
/* 0x0342 0x03a9 0x0314 */
0x1f6f, 0x0000,
/* 0x0300 0x03b1 0x0314 */
0x1f03, 0x0000,
/* 0x0301 0x03b1 0x0314 */
0x1f05, 0x0000,
/* 0x0342 0x03b1 0x0314 */
0x1f07, 0x0000,
/* 0x0300 0x03b5 0x0314 */
0x1f13, 0x0000,
/* 0x0301 0x03b5 0x0314 */
0x1f15, 0x0000,
/* 0x0300 0x03b7 0x0314 */
0x1f23, 0x0000,
/* 0x0301 0x03b7 0x0314 */
0x1f25, 0x0000,
/* 0x0342 0x03b7 0x0314 */
0x1f27, 0x0000,
/* 0x0300 0x03b9 0x0314 */
0x1f33, 0x0000,
/* 0x0301 0x03b9 0x0314 */
0x1f35, 0x0000,
/* 0x0342 0x03b9 0x0314 */
0x1f37, 0x0000,
/* 0x0300 0x03bf 0x0314 */
0x1f43, 0x0000,
/* 0x0301 0x03bf 0x0314 */
0x1f45, 0x0000,
/* 0x0300 0x03c5 0x0314 */
0x1f53, 0x0000,
/* 0x0301 0x03c5 0x0314 */
0x1f55, 0x0000,
/* 0x0342 0x03c5 0x0314 */
0x1f57, 0x0000,
/* 0x0300 0x03c9 0x0314 */
0x1f63, 0x0000,
/* 0x0301 0x03c9 0x0314 */
0x1f65, 0x0000,
/* 0x0342 0x03c9 0x0314 */
0x1f67, 0x0000,
/* 0x0300 0x004f 0x031b */
0x1edc, 0x0000,
/* 0x0301 0x004f 0x031b */
0x1eda, 0x0000,
/* 0x0303 0x004f 0x031b */
0x1ee0, 0x0000,
/* 0x0309 0x004f 0x031b */
0x1ede, 0x0000,
/* 0x0323 0x004f 0x031b */
0x1ee2, 0x0000,
/* 0x0300 0x0055 0x031b */
0x1eea, 0x0000,
/* 0x0301 0x0055 0x031b */
0x1ee8, 0x0000,
/* 0x0303 0x0055 0x031b */
0x1eee, 0x0000,
/* 0x0309 0x0055 0x031b */
0x1eec, 0x0000,
/* 0x0323 0x0055 0x031b */
0x1ef0, 0x0000,
/* 0x0300 0x006f 0x031b */
0x1edd, 0x0000,
/* 0x0301 0x006f 0x031b */
0x1edb, 0x0000,
/* 0x0303 0x006f 0x031b */
0x1ee1, 0x0000,
/* 0x0309 0x006f 0x031b */
0x1edf, 0x0000,
/* 0x0323 0x006f 0x031b */
0x1ee3, 0x0000,
/* 0x0300 0x0075 0x031b */
0x1eeb, 0x0000,
/* 0x0301 0x0075 0x031b */
0x1ee9, 0x0000,
/* 0x0303 0x0075 0x031b */
0x1eef, 0x0000,
/* 0x0309 0x0075 0x031b */
0x1eed, 0x0000,
/* 0x0323 0x0075 0x031b */
0x1ef1, 0x0000,
/* 0x0302 0x0041 0x0323 */
0x1eac, 0x0000,
/* 0x0306 0x0041 0x0323 */
0x1eb6, 0x0000,
/* 0x0302 0x0045 0x0323 */
0x1ec6, 0x0000,
/* 0x0304 0x004c 0x0323 */
0x1e38, 0x0000,
/* 0x0302 0x004f 0x0323 */
0x1ed8, 0x0000,
/* 0x0304 0x0052 0x0323 */
0x1e5c, 0x0000,
/* 0x0307 0x0053 0x0323 */
0x1e68, 0x0000,
/* 0x0302 0x0061 0x0323 */
0x1ead, 0x0000,
/* 0x0306 0x0061 0x0323 */
0x1eb7, 0x0000,
/* 0x0302 0x0065 0x0323 */
0x1ec7, 0x0000,
/* 0x0304 0x006c 0x0323 */
0x1e39, 0x0000,
/* 0x0302 0x006f 0x0323 */
0x1ed9, 0x0000,
/* 0x0304 0x0072 0x0323 */
0x1e5d, 0x0000,
/* 0x0307 0x0073 0x0323 */
0x1e69, 0x0000,
/* 0x0301 0x0043 0x0327 */
0x1e08, 0x0000,
/* 0x0306 0x0045 0x0327 */
0x1e1c, 0x0000,
/* 0x0301 0x0063 0x0327 */
0x1e09, 0x0000,
/* 0x0306 0x0065 0x0327 */
0x1e1d, 0x0000,
/* 0x0304 0x004f 0x0328 */
0x01ec, 0x0000,
/* 0x0304 0x006f 0x0328 */
0x01ed, 0x0000,
/* 0x0313 0x0391 0x0345 */
0x1f88, 0x0003, 0x0300, 0x0f5e, 0x0301, 0x0f60, 0x0342, 0x0f62,
/* 0x0314 0x0391 0x0345 */
0x1f89, 0x0003, 0x0300, 0x0f64, 0x0301, 0x0f66, 0x0342, 0x0f68,
/* 0x0313 0x0397 0x0345 */
0x1f98, 0x0003, 0x0300, 0x0f6a, 0x0301, 0x0f6c, 0x0342, 0x0f6e,
/* 0x0314 0x0397 0x0345 */
0x1f99, 0x0003, 0x0300, 0x0f70, 0x0301, 0x0f72, 0x0342, 0x0f74,
/* 0x0313 0x03a9 0x0345 */
0x1fa8, 0x0003, 0x0300, 0x0f76, 0x0301, 0x0f78, 0x0342, 0x0f7a,
/* 0x0314 0x03a9 0x0345 */
0x1fa9, 0x0003, 0x0300, 0x0f7c, 0x0301, 0x0f7e, 0x0342, 0x0f80,
/* 0x0300 0x03b1 0x0345 */
0x1fb2, 0x0000,
/* 0x0301 0x03b1 0x0345 */
0x1fb4, 0x0000,
/* 0x0313 0x03b1 0x0345 */
0x1f80, 0x0003, 0x0300, 0x0f82, 0x0301, 0x0f84, 0x0342, 0x0f86,
/* 0x0314 0x03b1 0x0345 */
0x1f81, 0x0003, 0x0300, 0x0f88, 0x0301, 0x0f8a, 0x0342, 0x0f8c,
/* 0x0342 0x03b1 0x0345 */
0x1fb7, 0x0000,
/* 0x0300 0x03b7 0x0345 */
0x1fc2, 0x0000,
/* 0x0301 0x03b7 0x0345 */
0x1fc4, 0x0000,
/* 0x0313 0x03b7 0x0345 */
0x1f90, 0x0003, 0x0300, 0x0f8e, 0x0301, 0x0f90, 0x0342, 0x0f92,
/* 0x0314 0x03b7 0x0345 */
0x1f91, 0x0003, 0x0300, 0x0f94, 0x0301, 0x0f96, 0x0342, 0x0f98,
/* 0x0342 0x03b7 0x0345 */
0x1fc7, 0x0000,
/* 0x0301 0x03bf 0x0345 */
0x1ff4, 0x0000,
/* 0x0300 0x03c9 0x0345 */
0x1ff2, 0x0000,
/* 0x0313 0x03c9 0x0345 */
0x1fa0, 0x0003, 0x0300, 0x0f9a, 0x0301, 0x0f9c, 0x0342, 0x0f9e,
/* 0x0314 0x03c9 0x0345 */
0x1fa1, 0x0003, 0x0300, 0x0fa0, 0x0301, 0x0fa2, 0x0342, 0x0fa4,
/* 0x0342 0x03c9 0x0345 */
0x1ff7, 0x0000,
/* 0x05c1 0x05e9 0x05bc */
0xfb2c, 0x0000,
/* 0x05c2 0x05e9 0x05bc */
0xfb2d, 0x0000,
/* 0x0cd5 0x0cc6 0x0cc2 */
0x0ccb, 0x0000,
/* 0x0f71 0x0fb2 0x0f80 */
0x0f77, 0x0000,
/* 0x0f71 0x0fb3 0x0f80 */
0x0f79, 0x0000,
/* 0x0300 0x0313 0x0391 0x0345 */
0x1f8a, 0x0000,
/* 0x0301 0x0313 0x0391 0x0345 */
0x1f8c, 0x0000,
/* 0x0342 0x0313 0x0391 0x0345 */
0x1f8e, 0x0000,
/* 0x0300 0x0314 0x0391 0x0345 */
0x1f8b, 0x0000,
/* 0x0301 0x0314 0x0391 0x0345 */
0x1f8d, 0x0000,
/* 0x0342 0x0314 0x0391 0x0345 */
0x1f8f, 0x0000,
/* 0x0300 0x0313 0x0397 0x0345 */
0x1f9a, 0x0000,
/* 0x0301 0x0313 0x0397 0x0345 */
0x1f9c, 0x0000,
/* 0x0342 0x0313 0x0397 0x0345 */
0x1f9e, 0x0000,
/* 0x0300 0x0314 0x0397 0x0345 */
0x1f9b, 0x0000,
/* 0x0301 0x0314 0x0397 0x0345 */
0x1f9d, 0x0000,
/* 0x0342 0x0314 0x0397 0x0345 */
0x1f9f, 0x0000,
/* 0x0300 0x0313 0x03a9 0x0345 */
0x1faa, 0x0000,
/* 0x0301 0x0313 0x03a9 0x0345 */
0x1fac, 0x0000,
/* 0x0342 0x0313 0x03a9 0x0345 */
0x1fae, 0x0000,
/* 0x0300 0x0314 0x03a9 0x0345 */
0x1fab, 0x0000,
/* 0x0301 0x0314 0x03a9 0x0345 */
0x1fad, 0x0000,
/* 0x0342 0x0314 0x03a9 0x0345 */
0x1faf, 0x0000,
/* 0x0300 0x0313 0x03b1 0x0345 */
0x1f82, 0x0000,
/* 0x0301 0x0313 0x03b1 0x0345 */
0x1f84, 0x0000,
/* 0x0342 0x0313 0x03b1 0x0345 */
0x1f86, 0x0000,
/* 0x0300 0x0314 0x03b1 0x0345 */
0x1f83, 0x0000,
/* 0x0301 0x0314 0x03b1 0x0345 */
0x1f85, 0x0000,
/* 0x0342 0x0314 0x03b1 0x0345 */
0x1f87, 0x0000,
/* 0x0300 0x0313 0x03b7 0x0345 */
0x1f92, 0x0000,
/* 0x0301 0x0313 0x03b7 0x0345 */
0x1f94, 0x0000,
/* 0x0342 0x0313 0x03b7 0x0345 */
0x1f96, 0x0000,
/* 0x0300 0x0314 0x03b7 0x0345 */
0x1f93, 0x0000,
/* 0x0301 0x0314 0x03b7 0x0345 */
0x1f95, 0x0000,
/* 0x0342 0x0314 0x03b7 0x0345 */
0x1f97, 0x0000,
/* 0x0300 0x0313 0x03c9 0x0345 */
0x1fa2, 0x0000,
/* 0x0301 0x0313 0x03c9 0x0345 */
0x1fa4, 0x0000,
/* 0x0342 0x0313 0x03c9 0x0345 */
0x1fa6, 0x0000,
/* 0x0300 0x0314 0x03c9 0x0345 */
0x1fa3, 0x0000,
/* 0x0301 0x0314 0x03c9 0x0345 */
0x1fa5, 0x0000,
/* 0x0342 0x0314 0x03c9 0x0345 */
0x1fa7, 0x0000,
};
| linux-master | fs/hfsplus/tables.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/xattr_trusted.c
*
* Vyacheslav Dubeyko <[email protected]>
*
* Handler for trusted extended attributes.
*/
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "xattr.h"
static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return hfsplus_getxattr(inode, name, buffer, size,
XATTR_TRUSTED_PREFIX,
XATTR_TRUSTED_PREFIX_LEN);
}
static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}
const struct xattr_handler hfsplus_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = hfsplus_trusted_getxattr,
.set = hfsplus_trusted_setxattr,
};
| linux-master | fs/hfsplus/xattr_trusted.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/xattr.c
*
* Vyacheslav Dubeyko <[email protected]>
*
* Logic for processing extended attributes
*/
#include "hfsplus_fs.h"
#include <linux/nls.h>
#include "xattr.h"
static int hfsplus_removexattr(struct inode *inode, const char *name);
const struct xattr_handler *hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
&hfsplus_xattr_security_handler,
NULL
};
static int strcmp_xattr_finder_info(const char *name)
{
if (name) {
return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
}
return -1;
}
static int strcmp_xattr_acl(const char *name)
{
if (name) {
return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
sizeof(HFSPLUS_XATTR_ACL_NAME));
}
return -1;
}
static bool is_known_namespace(const char *name)
{
if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
return false;
return true;
}
static void hfsplus_init_header_node(struct inode *attr_file,
u32 clump_size,
char *buf, u16 node_size)
{
struct hfs_bnode_desc *desc;
struct hfs_btree_header_rec *head;
u16 offset;
__be16 *rec_offsets;
u32 hdr_node_map_rec_bits;
char *bmp;
u32 used_nodes;
u32 used_bmp_bytes;
u64 tmp;
hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
clump_size, node_size);
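/*
 * The node assembled below holds, in order: the bnode descriptor, the
 * B-tree header record, HFSPLUS_BTREE_HDR_USER_BYTES of user info, and
 * the allocation-bitmap map record, with the __be16 record-offset list
 * filled in reverse at the very end of the node.
 */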
/* The end of the node contains the list of record offsets */
rec_offsets = (__be16 *)(buf + node_size);
desc = (struct hfs_bnode_desc *)buf;
desc->type = HFS_NODE_HEADER;
desc->num_recs = cpu_to_be16(HFSPLUS_BTREE_HDR_NODE_RECS_COUNT);
offset = sizeof(struct hfs_bnode_desc);
*--rec_offsets = cpu_to_be16(offset);
head = (struct hfs_btree_header_rec *)(buf + offset);
head->node_size = cpu_to_be16(node_size);
tmp = i_size_read(attr_file);
do_div(tmp, node_size);
head->node_count = cpu_to_be32(tmp);
head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
head->clump_size = cpu_to_be32(clump_size);
head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16));
offset += sizeof(struct hfs_btree_header_rec);
*--rec_offsets = cpu_to_be16(offset);
offset += HFSPLUS_BTREE_HDR_USER_BYTES;
*--rec_offsets = cpu_to_be16(offset);
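	/*
	 * Bits available in the header node's map record: whatever is
	 * left of the node after the records laid out so far, minus the
	 * four record-offset slots stored at the end of the node.
	 */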
hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) {
u32 map_node_bits;
u32 map_nodes;
desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1);
map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
(2 * sizeof(u16)) - 2);
map_nodes = (be32_to_cpu(head->node_count) -
hdr_node_map_rec_bits +
(map_node_bits - 1)) / map_node_bits;
be32_add_cpu(&head->free_nodes, 0 - map_nodes);
}
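	/* mark the header node and any map nodes as used in the map record */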
bmp = buf + offset;
used_nodes =
be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes);
used_bmp_bytes = used_nodes / 8;
if (used_bmp_bytes) {
memset(bmp, 0xFF, used_bmp_bytes);
bmp += used_bmp_bytes;
used_nodes %= 8;
}
*bmp = ~(0xFF >> used_nodes);
offset += hdr_node_map_rec_bits / 8;
*--rec_offsets = cpu_to_be16(offset);
}
static int hfsplus_create_attributes_file(struct super_block *sb)
{
int err = 0;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct inode *attr_file;
struct hfsplus_inode_info *hip;
u32 clump_size;
u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
char *buf;
int index, written;
struct address_space *mapping;
struct page *page;
int old_state = HFSPLUS_EMPTY_ATTR_TREE;
hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);
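	/*
	 * Creation of the attributes file is serialized by a lock-free
	 * state machine: only the thread that wins the cmpxchg below
	 * moves the state from EMPTY to CREATING and builds the tree.
	 */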
check_attr_tree_state_again:
switch (atomic_read(&sbi->attr_tree_state)) {
case HFSPLUS_EMPTY_ATTR_TREE:
if (old_state != atomic_cmpxchg(&sbi->attr_tree_state,
old_state,
HFSPLUS_CREATING_ATTR_TREE))
goto check_attr_tree_state_again;
break;
case HFSPLUS_CREATING_ATTR_TREE:
		/*
		 * Another thread is in the middle of creating the
		 * attributes file. In practice we should not get here:
		 * __hfsplus_setxattr() first calls hfs_find_init() to
		 * look up the catalog file, which takes the catalog
		 * B-tree's mutex, so other threads block until the
		 * creator releases it. If the code ever changes so that
		 * this state is observed, we return -EAGAIN: the first
		 * attempt to set the xattr fails, but a retry succeeds
		 * once creation has finished.
		 */
return -EAGAIN;
case HFSPLUS_VALID_ATTR_TREE:
return 0;
case HFSPLUS_FAILED_ATTR_TREE:
return -EOPNOTSUPP;
default:
BUG();
}
attr_file = hfsplus_iget(sb, HFSPLUS_ATTR_CNID);
if (IS_ERR(attr_file)) {
pr_err("failed to load attributes file\n");
return PTR_ERR(attr_file);
}
BUG_ON(i_size_read(attr_file) != 0);
hip = HFSPLUS_I(attr_file);
clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
node_size,
sbi->sect_count,
HFSPLUS_ATTR_CNID);
mutex_lock(&hip->extents_lock);
hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift;
mutex_unlock(&hip->extents_lock);
if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
err = -ENOSPC;
goto end_attr_file_creation;
}
while (hip->alloc_blocks < hip->clump_blocks) {
err = hfsplus_file_extend(attr_file, false);
if (unlikely(err)) {
pr_err("failed to extend attributes file\n");
goto end_attr_file_creation;
}
hip->phys_size = attr_file->i_size =
(loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift;
hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift;
inode_set_bytes(attr_file, attr_file->i_size);
}
buf = kzalloc(node_size, GFP_NOFS);
if (!buf) {
err = -ENOMEM;
goto end_attr_file_creation;
}
hfsplus_init_header_node(attr_file, clump_size, buf, node_size);
mapping = attr_file->i_mapping;
index = 0;
written = 0;
for (; written < node_size; index++, written += PAGE_SIZE) {
void *kaddr;
page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto failed_header_node_init;
}
kaddr = kmap_atomic(page);
memcpy(kaddr, buf + written,
min_t(size_t, PAGE_SIZE, node_size - written));
kunmap_atomic(kaddr);
set_page_dirty(page);
put_page(page);
}
hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
if (!sbi->attr_tree)
pr_err("failed to load attributes file\n");
failed_header_node_init:
kfree(buf);
end_attr_file_creation:
iput(attr_file);
if (!err)
atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
else if (err == -ENOSPC)
atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
else
atomic_set(&sbi->attr_tree_state, HFSPLUS_FAILED_ATTR_TREE);
return err;
}
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
int err;
struct hfs_find_data cat_fd;
hfsplus_cat_entry entry;
u16 cat_entry_flags, cat_entry_type;
u16 folder_finderinfo_len = sizeof(struct DInfo) +
sizeof(struct DXInfo);
u16 file_finderinfo_len = sizeof(struct FInfo) +
sizeof(struct FXInfo);
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
HFSPLUS_IS_RSRC(inode))
return -EOPNOTSUPP;
if (value == NULL)
return hfsplus_removexattr(inode, name);
err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
if (err) {
pr_err("can't init xattr find struct\n");
return err;
}
err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
if (err) {
pr_err("catalog searching failed\n");
goto end_setxattr;
}
if (!strcmp_xattr_finder_info(name)) {
if (flags & XATTR_CREATE) {
pr_err("xattr exists yet\n");
err = -EOPNOTSUPP;
goto end_setxattr;
}
hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
sizeof(hfsplus_cat_entry));
if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
if (size == folder_finderinfo_len) {
memcpy(&entry.folder.info, value,
folder_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
hfsplus_mark_inode_dirty(inode,
HFSPLUS_I_CAT_DIRTY);
} else {
err = -ERANGE;
goto end_setxattr;
}
} else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
if (size == file_finderinfo_len) {
memcpy(&entry.file.info, value,
file_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_mark_inode_dirty(inode,
HFSPLUS_I_CAT_DIRTY);
} else {
err = -ERANGE;
goto end_setxattr;
}
} else {
err = -EOPNOTSUPP;
goto end_setxattr;
}
goto end_setxattr;
}
if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
err = hfsplus_create_attributes_file(inode->i_sb);
if (unlikely(err))
goto end_setxattr;
}
if (hfsplus_attr_exists(inode, name)) {
if (flags & XATTR_CREATE) {
pr_err("xattr exists yet\n");
err = -EOPNOTSUPP;
goto end_setxattr;
}
err = hfsplus_delete_attr(inode, name);
if (err)
goto end_setxattr;
err = hfsplus_create_attr(inode, name, value, size);
if (err)
goto end_setxattr;
} else {
if (flags & XATTR_REPLACE) {
pr_err("cannot replace xattr\n");
err = -EOPNOTSUPP;
goto end_setxattr;
}
err = hfsplus_create_attr(inode, name, value, size);
if (err)
goto end_setxattr;
}
cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
if (cat_entry_type == HFSPLUS_FOLDER) {
cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
cat_fd.entryoffset +
offsetof(struct hfsplus_cat_folder, flags));
cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
if (!strcmp_xattr_acl(name))
cat_entry_flags |= HFSPLUS_ACL_EXISTS;
hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_folder, flags),
cat_entry_flags);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
} else if (cat_entry_type == HFSPLUS_FILE) {
cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
cat_fd.entryoffset +
offsetof(struct hfsplus_cat_file, flags));
cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
if (!strcmp_xattr_acl(name))
cat_entry_flags |= HFSPLUS_ACL_EXISTS;
hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_file, flags),
cat_entry_flags);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
} else {
pr_err("invalid catalog entry type\n");
err = -EIO;
goto end_setxattr;
}
end_setxattr:
hfs_find_exit(&cat_fd);
return err;
}
static int name_len(const char *xattr_name, int xattr_name_len)
{
int len = xattr_name_len + 1;
if (!is_known_namespace(xattr_name))
len += XATTR_MAC_OSX_PREFIX_LEN;
return len;
}
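/*
 * Copy an on-disk xattr name into the listxattr buffer, prepending the
 * "osx." prefix for names outside the known Linux namespaces, and
 * NUL-terminate the result. Returns the number of bytes written.
 */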
static int copy_name(char *buffer, const char *xattr_name, int name_len)
{
int len = name_len;
int offset = 0;
if (!is_known_namespace(xattr_name)) {
memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
offset += XATTR_MAC_OSX_PREFIX_LEN;
len += XATTR_MAC_OSX_PREFIX_LEN;
}
strncpy(buffer + offset, xattr_name, name_len);
memset(buffer + offset + name_len, 0, 1);
len += 1;
return len;
}
int hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags,
const char *prefix, size_t prefixlen)
{
char *xattr_name;
int res;
xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
GFP_KERNEL);
if (!xattr_name)
return -ENOMEM;
strcpy(xattr_name, prefix);
strcpy(xattr_name + prefixlen, name);
res = __hfsplus_setxattr(inode, xattr_name, value, size, flags);
kfree(xattr_name);
return res;
}
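/*
 * Finder info is exposed as a pseudo-xattr (HFSPLUS_XATTR_FINDER_INFO_NAME);
 * it lives in the catalog record itself rather than in the attributes
 * B-tree, so it is read straight from the catalog entry.
 */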
static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
void *value, size_t size)
{
ssize_t res = 0;
struct hfs_find_data fd;
u16 entry_type;
u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
u16 record_len = max(folder_rec_len, file_rec_len);
u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
if (size >= record_len) {
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
if (res) {
pr_err("can't init xattr find struct\n");
return res;
}
res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
if (res)
goto end_getxattr_finder_info;
entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (entry_type == HFSPLUS_FOLDER) {
hfs_bnode_read(fd.bnode, folder_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_folder, user_info),
folder_rec_len);
memcpy(value, folder_finder_info, folder_rec_len);
res = folder_rec_len;
} else if (entry_type == HFSPLUS_FILE) {
hfs_bnode_read(fd.bnode, file_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_file, user_info),
file_rec_len);
memcpy(value, file_finder_info, file_rec_len);
res = file_rec_len;
} else {
res = -EOPNOTSUPP;
goto end_getxattr_finder_info;
}
} else
res = size ? -ERANGE : record_len;
end_getxattr_finder_info:
if (size >= record_len)
hfs_find_exit(&fd);
return res;
}
ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
void *value, size_t size)
{
struct hfs_find_data fd;
hfsplus_attr_entry *entry;
__be32 xattr_record_type;
u32 record_type;
u16 record_length = 0;
ssize_t res;
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
HFSPLUS_IS_RSRC(inode))
return -EOPNOTSUPP;
if (!strcmp_xattr_finder_info(name))
return hfsplus_getxattr_finder_info(inode, value, size);
if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
return -EOPNOTSUPP;
entry = hfsplus_alloc_attr_entry();
if (!entry) {
pr_err("can't allocate xattr entry\n");
return -ENOMEM;
}
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
if (res) {
pr_err("can't init xattr find struct\n");
goto failed_getxattr_init;
}
res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
if (res) {
if (res == -ENOENT)
res = -ENODATA;
else
pr_err("xattr searching failed\n");
goto out;
}
hfs_bnode_read(fd.bnode, &xattr_record_type,
fd.entryoffset, sizeof(xattr_record_type));
record_type = be32_to_cpu(xattr_record_type);
if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
record_length = hfs_bnode_read_u16(fd.bnode,
fd.entryoffset +
offsetof(struct hfsplus_attr_inline_data,
length));
if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
pr_err("invalid xattr record size\n");
res = -EIO;
goto out;
}
} else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
record_type == HFSPLUS_ATTR_EXTENTS) {
pr_err("only inline data xattr are supported\n");
res = -EOPNOTSUPP;
goto out;
} else {
pr_err("invalid xattr record\n");
res = -EIO;
goto out;
}
if (size) {
hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
offsetof(struct hfsplus_attr_inline_data,
raw_bytes) + record_length);
}
if (size >= record_length) {
memcpy(value, entry->inline_data.raw_bytes, record_length);
res = record_length;
} else
res = size ? -ERANGE : record_length;
out:
hfs_find_exit(&fd);
failed_getxattr_init:
hfsplus_destroy_attr_entry(entry);
return res;
}
ssize_t hfsplus_getxattr(struct inode *inode, const char *name,
void *value, size_t size,
const char *prefix, size_t prefixlen)
{
int res;
char *xattr_name;
xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
GFP_KERNEL);
if (!xattr_name)
return -ENOMEM;
strcpy(xattr_name, prefix);
strcpy(xattr_name + prefixlen, name);
res = __hfsplus_getxattr(inode, xattr_name, value, size);
kfree(xattr_name);
return res;
}
static inline int can_list(const char *xattr_name)
{
if (!xattr_name)
return 0;
return strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
XATTR_TRUSTED_PREFIX_LEN) ||
capable(CAP_SYS_ADMIN);
}
static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
char *buffer, size_t size)
{
ssize_t res;
struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
u16 entry_type;
u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
unsigned long len, found_bit;
int xattr_name_len, symbols_count;
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
if (res) {
pr_err("can't init xattr find struct\n");
return res;
}
res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
if (res)
goto end_listxattr_finder_info;
entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (entry_type == HFSPLUS_FOLDER) {
len = sizeof(struct DInfo) + sizeof(struct DXInfo);
hfs_bnode_read(fd.bnode, folder_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_folder, user_info),
len);
found_bit = find_first_bit((void *)folder_finder_info, len*8);
} else if (entry_type == HFSPLUS_FILE) {
len = sizeof(struct FInfo) + sizeof(struct FXInfo);
hfs_bnode_read(fd.bnode, file_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_file, user_info),
len);
found_bit = find_first_bit((void *)file_finder_info, len*8);
} else {
res = -EOPNOTSUPP;
goto end_listxattr_finder_info;
}
if (found_bit >= (len*8))
res = 0;
else {
symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
xattr_name_len =
name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
if (!buffer || !size) {
if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
res = xattr_name_len;
} else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
if (size < xattr_name_len)
res = -ERANGE;
else {
res = copy_name(buffer,
HFSPLUS_XATTR_FINDER_INFO_NAME,
symbols_count);
}
}
}
end_listxattr_finder_info:
hfs_find_exit(&fd);
return res;
}
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
ssize_t err;
ssize_t res;
struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
struct hfsplus_attr_key attr_key;
char *strbuf;
int xattr_name_len;
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
HFSPLUS_IS_RSRC(inode))
return -EOPNOTSUPP;
res = hfsplus_listxattr_finder_info(dentry, buffer, size);
if (res < 0)
return res;
else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
return (res == 0) ? -EOPNOTSUPP : res;
err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
if (err) {
pr_err("can't init xattr find struct\n");
return err;
}
strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
if (!strbuf) {
res = -ENOMEM;
goto out;
}
err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
if (err) {
if (err == -ENOENT) {
if (res == 0)
res = -ENODATA;
goto end_listxattr;
} else {
res = err;
goto end_listxattr;
}
}
for (;;) {
u16 key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
if (key_len == 0 || key_len > fd.tree->max_key_len) {
pr_err("invalid xattr key length: %d\n", key_len);
res = -EIO;
goto end_listxattr;
}
hfs_bnode_read(fd.bnode, &attr_key,
fd.keyoffset, key_len + sizeof(key_len));
if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
goto end_listxattr;
xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
if (hfsplus_uni2asc(inode->i_sb,
(const struct hfsplus_unistr *)&fd.key->attr.key_name,
strbuf, &xattr_name_len)) {
pr_err("unicode conversion failed\n");
res = -EIO;
goto end_listxattr;
}
if (!buffer || !size) {
if (can_list(strbuf))
res += name_len(strbuf, xattr_name_len);
} else if (can_list(strbuf)) {
if (size < (res + name_len(strbuf, xattr_name_len))) {
res = -ERANGE;
goto end_listxattr;
} else
res += copy_name(buffer + res,
strbuf, xattr_name_len);
}
if (hfs_brec_goto(&fd, 1))
goto end_listxattr;
}
end_listxattr:
kfree(strbuf);
out:
hfs_find_exit(&fd);
return res;
}
static int hfsplus_removexattr(struct inode *inode, const char *name)
{
int err;
struct hfs_find_data cat_fd;
u16 flags;
u16 cat_entry_type;
int is_xattr_acl_deleted;
int is_all_xattrs_deleted;
if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
return -EOPNOTSUPP;
if (!strcmp_xattr_finder_info(name))
return -EOPNOTSUPP;
err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
if (err) {
pr_err("can't init xattr find struct\n");
return err;
}
err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
if (err) {
pr_err("catalog searching failed\n");
goto end_removexattr;
}
err = hfsplus_delete_attr(inode, name);
if (err)
goto end_removexattr;
is_xattr_acl_deleted = !strcmp_xattr_acl(name);
is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);
if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
goto end_removexattr;
cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
if (cat_entry_type == HFSPLUS_FOLDER) {
flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_folder, flags));
if (is_xattr_acl_deleted)
flags &= ~HFSPLUS_ACL_EXISTS;
if (is_all_xattrs_deleted)
flags &= ~HFSPLUS_XATTR_EXISTS;
hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_folder, flags),
flags);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
} else if (cat_entry_type == HFSPLUS_FILE) {
flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_file, flags));
if (is_xattr_acl_deleted)
flags &= ~HFSPLUS_ACL_EXISTS;
if (is_all_xattrs_deleted)
flags &= ~HFSPLUS_XATTR_EXISTS;
hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
offsetof(struct hfsplus_cat_file, flags),
flags);
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
} else {
pr_err("invalid catalog entry type\n");
err = -EIO;
goto end_removexattr;
}
end_removexattr:
hfs_find_exit(&cat_fd);
return err;
}
static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
/*
* Don't allow retrieving properly prefixed attributes
* by prepending them with "osx."
*/
if (is_known_namespace(name))
return -EOPNOTSUPP;
/*
* osx is the namespace we use to indicate an unprefixed
* attribute on the filesystem (like the ones that OS X
* creates), so we pass the name through unmodified (after
* ensuring it doesn't conflict with another namespace).
*/
return __hfsplus_getxattr(inode, name, buffer, size);
}
static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
/*
* Don't allow setting properly prefixed attributes
* by prepending them with "osx."
*/
if (is_known_namespace(name))
return -EOPNOTSUPP;
/*
* osx is the namespace we use to indicate an unprefixed
* attribute on the filesystem (like the ones that OS X
* creates), so we pass the name through unmodified (after
* ensuring it doesn't conflict with another namespace).
*/
return __hfsplus_setxattr(inode, name, buffer, size, flags);
}
const struct xattr_handler hfsplus_xattr_osx_handler = {
.prefix = XATTR_MAC_OSX_PREFIX,
.get = hfsplus_osx_getxattr,
.set = hfsplus_osx_setxattr,
};
| linux-master | fs/hfsplus/xattr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/bfind.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Search routines for btrees
*/
#include <linux/slab.h>
#include "hfsplus_fs.h"
int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
{
void *ptr;
fd->tree = tree;
fd->bnode = NULL;
ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
switch (tree->cnid) {
case HFSPLUS_CAT_CNID:
mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
break;
case HFSPLUS_EXT_CNID:
mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
break;
case HFSPLUS_ATTR_CNID:
mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
break;
default:
BUG();
}
return 0;
}
void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
}
int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
struct hfs_find_data *fd,
int *begin,
int *end,
int *cur_rec)
{
__be32 cur_cnid;
__be32 search_cnid;
if (bnode->tree->cnid == HFSPLUS_EXT_CNID) {
cur_cnid = fd->key->ext.cnid;
search_cnid = fd->search_key->ext.cnid;
} else if (bnode->tree->cnid == HFSPLUS_CAT_CNID) {
cur_cnid = fd->key->cat.parent;
search_cnid = fd->search_key->cat.parent;
} else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) {
cur_cnid = fd->key->attr.cnid;
search_cnid = fd->search_key->attr.cnid;
} else {
		cur_cnid = 0;	/* silence a used-uninitialized warning */
search_cnid = 0;
BUG();
}
if (cur_cnid == search_cnid) {
(*end) = (*cur_rec);
if ((*begin) == (*end))
return 1;
} else {
if (be32_to_cpu(cur_cnid) < be32_to_cpu(search_cnid))
(*begin) = (*cur_rec) + 1;
else
(*end) = (*cur_rec) - 1;
}
return 0;
}
int hfs_find_rec_by_key(struct hfs_bnode *bnode,
struct hfs_find_data *fd,
int *begin,
int *end,
int *cur_rec)
{
int cmpval;
cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
if (!cmpval) {
(*end) = (*cur_rec);
return 1;
}
if (cmpval < 0)
(*begin) = (*cur_rec) + 1;
else
*(end) = (*cur_rec) - 1;
return 0;
}
/* Find the record in bnode that best matches key (not greater than...) */
int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
search_strategy_t rec_found)
{
u16 off, len, keylen;
int rec;
int b, e;
int res;
BUG_ON(!rec_found);
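	/* binary-search the node's records using the caller's strategy */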
b = 0;
e = bnode->num_recs - 1;
res = -ENOENT;
do {
rec = (e + b) / 2;
len = hfs_brec_lenoff(bnode, rec, &off);
keylen = hfs_brec_keylen(bnode, rec);
if (keylen == 0) {
res = -EINVAL;
goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
if (rec_found(bnode, fd, &b, &e, &rec)) {
res = 0;
goto done;
}
} while (b <= e);
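	/*
	 * No exact match: fall back to record e, the closest record not
	 * greater than the search key, and re-read its key for the caller.
	 */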
if (rec != e && e >= 0) {
len = hfs_brec_lenoff(bnode, e, &off);
keylen = hfs_brec_keylen(bnode, e);
if (keylen == 0) {
res = -EINVAL;
goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
}
done:
fd->record = e;
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
fail:
return res;
}
/* Traverse a B*Tree from the root to a leaf finding best fit to key */
/* Return allocated copy of node found, set recnum to best record */
int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
{
struct hfs_btree *tree;
struct hfs_bnode *bnode;
u32 nidx, parent;
__be32 data;
int height, res;
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
fd->bnode = NULL;
nidx = tree->root;
if (!nidx)
return -ENOENT;
height = tree->depth;
res = 0;
parent = 0;
for (;;) {
bnode = hfs_bnode_find(tree, nidx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
break;
}
if (bnode->height != height)
goto invalid;
if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF))
goto invalid;
bnode->parent = parent;
res = __hfs_brec_find(bnode, fd, do_key_compare);
if (!height)
break;
if (fd->record < 0)
goto release;
parent = nidx;
hfs_bnode_read(bnode, &data, fd->entryoffset, 4);
nidx = be32_to_cpu(data);
hfs_bnode_put(bnode);
}
fd->bnode = bnode;
return res;
invalid:
pr_err("inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
height, bnode->height, bnode->type, nidx, parent);
res = -EIO;
release:
hfs_bnode_put(bnode);
return res;
}
int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
{
int res;
res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (res)
return res;
if (fd->entrylength > rec_len)
return -EINVAL;
hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength);
return 0;
}
int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
{
struct hfs_btree *tree;
struct hfs_bnode *bnode;
int idx, res = 0;
u16 off, len, keylen;
bnode = fd->bnode;
tree = bnode->tree;
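	/*
	 * Step |cnt| records backward (cnt < 0) or forward (cnt > 0),
	 * moving across sibling nodes as needed.
	 */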
if (cnt < 0) {
cnt = -cnt;
while (cnt > fd->record) {
cnt -= fd->record + 1;
fd->record = bnode->num_recs - 1;
idx = bnode->prev;
if (!idx) {
res = -ENOENT;
goto out;
}
hfs_bnode_put(bnode);
bnode = hfs_bnode_find(tree, idx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
goto out;
}
}
fd->record -= cnt;
} else {
while (cnt >= bnode->num_recs - fd->record) {
cnt -= bnode->num_recs - fd->record;
fd->record = 0;
idx = bnode->next;
if (!idx) {
res = -ENOENT;
goto out;
}
hfs_bnode_put(bnode);
bnode = hfs_bnode_find(tree, idx);
if (IS_ERR(bnode)) {
res = PTR_ERR(bnode);
bnode = NULL;
goto out;
}
}
fd->record += cnt;
}
len = hfs_brec_lenoff(bnode, fd->record, &off);
keylen = hfs_brec_keylen(bnode, fd->record);
if (keylen == 0) {
res = -EINVAL;
goto out;
}
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
hfs_bnode_read(bnode, fd->key, off, keylen);
out:
fd->bnode = bnode;
return res;
}
| linux-master | fs/hfsplus/bfind.c |
/*
* linux/fs/hfsplus/part_tbl.c
*
* Copyright (C) 1996-1997 Paul H. Hargrove
* This file may be distributed under the terms of
* the GNU General Public License.
*
* Original code to handle the new style Mac partition table based on
* a patch contributed by Holger Schemel ([email protected]).
*
* In function preconditions the term "valid" applied to a pointer to
* a structure means that the pointer is non-NULL and the structure it
* points to has all fields initialized to consistent values.
*
*/
#include <linux/slab.h>
#include "hfsplus_fs.h"
/* offsets to various blocks */
#define HFS_DD_BLK 0 /* Driver Descriptor block */
#define HFS_PMAP_BLK 1 /* First block of partition map */
#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
/* magic numbers for various disk blocks */
#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
/*
* The new style Mac partition map
*
* For each partition on the media there is a physical block (512-byte
* block) containing one of these structures. These blocks are
* contiguous starting at block 1.
*/
struct new_pmap {
__be16 pmSig; /* signature */
__be16 reSigPad; /* padding */
__be32 pmMapBlkCnt; /* partition blocks count */
__be32 pmPyPartStart; /* physical block start of partition */
__be32 pmPartBlkCnt; /* physical block count of partition */
u8 pmPartName[32]; /* (null terminated?) string
giving the name of this
partition */
u8 pmPartType[32]; /* (null terminated?) string
giving the type of this
partition */
/* a bunch more stuff we don't need */
} __packed;
/*
* The old style Mac partition map
*
 * The partition map consists of a 2-byte signature followed by an
 * array of these structures. The map is terminated by an all-zero
 * entry.
*/
struct old_pmap {
__be16 pdSig; /* Signature bytes */
struct old_pmap_entry {
__be32 pdStart;
__be32 pdSize;
__be32 pdFSID;
} pdEntry[42];
} __packed;
static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm,
sector_t *part_start, sector_t *part_size)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
int i;
for (i = 0; i < 42; i++) {
struct old_pmap_entry *p = &pm->pdEntry[i];
if (p->pdStart && p->pdSize &&
p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
(sbi->part < 0 || sbi->part == i)) {
*part_start += be32_to_cpu(p->pdStart);
*part_size = be32_to_cpu(p->pdSize);
return 0;
}
}
return -ENOENT;
}
static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
int size = be32_to_cpu(pm->pmMapBlkCnt);
int buf_size = hfsplus_min_io_size(sb);
int res;
int i = 0;
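	/*
	 * Map entries occupy consecutive 512-byte blocks; walk them
	 * looking for an Apple_HFS entry, reading further blocks from
	 * disk once we run past the buffered region.
	 */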
do {
if (!memcmp(pm->pmPartType, "Apple_HFS", 9) &&
(sbi->part < 0 || sbi->part == i)) {
*part_start += be32_to_cpu(pm->pmPyPartStart);
*part_size = be32_to_cpu(pm->pmPartBlkCnt);
return 0;
}
if (++i >= size)
return -ENOENT;
pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
if ((u8 *)pm - (u8 *)buf >= buf_size) {
res = hfsplus_submit_bio(sb,
*part_start + HFS_PMAP_BLK + i,
buf, (void **)&pm, REQ_OP_READ);
if (res)
return res;
}
} while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
return -ENOENT;
}
/*
* Parse the partition map looking for the start and length of a
* HFS/HFS+ partition.
*/
int hfs_part_find(struct super_block *sb,
sector_t *part_start, sector_t *part_size)
{
void *buf, *data;
int res;
buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
if (!buf)
return -ENOMEM;
res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
buf, &data, REQ_OP_READ);
if (res)
goto out;
switch (be16_to_cpu(*((__be16 *)data))) {
case HFS_OLD_PMAP_MAGIC:
res = hfs_parse_old_pmap(sb, data, part_start, part_size);
break;
case HFS_NEW_PMAP_MAGIC:
res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
break;
default:
res = -ENOENT;
break;
}
out:
kfree(buf);
return res;
}
| linux-master | fs/hfsplus/part_tbl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/bitmap.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handling of allocation file
*/
#include <linux/pagemap.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
int hfsplus_block_allocate(struct super_block *sb, u32 size,
u32 offset, u32 *max)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, start, len, n;
__be32 val;
int i;
len = *max;
if (!len)
return size;
hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&sbi->alloc_mutex);
mapping = sbi->alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
pptr = kmap_local_page(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
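	/*
	 * If the bitmap extends past this page, scan the whole page;
	 * otherwise stop at the last 32-bit word holding valid bits.
	 */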
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
/* scan the first partial u32 for zero bits */
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = (1U << 31) >> i;
for (; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
/* scan complete u32s for the first zero bit */
while (1) {
while (curr < end) {
val = *curr;
if (~val) {
n = be32_to_cpu(val);
				mask = 1U << 31;
for (i = 0; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
}
kunmap_local(pptr);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
curr = pptr = kmap_local_page(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
hfs_dbg(BITMAP, "bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
hfs_dbg(BITMAP, "bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
len = min(size - start, len);
while (1) {
n |= mask;
if (++i >= 32)
break;
mask >>= 1;
if (!--len || n & mask)
goto done;
}
if (!--len)
goto done;
*curr++ = cpu_to_be32(n);
/* do full u32s */
while (1) {
while (curr < end) {
n = be32_to_cpu(*curr);
if (len < 32)
goto last;
if (n) {
len = 32;
goto last;
}
*curr++ = cpu_to_be32(0xffffffff);
len -= 32;
}
set_page_dirty(page);
kunmap_local(pptr);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
pptr = kmap_local_page(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
last:
/* do any partial u32 at end */
mask = 1U << 31;
for (i = 0; i < len; i++) {
if (n & mask)
break;
n |= mask;
mask >>= 1;
}
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
kunmap_local(pptr);
*max = offset + (curr - pptr) * 32 + i - start;
sbi->free_blocks -= *max;
hfsplus_mark_mdb_dirty(sb);
hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&sbi->alloc_mutex);
return start;
}
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, len, pnr;
int i;
/* is there any actual work to be done? */
if (!count)
return 0;
hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
/* are all of the bits in range? */
if ((offset + count) > sbi->total_blocks)
return -ENOENT;
mutex_lock(&sbi->alloc_mutex);
mapping = sbi->alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
page = read_mapping_page(mapping, pnr, NULL);
if (IS_ERR(page))
goto kaboom;
pptr = kmap_local_page(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
len = count;
/* do any partial u32 at the start */
i = offset % 32;
if (i) {
int j = 32 - i;
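		/*
		 * Keep the i bits before the freed range; if the range
		 * ends inside this word, also preserve the bits after it.
		 */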
mask = 0xffffffffU << j;
if (j > count) {
mask |= 0xffffffffU >> (i + count);
*curr++ &= cpu_to_be32(mask);
goto out;
}
*curr++ &= cpu_to_be32(mask);
count -= j;
}
/* do full u32s */
while (1) {
while (curr < end) {
if (count < 32)
goto done;
*curr++ = 0;
count -= 32;
}
if (!count)
break;
set_page_dirty(page);
kunmap_local(pptr);
page = read_mapping_page(mapping, ++pnr, NULL);
if (IS_ERR(page))
goto kaboom;
pptr = kmap_local_page(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
done:
/* do any partial u32 at end */
if (count) {
mask = 0xffffffffU >> count;
*curr &= cpu_to_be32(mask);
}
out:
set_page_dirty(page);
kunmap_local(pptr);
sbi->free_blocks += len;
hfsplus_mark_mdb_dirty(sb);
mutex_unlock(&sbi->alloc_mutex);
return 0;
kaboom:
pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
mutex_unlock(&sbi->alloc_mutex);
return -EIO;
}
| linux-master | fs/hfsplus/bitmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/xattr_user.c
*
* Vyacheslav Dubeyko <[email protected]>
*
* Handler for user extended attributes.
*/
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "xattr.h"
static int hfsplus_user_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return hfsplus_getxattr(inode, name, buffer, size,
XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
static int hfsplus_user_setxattr(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
const struct xattr_handler hfsplus_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.get = hfsplus_user_getxattr,
.set = hfsplus_user_setxattr,
};
| linux-master | fs/hfsplus/xattr_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/dir.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handling of directories
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
static inline void hfsplus_instantiate(struct dentry *dentry,
struct inode *inode, u32 cnid)
{
dentry->d_fsdata = (void *)(unsigned long)cnid;
d_instantiate(dentry, inode);
}
/* Find the entry inside dir named dentry->d_name */
static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
struct hfs_find_data fd;
struct super_block *sb;
hfsplus_cat_entry entry;
int err;
u32 cnid, linkid = 0;
u16 type;
sb = dir->i_sb;
dentry->d_fsdata = NULL;
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return ERR_PTR(err);
err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino,
&dentry->d_name);
if (unlikely(err < 0))
goto fail;
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
if (err) {
if (err == -ENOENT) {
hfs_find_exit(&fd);
/* No such entry */
inode = NULL;
goto out;
}
goto fail;
}
type = be16_to_cpu(entry.type);
if (type == HFSPLUS_FOLDER) {
if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
err = -EIO;
goto fail;
}
cnid = be32_to_cpu(entry.folder.id);
dentry->d_fsdata = (void *)(unsigned long)cnid;
} else if (type == HFSPLUS_FILE) {
if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
err = -EIO;
goto fail;
}
cnid = be32_to_cpu(entry.file.id);
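		/*
		 * Hard links are stored as "iNode<id>" files in the hidden
		 * metadata directory; detect them by Finder type/creator
		 * and creation date, then redo the lookup on the target.
		 */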
if (entry.file.user_info.fdType ==
cpu_to_be32(HFSP_HARDLINK_TYPE) &&
entry.file.user_info.fdCreator ==
cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
HFSPLUS_SB(sb)->hidden_dir &&
(entry.file.create_date ==
HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
create_date ||
entry.file.create_date ==
HFSPLUS_I(d_inode(sb->s_root))->
create_date)) {
struct qstr str;
char name[32];
if (dentry->d_fsdata) {
/*
* We found a link pointing to another link,
* so ignore it and treat it as regular file.
*/
cnid = (unsigned long)dentry->d_fsdata;
linkid = 0;
} else {
dentry->d_fsdata = (void *)(unsigned long)cnid;
linkid =
be32_to_cpu(entry.file.permissions.dev);
str.len = sprintf(name, "iNode%d", linkid);
str.name = name;
err = hfsplus_cat_build_key(sb, fd.search_key,
HFSPLUS_SB(sb)->hidden_dir->i_ino,
&str);
if (unlikely(err < 0))
goto fail;
goto again;
}
} else if (!dentry->d_fsdata)
dentry->d_fsdata = (void *)(unsigned long)cnid;
} else {
pr_err("invalid catalog entry type in lookup\n");
err = -EIO;
goto fail;
}
hfs_find_exit(&fd);
inode = hfsplus_iget(dir->i_sb, cnid);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (S_ISREG(inode->i_mode))
HFSPLUS_I(inode)->linkid = linkid;
out:
return d_splice_alias(inode, dentry);
fail:
hfs_find_exit(&fd);
return ERR_PTR(err);
}
static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
int len, err;
char *strbuf;
hfsplus_cat_entry entry;
struct hfs_find_data fd;
struct hfsplus_readdir_data *rd;
u16 type;
if (file->f_pos >= inode->i_size)
return 0;
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_MAX_STRLEN + 1, GFP_KERNEL);
if (!strbuf) {
err = -ENOMEM;
goto out;
}
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, inode->i_ino);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
if (ctx->pos == 0) {
/* This is completely artificial... */
if (!dir_emit_dot(file, ctx))
goto out;
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
err = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
fd.entrylength);
if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
pr_err("bad catalog folder thread\n");
err = -EIO;
goto out;
}
if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
pr_err("truncated catalog thread\n");
err = -EIO;
goto out;
}
if (!dir_emit(ctx, "..", 2,
be32_to_cpu(entry.thread.parentID), DT_DIR))
goto out;
ctx->pos = 2;
}
if (ctx->pos >= inode->i_size)
goto out;
err = hfs_brec_goto(&fd, ctx->pos - 1);
if (err)
goto out;
for (;;) {
if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
pr_err("walked past end of dir\n");
err = -EIO;
goto out;
}
if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
err = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
fd.entrylength);
type = be16_to_cpu(entry.type);
len = NLS_MAX_CHARSET_SIZE * HFSPLUS_MAX_STRLEN;
err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
if (err)
goto out;
if (type == HFSPLUS_FOLDER) {
if (fd.entrylength <
sizeof(struct hfsplus_cat_folder)) {
pr_err("small dir entry\n");
err = -EIO;
goto out;
}
if (HFSPLUS_SB(sb)->hidden_dir &&
HFSPLUS_SB(sb)->hidden_dir->i_ino ==
be32_to_cpu(entry.folder.id))
goto next;
if (!dir_emit(ctx, strbuf, len,
be32_to_cpu(entry.folder.id), DT_DIR))
break;
} else if (type == HFSPLUS_FILE) {
u16 mode;
unsigned type = DT_UNKNOWN;
if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("small file entry\n");
err = -EIO;
goto out;
}
mode = be16_to_cpu(entry.file.permissions.mode);
if (S_ISREG(mode))
type = DT_REG;
else if (S_ISLNK(mode))
type = DT_LNK;
else if (S_ISFIFO(mode))
type = DT_FIFO;
else if (S_ISCHR(mode))
type = DT_CHR;
else if (S_ISBLK(mode))
type = DT_BLK;
else if (S_ISSOCK(mode))
type = DT_SOCK;
if (!dir_emit(ctx, strbuf, len,
be32_to_cpu(entry.file.id), type))
break;
} else {
pr_err("bad catalog entry type\n");
err = -EIO;
goto out;
}
next:
ctx->pos++;
if (ctx->pos >= inode->i_size)
goto out;
err = hfs_brec_goto(&fd, 1);
if (err)
goto out;
}
rd = file->private_data;
if (!rd) {
rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
if (!rd) {
err = -ENOMEM;
goto out;
}
file->private_data = rd;
rd->file = file;
spin_lock(&HFSPLUS_I(inode)->open_dir_lock);
list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
spin_unlock(&HFSPLUS_I(inode)->open_dir_lock);
}
/*
* Can be done after the list insertion; exclusion with
* hfsplus_delete_cat() is provided by directory lock.
*/
memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
out:
kfree(strbuf);
hfs_find_exit(&fd);
return err;
}
static int hfsplus_dir_release(struct inode *inode, struct file *file)
{
struct hfsplus_readdir_data *rd = file->private_data;
if (rd) {
spin_lock(&HFSPLUS_I(inode)->open_dir_lock);
list_del(&rd->list);
spin_unlock(&HFSPLUS_I(inode)->open_dir_lock);
kfree(rd);
}
return 0;
}
static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
struct dentry *dst_dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb);
struct inode *inode = d_inode(src_dentry);
struct inode *src_dir = d_inode(src_dentry->d_parent);
struct qstr str;
char name[32];
u32 cnid, id;
int res;
if (HFSPLUS_IS_RSRC(inode))
return -EPERM;
if (!S_ISREG(inode->i_mode))
return -EPERM;
mutex_lock(&sbi->vh_mutex);
if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) {
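		/*
		 * First hard link to this file: move the original into the
		 * hidden directory under an "iNode<id>" name and create a
		 * link record in its place.
		 */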
for (;;) {
			get_random_bytes(&id, sizeof(id));
id &= 0x3fffffff;
str.name = name;
str.len = sprintf(name, "iNode%d", id);
res = hfsplus_rename_cat(inode->i_ino,
src_dir, &src_dentry->d_name,
sbi->hidden_dir, &str);
if (!res)
break;
if (res != -EEXIST)
goto out;
}
HFSPLUS_I(inode)->linkid = id;
cnid = sbi->next_cnid++;
src_dentry->d_fsdata = (void *)(unsigned long)cnid;
res = hfsplus_create_cat(cnid, src_dir,
&src_dentry->d_name, inode);
if (res)
/* panic? */
goto out;
sbi->file_count++;
}
cnid = sbi->next_cnid++;
res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
if (res)
goto out;
inc_nlink(inode);
hfsplus_instantiate(dst_dentry, inode, cnid);
ihold(inode);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
sbi->file_count++;
hfsplus_mark_mdb_dirty(dst_dir->i_sb);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode = d_inode(dentry);
struct qstr str;
char name[32];
u32 cnid;
int res;
if (HFSPLUS_IS_RSRC(inode))
return -EPERM;
mutex_lock(&sbi->vh_mutex);
cnid = (u32)(unsigned long)dentry->d_fsdata;
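	/*
	 * If the victim is still open, rename it into the hidden
	 * directory instead of deleting it; the space is reclaimed on
	 * the last close.
	 */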
if (inode->i_ino == cnid &&
atomic_read(&HFSPLUS_I(inode)->opencnt)) {
str.name = name;
str.len = sprintf(name, "temp%lu", inode->i_ino);
res = hfsplus_rename_cat(inode->i_ino,
dir, &dentry->d_name,
sbi->hidden_dir, &str);
if (!res) {
inode->i_flags |= S_DEAD;
drop_nlink(inode);
}
goto out;
}
res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
if (res)
goto out;
if (inode->i_nlink > 0)
drop_nlink(inode);
if (inode->i_ino == cnid)
clear_nlink(inode);
if (!inode->i_nlink) {
if (inode->i_ino != cnid) {
sbi->file_count--;
if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) {
res = hfsplus_delete_cat(inode->i_ino,
sbi->hidden_dir,
NULL);
if (!res)
hfsplus_delete_inode(inode);
} else
inode->i_flags |= S_DEAD;
} else
hfsplus_delete_inode(inode);
} else
sbi->file_count--;
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode = d_inode(dentry);
int res;
if (inode->i_size != 2)
return -ENOTEMPTY;
mutex_lock(&sbi->vh_mutex);
res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
if (res)
goto out;
clear_nlink(inode);
inode_set_ctime_current(inode);
hfsplus_delete_inode(inode);
mark_inode_dirty(inode);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
int res = -ENOMEM;
mutex_lock(&sbi->vh_mutex);
inode = hfsplus_new_inode(dir->i_sb, dir, S_IFLNK | S_IRWXUGO);
if (!inode)
goto out;
res = page_symlink(inode, symname, strlen(symname) + 1);
if (res)
goto out_err;
res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
if (res)
goto out_err;
res = hfsplus_init_security(inode, dir, &dentry->d_name);
if (res == -EOPNOTSUPP)
res = 0; /* Operation is not supported. */
else if (res) {
/* Try to delete anyway without error analysis. */
hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
goto out_err;
}
hfsplus_instantiate(dentry, inode, inode->i_ino);
mark_inode_dirty(inode);
goto out;
out_err:
clear_nlink(inode);
hfsplus_delete_inode(inode);
iput(inode);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
int res = -ENOMEM;
mutex_lock(&sbi->vh_mutex);
inode = hfsplus_new_inode(dir->i_sb, dir, mode);
if (!inode)
goto out;
if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode))
init_special_inode(inode, mode, rdev);
res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
if (res)
goto failed_mknod;
res = hfsplus_init_security(inode, dir, &dentry->d_name);
if (res == -EOPNOTSUPP)
res = 0; /* Operation is not supported. */
else if (res) {
/* Try to delete anyway without error analysis. */
hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
goto failed_mknod;
}
hfsplus_instantiate(dentry, inode, inode->i_ino);
mark_inode_dirty(inode);
goto out;
failed_mknod:
clear_nlink(inode);
hfsplus_delete_inode(inode);
iput(inode);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
}
static int hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0);
}
static int hfsplus_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
int res;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
/* Unlink destination if it already exists */
if (d_really_is_positive(new_dentry)) {
if (d_is_dir(new_dentry))
res = hfsplus_rmdir(new_dir, new_dentry);
else
res = hfsplus_unlink(new_dir, new_dentry);
if (res)
return res;
}
res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
if (!res)
new_dentry->d_fsdata = old_dentry->d_fsdata;
return res;
}
const struct inode_operations hfsplus_dir_inode_operations = {
.lookup = hfsplus_lookup,
.create = hfsplus_create,
.link = hfsplus_link,
.unlink = hfsplus_unlink,
.mkdir = hfsplus_mkdir,
.rmdir = hfsplus_rmdir,
.symlink = hfsplus_symlink,
.mknod = hfsplus_mknod,
.rename = hfsplus_rename,
.getattr = hfsplus_getattr,
.listxattr = hfsplus_listxattr,
.fileattr_get = hfsplus_fileattr_get,
.fileattr_set = hfsplus_fileattr_set,
};
const struct file_operations hfsplus_dir_operations = {
.fsync = hfsplus_file_fsync,
.read = generic_read_dir,
.iterate_shared = hfsplus_readdir,
.unlocked_ioctl = hfsplus_ioctl,
.llseek = generic_file_llseek,
.release = hfsplus_dir_release,
};
| linux-master | fs/hfsplus/dir.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/wrapper.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handling of HFS wrappers around HFS+ volumes
*/
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
struct hfsplus_wd {
u32 ablk_size;
u16 ablk_start;
u16 embed_start;
u16 embed_count;
};
/**
* hfsplus_submit_bio - Perform block I/O
* @sb: super block of volume for I/O
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
 * @opf: request operation (direction of I/O) and flags
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
* @data will return a pointer to the start of the requested sector,
* which may not be the same location as @buf.
*
* If @sector is not aligned to the bdev logical block size it will
* be rounded down. For writes this means that @buf should contain data
* that starts at the rounded-down address. As long as the data was
* read using hfsplus_submit_bio() and the same buffer is used things
* will work correctly.
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, blk_opf_t opf)
{
const enum req_op op = opf & REQ_OP_MASK;
struct bio *bio;
int ret = 0;
u64 io_size;
loff_t start;
int offset;
/*
* Align sector to hardware sector size and find offset. We
* assume that io_size is a power of two, which _should_
* be true.
*/
io_size = hfsplus_min_io_size(sb);
start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
bio = bio_alloc(sb->s_bdev, 1, opf, GFP_NOIO);
bio->bi_iter.bi_sector = sector;
if (op != REQ_OP_WRITE && data)
*data = (u8 *)buf + offset;
while (io_size > 0) {
unsigned int page_offset = offset_in_page(buf);
unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
io_size);
ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
if (ret != len) {
ret = -EIO;
goto out;
}
io_size -= len;
buf = (u8 *)buf + len;
}
ret = submit_bio_wait(bio);
out:
bio_put(bio);
return ret < 0 ? ret : 0;
}
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
{
u32 extent;
u16 attrib;
__be16 sig;
sig = *(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG);
if (sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIG) &&
sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
return 0;
attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB));
if (!(attrib & HFSP_WRAP_ATTRIB_SLOCK) ||
!(attrib & HFSP_WRAP_ATTRIB_SPARED))
return 0;
wd->ablk_size =
be32_to_cpu(*(__be32 *)(bufptr + HFSP_WRAPOFF_ABLKSIZE));
if (wd->ablk_size < HFSPLUS_SECTOR_SIZE)
return 0;
if (wd->ablk_size % HFSPLUS_SECTOR_SIZE)
return 0;
wd->ablk_start =
be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT);
wd->embed_start = (extent >> 16) & 0xFFFF;
wd->embed_count = extent & 0xFFFF;
return 1;
}
static int hfsplus_get_last_session(struct super_block *sb,
sector_t *start, sector_t *size)
{
struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk);
/* default values */
*start = 0;
*size = bdev_nr_sectors(sb->s_bdev);
if (HFSPLUS_SB(sb)->session >= 0) {
struct cdrom_tocentry te;
if (!cdi)
return -EINVAL;
te.cdte_track = HFSPLUS_SB(sb)->session;
te.cdte_format = CDROM_LBA;
if (cdrom_read_tocentry(cdi, &te) ||
(te.cdte_ctrl & CDROM_DATA_TRACK) != 4) {
pr_err("invalid session number or type of track\n");
return -EINVAL;
}
*start = (sector_t)te.cdte_addr.lba << 2;
} else if (cdi) {
struct cdrom_multisession ms_info;
ms_info.addr_format = CDROM_LBA;
if (cdrom_multisession(cdi, &ms_info) == 0 && ms_info.xa_flag)
*start = (sector_t)ms_info.addr.lba << 2;
}
return 0;
}
/* Find the volume header and fill in some minimum bits in superblock */
/* Takes in super block, returns 0 on success */
int hfsplus_read_wrapper(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_wd wd;
sector_t part_start, part_size;
u32 blocksize;
int error = 0;
error = -EINVAL;
blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
if (!blocksize)
goto out;
if (hfsplus_get_last_session(sb, &part_start, &part_size))
goto out;
error = -ENOMEM;
sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
if (!sbi->s_vhdr_buf)
goto out;
sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
if (!sbi->s_backup_vhdr_buf)
goto out_free_vhdr;
reread:
error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
REQ_OP_READ);
if (error)
goto out_free_backup_vhdr;
error = -EINVAL;
switch (sbi->s_vhdr->signature) {
case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
fallthrough;
case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
break;
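	/*
	 * An HFS wrapper volume: the HFS+ volume is embedded inside an
	 * HFS partition. Compute the embedded offset and size from the
	 * MDB, then retry the volume header read at the new location.
	 */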
case cpu_to_be16(HFSP_WRAP_MAGIC):
if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
goto out_free_backup_vhdr;
wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
part_start += (sector_t)wd.ablk_start +
(sector_t)wd.embed_start * wd.ablk_size;
part_size = (sector_t)wd.embed_count * wd.ablk_size;
goto reread;
default:
/*
* Check for a partition block.
*
* (should do this only for cdrom/loop though)
*/
if (hfs_part_find(sb, &part_start, &part_size))
goto out_free_backup_vhdr;
goto reread;
}
error = hfsplus_submit_bio(sb, part_start + part_size - 2,
sbi->s_backup_vhdr_buf,
(void **)&sbi->s_backup_vhdr, REQ_OP_READ);
if (error)
goto out_free_backup_vhdr;
error = -EINVAL;
if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
pr_warn("invalid secondary volume header\n");
goto out_free_backup_vhdr;
}
blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);
	/*
	 * Block size must be at least as large as a sector and a power of 2.
	 */
if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
goto out_free_backup_vhdr;
sbi->alloc_blksz = blocksize;
sbi->alloc_blksz_shift = ilog2(blocksize);
blocksize = min_t(u32, sbi->alloc_blksz, PAGE_SIZE);
/*
* Align block size to block offset.
*/
while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
blocksize >>= 1;
if (sb_set_blocksize(sb, blocksize) != blocksize) {
pr_err("unable to set blocksize to %u!\n", blocksize);
goto out_free_backup_vhdr;
}
sbi->blockoffset =
part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
sbi->part_start = part_start;
sbi->sect_count = part_size;
sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
return 0;
out_free_backup_vhdr:
kfree(sbi->s_backup_vhdr_buf);
out_free_vhdr:
kfree(sbi->s_vhdr_buf);
out:
return error;
}
| linux-master | fs/hfsplus/wrapper.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/inode.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Inode handling routines
*/
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/uio.h>
#include <linux/fileattr.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_folio(folio, hfsplus_get_block);
}
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, hfsplus_get_block, wbc);
}
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
if (to > inode->i_size) {
truncate_pagecache(inode, inode->i_size);
hfsplus_file_truncate(inode);
}
}
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
hfsplus_write_failed(mapping, pos + len);
return ret;
}
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, hfsplus_get_block);
}
static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
int i;
bool res = true;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
tree = HFSPLUS_SB(sb)->ext_tree;
break;
case HFSPLUS_CAT_CNID:
tree = HFSPLUS_SB(sb)->cat_tree;
break;
case HFSPLUS_ATTR_CNID:
tree = HFSPLUS_SB(sb)->attr_tree;
break;
default:
BUG();
return false;
}
if (!tree)
return false;
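/*
* A page may cover part of a single large btree node or hold several
* small ones; it can only be released once no node backed by it is
* still referenced.
*/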
if (tree->node_size >= PAGE_SIZE) {
nidx = folio->index >>
(tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (node && atomic_read(&node->refcnt))
res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
nidx = folio->index <<
(PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
node = hfs_bnode_findhash(tree, nidx++);
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
res = false;
break;
}
hfs_bnode_unhash(node);
hfs_bnode_free(node);
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
if (end > isize)
hfsplus_write_failed(mapping, end);
}
return ret;
}
static int hfsplus_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, hfsplus_get_block);
}
const struct address_space_operations hfsplus_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfsplus_read_folio,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
.release_folio = hfsplus_release_folio,
};
const struct address_space_operations hfsplus_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfsplus_read_folio,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
.direct_IO = hfsplus_direct_IO,
.writepages = hfsplus_writepages,
.migrate_folio = buffer_migrate_folio,
};
const struct dentry_operations hfsplus_dentry_operations = {
.d_hash = hfsplus_hash_dentry,
.d_compare = hfsplus_compare_dentry,
};
static void hfsplus_get_perms(struct inode *inode,
struct hfsplus_perm *perms, int dir)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
u16 mode;
mode = be16_to_cpu(perms->mode);
i_uid_write(inode, be32_to_cpu(perms->owner));
if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
inode->i_uid = sbi->uid;
i_gid_write(inode, be32_to_cpu(perms->group));
if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
inode->i_gid = sbi->gid;
if (dir) {
mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
mode |= S_IFDIR;
} else if (!mode)
mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
inode->i_mode = mode;
HFSPLUS_I(inode)->userflags = perms->userflags;
if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
if (perms->rootflags & HFSPLUS_FLG_APPEND)
inode->i_flags |= S_APPEND;
else
inode->i_flags &= ~S_APPEND;
}
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
if (HFSPLUS_IS_RSRC(inode))
inode = HFSPLUS_I(inode)->rsrc_inode;
if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EOVERFLOW;
atomic_inc(&HFSPLUS_I(inode)->opencnt);
return 0;
}
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
struct super_block *sb = inode->i_sb;
if (HFSPLUS_IS_RSRC(inode))
inode = HFSPLUS_I(inode)->rsrc_inode;
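/*
* Last close: trim blocks allocated past EOF and, if the file was
* unlinked while open (S_DEAD), remove it from the catalog for good.
*/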
if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
inode_lock(inode);
hfsplus_file_truncate(inode);
if (inode->i_flags & S_DEAD) {
hfsplus_delete_cat(inode->i_ino,
HFSPLUS_SB(sb)->hidden_dir, NULL);
hfsplus_delete_inode(inode);
}
inode_unlock(inode);
}
return 0;
}
static int hfsplus_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
return error;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
if (attr->ia_size > inode->i_size) {
error = generic_cont_expand_simple(inode,
attr->ia_size);
if (error)
return error;
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
inode->i_mtime = inode_set_ctime_current(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime = hfsp_mt2ut(hip->create_date);
}
if (inode->i_flags & S_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (inode->i_flags & S_IMMUTABLE)
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (hip->userflags & HFSPLUS_FLG_NODUMP)
stat->attributes |= STATX_ATTR_NODUMP;
stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP;
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file->f_mapping->host;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
int error = 0, error2;
error = file_write_and_wait_range(file, start, end);
if (error)
return error;
inode_lock(inode);
/*
* Sync inode metadata into the catalog and extent trees.
*/
sync_inode_metadata(inode, 1);
/*
* And explicitly write out the btrees.
*/
if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
error2 =
filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
if (!error)
error = error2;
}
if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
if (sbi->attr_tree) {
error2 =
filemap_write_and_wait(
sbi->attr_tree->inode->i_mapping);
if (!error)
error = error2;
} else {
pr_err("sync non-existent attributes tree\n");
}
}
if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
if (!error)
error = error2;
}
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(inode->i_sb->s_bdev);
inode_unlock(inode);
return error;
}
static const struct inode_operations hfsplus_file_inode_operations = {
.setattr = hfsplus_setattr,
.getattr = hfsplus_getattr,
.listxattr = hfsplus_listxattr,
.fileattr_get = hfsplus_fileattr_get,
.fileattr_set = hfsplus_fileattr_set,
};
static const struct file_operations hfsplus_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = filemap_splice_read,
.fsync = hfsplus_file_fsync,
.open = hfsplus_file_open,
.release = hfsplus_file_release,
.unlocked_ioctl = hfsplus_ioctl,
};
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct inode *inode = new_inode(sb);
struct hfsplus_inode_info *hip;
if (!inode)
return NULL;
inode->i_ino = sbi->next_cnid++;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
set_nlink(inode, 1);
inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
hip = HFSPLUS_I(inode);
INIT_LIST_HEAD(&hip->open_dir_list);
spin_lock_init(&hip->open_dir_lock);
mutex_init(&hip->extents_lock);
atomic_set(&hip->opencnt, 0);
hip->extent_state = 0;
hip->flags = 0;
hip->userflags = 0;
hip->subfolders = 0;
memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->alloc_blocks = 0;
hip->first_blocks = 0;
hip->cached_start = 0;
hip->cached_blocks = 0;
hip->phys_size = 0;
hip->fs_blocks = 0;
hip->rsrc_inode = NULL;
if (S_ISDIR(inode->i_mode)) {
inode->i_size = 2;
sbi->folder_count++;
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (S_ISREG(inode->i_mode)) {
sbi->file_count++;
inode->i_op = &hfsplus_file_inode_operations;
inode->i_fop = &hfsplus_file_operations;
inode->i_mapping->a_ops = &hfsplus_aops;
hip->clump_blocks = sbi->data_clump_blocks;
} else if (S_ISLNK(inode->i_mode)) {
sbi->file_count++;
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &hfsplus_aops;
hip->clump_blocks = 1;
} else
sbi->file_count++;
insert_inode_hash(inode);
mark_inode_dirty(inode);
hfsplus_mark_mdb_dirty(sb);
return inode;
}
void hfsplus_delete_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (S_ISDIR(inode->i_mode)) {
HFSPLUS_SB(sb)->folder_count--;
hfsplus_mark_mdb_dirty(sb);
return;
}
HFSPLUS_SB(sb)->file_count--;
if (S_ISREG(inode->i_mode)) {
if (!inode->i_nlink) {
inode->i_size = 0;
hfsplus_file_truncate(inode);
}
} else if (S_ISLNK(inode->i_mode)) {
inode->i_size = 0;
hfsplus_file_truncate(inode);
}
hfsplus_mark_mdb_dirty(sb);
}
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
struct super_block *sb = inode->i_sb;
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 count;
int i;
memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
for (count = 0, i = 0; i < 8; i++)
count += be32_to_cpu(fork->extents[i].block_count);
hip->first_blocks = count;
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->cached_start = 0;
hip->cached_blocks = 0;
hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
hip->fs_blocks =
(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
hip->clump_blocks =
be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
if (!hip->clump_blocks) {
hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
sbi->rsrc_clump_blocks :
sbi->data_clump_blocks;
}
}
void hfsplus_inode_write_fork(struct inode *inode,
struct hfsplus_fork_raw *fork)
{
memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
sizeof(hfsplus_extent_rec));
fork->total_size = cpu_to_be64(inode->i_size);
fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
hfsplus_cat_entry entry;
int res = 0;
u16 type;
type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
HFSPLUS_I(inode)->linkid = 0;
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
hfsplus_get_perms(inode, &folder->permissions, 1);
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
inode->i_atime = hfsp_mt2ut(folder->access_date);
inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(folder->attribute_mod_date));
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
HFSPLUS_I(inode)->subfolders =
be32_to_cpu(folder->subfolders);
}
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (type == HFSPLUS_FILE) {
struct hfsplus_cat_file *file = &entry.file;
if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
&file->rsrc_fork : &file->data_fork);
hfsplus_get_perms(inode, &file->permissions, 0);
set_nlink(inode, 1);
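/*
* HFS+ has no dedicated link count field; for regular files the
* on-disk permissions.dev field holds the link count instead
* (see hfsplus_cat_set_perms()).
*/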
if (S_ISREG(inode->i_mode)) {
if (file->permissions.dev)
set_nlink(inode,
be32_to_cpu(file->permissions.dev));
inode->i_op = &hfsplus_file_inode_operations;
inode->i_fop = &hfsplus_file_operations;
inode->i_mapping->a_ops = &hfsplus_aops;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &hfsplus_aops;
} else {
init_special_inode(inode, inode->i_mode,
be32_to_cpu(file->permissions.dev));
}
inode->i_atime = hfsp_mt2ut(file->access_date);
inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(file->attribute_mod_date));
HFSPLUS_I(inode)->create_date = file->create_date;
} else {
pr_err("bad catalog entry used to create inode\n");
res = -EIO;
}
out:
return res;
}
int hfsplus_cat_write_inode(struct inode *inode)
{
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
int res = 0;
if (HFSPLUS_IS_RSRC(inode))
main_inode = HFSPLUS_I(inode)->rsrc_inode;
if (!main_inode->i_nlink)
return 0;
if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
/* panic? */
return -EIO;
if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
/* panic? */
goto out;
if (S_ISDIR(main_inode->i_mode)) {
struct hfsplus_cat_folder *folder = &entry.folder;
if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
hfsplus_cat_set_perms(inode, &folder->permissions);
folder->access_date = hfsp_ut2mt(inode->i_atime);
folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
folder->valence = cpu_to_be32(inode->i_size - 2);
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
folder->subfolders =
cpu_to_be32(HFSPLUS_I(inode)->subfolders);
}
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
} else if (HFSPLUS_IS_RSRC(inode)) {
struct hfsplus_cat_file *file = &entry.file;
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->rsrc_fork);
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
} else {
struct hfsplus_cat_file *file = &entry.file;
if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
hfsplus_cat_set_perms(inode, &file->permissions);
if (HFSPLUS_FLG_IMMUTABLE &
(file->permissions.rootflags |
file->permissions.userflags))
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
else
file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
file->access_date = hfsp_ut2mt(inode->i_atime);
file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
}
set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
hfs_find_exit(&fd);
return res;
}
int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
unsigned int flags = 0;
if (inode->i_flags & S_IMMUTABLE)
flags |= FS_IMMUTABLE_FL;
if (inode->i_flags & S_APPEND)
flags |= FS_APPEND_FL;
if (hip->userflags & HFSPLUS_FLG_NODUMP)
flags |= FS_NODUMP_FL;
fileattr_fill_flags(fa, flags);
return 0;
}
int hfsplus_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
unsigned int new_fl = 0;
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
/* don't silently ignore unsupported ext2 flags */
if (fa->flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL))
return -EOPNOTSUPP;
if (fa->flags & FS_IMMUTABLE_FL)
new_fl |= S_IMMUTABLE;
if (fa->flags & FS_APPEND_FL)
new_fl |= S_APPEND;
inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);
if (fa->flags & FS_NODUMP_FL)
hip->userflags |= HFSPLUS_FLG_NODUMP;
else
hip->userflags &= ~HFSPLUS_FLG_NODUMP;
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
}
| linux-master | fs/hfsplus/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/ioctl.c
*
* Copyright (C) 2003
* Ethan Benson <[email protected]>
* partially derived from linux/fs/ext2/ioctl.c
* Copyright (C) 1993, 1994, 1995
* Remy Card ([email protected])
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* hfsplus ioctls
*/
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "hfsplus_fs.h"
/*
* "Blessing" an HFS+ filesystem writes metadata to the superblock informing
* the platform firmware which file to boot from
*/
static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
struct hfsplus_vh *vh = sbi->s_vhdr;
struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
u32 cnid = (unsigned long)dentry->d_fsdata;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&sbi->vh_mutex);
/* Directory containing the bootable system */
vh->finder_info[0] = bvh->finder_info[0] =
cpu_to_be32(parent_ino(dentry));
/*
* Bootloader. Just using the inode here breaks in the case of
* hard links - the firmware wants the ID of the hard link file,
* but the inode points at the indirect inode
*/
vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
/* Per spec, the OS X system folder - same as finder_info[0] here */
vh->finder_info[5] = bvh->finder_info[5] =
cpu_to_be32(parent_ino(dentry));
mutex_unlock(&sbi->vh_mutex);
return 0;
}
long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
switch (cmd) {
case HFSPLUS_IOC_BLESS:
return hfsplus_ioctl_bless(file, argp);
default:
return -ENOTTY;
}
}
| linux-master | fs/hfsplus/ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/xattr_security.c
*
* Vyacheslav Dubeyko <[email protected]>
*
* Handler for storing security labels as extended attributes.
*/
#include <linux/security.h>
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "xattr.h"
static int hfsplus_security_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return hfsplus_getxattr(inode, name, buffer, size,
XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
}
static int hfsplus_security_setxattr(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
}
static int hfsplus_initxattrs(struct inode *inode,
const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
char *xattr_name;
int err = 0;
xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
GFP_KERNEL);
if (!xattr_name)
return -ENOMEM;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
if (!strcmp(xattr->name, ""))
continue;
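/* assemble the on-disk name: "security." prefix + label name + NUL */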
strcpy(xattr_name, XATTR_SECURITY_PREFIX);
strcpy(xattr_name +
XATTR_SECURITY_PREFIX_LEN, xattr->name);
memset(xattr_name +
XATTR_SECURITY_PREFIX_LEN + strlen(xattr->name), 0, 1);
err = __hfsplus_setxattr(inode, xattr_name,
xattr->value, xattr->value_len, 0);
if (err)
break;
}
kfree(xattr_name);
return err;
}
int hfsplus_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
return security_inode_init_security(inode, dir, qstr,
&hfsplus_initxattrs, NULL);
}
const struct xattr_handler hfsplus_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = hfsplus_security_getxattr,
.set = hfsplus_security_setxattr,
};
| linux-master | fs/hfsplus/xattr_security.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/brec.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handle individual btree records
*/
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd);
static int hfs_brec_update_parent(struct hfs_find_data *fd);
static int hfs_btree_inc_height(struct hfs_btree *);
/* Get the length and offset of the given record in the given node */
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off)
{
__be16 retval[2];
u16 dataoff;
dataoff = node->tree->node_size - (rec + 2) * 2;
hfs_bnode_read(node, retval, dataoff, 4);
*off = be16_to_cpu(retval[1]);
return be16_to_cpu(retval[0]) - *off;
}
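/*
* Node layout: record data grows upward from offset 14 (just past the
* node descriptor) while a table of 16-bit record offsets grows down
* from the end of the node; the entry at node_size - (i + 1) * 2 gives
* the start of record i, with one extra entry marking the free space.
*/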
/* Get the length of the key from a keyed record */
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
{
u16 retval, recoff;
if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF)
return 0;
if ((node->type == HFS_NODE_INDEX) &&
!(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&
(node->tree->cnid != HFSPLUS_ATTR_CNID)) {
retval = node->tree->max_key_len + 2;
} else {
recoff = hfs_bnode_read_u16(node,
node->tree->node_size - (rec + 1) * 2);
if (!recoff)
return 0;
if (recoff > node->tree->node_size - 2) {
pr_err("recoff %d too large\n", recoff);
return 0;
}
retval = hfs_bnode_read_u16(node, recoff) + 2;
if (retval > node->tree->max_key_len + 2) {
pr_err("keylen %d too large\n",
retval);
retval = 0;
}
}
return retval;
}
int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node;
int size, key_len, rec;
int data_off, end_off;
int idx_rec_off, data_rec_off, end_rec_off;
__be32 cnid;
tree = fd->tree;
if (!fd->bnode) {
if (!tree->root)
hfs_btree_inc_height(tree);
node = hfs_bnode_find(tree, tree->leaf_head);
if (IS_ERR(node))
return PTR_ERR(node);
fd->bnode = node;
fd->record = -1;
}
new_node = NULL;
key_len = be16_to_cpu(fd->search_key->key_len) + 2;
again:
/* new record idx and complete record size */
rec = fd->record + 1;
size = key_len + entry_len;
node = fd->bnode;
hfs_bnode_dump(node);
/* get last offset */
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
end_off = hfs_bnode_read_u16(node, end_rec_off);
end_rec_off -= 2;
hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
rec, size, end_off, end_rec_off);
if (size > end_rec_off - end_off) {
if (new_node)
panic("not enough room!\n");
new_node = hfs_bnode_split(fd);
if (IS_ERR(new_node))
return PTR_ERR(new_node);
goto again;
}
if (node->type == HFS_NODE_LEAF) {
tree->leaf_count++;
mark_inode_dirty(tree->inode);
}
node->num_recs++;
/* write new last offset */
hfs_bnode_write_u16(node,
offsetof(struct hfs_bnode_desc, num_recs),
node->num_recs);
hfs_bnode_write_u16(node, end_rec_off, end_off + size);
data_off = end_off;
data_rec_off = end_rec_off + 2;
idx_rec_off = tree->node_size - (rec + 1) * 2;
if (idx_rec_off == data_rec_off)
goto skip;
/* move all following entries */
do {
data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
hfs_bnode_write_u16(node, data_rec_off, data_off + size);
data_rec_off += 2;
} while (data_rec_off < idx_rec_off);
/* move data away */
hfs_bnode_move(node, data_off + size, data_off,
end_off - data_off);
skip:
hfs_bnode_write(node, fd->search_key, data_off, key_len);
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
hfs_bnode_dump(node);
/*
* update parent key if we inserted a key
* at the start of the node and it is not the new node
*/
if (!rec && new_node != node) {
hfs_bnode_read_key(node, fd->search_key, data_off + size);
hfs_brec_update_parent(fd);
}
if (new_node) {
hfs_bnode_put(fd->bnode);
if (!new_node->parent) {
hfs_btree_inc_height(tree);
new_node->parent = tree->root;
}
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index data entry */
cnid = cpu_to_be32(new_node->this);
entry = &cnid;
entry_len = sizeof(cnid);
/* get index key */
hfs_bnode_read_key(new_node, fd->search_key, 14);
__hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
hfs_bnode_put(new_node);
new_node = NULL;
if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
(tree->cnid == HFSPLUS_ATTR_CNID))
key_len = be16_to_cpu(fd->search_key->key_len) + 2;
else {
fd->search_key->key_len =
cpu_to_be16(tree->max_key_len);
key_len = tree->max_key_len + 2;
}
goto again;
}
return 0;
}
int hfs_brec_remove(struct hfs_find_data *fd)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *parent;
int end_off, rec_off, data_off, size;
tree = fd->tree;
node = fd->bnode;
again:
rec_off = tree->node_size - (fd->record + 2) * 2;
end_off = tree->node_size - (node->num_recs + 1) * 2;
if (node->type == HFS_NODE_LEAF) {
tree->leaf_count--;
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
fd->record, fd->keylength + fd->entrylength);
if (!--node->num_recs) {
hfs_bnode_unlink(node);
if (!node->parent)
return 0;
parent = hfs_bnode_find(tree, node->parent);
if (IS_ERR(parent))
return PTR_ERR(parent);
hfs_bnode_put(node);
node = fd->bnode = parent;
__hfs_brec_find(node, fd, hfs_find_rec_by_key);
goto again;
}
hfs_bnode_write_u16(node,
offsetof(struct hfs_bnode_desc, num_recs),
node->num_recs);
if (rec_off == end_off)
goto skip;
size = fd->keylength + fd->entrylength;
do {
data_off = hfs_bnode_read_u16(node, rec_off);
hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
rec_off -= 2;
} while (rec_off >= end_off);
/* fill hole */
hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
data_off - fd->keyoffset - size);
skip:
hfs_bnode_dump(node);
if (!fd->record)
hfs_brec_update_parent(fd);
return 0;
}
static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node, *next_node;
struct hfs_bnode_desc node_desc;
int num_recs, new_rec_off, new_off, old_rec_off;
int data_start, data_end, size;
tree = fd->tree;
node = fd->bnode;
new_node = hfs_bmap_alloc(tree);
if (IS_ERR(new_node))
return new_node;
hfs_bnode_get(node);
hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
node->this, new_node->this, node->next);
new_node->next = node->next;
new_node->prev = node->this;
new_node->parent = node->parent;
new_node->type = node->type;
new_node->height = node->height;
if (node->next)
next_node = hfs_bnode_find(tree, node->next);
else
next_node = NULL;
if (IS_ERR(next_node)) {
hfs_bnode_put(node);
hfs_bnode_put(new_node);
return next_node;
}
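/*
* Pick the split point: walk the offset table until the records kept
* in the old node fill roughly half of it; the budget allows for the
* 14-byte node descriptor and 2 bytes of offset table per record.
*/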
size = tree->node_size / 2 - node->num_recs * 2 - 14;
old_rec_off = tree->node_size - 4;
num_recs = 1;
for (;;) {
data_start = hfs_bnode_read_u16(node, old_rec_off);
if (data_start > size)
break;
old_rec_off -= 2;
if (++num_recs < node->num_recs)
continue;
/* panic? */
hfs_bnode_put(node);
hfs_bnode_put(new_node);
if (next_node)
hfs_bnode_put(next_node);
return ERR_PTR(-ENOSPC);
}
if (fd->record + 1 < num_recs) {
/* new record is in the lower half,
* so leave some more space there
*/
old_rec_off += 2;
num_recs--;
data_start = hfs_bnode_read_u16(node, old_rec_off);
} else {
hfs_bnode_put(node);
hfs_bnode_get(new_node);
fd->bnode = new_node;
fd->record -= num_recs;
fd->keyoffset -= data_start - 14;
fd->entryoffset -= data_start - 14;
}
new_node->num_recs = node->num_recs - num_recs;
node->num_recs = num_recs;
new_rec_off = tree->node_size - 2;
new_off = 14;
size = data_start - new_off;
num_recs = new_node->num_recs;
data_end = data_start;
while (num_recs) {
hfs_bnode_write_u16(new_node, new_rec_off, new_off);
old_rec_off -= 2;
new_rec_off -= 2;
data_end = hfs_bnode_read_u16(node, old_rec_off);
new_off = data_end - size;
num_recs--;
}
hfs_bnode_write_u16(new_node, new_rec_off, new_off);
hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);
/* update new bnode header */
node_desc.next = cpu_to_be32(new_node->next);
node_desc.prev = cpu_to_be32(new_node->prev);
node_desc.type = new_node->type;
node_desc.height = new_node->height;
node_desc.num_recs = cpu_to_be16(new_node->num_recs);
node_desc.reserved = 0;
hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
/* update previous bnode header */
node->next = new_node->this;
hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
node_desc.next = cpu_to_be32(node->next);
node_desc.num_recs = cpu_to_be16(node->num_recs);
hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
/* update next bnode header */
if (next_node) {
next_node->prev = new_node->this;
hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
node_desc.prev = cpu_to_be32(next_node->prev);
hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc));
hfs_bnode_put(next_node);
} else if (node->this == tree->leaf_tail) {
/* if there is no next node, this might be the new tail */
tree->leaf_tail = new_node->this;
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
hfs_bnode_dump(new_node);
hfs_bnode_put(node);
return new_node;
}
static int hfs_brec_update_parent(struct hfs_find_data *fd)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node, *parent;
int newkeylen, diff;
int rec, rec_off, end_rec_off;
int start_off, end_off;
tree = fd->tree;
node = fd->bnode;
new_node = NULL;
if (!node->parent)
return 0;
again:
parent = hfs_bnode_find(tree, node->parent);
if (IS_ERR(parent))
return PTR_ERR(parent);
__hfs_brec_find(parent, fd, hfs_find_rec_by_key);
if (fd->record < 0)
return -ENOENT;
hfs_bnode_dump(parent);
rec = fd->record;
/* size difference between old and new key */
if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
(tree->cnid == HFSPLUS_ATTR_CNID))
newkeylen = hfs_bnode_read_u16(node, 14) + 2;
else
fd->keylength = newkeylen = tree->max_key_len + 2;
hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
rec, fd->keylength, newkeylen);
rec_off = tree->node_size - (rec + 2) * 2;
end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
diff = newkeylen - fd->keylength;
if (!diff)
goto skip;
if (diff > 0) {
end_off = hfs_bnode_read_u16(parent, end_rec_off);
if (end_rec_off - end_off < diff) {
hfs_dbg(BNODE_MOD, "splitting index node\n");
fd->bnode = parent;
new_node = hfs_bnode_split(fd);
if (IS_ERR(new_node))
return PTR_ERR(new_node);
parent = fd->bnode;
rec = fd->record;
rec_off = tree->node_size - (rec + 2) * 2;
end_rec_off = tree->node_size -
(parent->num_recs + 1) * 2;
}
}
end_off = start_off = hfs_bnode_read_u16(parent, rec_off);
hfs_bnode_write_u16(parent, rec_off, start_off + diff);
start_off -= 4; /* move previous cnid too */
while (rec_off > end_rec_off) {
rec_off -= 2;
end_off = hfs_bnode_read_u16(parent, rec_off);
hfs_bnode_write_u16(parent, rec_off, end_off + diff);
}
hfs_bnode_move(parent, start_off + diff, start_off,
end_off - start_off);
skip:
hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen);
hfs_bnode_dump(parent);
hfs_bnode_put(node);
node = parent;
if (new_node) {
__be32 cnid;
if (!new_node->parent) {
hfs_btree_inc_height(tree);
new_node->parent = tree->root;
}
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
cnid = cpu_to_be32(new_node->this);
__hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
hfs_brec_insert(fd, &cnid, sizeof(cnid));
hfs_bnode_put(fd->bnode);
hfs_bnode_put(new_node);
if (!rec) {
if (new_node == node)
goto out;
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
new_node = NULL;
}
if (!rec && node->parent)
goto again;
out:
fd->bnode = node;
return 0;
}
static int hfs_btree_inc_height(struct hfs_btree *tree)
{
struct hfs_bnode *node, *new_node;
struct hfs_bnode_desc node_desc;
int key_size, rec;
__be32 cnid;
node = NULL;
if (tree->root) {
node = hfs_bnode_find(tree, tree->root);
if (IS_ERR(node))
return PTR_ERR(node);
}
new_node = hfs_bmap_alloc(tree);
if (IS_ERR(new_node)) {
hfs_bnode_put(node);
return PTR_ERR(new_node);
}
tree->root = new_node->this;
if (!tree->depth) {
tree->leaf_head = tree->leaf_tail = new_node->this;
new_node->type = HFS_NODE_LEAF;
new_node->num_recs = 0;
} else {
new_node->type = HFS_NODE_INDEX;
new_node->num_recs = 1;
}
new_node->parent = 0;
new_node->next = 0;
new_node->prev = 0;
new_node->height = ++tree->depth;
node_desc.next = cpu_to_be32(new_node->next);
node_desc.prev = cpu_to_be32(new_node->prev);
node_desc.type = new_node->type;
node_desc.height = new_node->height;
node_desc.num_recs = cpu_to_be16(new_node->num_recs);
node_desc.reserved = 0;
hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));
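/* initial offset table entry: data begins after the 14-byte descriptor */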
rec = tree->node_size - 2;
hfs_bnode_write_u16(new_node, rec, 14);
if (node) {
/* insert old root idx into new root */
node->parent = tree->root;
if (node->type == HFS_NODE_LEAF ||
tree->attributes & HFS_TREE_VARIDXKEYS ||
tree->cnid == HFSPLUS_ATTR_CNID)
key_size = hfs_bnode_read_u16(node, 14) + 2;
else
key_size = tree->max_key_len + 2;
hfs_bnode_copy(new_node, 14, node, 14, key_size);
if (!(tree->attributes & HFS_TREE_VARIDXKEYS) &&
(tree->cnid != HFSPLUS_ATTR_CNID)) {
key_size = tree->max_key_len + 2;
hfs_bnode_write_u16(new_node, 14, tree->max_key_len);
}
cnid = cpu_to_be32(node->this);
hfs_bnode_write(new_node, &cnid, 14 + key_size, 4);
rec -= 2;
hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4);
hfs_bnode_put(node);
}
hfs_bnode_put(new_node);
mark_inode_dirty(tree->inode);
return 0;
}
| linux-master | fs/hfsplus/brec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/catalog.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Handling of catalog records
*/
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1p, k2p;
k1p = k1->cat.parent;
k2p = k2->cat.parent;
if (k1p != k2p)
return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name);
}
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1p, k2p;
k1p = k1->cat.parent;
k2p = k2->cat.parent;
if (k1p != k2p)
return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
}
/* Generates key for catalog file/folders record. */
int hfsplus_cat_build_key(struct super_block *sb,
hfsplus_btree_key *key, u32 parent, const struct qstr *str)
{
int len, err;
key->cat.parent = cpu_to_be32(parent);
err = hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
str->name, str->len);
if (unlikely(err < 0))
return err;
len = be16_to_cpu(key->cat.name.length);
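/* key length: 4-byte parent CNID + 2-byte name length + UTF-16 name */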
key->key_len = cpu_to_be16(6 + 2 * len);
return 0;
}
/* Generates key for catalog thread record. */
void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
hfsplus_btree_key *key, u32 parent)
{
key->cat.parent = cpu_to_be32(parent);
key->cat.name.length = 0;
key->key_len = cpu_to_be16(6);
}
static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
struct hfsplus_unistr *name)
{
int ustrlen;
ustrlen = be16_to_cpu(name->length);
key->cat.parent = cpu_to_be32(parent);
key->cat.name.length = cpu_to_be16(ustrlen);
ustrlen *= 2;
memcpy(key->cat.name.unicode, name->unicode, ustrlen);
key->key_len = cpu_to_be16(6 + ustrlen);
}
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
if (inode->i_flags & S_IMMUTABLE)
perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
else
perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
if (inode->i_flags & S_APPEND)
perms->rootflags |= HFSPLUS_FLG_APPEND;
else
perms->rootflags &= ~HFSPLUS_FLG_APPEND;
perms->userflags = HFSPLUS_I(inode)->userflags;
perms->mode = cpu_to_be16(inode->i_mode);
perms->owner = cpu_to_be32(i_uid_read(inode));
perms->group = cpu_to_be32(i_gid_read(inode));
if (S_ISREG(inode->i_mode))
perms->dev = cpu_to_be32(inode->i_nlink);
else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode))
perms->dev = cpu_to_be32(inode->i_rdev);
else
perms->dev = 0;
}
static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
u32 cnid, struct inode *inode)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
if (S_ISDIR(inode->i_mode)) {
struct hfsplus_cat_folder *folder;
folder = &entry->folder;
memset(folder, 0, sizeof(*folder));
folder->type = cpu_to_be16(HFSPLUS_FOLDER);
if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
folder->id = cpu_to_be32(inode->i_ino);
HFSPLUS_I(inode)->create_date =
folder->create_date =
folder->content_mod_date =
folder->attribute_mod_date =
folder->access_date = hfsp_now2mt();
hfsplus_cat_set_perms(inode, &folder->permissions);
if (inode == sbi->hidden_dir)
/* invisible and namelocked */
folder->user_info.frFlags = cpu_to_be16(0x5000);
return sizeof(*folder);
} else {
struct hfsplus_cat_file *file;
file = &entry->file;
memset(file, 0, sizeof(*file));
file->type = cpu_to_be16(HFSPLUS_FILE);
file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
file->id = cpu_to_be32(cnid);
HFSPLUS_I(inode)->create_date =
file->create_date =
file->content_mod_date =
file->attribute_mod_date =
file->access_date = hfsp_now2mt();
if (cnid == inode->i_ino) {
hfsplus_cat_set_perms(inode, &file->permissions);
if (S_ISLNK(inode->i_mode)) {
file->user_info.fdType =
cpu_to_be32(HFSP_SYMLINK_TYPE);
file->user_info.fdCreator =
cpu_to_be32(HFSP_SYMLINK_CREATOR);
} else {
file->user_info.fdType =
cpu_to_be32(sbi->type);
file->user_info.fdCreator =
cpu_to_be32(sbi->creator);
}
if (HFSPLUS_FLG_IMMUTABLE &
(file->permissions.rootflags |
file->permissions.userflags))
file->flags |=
cpu_to_be16(HFSPLUS_FILE_LOCKED);
} else {
file->user_info.fdType =
cpu_to_be32(HFSP_HARDLINK_TYPE);
file->user_info.fdCreator =
cpu_to_be32(HFSP_HFSPLUS_CREATOR);
file->user_info.fdFlags =
cpu_to_be16(0x100);
file->create_date =
HFSPLUS_I(sbi->hidden_dir)->create_date;
file->permissions.dev =
cpu_to_be32(HFSPLUS_I(inode)->linkid);
}
return sizeof(*file);
}
}
static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
u32 parentid, const struct qstr *str)
{
int err;
entry->type = cpu_to_be16(type);
entry->thread.reserved = 0;
entry->thread.parentID = cpu_to_be32(parentid);
err = hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
str->name, str->len);
if (unlikely(err < 0))
return err;
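/* 10 == type (2) + reserved (2) + parentID (4) + name length field (2) */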
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
}
/* Try to get a catalog entry for given catalog id */
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
int err;
u16 type;
hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
type = be16_to_cpu(tmp.type);
if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
pr_err("found bad thread record in catalog\n");
return -EIO;
}
if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
pr_err("catalog name length corrupted\n");
return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key,
be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
return hfs_brec_find(fd, hfs_find_rec_by_key);
}
static void hfsplus_subfolders_inc(struct inode *dir)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
/*
* Increment subfolder count. Note, the value is only meaningful
* for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
*/
HFSPLUS_I(dir)->subfolders++;
}
}
static void hfsplus_subfolders_dec(struct inode *dir)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
/*
* Decrement subfolder count. Note, the value is only meaningful
* for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
*
* Check for zero. Some subfolders may have been created
* by an implementation ignorant of this counter.
*/
if (HFSPLUS_I(dir)->subfolders)
HFSPLUS_I(dir)->subfolders--;
}
}
int hfsplus_create_cat(u32 cnid, struct inode *dir,
const struct qstr *str, struct inode *inode)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
int entry_size;
int err;
hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
str->name, cnid, inode->i_nlink);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
/*
* Fail early and avoid ENOSPC during the btree operations. We may
* have to split the root node at most once.
*/
err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
if (err)
goto err2;
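/*
* A catalog object is stored as two records: a thread record keyed by
* its CNID and the actual file/folder record keyed by (parent, name).
* Insert the thread record first.
*/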
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
dir->i_ino, str);
if (unlikely(entry_size < 0)) {
err = entry_size;
goto err2;
}
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto err2;
}
err = hfs_brec_insert(&fd, &entry, entry_size);
if (err)
goto err2;
err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
if (unlikely(err))
goto err1;
entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
/* panic? */
if (!err)
err = -EEXIST;
goto err1;
}
err = hfs_brec_insert(&fd, &entry, entry_size);
if (err)
goto err1;
dir->i_size++;
if (S_ISDIR(inode->i_mode))
hfsplus_subfolders_inc(dir);
dir->i_mtime = inode_set_ctime_current(dir);
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
return 0;
err1:
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
hfs_brec_remove(&fd);
err2:
hfs_find_exit(&fd);
return err;
}
int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
struct hfsplus_fork_raw fork;
struct list_head *pos;
int err, off;
u16 type;
hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
/*
* Fail early and avoid ENOSPC during the btree operations. We may
* have to split the root node at most once.
*/
err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
if (err)
goto out;
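/*
* When no name is given, find the thread record by CNID and
* reconstruct the (parent, name) catalog key from the name stored
* there.
*/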
if (!str) {
int len;
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
off = fd.entryoffset +
offsetof(struct hfsplus_cat_thread, nodeName);
fd.search_key->cat.parent = cpu_to_be32(dir->i_ino);
hfs_bnode_read(fd.bnode,
&fd.search_key->cat.name.length, off, 2);
len = be16_to_cpu(fd.search_key->cat.name.length) * 2;
hfs_bnode_read(fd.bnode,
&fd.search_key->cat.name.unicode,
off + 2, len);
fd.search_key->key_len = cpu_to_be16(6 + len);
} else {
err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
if (unlikely(err))
goto out;
}
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (type == HFSPLUS_FILE) {
#if 0
off = fd.entryoffset + offsetof(hfsplus_cat_file, data_fork);
hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA);
#endif
off = fd.entryoffset +
offsetof(struct hfsplus_cat_file, rsrc_fork);
hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
}
/* we only need to take spinlock for exclusion with ->release() */
spin_lock(&HFSPLUS_I(dir)->open_dir_lock);
list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
struct hfsplus_readdir_data *rd =
list_entry(pos, struct hfsplus_readdir_data, list);
if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
rd->file->f_pos--;
}
spin_unlock(&HFSPLUS_I(dir)->open_dir_lock);
err = hfs_brec_remove(&fd);
if (err)
goto out;
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
err = hfs_brec_remove(&fd);
if (err)
goto out;
dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(dir);
dir->i_mtime = inode_set_ctime_current(dir);
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
if (HFSPLUS_SB(sb)->attr_tree)
hfsplus_delete_all_attrs(dir, cnid);
}
out:
hfs_find_exit(&fd);
return err;
}
int hfsplus_rename_cat(u32 cnid,
struct inode *src_dir, const struct qstr *src_name,
struct inode *dst_dir, const struct qstr *dst_name)
{
struct super_block *sb = src_dir->i_sb;
struct hfs_find_data src_fd, dst_fd;
hfsplus_cat_entry entry;
int entry_size, type;
int err;
hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
if (err)
return err;
dst_fd = src_fd;
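/*
* dst_fd shares the search key buffers allocated for src_fd, so only
* src_fd is passed to hfs_find_exit() at the end.
*/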
/*
* Fail early and avoid ENOSPC during the btree operations. We may
* have to split the root node at most twice.
*/
err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
if (err)
goto out;
/* find the old dir entry and read the data */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
if (unlikely(err))
goto out;
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
err = -EIO;
goto out;
}
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
type = be16_to_cpu(entry.type);
/* create new dir entry with the data from the old entry */
err = hfsplus_cat_build_key(sb, dst_fd.search_key,
dst_dir->i_ino, dst_name);
if (unlikely(err))
goto out;
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto out;
}
err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength);
if (err)
goto out;
dst_dir->i_size++;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_inc(dst_dir);
dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
/* finally remove the old entry */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
if (unlikely(err))
goto out;
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
err = hfs_brec_remove(&src_fd);
if (err)
goto out;
src_dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(src_dir);
src_dir->i_mtime = inode_set_ctime_current(src_dir);
/* remove old thread entry */
hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
err = hfs_brec_remove(&src_fd);
if (err)
goto out;
/* create new thread entry */
hfsplus_cat_build_key_with_cnid(sb, dst_fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
dst_dir->i_ino, dst_name);
if (unlikely(entry_size < 0)) {
err = entry_size;
goto out;
}
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto out;
}
err = hfs_brec_insert(&dst_fd, &entry, entry_size);
hfsplus_mark_inode_dirty(dst_dir, HFSPLUS_I_CAT_DIRTY);
hfsplus_mark_inode_dirty(src_dir, HFSPLUS_I_CAT_DIRTY);
out:
hfs_bnode_put(dst_fd.bnode);
hfs_find_exit(&src_fd);
return err;
}
| linux-master | fs/hfsplus/catalog.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/options.c
*
* Copyright (C) 2001
* Brad Boyer ([email protected])
* (C) 2003 Ardis Technologies <[email protected]>
*
* Option parsing
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/parser.h>
#include <linux/nls.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "hfsplus_fs.h"
enum {
opt_creator, opt_type,
opt_umask, opt_uid, opt_gid,
opt_part, opt_session, opt_nls,
opt_nodecompose, opt_decompose,
opt_barrier, opt_nobarrier,
opt_force, opt_err
};
static const match_table_t tokens = {
{ opt_creator, "creator=%s" },
{ opt_type, "type=%s" },
{ opt_umask, "umask=%o" },
{ opt_uid, "uid=%u" },
{ opt_gid, "gid=%u" },
{ opt_part, "part=%u" },
{ opt_session, "session=%u" },
{ opt_nls, "nls=%s" },
{ opt_decompose, "decompose" },
{ opt_nodecompose, "nodecompose" },
{ opt_barrier, "barrier" },
{ opt_nobarrier, "nobarrier" },
{ opt_force, "force" },
{ opt_err, NULL }
};
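/*
* Example (illustrative): mounting with
* "umask=022,uid=1000,nls=utf8,nodecompose" sets sbi->umask, sbi->uid,
* the NLS mapping and the nodecompose flag via the table above.
*/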
/* Initialize an options object to reasonable defaults */
void hfsplus_fill_defaults(struct hfsplus_sb_info *opts)
{
if (!opts)
return;
opts->creator = HFSPLUS_DEF_CR_TYPE;
opts->type = HFSPLUS_DEF_CR_TYPE;
opts->umask = current_umask();
opts->uid = current_uid();
opts->gid = current_gid();
opts->part = -1;
opts->session = -1;
}
/* convert a "four byte character" to a 32 bit int with error checks */
static inline int match_fourchar(substring_t *arg, u32 *result)
{
if (arg->to - arg->from != 4)
return -EINVAL;
memcpy(result, arg->from, 4);
return 0;
}
int hfsplus_parse_options_remount(char *input, int *force)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int token;
if (!input)
return 1;
while ((p = strsep(&input, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case opt_force:
*force = 1;
break;
default:
break;
}
}
return 1;
}
/* Parse options from mount. Returns 0 on failure */
/* input is the options passed to mount() as a string */
int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int tmp, token;
if (!input)
goto done;
while ((p = strsep(&input, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case opt_creator:
if (match_fourchar(&args[0], &sbi->creator)) {
pr_err("creator requires a 4 character value\n");
return 0;
}
break;
case opt_type:
if (match_fourchar(&args[0], &sbi->type)) {
pr_err("type requires a 4 character value\n");
return 0;
}
break;
case opt_umask:
if (match_octal(&args[0], &tmp)) {
pr_err("umask requires a value\n");
return 0;
}
sbi->umask = (umode_t)tmp;
break;
case opt_uid:
if (match_int(&args[0], &tmp)) {
pr_err("uid requires an argument\n");
return 0;
}
sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp);
if (!uid_valid(sbi->uid)) {
pr_err("invalid uid specified\n");
return 0;
} else {
set_bit(HFSPLUS_SB_UID, &sbi->flags);
}
break;
case opt_gid:
if (match_int(&args[0], &tmp)) {
pr_err("gid requires an argument\n");
return 0;
}
sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp);
if (!gid_valid(sbi->gid)) {
pr_err("invalid gid specified\n");
return 0;
} else {
set_bit(HFSPLUS_SB_GID, &sbi->flags);
}
break;
case opt_part:
if (match_int(&args[0], &sbi->part)) {
pr_err("part requires an argument\n");
return 0;
}
break;
case opt_session:
if (match_int(&args[0], &sbi->session)) {
pr_err("session requires an argument\n");
return 0;
}
break;
case opt_nls:
if (sbi->nls) {
pr_err("unable to change nls mapping\n");
return 0;
}
p = match_strdup(&args[0]);
if (p)
sbi->nls = load_nls(p);
if (!sbi->nls) {
pr_err("unable to load nls mapping \"%s\"\n",
p);
kfree(p);
return 0;
}
kfree(p);
break;
case opt_decompose:
clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
break;
case opt_nodecompose:
set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
break;
case opt_barrier:
clear_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
break;
case opt_nobarrier:
set_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
break;
case opt_force:
set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
break;
default:
return 0;
}
}
done:
if (!sbi->nls) {
/* try utf8 first, as this is the old default behaviour */
sbi->nls = load_nls("utf8");
if (!sbi->nls)
sbi->nls = load_nls_default();
if (!sbi->nls)
return 0;
}
return 1;
}
int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
if (sbi->type != HFSPLUS_DEF_CR_TYPE)
seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
from_kuid_munged(&init_user_ns, sbi->uid),
from_kgid_munged(&init_user_ns, sbi->gid));
if (sbi->part >= 0)
seq_printf(seq, ",part=%u", sbi->part);
if (sbi->session >= 0)
seq_printf(seq, ",session=%u", sbi->session);
if (sbi->nls)
seq_printf(seq, ",nls=%s", sbi->nls->charset);
if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags))
seq_puts(seq, ",nodecompose");
if (test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
seq_puts(seq, ",nobarrier");
return 0;
}
| linux-master | fs/hfsplus/options.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/attributes.c
*
* Vyacheslav Dubeyko <[email protected]>
*
* Handling of records in attributes tree
*/
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
static struct kmem_cache *hfsplus_attr_tree_cachep;
int __init hfsplus_create_attr_tree_cache(void)
{
if (hfsplus_attr_tree_cachep)
return -EEXIST;
hfsplus_attr_tree_cachep =
kmem_cache_create("hfsplus_attr_cache",
sizeof(hfsplus_attr_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!hfsplus_attr_tree_cachep)
return -ENOMEM;
return 0;
}
void hfsplus_destroy_attr_tree_cache(void)
{
kmem_cache_destroy(hfsplus_attr_tree_cachep);
}
int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2)
{
__be32 k1_cnid, k2_cnid;
k1_cnid = k1->attr.cnid;
k2_cnid = k2->attr.cnid;
if (k1_cnid != k2_cnid)
return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1;
return hfsplus_strcmp(
(const struct hfsplus_unistr *)&k1->attr.key_name,
(const struct hfsplus_unistr *)&k2->attr.key_name);
}
int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
u32 cnid, const char *name)
{
int len;
memset(key, 0, sizeof(struct hfsplus_attr_key));
key->attr.cnid = cpu_to_be32(cnid);
if (name) {
int res = hfsplus_asc2uni(sb,
(struct hfsplus_unistr *)&key->attr.key_name,
HFSPLUS_ATTR_MAX_STRLEN, name, strlen(name));
if (res)
return res;
len = be16_to_cpu(key->attr.key_name.length);
} else {
key->attr.key_name.length = 0;
len = 0;
}
/* The length of the key, as stored in key_len field, does not include
* the size of the key_len field itself.
* So, offsetof(hfsplus_attr_key, key_name) is a trick because
* it takes into consideration key_len field (__be16) of
* hfsplus_attr_key structure instead of length field (__be16) of
* hfsplus_attr_unistr structure.
*/
key->key_len =
cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
2 * len);
return 0;
}
hfsplus_attr_entry *hfsplus_alloc_attr_entry(void)
{
return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL);
}
void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry)
{
if (entry)
kmem_cache_free(hfsplus_attr_tree_cachep, entry);
}
#define HFSPLUS_INVALID_ATTR_RECORD -1
static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type,
u32 cnid, const void *value, size_t size)
{
if (record_type == HFSPLUS_ATTR_FORK_DATA) {
/*
* Mac OS X supports only inline data attributes.
* Do nothing
*/
memset(entry, 0, sizeof(*entry));
return sizeof(struct hfsplus_attr_fork_data);
} else if (record_type == HFSPLUS_ATTR_EXTENTS) {
/*
* Mac OS X supports only inline data attributes.
* Do nothing.
*/
memset(entry, 0, sizeof(*entry));
return sizeof(struct hfsplus_attr_extents);
} else if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
u16 len;
memset(entry, 0, sizeof(struct hfsplus_attr_inline_data));
entry->inline_data.record_type = cpu_to_be32(record_type);
if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE)
len = size;
else
return HFSPLUS_INVALID_ATTR_RECORD;
entry->inline_data.length = cpu_to_be16(len);
memcpy(entry->inline_data.raw_bytes, value, len);
/*
* Align len on two-byte boundary.
* It needs to add pad byte if we have odd len.
*/
len = round_up(len, 2);
return offsetof(struct hfsplus_attr_inline_data, raw_bytes) +
len;
} else /* invalid input */
memset(entry, 0, sizeof(*entry));

return HFSPLUS_INVALID_ATTR_RECORD;
}
int hfsplus_find_attr(struct super_block *sb, u32 cnid,
const char *name, struct hfs_find_data *fd)
{
int err = 0;
hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name, cnid);
if (!HFSPLUS_SB(sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
return -EINVAL;
}
if (name) {
err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name);
if (err)
goto failed_find_attr;
err = hfs_brec_find(fd, hfs_find_rec_by_key);
if (err)
goto failed_find_attr;
} else {
err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL);
if (err)
goto failed_find_attr;
err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid);
if (err)
goto failed_find_attr;
}
failed_find_attr:
return err;
}
int hfsplus_attr_exists(struct inode *inode, const char *name)
{
int err = 0;
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
if (!HFSPLUS_SB(sb)->attr_tree)
return 0;
err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
if (err)
return 0;
err = hfsplus_find_attr(sb, inode->i_ino, name, &fd);
if (err)
goto attr_not_found;
hfs_find_exit(&fd);
return 1;
attr_not_found:
hfs_find_exit(&fd);
return 0;
}
int hfsplus_create_attr(struct inode *inode,
const char *name,
const void *value, size_t size)
{
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
hfsplus_attr_entry *entry_ptr;
int entry_size;
int err;
hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n",
name ? name : "(null)", inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
return -EINVAL;
}
entry_ptr = hfsplus_alloc_attr_entry();
if (!entry_ptr)
return -ENOMEM;
err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
if (err)
goto failed_init_create_attr;
/* Fail early and avoid ENOSPC during the btree operation */
err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
if (err)
goto failed_create_attr;
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
if (err)
goto failed_create_attr;
} else {
err = -EINVAL;
goto failed_create_attr;
}
/* Mac OS X supports only inline data attributes. */
entry_size = hfsplus_attr_build_record(entry_ptr,
HFSPLUS_ATTR_INLINE_DATA,
inode->i_ino,
value, size);
if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) {
err = -EINVAL;
goto failed_create_attr;
}
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
goto failed_create_attr;
}
err = hfs_brec_insert(&fd, entry_ptr, entry_size);
if (err)
goto failed_create_attr;
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
failed_create_attr:
hfs_find_exit(&fd);
failed_init_create_attr:
hfsplus_destroy_attr_entry(entry_ptr);
return err;
}
static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
struct hfs_find_data *fd)
{
int err = 0;
__be32 found_cnid, record_type;
hfs_bnode_read(fd->bnode, &found_cnid,
fd->keyoffset +
offsetof(struct hfsplus_attr_key, cnid),
sizeof(__be32));
if (cnid != be32_to_cpu(found_cnid))
return -ENOENT;
hfs_bnode_read(fd->bnode, &record_type,
fd->entryoffset, sizeof(record_type));
switch (be32_to_cpu(record_type)) {
case HFSPLUS_ATTR_INLINE_DATA:
/* All is OK. Do nothing. */
break;
case HFSPLUS_ATTR_FORK_DATA:
case HFSPLUS_ATTR_EXTENTS:
pr_err("only inline data xattr are supported\n");
return -EOPNOTSUPP;
default:
pr_err("invalid extended attribute record\n");
return -ENOENT;
}
/* Avoid btree corruption */
hfs_bnode_read(fd->bnode, fd->search_key,
fd->keyoffset, fd->keylength);
err = hfs_brec_remove(fd);
if (err)
return err;
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
return err;
}
int hfsplus_delete_attr(struct inode *inode, const char *name)
{
int err = 0;
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n",
name ? name : "(null)", inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
return -EINVAL;
}
err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
if (err)
return err;
/* Fail early and avoid ENOSPC during the btree operation */
err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
if (err)
goto out;
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
if (err)
goto out;
} else {
pr_err("invalid extended attribute name\n");
err = -EINVAL;
goto out;
}
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
err = __hfsplus_delete_attr(inode, inode->i_ino, &fd);
if (err)
goto out;
out:
hfs_find_exit(&fd);
return err;
}
int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
{
int err = 0;
struct hfs_find_data fd;
hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid);
if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
return -EINVAL;
}
err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd);
if (err)
return err;
for (;;) {
err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
if (err) {
if (err != -ENOENT)
pr_err("xattr search failed\n");
goto end_delete_all;
}
err = __hfsplus_delete_attr(dir, cnid, &fd);
if (err)
goto end_delete_all;
}
end_delete_all:
hfs_find_exit(&fd);
return err;
}
| linux-master | fs/hfsplus/attributes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux driver model AC97 bus interface
*
* Author: Nicolas Pitre
* Created: Jan 14, 2005
* Copyright: (C) MontaVista Software Inc.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/string.h>
#include <sound/ac97_codec.h>
/*
* snd_ac97_check_id() - Reads and checks the vendor ID of the device
* @ac97: The AC97 device to check
* @id: The ID to compare to
* @id_mask: Mask that is applied to the device ID before comparing to @id
*
* If @id is 0 this function returns true if the read device vendor ID is
* a valid ID. If @id is non-zero this function returns true if @id
* matches the read vendor ID. Otherwise the function returns false.
*/
static bool snd_ac97_check_id(struct snd_ac97 *ac97, unsigned int id,
unsigned int id_mask)
{
ac97->id = ac97->bus->ops->read(ac97, AC97_VENDOR_ID1) << 16;
ac97->id |= ac97->bus->ops->read(ac97, AC97_VENDOR_ID2);
if (ac97->id == 0x0 || ac97->id == 0xffffffff)
return false;
if (id != 0 && id != (ac97->id & id_mask))
return false;
return true;
}
/**
* snd_ac97_reset() - Reset AC'97 device
* @ac97: The AC'97 device to reset
* @try_warm: Try a warm reset first
* @id: Expected device vendor ID
* @id_mask: Mask that is applied to the device ID before comparing to @id
*
* This function resets the AC'97 device. If @try_warm is true the function
* first performs a warm reset. If the warm reset is successful the function
* returns 1. Otherwise, or if @try_warm is false, the function issues a cold
* followed by a warm reset. If this is successful the function returns 0,
* otherwise a negative error code. If @id is 0 any valid device ID will be
* accepted, otherwise only the ID that matches @id and @id_mask is accepted.
*/
int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
unsigned int id_mask)
{
const struct snd_ac97_bus_ops *ops = ac97->bus->ops;
if (try_warm && ops->warm_reset) {
ops->warm_reset(ac97);
if (snd_ac97_check_id(ac97, id, id_mask))
return 1;
}
if (ops->reset)
ops->reset(ac97);
if (ops->warm_reset)
ops->warm_reset(ac97);
if (snd_ac97_check_id(ac97, id, id_mask))
return 0;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(snd_ac97_reset);
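/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a bus driver trying a warm reset first and accepting any codec whose
 * vendor ID starts with a given prefix (the ID and mask values are
 * assumptions for the example):
 *
 *	ret = snd_ac97_reset(ac97, true, 0x414c4700, 0xffffff00);
 *	if (ret < 0)
 *		return ret;	(no codec answered with a matching ID)
 *
 * ret == 1 means the warm reset sufficed, ret == 0 means the cold
 * reset path was taken.
 */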
struct bus_type ac97_bus_type = {
.name = "ac97",
};
static int __init ac97_bus_init(void)
{
return bus_register(&ac97_bus_type);
}
subsys_initcall(ac97_bus_init);
static void __exit ac97_bus_exit(void)
{
bus_unregister(&ac97_bus_type);
}
module_exit(ac97_bus_exit);
EXPORT_SYMBOL(ac97_bus_type);
MODULE_LICENSE("GPL");
| linux-master | sound/ac97_bus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sound core. This file is composed of two parts: sound_class,
* which is common to both OSS and ALSA, and the OSS sound core,
* which is used by OSS or an emulation of it.
*/
/*
* First, the common part.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <sound/core.h>
#ifdef CONFIG_SOUND_OSS_CORE
static int __init init_oss_soundcore(void);
static void cleanup_oss_soundcore(void);
#else
static inline int init_oss_soundcore(void) { return 0; }
static inline void cleanup_oss_soundcore(void) { }
#endif
MODULE_DESCRIPTION("Core sound module");
MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
static char *sound_devnode(const struct device *dev, umode_t *mode)
{
if (MAJOR(dev->devt) == SOUND_MAJOR)
return NULL;
return kasprintf(GFP_KERNEL, "snd/%s", dev_name(dev));
}
const struct class sound_class = {
.name = "sound",
.devnode = sound_devnode,
};
EXPORT_SYMBOL(sound_class);
static int __init init_soundcore(void)
{
int rc;
rc = init_oss_soundcore();
if (rc)
return rc;
rc = class_register(&sound_class);
if (rc) {
cleanup_oss_soundcore();
return rc;
}
return 0;
}
static void __exit cleanup_soundcore(void)
{
cleanup_oss_soundcore();
class_unregister(&sound_class);
}
subsys_initcall(init_soundcore);
module_exit(cleanup_soundcore);
#ifdef CONFIG_SOUND_OSS_CORE
/*
* OSS sound core handling. Breaks out sound functions to submodules
*
* Author: Alan Cox <[email protected]>
*
* Fixes:
*
* --------------------
*
* Top level handler for the sound subsystem. Various devices can
* plug into this. The fact they don't all go via OSS doesn't mean
* they don't have to implement the OSS API. There is a lot of logic
* devoted to keeping much of the OSS weight out of the core and in a
* compatibility module, but it's up to the driver to remember to load it...
*
* The code provides a set of functions for registration of devices
* by type. This is done rather than providing a single call so that
* we can hide any future changes in the internals (eg when we go to
* 32bit dev_t) from the modules and their interface.
*
* Secondly we need to allocate the dsp, dsp16 and audio devices as
* one. Thus we misuse the chains a bit to simplify this.
*
* Thirdly to make it more fun and for 2.3.x and above we do all
* of this using fine grained locking.
*
* FIXME: we have to resolve modules and fine grained load/unload
* locking at some point in 2.3.x.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sound.h>
#include <linux/kmod.h>
#define SOUND_STEP 16
struct sound_unit
{
int unit_minor;
const struct file_operations *unit_fops;
struct sound_unit *next;
char name[32];
};
/*
* By default, OSS sound_core claims full legacy minor range (0-255)
* of SOUND_MAJOR to trap open attempts to any sound minor and
* requests modules using custom sound-slot/service-* module aliases.
* The only benefit of doing this is allowing use of custom module
* aliases instead of the standard char-major-* ones. This behavior
* prevents alternative OSS implementations and is scheduled to be
* removed.
*
* CONFIG_SOUND_OSS_CORE_PRECLAIM and soundcore.preclaim_oss kernel
* parameter are added to allow distros and developers to try and
* switch to alternative implementations without needing to rebuild
* the kernel in the meantime. If preclaim_oss is non-zero, the
* kernel will behave the same as before. All SOUND_MAJOR minors are
* preclaimed and the custom module aliases along with standard chrdev
* ones are emitted if a missing device is opened. If preclaim_oss is
* zero, sound_core only grabs what's actually in use and for missing
* devices only the standard chrdev aliases are requested.
*
* All of this clutter is scheduled to be removed along with
* sound-slot/service-* module aliases.
*/
static int preclaim_oss = IS_ENABLED(CONFIG_SOUND_OSS_CORE_PRECLAIM);
module_param(preclaim_oss, int, 0444);
static int soundcore_open(struct inode *, struct file *);
static const struct file_operations soundcore_fops =
{
/* We must have an owner or the module locking fails */
.owner = THIS_MODULE,
.open = soundcore_open,
.llseek = noop_llseek,
};
/*
* Low level list operator. Scan the ordered list, find a hole and
* join into it. Called with the lock asserted
*/
static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, const struct file_operations *fops, int index, int low, int top)
{
int n=low;
if (index < 0) { /* first free */
while (*list && (*list)->unit_minor<n)
list=&((*list)->next);
while(n<top)
{
/* Found a hole ? */
if(*list==NULL || (*list)->unit_minor>n)
break;
list=&((*list)->next);
n+=SOUND_STEP;
}
if(n>=top)
return -ENOENT;
} else {
n = low + (index * SOUND_STEP);
while (*list) {
if ((*list)->unit_minor==n)
return -EBUSY;
if ((*list)->unit_minor>n)
break;
list=&((*list)->next);
}
}
/*
* Fill it in
*/
s->unit_minor=n;
s->unit_fops=fops;
/*
* Link it
*/
s->next=*list;
*list=s;
return n;
}
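/*
 * Worked example (illustrative addition, not part of the original file):
 * with SOUND_STEP == 16, the mixer chain (low == 0) hands out minors
 * 0, 16, 32, ... so /dev/mixer maps to minor 0 and /dev/mixer1 to
 * minor 16. An explicit request for index 2 asks for exactly
 * low + 2 * SOUND_STEP and fails with -EBUSY if that minor is taken.
 */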
/*
* Remove a node from the chain. Called with the lock asserted
*/
static struct sound_unit *__sound_remove_unit(struct sound_unit **list, int unit)
{
while(*list)
{
struct sound_unit *p=*list;
if(p->unit_minor==unit)
{
*list=p->next;
return p;
}
list=&(p->next);
}
printk(KERN_ERR "Sound device %d went missing!\n", unit);
return NULL;
}
/*
* This lock guards the sound loader list.
*/
static DEFINE_SPINLOCK(sound_loader_lock);
/*
* Allocate the controlling structure and add it to the sound driver
* list. Acquires locks as needed
*/
static int sound_insert_unit(struct sound_unit **list, const struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev)
{
struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL);
int r;
if (!s)
return -ENOMEM;
spin_lock(&sound_loader_lock);
retry:
r = __sound_insert_unit(s, list, fops, index, low, top);
spin_unlock(&sound_loader_lock);
if (r < 0)
goto fail;
else if (r < SOUND_STEP)
sprintf(s->name, "sound/%s", name);
else
sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP);
if (!preclaim_oss) {
/*
* Something else might have grabbed the minor. If
* first free slot is requested, rescan with @low set
* to the next unit; otherwise, -EBUSY.
*/
r = __register_chrdev(SOUND_MAJOR, s->unit_minor, 1, s->name,
&soundcore_fops);
if (r < 0) {
spin_lock(&sound_loader_lock);
__sound_remove_unit(list, s->unit_minor);
if (index < 0) {
low = s->unit_minor + SOUND_STEP;
goto retry;
}
spin_unlock(&sound_loader_lock);
r = -EBUSY;
goto fail;
}
}
device_create(&sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
NULL, "%s", s->name+6);
return s->unit_minor;
fail:
kfree(s);
return r;
}
/*
* Remove a unit. Acquires locks as needed. The drivers MUST have
* completed the removal before their file operations become
* invalid.
*/
static void sound_remove_unit(struct sound_unit **list, int unit)
{
struct sound_unit *p;
spin_lock(&sound_loader_lock);
p = __sound_remove_unit(list, unit);
spin_unlock(&sound_loader_lock);
if (p) {
if (!preclaim_oss)
__unregister_chrdev(SOUND_MAJOR, p->unit_minor, 1,
p->name);
device_destroy(&sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
kfree(p);
}
}
/*
* Allocations
*
* 0 *16 Mixers
* 1 *8 Sequencers
* 2 *16 Midi
* 3 *16 DSP
* 4 *16 SunDSP
* 5 *16 DSP16
* 6 -- sndstat (obsolete)
* 7 *16 unused
* 8 -- alternate sequencer (see above)
* 9 *16 raw synthesizer access
* 10 *16 unused
* 11 *16 unused
* 12 *16 unused
* 13 *16 unused
* 14 *16 unused
* 15 *16 unused
*/
static struct sound_unit *chains[SOUND_STEP];
/**
* register_sound_special_device - register a special sound node
* @fops: File operations for the driver
* @unit: Unit number to allocate
* @dev: device pointer
*
* Allocate a special sound device by minor number from the sound
* subsystem.
*
* Return: The allocated number is returned on success. On failure,
* a negative error code is returned.
*/
int register_sound_special_device(const struct file_operations *fops, int unit,
struct device *dev)
{
const int chain = unit % SOUND_STEP;
int max_unit = 256;
const char *name;
char _name[16];
switch (chain) {
case 0:
name = "mixer";
break;
case 1:
name = "sequencer";
if (unit >= SOUND_STEP)
goto __unknown;
max_unit = unit + 1;
break;
case 2:
name = "midi";
break;
case 3:
name = "dsp";
break;
case 4:
name = "audio";
break;
case 5:
name = "dspW";
break;
case 8:
name = "sequencer2";
if (unit >= SOUND_STEP)
goto __unknown;
max_unit = unit + 1;
break;
case 9:
name = "dmmidi";
break;
case 10:
name = "dmfm";
break;
case 12:
name = "adsp";
break;
case 13:
name = "amidi";
break;
case 14:
name = "admmidi";
break;
default:
{
__unknown:
sprintf(_name, "unknown%d", chain);
if (unit >= SOUND_STEP)
strcat(_name, "-");
name = _name;
}
break;
}
return sound_insert_unit(&chains[chain], fops, -1, unit, max_unit,
name, 0600, dev);
}
EXPORT_SYMBOL(register_sound_special_device);
int register_sound_special(const struct file_operations *fops, int unit)
{
return register_sound_special_device(fops, unit, NULL);
}
EXPORT_SYMBOL(register_sound_special);
/**
* register_sound_mixer - register a mixer device
* @fops: File operations for the driver
* @dev: Unit number to allocate
*
* Allocate a mixer device. Unit is the number of the mixer requested.
* Pass -1 to request the next free mixer unit.
*
* Return: On success, the allocated number is returned. On failure,
* a negative error code is returned.
*/
int register_sound_mixer(const struct file_operations *fops, int dev)
{
return sound_insert_unit(&chains[0], fops, dev, 0, 128,
"mixer", 0600, NULL);
}
EXPORT_SYMBOL(register_sound_mixer);
/*
* DSPs are registered as a triple. Register only one and cheat
* in open - see below.
*/
/**
* register_sound_dsp - register a DSP device
* @fops: File operations for the driver
* @dev: Unit number to allocate
*
* Allocate a DSP device. Unit is the number of the DSP requested.
* Pass -1 to request the next free DSP unit.
*
* This function allocates both the audio and dsp device entries together
* and will always allocate them as a matching pair - eg dsp3/audio3
*
* Return: On success, the allocated number is returned. On failure,
* a negative error code is returned.
*/
int register_sound_dsp(const struct file_operations *fops, int dev)
{
return sound_insert_unit(&chains[3], fops, dev, 3, 131,
"dsp", 0600, NULL);
}
EXPORT_SYMBOL(register_sound_dsp);
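/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a legacy OSS driver registering a dsp/mixer pair and keeping the
 * returned minors for unregistration (my_dsp_fops and my_mixer_fops
 * are hypothetical):
 *
 *	card->dsp_minor = register_sound_dsp(&my_dsp_fops, -1);
 *	if (card->dsp_minor < 0)
 *		return card->dsp_minor;
 *	card->mixer_minor = register_sound_mixer(&my_mixer_fops, -1);
 *	...
 *	unregister_sound_mixer(card->mixer_minor);
 *	unregister_sound_dsp(card->dsp_minor);
 */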
/**
* unregister_sound_special - unregister a special sound device
* @unit: unit number to release
*
* Release a sound device that was allocated with
* register_sound_special(). The unit passed is the return value from
* the register function.
*/
void unregister_sound_special(int unit)
{
sound_remove_unit(&chains[unit % SOUND_STEP], unit);
}
EXPORT_SYMBOL(unregister_sound_special);
/**
* unregister_sound_mixer - unregister a mixer
* @unit: unit number to release
*
* Release a sound device that was allocated with register_sound_mixer().
* The unit passed is the return value from the register function.
*/
void unregister_sound_mixer(int unit)
{
sound_remove_unit(&chains[0], unit);
}
EXPORT_SYMBOL(unregister_sound_mixer);
/**
* unregister_sound_dsp - unregister a DSP device
* @unit: unit number to release
*
* Release a sound device that was allocated with register_sound_dsp().
* The unit passed is the return value from the register function.
*
* Both of the allocated units are released together automatically.
*/
void unregister_sound_dsp(int unit)
{
sound_remove_unit(&chains[3], unit);
}
EXPORT_SYMBOL(unregister_sound_dsp);
static struct sound_unit *__look_for_unit(int chain, int unit)
{
struct sound_unit *s;
s=chains[chain];
while(s && s->unit_minor <= unit)
{
if(s->unit_minor==unit)
return s;
s=s->next;
}
return NULL;
}
static int soundcore_open(struct inode *inode, struct file *file)
{
int chain;
int unit = iminor(inode);
struct sound_unit *s;
const struct file_operations *new_fops = NULL;
chain=unit&0x0F;
if(chain==4 || chain==5) /* dsp/audio/dsp16 */
{
unit&=0xF0;
unit|=3;
chain=3;
}
spin_lock(&sound_loader_lock);
s = __look_for_unit(chain, unit);
if (s)
new_fops = fops_get(s->unit_fops);
if (preclaim_oss && !new_fops) {
spin_unlock(&sound_loader_lock);
/*
* Please, don't change this order or code.
* For ALSA, slot means soundcard and the OSS emulation
* code comes as add-on modules which don't depend on the
* ALSA top-level soundcard modules, so we need to load
* them first. [Jaroslav Kysela <[email protected]>]
*/
request_module("sound-slot-%i", unit>>4);
request_module("sound-service-%i-%i", unit>>4, chain);
/*
* sound-slot/service-* module aliases are scheduled
* for removal in favor of the standard char-major-*
* module aliases. For the time being, generate both
* the legacy and standard module aliases to ease
* transition.
*/
if (request_module("char-major-%d-%d", SOUND_MAJOR, unit) > 0)
request_module("char-major-%d", SOUND_MAJOR);
spin_lock(&sound_loader_lock);
s = __look_for_unit(chain, unit);
if (s)
new_fops = fops_get(s->unit_fops);
}
spin_unlock(&sound_loader_lock);
if (!new_fops)
return -ENODEV;
/*
* We rely upon the fact that we can't be unloaded while the
* subdriver is there.
*/
replace_fops(file, new_fops);
if (!file->f_op->open)
return -ENODEV;
return file->f_op->open(inode, file);
}
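/*
 * Worked example (illustrative addition, not part of the original file):
 * opening /dev/audio3 (minor 0x34) gives chain 4, which is folded onto
 * the dsp chain: unit becomes (0x34 & 0xF0) | 3 == 0x33 and chain
 * becomes 3, so one sound_unit serves dsp3, audio3 and dspW3 alike.
 */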
MODULE_ALIAS_CHARDEV_MAJOR(SOUND_MAJOR);
static void cleanup_oss_soundcore(void)
{
/* We have nothing to really do here - we know the lists must be
empty */
unregister_chrdev(SOUND_MAJOR, "sound");
}
static int __init init_oss_soundcore(void)
{
if (preclaim_oss &&
register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops) < 0) {
printk(KERN_ERR "soundcore: sound device already in use.\n");
return -EBUSY;
}
return 0;
}
#endif /* CONFIG_SOUND_OSS_CORE */
| linux-master | sound/sound_core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Advanced Linux Sound Architecture
* Copyright (c) by Jaroslav Kysela <[email protected]>
*/
#include <linux/init.h>
#include <sound/core.h>
static int __init alsa_sound_last_init(void)
{
struct snd_card *card;
int idx, ok = 0;
printk(KERN_INFO "ALSA device list:\n");
for (idx = 0; idx < SNDRV_CARDS; idx++) {
card = snd_card_ref(idx);
if (card) {
printk(KERN_INFO " #%i: %s\n", idx, card->longname);
snd_card_unref(card);
ok++;
}
}
if (ok == 0)
printk(KERN_INFO " No soundcards found.\n");
return 0;
}
late_initcall_sync(alsa_sound_last_init);
| linux-master | sound/last.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Robert Jarzmik <[email protected]>
*/
#include <sound/ac97_codec.h>
#include <sound/ac97/codec.h>
#include <sound/ac97/controller.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <sound/soc.h> /* For compat_ac97_* */
| linux-master | sound/ac97/codec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Robert Jarzmik <[email protected]>
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <sound/ac97/codec.h>
#include <sound/ac97/controller.h>
#include <sound/ac97/regs.h>
#include "ac97_core.h"
/*
* Protects ac97_controllers and each ac97_controller structure.
*/
static DEFINE_MUTEX(ac97_controllers_mutex);
static DEFINE_IDR(ac97_adapter_idr);
static LIST_HEAD(ac97_controllers);
static struct bus_type ac97_bus_type;
static inline struct ac97_controller*
to_ac97_controller(struct device *ac97_adapter)
{
return container_of(ac97_adapter, struct ac97_controller, adap);
}
static int ac97_unbound_ctrl_write(struct ac97_controller *adrv, int slot,
unsigned short reg, unsigned short val)
{
return -ENODEV;
}
static int ac97_unbound_ctrl_read(struct ac97_controller *adrv, int slot,
unsigned short reg)
{
return -ENODEV;
}
static const struct ac97_controller_ops ac97_unbound_ctrl_ops = {
.write = ac97_unbound_ctrl_write,
.read = ac97_unbound_ctrl_read,
};
static struct ac97_controller ac97_unbound_ctrl = {
.ops = &ac97_unbound_ctrl_ops,
};
static struct ac97_codec_device *
ac97_codec_find(struct ac97_controller *ac97_ctrl, unsigned int codec_num)
{
if (codec_num >= AC97_BUS_MAX_CODECS)
return ERR_PTR(-EINVAL);
return ac97_ctrl->codecs[codec_num];
}
static struct device_node *
ac97_of_get_child_device(struct ac97_controller *ac97_ctrl, int idx,
unsigned int vendor_id)
{
struct device_node *node;
u32 reg;
char compat[] = "ac97,0000,0000";
snprintf(compat, sizeof(compat), "ac97,%04x,%04x",
vendor_id >> 16, vendor_id & 0xffff);
for_each_child_of_node(ac97_ctrl->parent->of_node, node) {
if ((idx != of_property_read_u32(node, "reg", &reg)) ||
!of_device_is_compatible(node, compat))
continue;
return node;
}
return NULL;
}
static void ac97_codec_release(struct device *dev)
{
struct ac97_codec_device *adev;
struct ac97_controller *ac97_ctrl;
adev = to_ac97_device(dev);
ac97_ctrl = adev->ac97_ctrl;
ac97_ctrl->codecs[adev->num] = NULL;
of_node_put(dev->of_node);
kfree(adev);
}
static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
unsigned int vendor_id)
{
struct ac97_codec_device *codec;
int ret;
codec = kzalloc(sizeof(*codec), GFP_KERNEL);
if (!codec)
return -ENOMEM;
ac97_ctrl->codecs[idx] = codec;
codec->vendor_id = vendor_id;
codec->dev.release = ac97_codec_release;
codec->dev.bus = &ac97_bus_type;
codec->dev.parent = &ac97_ctrl->adap;
codec->num = idx;
codec->ac97_ctrl = ac97_ctrl;
device_initialize(&codec->dev);
dev_set_name(&codec->dev, "%s:%u", dev_name(ac97_ctrl->parent), idx);
codec->dev.of_node = ac97_of_get_child_device(ac97_ctrl, idx,
vendor_id);
ret = device_add(&codec->dev);
if (ret) {
put_device(&codec->dev);
return ret;
}
return 0;
}
unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
unsigned int codec_num)
{
unsigned short vid1, vid2;
int ret;
ret = adrv->ops->read(adrv, codec_num, AC97_VENDOR_ID1);
vid1 = (ret & 0xffff);
if (ret < 0)
return 0;
ret = adrv->ops->read(adrv, codec_num, AC97_VENDOR_ID2);
vid2 = (ret & 0xffff);
if (ret < 0)
return 0;
dev_dbg(&adrv->adap, "%s(codec_num=%u): vendor_id=0x%08x\n",
__func__, codec_num, AC97_ID(vid1, vid2));
return AC97_ID(vid1, vid2);
}
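/*
 * Worked example (illustrative addition, not part of the original file):
 * a codec answering 0x414c to AC97_VENDOR_ID1 and 0x4770 to
 * AC97_VENDOR_ID2 is reported as vendor_id 0x414c4770, since AC97_ID()
 * packs the two 16-bit halves ('A' 'L' 'G' plus a revision byte, the
 * usual Avance Logic/Realtek pattern).
 */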
static int ac97_bus_scan(struct ac97_controller *ac97_ctrl)
{
int ret, i;
unsigned int vendor_id;
for (i = 0; i < AC97_BUS_MAX_CODECS; i++) {
if (ac97_codec_find(ac97_ctrl, i))
continue;
if (!(ac97_ctrl->slots_available & BIT(i)))
continue;
vendor_id = snd_ac97_bus_scan_one(ac97_ctrl, i);
if (!vendor_id)
continue;
ret = ac97_codec_add(ac97_ctrl, i, vendor_id);
if (ret < 0)
return ret;
}
return 0;
}
static int ac97_bus_reset(struct ac97_controller *ac97_ctrl)
{
ac97_ctrl->ops->reset(ac97_ctrl);
return 0;
}
/**
* snd_ac97_codec_driver_register - register an AC97 codec driver
* @drv: AC97 codec driver to register
*
* Register an AC97 codec driver to the ac97 bus driver, aka. the AC97 digital
* controller.
*
* Returns 0 on success or error code
*/
int snd_ac97_codec_driver_register(struct ac97_codec_driver *drv)
{
drv->driver.bus = &ac97_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(snd_ac97_codec_driver_register);
/**
* snd_ac97_codec_driver_unregister - unregister an AC97 codec driver
* @drv: AC97 codec driver to unregister
*
* Unregister a previously registered ac97 codec driver.
*/
void snd_ac97_codec_driver_unregister(struct ac97_codec_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(snd_ac97_codec_driver_unregister);
/**
* snd_ac97_codec_get_platdata - get platform_data
* @adev: the ac97 codec device
*
* On legacy platforms, ac97 devices are auto-created upon probe, so codec
* drivers cannot be handed platform_data directly; this retrieves the
* platdata which was set up at ac97 controller registration.
*
* Returns the platform data pointer
*/
void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev)
{
struct ac97_controller *ac97_ctrl = adev->ac97_ctrl;
return ac97_ctrl->codecs_pdata[adev->num];
}
EXPORT_SYMBOL_GPL(snd_ac97_codec_get_platdata);
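/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a codec driver probe fetching its legacy platform data (struct
 * my_codec_pdata is hypothetical):
 *
 *	static int my_codec_probe(struct ac97_codec_device *adev)
 *	{
 *		struct my_codec_pdata *pdata =
 *			snd_ac97_codec_get_platdata(adev);
 *		...
 *	}
 */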
static void ac97_ctrl_codecs_unregister(struct ac97_controller *ac97_ctrl)
{
int i;
for (i = 0; i < AC97_BUS_MAX_CODECS; i++)
if (ac97_ctrl->codecs[i]) {
ac97_ctrl->codecs[i]->ac97_ctrl = &ac97_unbound_ctrl;
device_unregister(&ac97_ctrl->codecs[i]->dev);
}
}
static ssize_t cold_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t len)
{
struct ac97_controller *ac97_ctrl;
mutex_lock(&ac97_controllers_mutex);
ac97_ctrl = to_ac97_controller(dev);
ac97_ctrl->ops->reset(ac97_ctrl);
mutex_unlock(&ac97_controllers_mutex);
return len;
}
static DEVICE_ATTR_WO(cold_reset);
static ssize_t warm_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t len)
{
struct ac97_controller *ac97_ctrl;
if (!dev)
return -ENODEV;
mutex_lock(&ac97_controllers_mutex);
ac97_ctrl = to_ac97_controller(dev);
ac97_ctrl->ops->warm_reset(ac97_ctrl);
mutex_unlock(&ac97_controllers_mutex);
return len;
}
static DEVICE_ATTR_WO(warm_reset);
static struct attribute *ac97_controller_device_attrs[] = {
&dev_attr_cold_reset.attr,
&dev_attr_warm_reset.attr,
NULL
};
static const struct attribute_group ac97_adapter_attr_group = {
.name = "ac97_operations",
.attrs = ac97_controller_device_attrs,
};
static const struct attribute_group *ac97_adapter_groups[] = {
&ac97_adapter_attr_group,
NULL,
};
static void ac97_del_adapter(struct ac97_controller *ac97_ctrl)
{
mutex_lock(&ac97_controllers_mutex);
ac97_ctrl_codecs_unregister(ac97_ctrl);
list_del(&ac97_ctrl->controllers);
mutex_unlock(&ac97_controllers_mutex);
device_unregister(&ac97_ctrl->adap);
}
static void ac97_adapter_release(struct device *dev)
{
struct ac97_controller *ac97_ctrl;
ac97_ctrl = to_ac97_controller(dev);
idr_remove(&ac97_adapter_idr, ac97_ctrl->nr);
dev_dbg(&ac97_ctrl->adap, "adapter unregistered by %s\n",
dev_name(ac97_ctrl->parent));
}
static const struct device_type ac97_adapter_type = {
.groups = ac97_adapter_groups,
.release = ac97_adapter_release,
};
static int ac97_add_adapter(struct ac97_controller *ac97_ctrl)
{
int ret;
mutex_lock(&ac97_controllers_mutex);
ret = idr_alloc(&ac97_adapter_idr, ac97_ctrl, 0, 0, GFP_KERNEL);
ac97_ctrl->nr = ret;
if (ret >= 0) {
dev_set_name(&ac97_ctrl->adap, "ac97-%d", ret);
ac97_ctrl->adap.type = &ac97_adapter_type;
ac97_ctrl->adap.parent = ac97_ctrl->parent;
ret = device_register(&ac97_ctrl->adap);
if (ret)
put_device(&ac97_ctrl->adap);
}
if (!ret)
list_add(&ac97_ctrl->controllers, &ac97_controllers);
mutex_unlock(&ac97_controllers_mutex);
if (!ret)
dev_dbg(&ac97_ctrl->adap, "adapter registered by %s\n",
dev_name(ac97_ctrl->parent));
return ret;
}
/**
* snd_ac97_controller_register - register an ac97 controller
* @ops: the ac97 bus operations
* @dev: the device providing the ac97 DC function
* @slots_available: mask of the ac97 codecs that can be scanned and probed
* bit0 => codec 0, bit1 => codec 1 ... bit3 => codec 3
* @codecs_pdata: optional platform data for each of the codecs (may be NULL)
*
* Register a digital controller which can control up to 4 ac97 codecs. This is
* the controller side of the AC97 AC-link, while the codecs are the slave side.
*
* Returns a valid controller upon success, an ERR_PTR() encoded error otherwise.
*/
struct ac97_controller *snd_ac97_controller_register(
const struct ac97_controller_ops *ops, struct device *dev,
unsigned short slots_available, void **codecs_pdata)
{
struct ac97_controller *ac97_ctrl;
int ret, i;
ac97_ctrl = kzalloc(sizeof(*ac97_ctrl), GFP_KERNEL);
if (!ac97_ctrl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < AC97_BUS_MAX_CODECS && codecs_pdata; i++)
ac97_ctrl->codecs_pdata[i] = codecs_pdata[i];
ac97_ctrl->ops = ops;
ac97_ctrl->slots_available = slots_available;
ac97_ctrl->parent = dev;
ret = ac97_add_adapter(ac97_ctrl);
if (ret)
goto err;
ac97_bus_reset(ac97_ctrl);
ac97_bus_scan(ac97_ctrl);
return ac97_ctrl;
err:
kfree(ac97_ctrl);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(snd_ac97_controller_register);
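/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a platform driver registering a controller with only codec 0 wired
 * up (my_ops and pdev are assumptions for the example):
 *
 *	ctrl = snd_ac97_controller_register(&my_ops, &pdev->dev,
 *					    BIT(0), NULL);
 *	if (IS_ERR(ctrl))
 *		return PTR_ERR(ctrl);
 *	...
 *	snd_ac97_controller_unregister(ctrl);
 */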
/**
* snd_ac97_controller_unregister - unregister an ac97 controller
* @ac97_ctrl: the device previously provided to ac97_controller_register()
*
*/
void snd_ac97_controller_unregister(struct ac97_controller *ac97_ctrl)
{
ac97_del_adapter(ac97_ctrl);
}
EXPORT_SYMBOL_GPL(snd_ac97_controller_unregister);
#ifdef CONFIG_PM
static int ac97_pm_runtime_suspend(struct device *dev)
{
struct ac97_codec_device *codec = to_ac97_device(dev);
int ret = pm_generic_runtime_suspend(dev);
if (ret == 0 && dev->driver) {
if (pm_runtime_is_irq_safe(dev))
clk_disable(codec->clk);
else
clk_disable_unprepare(codec->clk);
}
return ret;
}
static int ac97_pm_runtime_resume(struct device *dev)
{
struct ac97_codec_device *codec = to_ac97_device(dev);
int ret;
if (dev->driver) {
if (pm_runtime_is_irq_safe(dev))
ret = clk_enable(codec->clk);
else
ret = clk_prepare_enable(codec->clk);
if (ret)
return ret;
}
return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops ac97_pm = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
.freeze = pm_generic_freeze,
.thaw = pm_generic_thaw,
.poweroff = pm_generic_poweroff,
.restore = pm_generic_restore,
SET_RUNTIME_PM_OPS(
ac97_pm_runtime_suspend,
ac97_pm_runtime_resume,
NULL)
};
static int ac97_get_enable_clk(struct ac97_codec_device *adev)
{
int ret;
adev->clk = clk_get(&adev->dev, "ac97_clk");
if (IS_ERR(adev->clk))
return PTR_ERR(adev->clk);
ret = clk_prepare_enable(adev->clk);
if (ret)
clk_put(adev->clk);
return ret;
}
static void ac97_put_disable_clk(struct ac97_codec_device *adev)
{
clk_disable_unprepare(adev->clk);
clk_put(adev->clk);
}
static ssize_t vendor_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ac97_codec_device *codec = to_ac97_device(dev);
return sysfs_emit(buf, "%08x", codec->vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);
static struct attribute *ac97_dev_attrs[] = {
&dev_attr_vendor_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(ac97_dev);
static int ac97_bus_match(struct device *dev, struct device_driver *drv)
{
struct ac97_codec_device *adev = to_ac97_device(dev);
struct ac97_codec_driver *adrv = to_ac97_driver(drv);
const struct ac97_id *id = adrv->id_table;
int i = 0;
if (adev->vendor_id == 0x0 || adev->vendor_id == 0xffffffff)
return false;
do {
if (ac97_ids_match(id[i].id, adev->vendor_id, id[i].mask))
return true;
} while (id[i++].id);
return false;
}
static int ac97_bus_probe(struct device *dev)
{
struct ac97_codec_device *adev = to_ac97_device(dev);
struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
int ret;
ret = ac97_get_enable_clk(adev);
if (ret)
return ret;
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = adrv->probe(adev);
if (ret == 0)
return 0;
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
ac97_put_disable_clk(adev);
return ret;
}
static void ac97_bus_remove(struct device *dev)
{
struct ac97_codec_device *adev = to_ac97_device(dev);
struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
adrv->remove(adev);
pm_runtime_put_noidle(dev);
ac97_put_disable_clk(adev);
pm_runtime_disable(dev);
}
static struct bus_type ac97_bus_type = {
.name = "ac97bus",
.dev_groups = ac97_dev_groups,
.match = ac97_bus_match,
.pm = &ac97_pm,
.probe = ac97_bus_probe,
.remove = ac97_bus_remove,
};
static int __init ac97_bus_init(void)
{
return bus_register(&ac97_bus_type);
}
subsys_initcall(ac97_bus_init);
static void __exit ac97_bus_exit(void)
{
bus_unregister(&ac97_bus_type);
}
module_exit(ac97_bus_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jarzmik <[email protected]>");
| linux-master | sound/ac97/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Robert Jarzmik <[email protected]>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <sound/ac97/codec.h>
#include <sound/ac97/compat.h>
#include <sound/ac97/controller.h>
#include <sound/soc.h>
#include "ac97_core.h"
static void compat_ac97_release(struct device *dev)
{
kfree(to_ac97_t(dev));
}
static void compat_ac97_reset(struct snd_ac97 *ac97)
{
struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
struct ac97_controller *actrl = adev->ac97_ctrl;
if (actrl->ops->reset)
actrl->ops->reset(actrl);
}
static void compat_ac97_warm_reset(struct snd_ac97 *ac97)
{
struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
struct ac97_controller *actrl = adev->ac97_ctrl;
if (actrl->ops->warm_reset)
actrl->ops->warm_reset(actrl);
}
static void compat_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
struct ac97_controller *actrl = adev->ac97_ctrl;
actrl->ops->write(actrl, ac97->num, reg, val);
}
static unsigned short compat_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
struct ac97_controller *actrl = adev->ac97_ctrl;
return actrl->ops->read(actrl, ac97->num, reg);
}
static const struct snd_ac97_bus_ops compat_snd_ac97_bus_ops = {
.reset = compat_ac97_reset,
.warm_reset = compat_ac97_warm_reset,
.write = compat_ac97_write,
.read = compat_ac97_read,
};
static struct snd_ac97_bus compat_soc_ac97_bus = {
.ops = &compat_snd_ac97_bus_ops,
};
struct snd_ac97 *snd_ac97_compat_alloc(struct ac97_codec_device *adev)
{
struct snd_ac97 *ac97;
int ret;
ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
if (ac97 == NULL)
return ERR_PTR(-ENOMEM);
ac97->private_data = adev;
ac97->bus = &compat_soc_ac97_bus;
ac97->dev.parent = &adev->dev;
ac97->dev.release = compat_ac97_release;
dev_set_name(&ac97->dev, "%s-compat", dev_name(&adev->dev));
ret = device_register(&ac97->dev);
if (ret) {
put_device(&ac97->dev);
return ERR_PTR(ret);
}
return ac97;
}
EXPORT_SYMBOL_GPL(snd_ac97_compat_alloc);
void snd_ac97_compat_release(struct snd_ac97 *ac97)
{
device_unregister(&ac97->dev);
}
EXPORT_SYMBOL_GPL(snd_ac97_compat_release);
int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
unsigned int id_mask)
{
struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
struct ac97_controller *actrl = adev->ac97_ctrl;
unsigned int scanned;
if (try_warm) {
compat_ac97_warm_reset(ac97);
scanned = snd_ac97_bus_scan_one(actrl, adev->num);
if (ac97_ids_match(scanned, adev->vendor_id, id_mask))
return 1;
}
compat_ac97_reset(ac97);
compat_ac97_warm_reset(ac97);
scanned = snd_ac97_bus_scan_one(actrl, adev->num);
if (ac97_ids_match(scanned, adev->vendor_id, id_mask))
return 0;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(snd_ac97_reset);
| linux-master | sound/ac97/snd_ac97_compat.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <[email protected]>
*/
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
struct xen_snd_front_evtchnl *channel = dev_id;
struct xen_snd_front_info *front_info = channel->front_info;
struct xensnd_resp *resp;
RING_IDX i, rp;
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock);
again:
rp = channel->u.req.ring.sring->rsp_prod;
/* Ensure we see queued responses up to rp. */
rmb();
/*
* Assume that the backend is trusted to always write sane values
* to the ring counters, so no overflow checks on frontend side
* are required.
*/
for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
if (resp->id != channel->evt_id)
continue;
switch (resp->operation) {
case XENSND_OP_OPEN:
case XENSND_OP_CLOSE:
case XENSND_OP_READ:
case XENSND_OP_WRITE:
case XENSND_OP_TRIGGER:
channel->u.req.resp_status = resp->status;
complete(&channel->u.req.completion);
break;
case XENSND_OP_HW_PARAM_QUERY:
channel->u.req.resp_status = resp->status;
channel->u.req.resp.hw_param =
resp->resp.hw_param;
complete(&channel->u.req.completion);
break;
default:
dev_err(&front_info->xb_dev->dev,
"Operation %d is not supported\n",
resp->operation);
break;
}
}
channel->u.req.ring.rsp_cons = i;
if (i != channel->u.req.ring.req_prod_pvt) {
int more_to_do;
RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
more_to_do);
if (more_to_do)
goto again;
} else {
channel->u.req.ring.sring->rsp_event = i + 1;
}
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED;
}
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
struct xen_snd_front_evtchnl *channel = dev_id;
struct xensnd_event_page *page = channel->u.evt.page;
u32 cons, prod;
if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
return IRQ_HANDLED;
mutex_lock(&channel->ring_io_lock);
prod = page->in_prod;
/* Ensure we see ring contents up to prod. */
virt_rmb();
if (prod == page->in_cons)
goto out;
/*
* Assume that the backend is trusted to always write sane values
* to the ring counters, so no overflow checks on frontend side
* are required.
*/
for (cons = page->in_cons; cons != prod; cons++) {
struct xensnd_evt *event;
event = &XENSND_IN_RING_REF(page, cons);
if (unlikely(event->id != channel->evt_id++))
continue;
switch (event->type) {
case XENSND_EVT_CUR_POS:
xen_snd_front_alsa_handle_cur_pos(channel,
event->op.cur_pos.position);
break;
}
}
page->in_cons = cons;
/* Ensure ring contents. */
virt_wmb();
out:
mutex_unlock(&channel->ring_io_lock);
return IRQ_HANDLED;
}
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
int notify;
channel->u.req.ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
if (notify)
notify_remote_via_irq(channel->irq);
}
static void evtchnl_free(struct xen_snd_front_info *front_info,
struct xen_snd_front_evtchnl *channel)
{
void *page = NULL;
if (channel->type == EVTCHNL_TYPE_REQ)
page = channel->u.req.ring.sring;
else if (channel->type == EVTCHNL_TYPE_EVT)
page = channel->u.evt.page;
if (!page)
return;
channel->state = EVTCHNL_STATE_DISCONNECTED;
if (channel->type == EVTCHNL_TYPE_REQ) {
/* Release all who still waits for response if any. */
channel->u.req.resp_status = -EIO;
complete_all(&channel->u.req.completion);
}
if (channel->irq)
unbind_from_irqhandler(channel->irq, channel);
if (channel->port)
xenbus_free_evtchn(front_info->xb_dev, channel->port);
/* End access and free the page. */
xenbus_teardown_ring(&page, 1, &channel->gref);
memset(channel, 0, sizeof(*channel));
}
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
int i;
if (!front_info->evt_pairs)
return;
for (i = 0; i < front_info->num_evt_pairs; i++) {
evtchnl_free(front_info, &front_info->evt_pairs[i].req);
evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
}
kfree(front_info->evt_pairs);
front_info->evt_pairs = NULL;
}
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
struct xen_snd_front_evtchnl *channel,
enum xen_snd_front_evtchnl_type type)
{
struct xenbus_device *xb_dev = front_info->xb_dev;
void *page;
irq_handler_t handler;
char *handler_name = NULL;
int ret;
memset(channel, 0, sizeof(*channel));
channel->type = type;
channel->index = index;
channel->front_info = front_info;
channel->state = EVTCHNL_STATE_DISCONNECTED;
ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref);
if (ret)
goto fail;
handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
type == EVTCHNL_TYPE_REQ ?
XENSND_FIELD_RING_REF :
XENSND_FIELD_EVT_RING_REF);
if (!handler_name) {
ret = -ENOMEM;
goto fail;
}
mutex_init(&channel->ring_io_lock);
if (type == EVTCHNL_TYPE_REQ) {
struct xen_sndif_sring *sring = page;
init_completion(&channel->u.req.completion);
mutex_init(&channel->u.req.req_io_lock);
XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);
handler = evtchnl_interrupt_req;
} else {
channel->u.evt.page = page;
handler = evtchnl_interrupt_evt;
}
ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
if (ret < 0)
goto fail;
ret = bind_evtchn_to_irq(channel->port);
if (ret < 0) {
dev_err(&xb_dev->dev,
"Failed to bind IRQ for domid %d port %d: %d\n",
front_info->xb_dev->otherend_id, channel->port, ret);
goto fail;
}
channel->irq = ret;
ret = request_threaded_irq(channel->irq, NULL, handler,
IRQF_ONESHOT, handler_name, channel);
if (ret < 0) {
dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
channel->irq, ret);
goto fail;
}
kfree(handler_name);
return 0;
fail:
kfree(handler_name);
dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
return ret;
}
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
int num_streams)
{
struct xen_front_cfg_card *cfg = &front_info->cfg;
struct device *dev = &front_info->xb_dev->dev;
int d, ret = 0;
front_info->evt_pairs =
kcalloc(num_streams,
sizeof(struct xen_snd_front_evtchnl_pair),
GFP_KERNEL);
if (!front_info->evt_pairs)
return -ENOMEM;
/* Iterate over devices and their streams and create event channels. */
for (d = 0; d < cfg->num_pcm_instances; d++) {
struct xen_front_cfg_pcm_instance *pcm_instance;
int s, index;
pcm_instance = &cfg->pcm_instances[d];
for (s = 0; s < pcm_instance->num_streams_pb; s++) {
index = pcm_instance->streams_pb[s].index;
ret = evtchnl_alloc(front_info, index,
&front_info->evt_pairs[index].req,
EVTCHNL_TYPE_REQ);
if (ret < 0) {
dev_err(dev, "Error allocating control channel\n");
goto fail;
}
ret = evtchnl_alloc(front_info, index,
&front_info->evt_pairs[index].evt,
EVTCHNL_TYPE_EVT);
if (ret < 0) {
dev_err(dev, "Error allocating in-event channel\n");
goto fail;
}
}
for (s = 0; s < pcm_instance->num_streams_cap; s++) {
index = pcm_instance->streams_cap[s].index;
ret = evtchnl_alloc(front_info, index,
&front_info->evt_pairs[index].req,
EVTCHNL_TYPE_REQ);
if (ret < 0) {
dev_err(dev, "Error allocating control channel\n");
goto fail;
}
ret = evtchnl_alloc(front_info, index,
&front_info->evt_pairs[index].evt,
EVTCHNL_TYPE_EVT);
if (ret < 0) {
dev_err(dev, "Error allocating in-event channel\n");
goto fail;
}
}
}
front_info->num_evt_pairs = num_streams;
return 0;
fail:
xen_snd_front_evtchnl_free_all(front_info);
return ret;
}
static int evtchnl_publish(struct xenbus_transaction xbt,
struct xen_snd_front_evtchnl *channel,
const char *path, const char *node_ring,
const char *node_chnl)
{
struct xenbus_device *xb_dev = channel->front_info->xb_dev;
int ret;
/* Write control channel ring reference. */
ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
if (ret < 0) {
dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
return ret;
}
/* Write event channel ring reference. */
ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
if (ret < 0) {
dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
return ret;
}
return 0;
}
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
struct xen_front_cfg_card *cfg = &front_info->cfg;
struct xenbus_transaction xbt;
int ret, d;
again:
ret = xenbus_transaction_start(&xbt);
if (ret < 0) {
xenbus_dev_fatal(front_info->xb_dev, ret,
"starting transaction");
return ret;
}
for (d = 0; d < cfg->num_pcm_instances; d++) {
struct xen_front_cfg_pcm_instance *pcm_instance;
int s, index;
pcm_instance = &cfg->pcm_instances[d];
for (s = 0; s < pcm_instance->num_streams_pb; s++) {
index = pcm_instance->streams_pb[s].index;
ret = evtchnl_publish(xbt,
&front_info->evt_pairs[index].req,
pcm_instance->streams_pb[s].xenstore_path,
XENSND_FIELD_RING_REF,
XENSND_FIELD_EVT_CHNL);
if (ret < 0)
goto fail;
ret = evtchnl_publish(xbt,
&front_info->evt_pairs[index].evt,
pcm_instance->streams_pb[s].xenstore_path,
XENSND_FIELD_EVT_RING_REF,
XENSND_FIELD_EVT_EVT_CHNL);
if (ret < 0)
goto fail;
}
for (s = 0; s < pcm_instance->num_streams_cap; s++) {
index = pcm_instance->streams_cap[s].index;
ret = evtchnl_publish(xbt,
&front_info->evt_pairs[index].req,
pcm_instance->streams_cap[s].xenstore_path,
XENSND_FIELD_RING_REF,
XENSND_FIELD_EVT_CHNL);
if (ret < 0)
goto fail;
ret = evtchnl_publish(xbt,
&front_info->evt_pairs[index].evt,
pcm_instance->streams_cap[s].xenstore_path,
XENSND_FIELD_EVT_RING_REF,
XENSND_FIELD_EVT_EVT_CHNL);
if (ret < 0)
goto fail;
}
}
ret = xenbus_transaction_end(xbt, 0);
if (ret < 0) {
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(front_info->xb_dev, ret,
"completing transaction");
goto fail_to_end;
}
return 0;
fail:
xenbus_transaction_end(xbt, 1);
fail_to_end:
xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
return ret;
}
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
bool is_connected)
{
enum xen_snd_front_evtchnl_state state;
if (is_connected)
state = EVTCHNL_STATE_CONNECTED;
else
state = EVTCHNL_STATE_DISCONNECTED;
mutex_lock(&evt_pair->req.ring_io_lock);
evt_pair->req.state = state;
mutex_unlock(&evt_pair->req.ring_io_lock);
mutex_lock(&evt_pair->evt.ring_io_lock);
evt_pair->evt.state = state;
mutex_unlock(&evt_pair->evt.ring_io_lock);
}
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
mutex_lock(&evt_pair->req.ring_io_lock);
evt_pair->req.evt_next_id = 0;
mutex_unlock(&evt_pair->req.ring_io_lock);
mutex_lock(&evt_pair->evt.ring_io_lock);
evt_pair->evt.evt_next_id = 0;
mutex_unlock(&evt_pair->evt.ring_io_lock);
}
| linux-master | sound/xen/xen_snd_front_evtchnl.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <[email protected]>
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/sndif.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
{
struct xensnd_req *req;
req = RING_GET_REQUEST(&evtchnl->u.req.ring,
evtchnl->u.req.ring.req_prod_pvt);
req->operation = operation;
req->id = evtchnl->evt_next_id++;
evtchnl->evt_id = req->id;
return req;
}
static int be_stream_do_io(struct xen_snd_front_evtchnl *evtchnl)
{
if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
return -EIO;
reinit_completion(&evtchnl->u.req.completion);
xen_snd_front_evtchnl_flush(evtchnl);
return 0;
}
static int be_stream_wait_io(struct xen_snd_front_evtchnl *evtchnl)
{
if (wait_for_completion_timeout(&evtchnl->u.req.completion,
msecs_to_jiffies(VSND_WAIT_BACK_MS)) <= 0)
return -ETIMEDOUT;
return evtchnl->u.req.resp_status;
}
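/*
 * Pattern note (illustrative addition, not part of the original file):
 * every request helper below follows the same three steps under
 * u.req.req_io_lock - prepare a request on the shared ring, kick the
 * backend with be_stream_do_io(), then block in be_stream_wait_io()
 * until the response IRQ completes u.req.completion or
 * VSND_WAIT_BACK_MS expires:
 *
 *	mutex_lock(&evtchnl->u.req.req_io_lock);
 *	mutex_lock(&evtchnl->ring_io_lock);
 *	req = be_stream_prepare_req(evtchnl, XENSND_OP_...);
 *	(fill req->op as needed)
 *	mutex_unlock(&evtchnl->ring_io_lock);
 *	ret = be_stream_do_io(evtchnl);
 *	if (ret == 0)
 *		ret = be_stream_wait_io(evtchnl);
 *	mutex_unlock(&evtchnl->u.req.req_io_lock);
 */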
int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_query_hw_param *hw_param_req,
struct xensnd_query_hw_param *hw_param_resp)
{
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
req->op.hw_param = *hw_param_req;
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
if (ret == 0)
*hw_param_resp = evtchnl->u.req.resp.hw_param;
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
struct xen_front_pgdir_shbuf *shbuf,
u8 format, unsigned int channels,
unsigned int rate, u32 buffer_sz,
u32 period_sz)
{
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
req->op.open.pcm_format = format;
req->op.open.pcm_channels = channels;
req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz;
req->op.open.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(shbuf);
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
{
__always_unused struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
unsigned long pos, unsigned long count)
{
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
req->op.rw.length = count;
req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
unsigned long pos, unsigned long count)
{
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
req->op.rw.length = count;
req->op.rw.offset = pos;
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
int type)
{
struct xensnd_req *req;
int ret;
mutex_lock(&evtchnl->u.req.req_io_lock);
mutex_lock(&evtchnl->ring_io_lock);
req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
req->op.trigger.type = type;
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
if (ret == 0)
ret = be_stream_wait_io(evtchnl);
mutex_unlock(&evtchnl->u.req.req_io_lock);
return ret;
}
static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
xen_snd_front_alsa_fini(front_info);
xen_snd_front_evtchnl_free_all(front_info);
}
static int sndback_initwait(struct xen_snd_front_info *front_info)
{
int num_streams;
int ret;
ret = xen_snd_front_cfg_card(front_info, &num_streams);
if (ret < 0)
return ret;
/* create event channels for all streams and publish */
ret = xen_snd_front_evtchnl_create_all(front_info, num_streams);
if (ret < 0)
return ret;
return xen_snd_front_evtchnl_publish_all(front_info);
}
static int sndback_connect(struct xen_snd_front_info *front_info)
{
return xen_snd_front_alsa_init(front_info);
}
static void sndback_disconnect(struct xen_snd_front_info *front_info)
{
xen_snd_drv_fini(front_info);
xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising);
}
static void sndback_changed(struct xenbus_device *xb_dev,
enum xenbus_state backend_state)
{
struct xen_snd_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
int ret;
dev_dbg(&xb_dev->dev, "Backend state is %s, front is %s\n",
xenbus_strstate(backend_state),
xenbus_strstate(xb_dev->state));
switch (backend_state) {
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateInitialised:
break;
case XenbusStateInitialising:
/* Recovering after an unexpected backend closure. */
sndback_disconnect(front_info);
break;
case XenbusStateInitWait:
/* Recovering after an unexpected backend closure. */
sndback_disconnect(front_info);
ret = sndback_initwait(front_info);
if (ret < 0)
xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
else
xenbus_switch_state(xb_dev, XenbusStateInitialised);
break;
case XenbusStateConnected:
if (xb_dev->state != XenbusStateInitialised)
break;
ret = sndback_connect(front_info);
if (ret < 0)
xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
else
xenbus_switch_state(xb_dev, XenbusStateConnected);
break;
case XenbusStateClosing:
/*
* In this state the backend starts freeing resources, so let
* it reach the closed state first; we can then remove ours as
* well.
*/
break;
case XenbusStateUnknown:
case XenbusStateClosed:
if (xb_dev->state == XenbusStateClosed)
break;
sndback_disconnect(front_info);
break;
}
}
static int xen_drv_probe(struct xenbus_device *xb_dev,
const struct xenbus_device_id *id)
{
struct xen_snd_front_info *front_info;
front_info = devm_kzalloc(&xb_dev->dev,
sizeof(*front_info), GFP_KERNEL);
if (!front_info)
return -ENOMEM;
front_info->xb_dev = xb_dev;
dev_set_drvdata(&xb_dev->dev, front_info);
return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
static void xen_drv_remove(struct xenbus_device *dev)
{
struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
int to = 100;
xenbus_switch_state(dev, XenbusStateClosing);
/*
* On driver removal the device is disconnected from XenBus, so no
* backend state change events arrive via the .otherend_changed
* callback. This prevents us from exiting gracefully, e.g.
* signaling the backend to free event channels, waiting for its
* state to change to XenbusStateClosed and cleaning up at our end.
* Normally, once the front driver is removed, the backend will
* finally go into the XenbusStateInitWait state.
*
* Workaround: read backend's state manually and wait with time-out.
*/
while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
XenbusStateUnknown) != XenbusStateInitWait) &&
--to)
msleep(10);
if (!to) {
unsigned int state;
state = xenbus_read_unsigned(front_info->xb_dev->otherend,
"state", XenbusStateUnknown);
pr_err("Backend state is %s while removing driver\n",
xenbus_strstate(state));
}
xen_snd_drv_fini(front_info);
xenbus_frontend_closed(dev);
}
static const struct xenbus_device_id xen_drv_ids[] = {
{ XENSND_DRIVER_NAME },
{ "" }
};
static struct xenbus_driver xen_driver = {
.ids = xen_drv_ids,
.probe = xen_drv_probe,
.remove = xen_drv_remove,
.otherend_changed = sndback_changed,
.not_essential = true,
};
static int __init xen_drv_init(void)
{
if (!xen_domain())
return -ENODEV;
if (!xen_has_pv_devices())
return -ENODEV;
	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE. */
if (XEN_PAGE_SIZE != PAGE_SIZE) {
pr_err(XENSND_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
XEN_PAGE_SIZE, PAGE_SIZE);
return -ENODEV;
}
pr_info("Initialising Xen " XENSND_DRIVER_NAME " frontend driver\n");
return xenbus_register_frontend(&xen_driver);
}
static void __exit xen_drv_fini(void)
{
pr_info("Unregistering Xen " XENSND_DRIVER_NAME " frontend driver\n");
xenbus_unregister_driver(&xen_driver);
}
module_init(xen_drv_init);
module_exit(xen_drv_fini);
MODULE_DESCRIPTION("Xen virtual sound device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
| linux-master | sound/xen/xen_snd_front.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <[email protected]>
*/
#include <xen/xenbus.h>
#include <xen/interface/io/sndif.h>
#include "xen_snd_front.h"
#include "xen_snd_front_cfg.h"
/* Maximum number of supported streams. */
#define VSND_MAX_STREAM 8
struct cfg_hw_sample_rate {
const char *name;
unsigned int mask;
unsigned int value;
};
static const struct cfg_hw_sample_rate CFG_HW_SUPPORTED_RATES[] = {
{ .name = "5512", .mask = SNDRV_PCM_RATE_5512, .value = 5512 },
{ .name = "8000", .mask = SNDRV_PCM_RATE_8000, .value = 8000 },
{ .name = "11025", .mask = SNDRV_PCM_RATE_11025, .value = 11025 },
{ .name = "16000", .mask = SNDRV_PCM_RATE_16000, .value = 16000 },
{ .name = "22050", .mask = SNDRV_PCM_RATE_22050, .value = 22050 },
{ .name = "32000", .mask = SNDRV_PCM_RATE_32000, .value = 32000 },
{ .name = "44100", .mask = SNDRV_PCM_RATE_44100, .value = 44100 },
{ .name = "48000", .mask = SNDRV_PCM_RATE_48000, .value = 48000 },
{ .name = "64000", .mask = SNDRV_PCM_RATE_64000, .value = 64000 },
{ .name = "96000", .mask = SNDRV_PCM_RATE_96000, .value = 96000 },
{ .name = "176400", .mask = SNDRV_PCM_RATE_176400, .value = 176400 },
{ .name = "192000", .mask = SNDRV_PCM_RATE_192000, .value = 192000 },
};
struct cfg_hw_sample_format {
const char *name;
u64 mask;
};
static const struct cfg_hw_sample_format CFG_HW_SUPPORTED_FORMATS[] = {
{
.name = XENSND_PCM_FORMAT_U8_STR,
.mask = SNDRV_PCM_FMTBIT_U8
},
{
.name = XENSND_PCM_FORMAT_S8_STR,
.mask = SNDRV_PCM_FMTBIT_S8
},
{
.name = XENSND_PCM_FORMAT_U16_LE_STR,
.mask = SNDRV_PCM_FMTBIT_U16_LE
},
{
.name = XENSND_PCM_FORMAT_U16_BE_STR,
.mask = SNDRV_PCM_FMTBIT_U16_BE
},
{
.name = XENSND_PCM_FORMAT_S16_LE_STR,
.mask = SNDRV_PCM_FMTBIT_S16_LE
},
{
.name = XENSND_PCM_FORMAT_S16_BE_STR,
.mask = SNDRV_PCM_FMTBIT_S16_BE
},
{
.name = XENSND_PCM_FORMAT_U24_LE_STR,
.mask = SNDRV_PCM_FMTBIT_U24_LE
},
{
.name = XENSND_PCM_FORMAT_U24_BE_STR,
.mask = SNDRV_PCM_FMTBIT_U24_BE
},
{
.name = XENSND_PCM_FORMAT_S24_LE_STR,
.mask = SNDRV_PCM_FMTBIT_S24_LE
},
{
.name = XENSND_PCM_FORMAT_S24_BE_STR,
.mask = SNDRV_PCM_FMTBIT_S24_BE
},
{
.name = XENSND_PCM_FORMAT_U32_LE_STR,
.mask = SNDRV_PCM_FMTBIT_U32_LE
},
{
.name = XENSND_PCM_FORMAT_U32_BE_STR,
.mask = SNDRV_PCM_FMTBIT_U32_BE
},
{
.name = XENSND_PCM_FORMAT_S32_LE_STR,
.mask = SNDRV_PCM_FMTBIT_S32_LE
},
{
.name = XENSND_PCM_FORMAT_S32_BE_STR,
.mask = SNDRV_PCM_FMTBIT_S32_BE
},
{
.name = XENSND_PCM_FORMAT_A_LAW_STR,
.mask = SNDRV_PCM_FMTBIT_A_LAW
},
{
.name = XENSND_PCM_FORMAT_MU_LAW_STR,
.mask = SNDRV_PCM_FMTBIT_MU_LAW
},
{
.name = XENSND_PCM_FORMAT_F32_LE_STR,
.mask = SNDRV_PCM_FMTBIT_FLOAT_LE
},
{
.name = XENSND_PCM_FORMAT_F32_BE_STR,
.mask = SNDRV_PCM_FMTBIT_FLOAT_BE
},
{
.name = XENSND_PCM_FORMAT_F64_LE_STR,
.mask = SNDRV_PCM_FMTBIT_FLOAT64_LE
},
{
.name = XENSND_PCM_FORMAT_F64_BE_STR,
.mask = SNDRV_PCM_FMTBIT_FLOAT64_BE
},
{
.name = XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE_STR,
.mask = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
},
{
.name = XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE_STR,
.mask = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
},
{
.name = XENSND_PCM_FORMAT_IMA_ADPCM_STR,
.mask = SNDRV_PCM_FMTBIT_IMA_ADPCM
},
{
.name = XENSND_PCM_FORMAT_MPEG_STR,
.mask = SNDRV_PCM_FMTBIT_MPEG
},
{
.name = XENSND_PCM_FORMAT_GSM_STR,
.mask = SNDRV_PCM_FMTBIT_GSM
},
};
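/*
 * Parse the list of sample rates advertised in XenStore and translate
 * it into an ALSA rate mask plus min/max bounds. The value is a
 * XENSND_LIST_SEPARATOR-delimited string of rate names, e.g. something
 * like "44100;48000" (the exact separator character is defined by the
 * sndif protocol header; the one shown here is only an illustration).
 * Unknown entries are silently ignored.
 */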
static void cfg_hw_rates(char *list, unsigned int len,
const char *path, struct snd_pcm_hardware *pcm_hw)
{
char *cur_rate;
unsigned int cur_mask;
unsigned int cur_value;
unsigned int rates;
unsigned int rate_min;
unsigned int rate_max;
int i;
rates = 0;
	rate_min = -1; /* unsigned, i.e. UINT_MAX */
rate_max = 0;
while ((cur_rate = strsep(&list, XENSND_LIST_SEPARATOR))) {
for (i = 0; i < ARRAY_SIZE(CFG_HW_SUPPORTED_RATES); i++)
if (!strncasecmp(cur_rate,
CFG_HW_SUPPORTED_RATES[i].name,
XENSND_SAMPLE_RATE_MAX_LEN)) {
cur_mask = CFG_HW_SUPPORTED_RATES[i].mask;
cur_value = CFG_HW_SUPPORTED_RATES[i].value;
rates |= cur_mask;
if (rate_min > cur_value)
rate_min = cur_value;
if (rate_max < cur_value)
rate_max = cur_value;
}
}
if (rates) {
pcm_hw->rates = rates;
pcm_hw->rate_min = rate_min;
pcm_hw->rate_max = rate_max;
}
}
static void cfg_formats(char *list, unsigned int len,
const char *path, struct snd_pcm_hardware *pcm_hw)
{
u64 formats;
char *cur_format;
int i;
formats = 0;
while ((cur_format = strsep(&list, XENSND_LIST_SEPARATOR))) {
for (i = 0; i < ARRAY_SIZE(CFG_HW_SUPPORTED_FORMATS); i++)
if (!strncasecmp(cur_format,
CFG_HW_SUPPORTED_FORMATS[i].name,
XENSND_SAMPLE_FORMAT_MAX_LEN))
formats |= CFG_HW_SUPPORTED_FORMATS[i].mask;
}
if (formats)
pcm_hw->formats = formats;
}
#define MAX_BUFFER_SIZE (64 * 1024)
#define MIN_PERIOD_SIZE 64
#define MAX_PERIOD_SIZE MAX_BUFFER_SIZE
#define USE_FORMATS (SNDRV_PCM_FMTBIT_U8 | \
SNDRV_PCM_FMTBIT_S16_LE)
#define USE_RATE (SNDRV_PCM_RATE_CONTINUOUS | \
SNDRV_PCM_RATE_8000_48000)
#define USE_RATE_MIN 5512
#define USE_RATE_MAX 48000
#define USE_CHANNELS_MIN 1
#define USE_CHANNELS_MAX 2
#define USE_PERIODS_MIN 2
#define USE_PERIODS_MAX (MAX_BUFFER_SIZE / MIN_PERIOD_SIZE)
static const struct snd_pcm_hardware SND_DRV_PCM_HW_DEFAULT = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = USE_FORMATS,
.rates = USE_RATE,
.rate_min = USE_RATE_MIN,
.rate_max = USE_RATE_MAX,
.channels_min = USE_CHANNELS_MIN,
.channels_max = USE_CHANNELS_MAX,
.buffer_bytes_max = MAX_BUFFER_SIZE,
.period_bytes_min = MIN_PERIOD_SIZE,
.period_bytes_max = MAX_PERIOD_SIZE,
.periods_min = USE_PERIODS_MIN,
.periods_max = USE_PERIODS_MAX,
.fifo_size = 0,
};
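/*
 * PCM HW configuration is resolved hierarchically: the card starts
 * from SND_DRV_PCM_HW_DEFAULT, each device inherits from the card and
 * each stream inherits from its device, with XenStore entries at any
 * level overriding the inherited values. cfg_read_pcm_hw() implements
 * one step of that chain.
 */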
static void cfg_read_pcm_hw(const char *path,
struct snd_pcm_hardware *parent_pcm_hw,
struct snd_pcm_hardware *pcm_hw)
{
char *list;
int val;
size_t buf_sz;
unsigned int len;
/* Inherit parent's PCM HW and read overrides from XenStore. */
if (parent_pcm_hw)
*pcm_hw = *parent_pcm_hw;
else
*pcm_hw = SND_DRV_PCM_HW_DEFAULT;
val = xenbus_read_unsigned(path, XENSND_FIELD_CHANNELS_MIN, 0);
if (val)
pcm_hw->channels_min = val;
val = xenbus_read_unsigned(path, XENSND_FIELD_CHANNELS_MAX, 0);
if (val)
pcm_hw->channels_max = val;
list = xenbus_read(XBT_NIL, path, XENSND_FIELD_SAMPLE_RATES, &len);
if (!IS_ERR(list)) {
cfg_hw_rates(list, len, path, pcm_hw);
kfree(list);
}
list = xenbus_read(XBT_NIL, path, XENSND_FIELD_SAMPLE_FORMATS, &len);
if (!IS_ERR(list)) {
cfg_formats(list, len, path, pcm_hw);
kfree(list);
}
buf_sz = xenbus_read_unsigned(path, XENSND_FIELD_BUFFER_SIZE, 0);
if (buf_sz)
pcm_hw->buffer_bytes_max = buf_sz;
/* Update configuration to match new values. */
if (pcm_hw->channels_min > pcm_hw->channels_max)
pcm_hw->channels_min = pcm_hw->channels_max;
if (pcm_hw->rate_min > pcm_hw->rate_max)
pcm_hw->rate_min = pcm_hw->rate_max;
pcm_hw->period_bytes_max = pcm_hw->buffer_bytes_max;
pcm_hw->periods_max = pcm_hw->period_bytes_max /
pcm_hw->period_bytes_min;
}
static int cfg_get_stream_type(const char *path, int index,
int *num_pb, int *num_cap)
{
char *str = NULL;
char *stream_path;
int ret;
*num_pb = 0;
*num_cap = 0;
stream_path = kasprintf(GFP_KERNEL, "%s/%d", path, index);
if (!stream_path) {
ret = -ENOMEM;
goto fail;
}
str = xenbus_read(XBT_NIL, stream_path, XENSND_FIELD_TYPE, NULL);
if (IS_ERR(str)) {
ret = PTR_ERR(str);
str = NULL;
goto fail;
}
if (!strncasecmp(str, XENSND_STREAM_TYPE_PLAYBACK,
sizeof(XENSND_STREAM_TYPE_PLAYBACK))) {
(*num_pb)++;
} else if (!strncasecmp(str, XENSND_STREAM_TYPE_CAPTURE,
sizeof(XENSND_STREAM_TYPE_CAPTURE))) {
(*num_cap)++;
} else {
ret = -EINVAL;
goto fail;
}
ret = 0;
fail:
kfree(stream_path);
kfree(str);
return ret;
}
static int cfg_stream(struct xen_snd_front_info *front_info,
struct xen_front_cfg_pcm_instance *pcm_instance,
const char *path, int index, int *cur_pb, int *cur_cap,
int *stream_cnt)
{
char *str = NULL;
char *stream_path;
struct xen_front_cfg_stream *stream;
int ret;
stream_path = devm_kasprintf(&front_info->xb_dev->dev,
GFP_KERNEL, "%s/%d", path, index);
if (!stream_path) {
ret = -ENOMEM;
goto fail;
}
str = xenbus_read(XBT_NIL, stream_path, XENSND_FIELD_TYPE, NULL);
if (IS_ERR(str)) {
ret = PTR_ERR(str);
str = NULL;
goto fail;
}
if (!strncasecmp(str, XENSND_STREAM_TYPE_PLAYBACK,
sizeof(XENSND_STREAM_TYPE_PLAYBACK))) {
stream = &pcm_instance->streams_pb[(*cur_pb)++];
} else if (!strncasecmp(str, XENSND_STREAM_TYPE_CAPTURE,
sizeof(XENSND_STREAM_TYPE_CAPTURE))) {
stream = &pcm_instance->streams_cap[(*cur_cap)++];
} else {
ret = -EINVAL;
goto fail;
}
/* Get next stream index. */
stream->index = (*stream_cnt)++;
stream->xenstore_path = stream_path;
	/*
	 * Check XenStore for a PCM HW configuration for this stream and
	 * apply it if present: all values are inherited from the device's
	 * PCM HW, but any of them may be overridden for the stream.
	 */
cfg_read_pcm_hw(stream->xenstore_path,
&pcm_instance->pcm_hw, &stream->pcm_hw);
ret = 0;
fail:
kfree(str);
return ret;
}
static int cfg_device(struct xen_snd_front_info *front_info,
struct xen_front_cfg_pcm_instance *pcm_instance,
struct snd_pcm_hardware *parent_pcm_hw,
const char *path, int node_index, int *stream_cnt)
{
char *str;
char *device_path;
int ret, i, num_streams;
int num_pb, num_cap;
int cur_pb, cur_cap;
char node[3];
device_path = kasprintf(GFP_KERNEL, "%s/%d", path, node_index);
if (!device_path)
return -ENOMEM;
str = xenbus_read(XBT_NIL, device_path, XENSND_FIELD_DEVICE_NAME, NULL);
if (!IS_ERR(str)) {
strscpy(pcm_instance->name, str, sizeof(pcm_instance->name));
kfree(str);
}
pcm_instance->device_id = node_index;
	/*
	 * Check XenStore for a PCM HW configuration for this device and
	 * apply it if present: all values are inherited from the card's
	 * PCM HW, but any of them may be overridden for the device.
	 */
cfg_read_pcm_hw(device_path, parent_pcm_hw, &pcm_instance->pcm_hw);
/* Find out how many streams were configured in Xen store. */
num_streams = 0;
do {
snprintf(node, sizeof(node), "%d", num_streams);
if (!xenbus_exists(XBT_NIL, device_path, node))
break;
num_streams++;
} while (num_streams < VSND_MAX_STREAM);
pcm_instance->num_streams_pb = 0;
pcm_instance->num_streams_cap = 0;
/* Get number of playback and capture streams. */
for (i = 0; i < num_streams; i++) {
ret = cfg_get_stream_type(device_path, i, &num_pb, &num_cap);
if (ret < 0)
goto fail;
pcm_instance->num_streams_pb += num_pb;
pcm_instance->num_streams_cap += num_cap;
}
if (pcm_instance->num_streams_pb) {
pcm_instance->streams_pb =
devm_kcalloc(&front_info->xb_dev->dev,
pcm_instance->num_streams_pb,
sizeof(struct xen_front_cfg_stream),
GFP_KERNEL);
if (!pcm_instance->streams_pb) {
ret = -ENOMEM;
goto fail;
}
}
if (pcm_instance->num_streams_cap) {
pcm_instance->streams_cap =
devm_kcalloc(&front_info->xb_dev->dev,
pcm_instance->num_streams_cap,
sizeof(struct xen_front_cfg_stream),
GFP_KERNEL);
if (!pcm_instance->streams_cap) {
ret = -ENOMEM;
goto fail;
}
}
cur_pb = 0;
cur_cap = 0;
for (i = 0; i < num_streams; i++) {
ret = cfg_stream(front_info, pcm_instance, device_path, i,
&cur_pb, &cur_cap, stream_cnt);
if (ret < 0)
goto fail;
}
ret = 0;
fail:
kfree(device_path);
return ret;
}
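/*
 * Top-level card configuration: enumerate consecutively numbered
 * device nodes ("0", "1", ...) under the frontend's XenStore directory
 * until a gap is found, configure each device and count the total
 * number of streams so the caller can size the event channel array.
 */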
int xen_snd_front_cfg_card(struct xen_snd_front_info *front_info,
int *stream_cnt)
{
struct xenbus_device *xb_dev = front_info->xb_dev;
struct xen_front_cfg_card *cfg = &front_info->cfg;
int ret, num_devices, i;
char node[3];
*stream_cnt = 0;
num_devices = 0;
do {
scnprintf(node, sizeof(node), "%d", num_devices);
if (!xenbus_exists(XBT_NIL, xb_dev->nodename, node))
break;
num_devices++;
} while (num_devices < SNDRV_PCM_DEVICES);
if (!num_devices) {
dev_warn(&xb_dev->dev,
"No devices configured for sound card at %s\n",
xb_dev->nodename);
return -ENODEV;
}
/* Start from default PCM HW configuration for the card. */
cfg_read_pcm_hw(xb_dev->nodename, NULL, &cfg->pcm_hw);
cfg->pcm_instances =
devm_kcalloc(&front_info->xb_dev->dev, num_devices,
sizeof(struct xen_front_cfg_pcm_instance),
GFP_KERNEL);
if (!cfg->pcm_instances)
return -ENOMEM;
for (i = 0; i < num_devices; i++) {
ret = cfg_device(front_info, &cfg->pcm_instances[i],
&cfg->pcm_hw, xb_dev->nodename, i, stream_cnt);
if (ret < 0)
return ret;
}
cfg->num_pcm_instances = num_devices;
return 0;
}
| linux-master | sound/xen/xen_snd_front_cfg.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <[email protected]>
*/
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
struct xen_snd_front_pcm_stream_info {
struct xen_snd_front_info *front_info;
struct xen_snd_front_evtchnl_pair *evt_pair;
/* This is the shared buffer with its backing storage. */
struct xen_front_pgdir_shbuf shbuf;
u8 *buffer;
size_t buffer_sz;
int num_pages;
struct page **pages;
int index;
bool is_open;
struct snd_pcm_hardware pcm_hw;
/* Number of processed frames as reported by the backend. */
snd_pcm_uframes_t be_cur_frame;
/* Current HW pointer to be reported via .period callback. */
atomic_t hw_ptr;
/* Modulo of the number of processed frames - for period detection. */
u32 out_frames;
};
struct xen_snd_front_pcm_instance_info {
struct xen_snd_front_card_info *card_info;
struct snd_pcm *pcm;
struct snd_pcm_hardware pcm_hw;
int num_pcm_streams_pb;
struct xen_snd_front_pcm_stream_info *streams_pb;
int num_pcm_streams_cap;
struct xen_snd_front_pcm_stream_info *streams_cap;
};
struct xen_snd_front_card_info {
struct xen_snd_front_info *front_info;
struct snd_card *card;
struct snd_pcm_hardware pcm_hw;
int num_pcm_instances;
struct xen_snd_front_pcm_instance_info *pcm_instances;
};
struct alsa_sndif_sample_format {
u8 sndif;
snd_pcm_format_t alsa;
};
struct alsa_sndif_hw_param {
u8 sndif;
snd_pcm_hw_param_t alsa;
};
static const struct alsa_sndif_sample_format ALSA_SNDIF_FORMATS[] = {
{
.sndif = XENSND_PCM_FORMAT_U8,
.alsa = SNDRV_PCM_FORMAT_U8
},
{
.sndif = XENSND_PCM_FORMAT_S8,
.alsa = SNDRV_PCM_FORMAT_S8
},
{
.sndif = XENSND_PCM_FORMAT_U16_LE,
.alsa = SNDRV_PCM_FORMAT_U16_LE
},
{
.sndif = XENSND_PCM_FORMAT_U16_BE,
.alsa = SNDRV_PCM_FORMAT_U16_BE
},
{
.sndif = XENSND_PCM_FORMAT_S16_LE,
.alsa = SNDRV_PCM_FORMAT_S16_LE
},
{
.sndif = XENSND_PCM_FORMAT_S16_BE,
.alsa = SNDRV_PCM_FORMAT_S16_BE
},
{
.sndif = XENSND_PCM_FORMAT_U24_LE,
.alsa = SNDRV_PCM_FORMAT_U24_LE
},
{
.sndif = XENSND_PCM_FORMAT_U24_BE,
.alsa = SNDRV_PCM_FORMAT_U24_BE
},
{
.sndif = XENSND_PCM_FORMAT_S24_LE,
.alsa = SNDRV_PCM_FORMAT_S24_LE
},
{
.sndif = XENSND_PCM_FORMAT_S24_BE,
.alsa = SNDRV_PCM_FORMAT_S24_BE
},
{
.sndif = XENSND_PCM_FORMAT_U32_LE,
.alsa = SNDRV_PCM_FORMAT_U32_LE
},
{
.sndif = XENSND_PCM_FORMAT_U32_BE,
.alsa = SNDRV_PCM_FORMAT_U32_BE
},
{
.sndif = XENSND_PCM_FORMAT_S32_LE,
.alsa = SNDRV_PCM_FORMAT_S32_LE
},
{
.sndif = XENSND_PCM_FORMAT_S32_BE,
.alsa = SNDRV_PCM_FORMAT_S32_BE
},
{
.sndif = XENSND_PCM_FORMAT_A_LAW,
.alsa = SNDRV_PCM_FORMAT_A_LAW
},
{
.sndif = XENSND_PCM_FORMAT_MU_LAW,
.alsa = SNDRV_PCM_FORMAT_MU_LAW
},
{
.sndif = XENSND_PCM_FORMAT_F32_LE,
.alsa = SNDRV_PCM_FORMAT_FLOAT_LE
},
{
.sndif = XENSND_PCM_FORMAT_F32_BE,
.alsa = SNDRV_PCM_FORMAT_FLOAT_BE
},
{
.sndif = XENSND_PCM_FORMAT_F64_LE,
.alsa = SNDRV_PCM_FORMAT_FLOAT64_LE
},
{
.sndif = XENSND_PCM_FORMAT_F64_BE,
.alsa = SNDRV_PCM_FORMAT_FLOAT64_BE
},
{
.sndif = XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE,
.alsa = SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
},
{
.sndif = XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE,
.alsa = SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
},
{
.sndif = XENSND_PCM_FORMAT_IMA_ADPCM,
.alsa = SNDRV_PCM_FORMAT_IMA_ADPCM
},
{
.sndif = XENSND_PCM_FORMAT_MPEG,
.alsa = SNDRV_PCM_FORMAT_MPEG
},
{
.sndif = XENSND_PCM_FORMAT_GSM,
.alsa = SNDRV_PCM_FORMAT_GSM
},
};
static int to_sndif_format(snd_pcm_format_t format)
{
int i;
for (i = 0; i < ARRAY_SIZE(ALSA_SNDIF_FORMATS); i++)
if (ALSA_SNDIF_FORMATS[i].alsa == format)
return ALSA_SNDIF_FORMATS[i].sndif;
return -EINVAL;
}
static u64 to_sndif_formats_mask(u64 alsa_formats)
{
u64 mask;
int i;
mask = 0;
for (i = 0; i < ARRAY_SIZE(ALSA_SNDIF_FORMATS); i++)
if (pcm_format_to_bits(ALSA_SNDIF_FORMATS[i].alsa) & alsa_formats)
mask |= BIT_ULL(ALSA_SNDIF_FORMATS[i].sndif);
return mask;
}
static u64 to_alsa_formats_mask(u64 sndif_formats)
{
u64 mask;
int i;
mask = 0;
for (i = 0; i < ARRAY_SIZE(ALSA_SNDIF_FORMATS); i++)
if (BIT_ULL(ALSA_SNDIF_FORMATS[i].sndif) & sndif_formats)
mask |= pcm_format_to_bits(ALSA_SNDIF_FORMATS[i].alsa);
return mask;
}
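/*
 * The two helpers above translate between the ALSA format bitmap and
 * the sndif protocol bitmap in both directions via ALSA_SNDIF_FORMATS.
 * For example, a backend response with the XENSND_PCM_FORMAT_S16_LE
 * bit set is turned into SNDRV_PCM_FMTBIT_S16_LE by
 * to_alsa_formats_mask(), and to_sndif_formats_mask() performs the
 * inverse mapping.
 */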
static void stream_clear(struct xen_snd_front_pcm_stream_info *stream)
{
stream->is_open = false;
stream->be_cur_frame = 0;
stream->out_frames = 0;
atomic_set(&stream->hw_ptr, 0);
xen_snd_front_evtchnl_pair_clear(stream->evt_pair);
memset(&stream->shbuf, 0, sizeof(stream->shbuf));
stream->buffer = NULL;
stream->buffer_sz = 0;
stream->pages = NULL;
stream->num_pages = 0;
}
static void stream_free(struct xen_snd_front_pcm_stream_info *stream)
{
xen_front_pgdir_shbuf_unmap(&stream->shbuf);
xen_front_pgdir_shbuf_free(&stream->shbuf);
if (stream->buffer)
free_pages_exact(stream->buffer, stream->buffer_sz);
kfree(stream->pages);
stream_clear(stream);
}
static struct xen_snd_front_pcm_stream_info *
stream_get(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_instance_info *pcm_instance =
snd_pcm_substream_chip(substream);
struct xen_snd_front_pcm_stream_info *stream;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
stream = &pcm_instance->streams_pb[substream->number];
else
stream = &pcm_instance->streams_cap[substream->number];
return stream;
}
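/*
 * A single rule callback is registered for every HW parameter (format,
 * rate, channels, period size and buffer size). Each invocation packs
 * the current parameter ranges into one request, asks the backend what
 * it actually supports and refines all ranges with the response, so
 * the negotiated configuration always stays within the backend's
 * capabilities.
 */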
static int alsa_hw_rule(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct xen_snd_front_pcm_stream_info *stream = rule->private;
struct device *dev = &stream->front_info->xb_dev->dev;
struct snd_mask *formats =
hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
struct snd_interval *rates =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *period =
hw_param_interval(params,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
struct snd_interval *buffer =
hw_param_interval(params,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE);
struct xensnd_query_hw_param req;
struct xensnd_query_hw_param resp;
struct snd_interval interval;
struct snd_mask mask;
u64 sndif_formats;
int changed, ret;
/* Collect all the values we need for the query. */
req.formats = to_sndif_formats_mask((u64)formats->bits[0] |
(u64)(formats->bits[1]) << 32);
req.rates.min = rates->min;
req.rates.max = rates->max;
req.channels.min = channels->min;
req.channels.max = channels->max;
req.buffer.min = buffer->min;
req.buffer.max = buffer->max;
req.period.min = period->min;
req.period.max = period->max;
ret = xen_snd_front_stream_query_hw_param(&stream->evt_pair->req,
&req, &resp);
if (ret < 0) {
/* Check if this is due to backend communication error. */
if (ret == -EIO || ret == -ETIMEDOUT)
dev_err(dev, "Failed to query ALSA HW parameters\n");
return ret;
}
/* Refine HW parameters after the query. */
changed = 0;
sndif_formats = to_alsa_formats_mask(resp.formats);
snd_mask_none(&mask);
mask.bits[0] = (u32)sndif_formats;
mask.bits[1] = (u32)(sndif_formats >> 32);
ret = snd_mask_refine(formats, &mask);
if (ret < 0)
return ret;
changed |= ret;
interval.openmin = 0;
interval.openmax = 0;
interval.integer = 1;
interval.min = resp.rates.min;
interval.max = resp.rates.max;
ret = snd_interval_refine(rates, &interval);
if (ret < 0)
return ret;
changed |= ret;
interval.min = resp.channels.min;
interval.max = resp.channels.max;
ret = snd_interval_refine(channels, &interval);
if (ret < 0)
return ret;
changed |= ret;
interval.min = resp.buffer.min;
interval.max = resp.buffer.max;
ret = snd_interval_refine(buffer, &interval);
if (ret < 0)
return ret;
changed |= ret;
interval.min = resp.period.min;
interval.max = resp.period.max;
ret = snd_interval_refine(period, &interval);
if (ret < 0)
return ret;
changed |= ret;
return changed;
}
static int alsa_open(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_instance_info *pcm_instance =
snd_pcm_substream_chip(substream);
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct xen_snd_front_info *front_info =
pcm_instance->card_info->front_info;
struct device *dev = &front_info->xb_dev->dev;
int ret;
/*
* Return our HW properties: override defaults with those configured
* via XenStore.
*/
runtime->hw = stream->pcm_hw;
runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_DOUBLE |
SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_NONINTERLEAVED |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_PAUSE);
runtime->hw.info |= SNDRV_PCM_INFO_INTERLEAVED;
stream->evt_pair = &front_info->evt_pairs[stream->index];
stream->front_info = front_info;
stream->evt_pair->evt.u.evt.substream = substream;
stream_clear(stream);
xen_snd_front_evtchnl_pair_set_connected(stream->evt_pair, true);
ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
alsa_hw_rule, stream,
SNDRV_PCM_HW_PARAM_FORMAT, -1);
if (ret) {
dev_err(dev, "Failed to add HW rule for SNDRV_PCM_HW_PARAM_FORMAT\n");
return ret;
}
ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
alsa_hw_rule, stream,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (ret) {
dev_err(dev, "Failed to add HW rule for SNDRV_PCM_HW_PARAM_RATE\n");
return ret;
}
ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
alsa_hw_rule, stream,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (ret) {
dev_err(dev, "Failed to add HW rule for SNDRV_PCM_HW_PARAM_CHANNELS\n");
return ret;
}
ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
alsa_hw_rule, stream,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
if (ret) {
dev_err(dev, "Failed to add HW rule for SNDRV_PCM_HW_PARAM_PERIOD_SIZE\n");
return ret;
}
ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
alsa_hw_rule, stream,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
if (ret) {
dev_err(dev, "Failed to add HW rule for SNDRV_PCM_HW_PARAM_BUFFER_SIZE\n");
return ret;
}
return 0;
}
static int alsa_close(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
xen_snd_front_evtchnl_pair_set_connected(stream->evt_pair, false);
return 0;
}
static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
size_t buffer_sz)
{
int i;
stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
if (!stream->buffer)
return -ENOMEM;
stream->buffer_sz = buffer_sz;
stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
GFP_KERNEL);
if (!stream->pages)
return -ENOMEM;
for (i = 0; i < stream->num_pages; i++)
stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE);
return 0;
}
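/*
 * The substream data lives in a frontend-allocated buffer whose pages
 * are shared with the backend through a xen-front-pgdir-shbuf page
 * directory: shbuf_setup_backstore() above builds the page list and
 * alsa_hw_params() below hands it to the backend via
 * xen_front_pgdir_shbuf_alloc()/_map().
 */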
static int alsa_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
struct xen_snd_front_info *front_info = stream->front_info;
struct xen_front_pgdir_shbuf_cfg buf_cfg;
int ret;
/*
* This callback may be called multiple times,
* so free the previously allocated shared buffer if any.
*/
stream_free(stream);
ret = shbuf_setup_backstore(stream, params_buffer_bytes(params));
if (ret < 0)
goto fail;
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
buf_cfg.pgdir = &stream->shbuf;
buf_cfg.num_pages = stream->num_pages;
buf_cfg.pages = stream->pages;
ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
if (ret < 0)
goto fail;
ret = xen_front_pgdir_shbuf_map(&stream->shbuf);
if (ret < 0)
goto fail;
return 0;
fail:
stream_free(stream);
dev_err(&front_info->xb_dev->dev,
"Failed to allocate buffers for stream with index %d\n",
stream->index);
return ret;
}
static int alsa_hw_free(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
ret = xen_snd_front_stream_close(&stream->evt_pair->req);
stream_free(stream);
return ret;
}
static int alsa_prepare(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (!stream->is_open) {
struct snd_pcm_runtime *runtime = substream->runtime;
u8 sndif_format;
int ret;
ret = to_sndif_format(runtime->format);
if (ret < 0) {
dev_err(&stream->front_info->xb_dev->dev,
"Unsupported sample format: %d\n",
runtime->format);
return ret;
}
sndif_format = ret;
ret = xen_snd_front_stream_prepare(&stream->evt_pair->req,
&stream->shbuf,
sndif_format,
runtime->channels,
runtime->rate,
snd_pcm_lib_buffer_bytes(substream),
snd_pcm_lib_period_bytes(substream));
if (ret < 0)
return ret;
stream->is_open = true;
}
return 0;
}
static int alsa_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int type;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
type = XENSND_OP_TRIGGER_START;
break;
case SNDRV_PCM_TRIGGER_RESUME:
type = XENSND_OP_TRIGGER_RESUME;
break;
case SNDRV_PCM_TRIGGER_STOP:
type = XENSND_OP_TRIGGER_STOP;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
type = XENSND_OP_TRIGGER_PAUSE;
break;
default:
return -EINVAL;
}
return xen_snd_front_stream_trigger(&stream->evt_pair->req, type);
}
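/*
 * Position updates from the backend arrive as an absolute byte count.
 * Convert it to frames, derive the delta since the previous event and
 * advance the locally cached hardware pointer modulo the ring buffer
 * size. A worked example (numbers are illustrative only): with
 * buffer_size = 1024 frames, hw_ptr = 1000 and a delta of 48 frames,
 * the new pointer is (1000 + 48) % 1024 = 24.
 */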
void xen_snd_front_alsa_handle_cur_pos(struct xen_snd_front_evtchnl *evtchnl,
u64 pos_bytes)
{
struct snd_pcm_substream *substream = evtchnl->u.evt.substream;
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
snd_pcm_uframes_t delta, new_hw_ptr, cur_frame;
cur_frame = bytes_to_frames(substream->runtime, pos_bytes);
delta = cur_frame - stream->be_cur_frame;
stream->be_cur_frame = cur_frame;
new_hw_ptr = (snd_pcm_uframes_t)atomic_read(&stream->hw_ptr);
new_hw_ptr = (new_hw_ptr + delta) % substream->runtime->buffer_size;
atomic_set(&stream->hw_ptr, (int)new_hw_ptr);
stream->out_frames += delta;
if (stream->out_frames > substream->runtime->period_size) {
stream->out_frames %= substream->runtime->period_size;
snd_pcm_period_elapsed(substream);
}
}
static snd_pcm_uframes_t alsa_pointer(struct snd_pcm_substream *substream)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
return (snd_pcm_uframes_t)atomic_read(&stream->hw_ptr);
}
static int alsa_pb_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos, struct iov_iter *src,
unsigned long count)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
if (copy_from_iter(stream->buffer + pos, count, src) != count)
return -EFAULT;
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}
static int alsa_cap_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos, struct iov_iter *dst,
unsigned long count)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
if (ret < 0)
return ret;
if (copy_to_iter(stream->buffer + pos, count, dst) != count)
return -EFAULT;
return 0;
}
static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
unsigned long count)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
memset(stream->buffer + pos, 0, count);
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}
/*
 * FIXME: The mmapped data transfer is asynchronous and there is no
 *	  ack signal from user-space when it is done. This is the
 *	  reason it is not implemented in the PV driver, as we need
 *	  to know when the buffer can be transferred to the backend.
 */
static const struct snd_pcm_ops snd_drv_alsa_playback_ops = {
.open = alsa_open,
.close = alsa_close,
.hw_params = alsa_hw_params,
.hw_free = alsa_hw_free,
.prepare = alsa_prepare,
.trigger = alsa_trigger,
.pointer = alsa_pointer,
.copy = alsa_pb_copy,
.fill_silence = alsa_pb_fill_silence,
};
static const struct snd_pcm_ops snd_drv_alsa_capture_ops = {
.open = alsa_open,
.close = alsa_close,
.hw_params = alsa_hw_params,
.hw_free = alsa_hw_free,
.prepare = alsa_prepare,
.trigger = alsa_trigger,
.pointer = alsa_pointer,
.copy = alsa_cap_copy,
};
static int new_pcm_instance(struct xen_snd_front_card_info *card_info,
struct xen_front_cfg_pcm_instance *instance_cfg,
struct xen_snd_front_pcm_instance_info *pcm_instance_info)
{
struct snd_pcm *pcm;
int ret, i;
dev_dbg(&card_info->front_info->xb_dev->dev,
"New PCM device \"%s\" with id %d playback %d capture %d",
instance_cfg->name,
instance_cfg->device_id,
instance_cfg->num_streams_pb,
instance_cfg->num_streams_cap);
pcm_instance_info->card_info = card_info;
pcm_instance_info->pcm_hw = instance_cfg->pcm_hw;
if (instance_cfg->num_streams_pb) {
pcm_instance_info->streams_pb =
devm_kcalloc(&card_info->card->card_dev,
instance_cfg->num_streams_pb,
sizeof(struct xen_snd_front_pcm_stream_info),
GFP_KERNEL);
if (!pcm_instance_info->streams_pb)
return -ENOMEM;
}
if (instance_cfg->num_streams_cap) {
pcm_instance_info->streams_cap =
devm_kcalloc(&card_info->card->card_dev,
instance_cfg->num_streams_cap,
sizeof(struct xen_snd_front_pcm_stream_info),
GFP_KERNEL);
if (!pcm_instance_info->streams_cap)
return -ENOMEM;
}
pcm_instance_info->num_pcm_streams_pb =
instance_cfg->num_streams_pb;
pcm_instance_info->num_pcm_streams_cap =
instance_cfg->num_streams_cap;
for (i = 0; i < pcm_instance_info->num_pcm_streams_pb; i++) {
pcm_instance_info->streams_pb[i].pcm_hw =
instance_cfg->streams_pb[i].pcm_hw;
pcm_instance_info->streams_pb[i].index =
instance_cfg->streams_pb[i].index;
}
for (i = 0; i < pcm_instance_info->num_pcm_streams_cap; i++) {
pcm_instance_info->streams_cap[i].pcm_hw =
instance_cfg->streams_cap[i].pcm_hw;
pcm_instance_info->streams_cap[i].index =
instance_cfg->streams_cap[i].index;
}
ret = snd_pcm_new(card_info->card, instance_cfg->name,
instance_cfg->device_id,
instance_cfg->num_streams_pb,
instance_cfg->num_streams_cap,
&pcm);
if (ret < 0)
return ret;
pcm->private_data = pcm_instance_info;
pcm->info_flags = 0;
/* we want to handle all PCM operations in non-atomic context */
pcm->nonatomic = true;
strscpy(pcm->name, "Virtual card PCM", sizeof(pcm->name));
if (instance_cfg->num_streams_pb)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_drv_alsa_playback_ops);
if (instance_cfg->num_streams_cap)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_drv_alsa_capture_ops);
pcm_instance_info->pcm = pcm;
return 0;
}
int xen_snd_front_alsa_init(struct xen_snd_front_info *front_info)
{
struct device *dev = &front_info->xb_dev->dev;
struct xen_front_cfg_card *cfg = &front_info->cfg;
struct xen_snd_front_card_info *card_info;
struct snd_card *card;
int ret, i;
dev_dbg(dev, "Creating virtual sound card\n");
ret = snd_card_new(dev, 0, XENSND_DRIVER_NAME, THIS_MODULE,
sizeof(struct xen_snd_front_card_info), &card);
if (ret < 0)
return ret;
card_info = card->private_data;
card_info->front_info = front_info;
front_info->card_info = card_info;
card_info->card = card;
card_info->pcm_instances =
devm_kcalloc(dev, cfg->num_pcm_instances,
sizeof(struct xen_snd_front_pcm_instance_info),
GFP_KERNEL);
if (!card_info->pcm_instances) {
ret = -ENOMEM;
goto fail;
}
card_info->num_pcm_instances = cfg->num_pcm_instances;
card_info->pcm_hw = cfg->pcm_hw;
for (i = 0; i < cfg->num_pcm_instances; i++) {
ret = new_pcm_instance(card_info, &cfg->pcm_instances[i],
&card_info->pcm_instances[i]);
if (ret < 0)
goto fail;
}
strscpy(card->driver, XENSND_DRIVER_NAME, sizeof(card->driver));
strscpy(card->shortname, cfg->name_short, sizeof(card->shortname));
strscpy(card->longname, cfg->name_long, sizeof(card->longname));
ret = snd_card_register(card);
if (ret < 0)
goto fail;
return 0;
fail:
snd_card_free(card);
return ret;
}
void xen_snd_front_alsa_fini(struct xen_snd_front_info *front_info)
{
struct xen_snd_front_card_info *card_info;
struct snd_card *card;
card_info = front_info->card_info;
if (!card_info)
return;
card = card_info->card;
if (!card)
return;
dev_dbg(&front_info->xb_dev->dev, "Removing virtual sound card %d\n",
card->number);
snd_card_free(card);
/* Card_info will be freed when destroying front_info->xb_dev->dev. */
card_info->card = NULL;
}
| linux-master | sound/xen/xen_snd_front_alsa.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Hewlett-Packard Harmony audio driver
*
* This is a driver for the Harmony audio chipset found
* on the LASI ASIC of various early HP PA-RISC workstations.
*
* Copyright (C) 2004, Kyle McMartin <kyle@{debian.org,parisc-linux.org}>
*
* Based on the previous Harmony incarnations by,
* Copyright 2000 (c) Linuxcare Canada, Alex deVries
* Copyright 2000-2003 (c) Helge Deller
* Copyright 2001 (c) Matthieu Delahaye
* Copyright 2001 (c) Jean-Christophe Vaugeois
* Copyright 2003 (c) Laurent Canet
* Copyright 2004 (c) Stuart Brady
*
* Notes:
 * - Graveyard and silence buffers last for the lifetime of
 *   the driver. Playback and capture buffers are allocated
 *   per _open()/_close().
*
* TODO:
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/control.h>
#include <sound/rawmidi.h>
#include <sound/initval.h>
#include <sound/info.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include "harmony.h"
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for Harmony driver.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for Harmony driver.");
static const struct parisc_device_id snd_harmony_devtable[] __initconst = {
/* bushmaster / flounder */
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007A },
/* 712 / 715 */
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007B },
/* pace */
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007E },
/* outfield / coral II */
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007F },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, snd_harmony_devtable);
#define NAME "harmony"
#define PFX NAME ": "
static const unsigned int snd_harmony_rates[] = {
5512, 6615, 8000, 9600,
11025, 16000, 18900, 22050,
27428, 32000, 33075, 37800,
44100, 48000
};
static const unsigned int rate_bits[14] = {
HARMONY_SR_5KHZ, HARMONY_SR_6KHZ, HARMONY_SR_8KHZ,
HARMONY_SR_9KHZ, HARMONY_SR_11KHZ, HARMONY_SR_16KHZ,
HARMONY_SR_18KHZ, HARMONY_SR_22KHZ, HARMONY_SR_27KHZ,
HARMONY_SR_32KHZ, HARMONY_SR_33KHZ, HARMONY_SR_37KHZ,
HARMONY_SR_44KHZ, HARMONY_SR_48KHZ
};
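/*
 * snd_harmony_rates[] and rate_bits[] are parallel arrays: entry i of
 * the former is the rate in Hz, entry i of the latter its register
 * encoding, so they must be kept in the same order. The constraint
 * list below restricts runtime rates to exactly this set.
 */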
static const struct snd_pcm_hw_constraint_list hw_constraint_rates = {
.count = ARRAY_SIZE(snd_harmony_rates),
.list = snd_harmony_rates,
.mask = 0,
};
static inline unsigned long
harmony_read(struct snd_harmony *h, unsigned r)
{
return __raw_readl(h->iobase + r);
}
static inline void
harmony_write(struct snd_harmony *h, unsigned r, unsigned long v)
{
__raw_writel(v, h->iobase + r);
}
static inline void
harmony_wait_for_control(struct snd_harmony *h)
{
	while (harmony_read(h, HARMONY_CNTL) & HARMONY_CNTL_C)
		; /* busy-wait until the control-mode bit clears */
}
static inline void
harmony_reset(struct snd_harmony *h)
{
harmony_write(h, HARMONY_RESET, 1);
mdelay(50);
harmony_write(h, HARMONY_RESET, 0);
}
static void
harmony_disable_interrupts(struct snd_harmony *h)
{
u32 dstatus;
harmony_wait_for_control(h);
dstatus = harmony_read(h, HARMONY_DSTATUS);
dstatus &= ~HARMONY_DSTATUS_IE;
harmony_write(h, HARMONY_DSTATUS, dstatus);
}
static void
harmony_enable_interrupts(struct snd_harmony *h)
{
u32 dstatus;
harmony_wait_for_control(h);
dstatus = harmony_read(h, HARMONY_DSTATUS);
dstatus |= HARMONY_DSTATUS_IE;
harmony_write(h, HARMONY_DSTATUS, dstatus);
}
static void
harmony_mute(struct snd_harmony *h)
{
unsigned long flags;
spin_lock_irqsave(&h->mixer_lock, flags);
harmony_wait_for_control(h);
harmony_write(h, HARMONY_GAINCTL, HARMONY_GAIN_SILENCE);
spin_unlock_irqrestore(&h->mixer_lock, flags);
}
static void
harmony_unmute(struct snd_harmony *h)
{
unsigned long flags;
spin_lock_irqsave(&h->mixer_lock, flags);
harmony_wait_for_control(h);
harmony_write(h, HARMONY_GAINCTL, h->st.gain);
spin_unlock_irqrestore(&h->mixer_lock, flags);
}
static void
harmony_set_control(struct snd_harmony *h)
{
u32 ctrl;
unsigned long flags;
spin_lock_irqsave(&h->lock, flags);
ctrl = (HARMONY_CNTL_C |
(h->st.format << 6) |
(h->st.stereo << 5) |
(h->st.rate));
harmony_wait_for_control(h);
harmony_write(h, HARMONY_CNTL, ctrl);
spin_unlock_irqrestore(&h->lock, flags);
}
static irqreturn_t
snd_harmony_interrupt(int irq, void *dev)
{
u32 dstatus;
struct snd_harmony *h = dev;
spin_lock(&h->lock);
harmony_disable_interrupts(h);
harmony_wait_for_control(h);
dstatus = harmony_read(h, HARMONY_DSTATUS);
spin_unlock(&h->lock);
if (dstatus & HARMONY_DSTATUS_PN) {
if (h->psubs && h->st.playing) {
spin_lock(&h->lock);
h->pbuf.buf += h->pbuf.count; /* PAGE_SIZE */
h->pbuf.buf %= h->pbuf.size; /* MAX_BUFS*PAGE_SIZE */
harmony_write(h, HARMONY_PNXTADD,
h->pbuf.addr + h->pbuf.buf);
h->stats.play_intr++;
spin_unlock(&h->lock);
snd_pcm_period_elapsed(h->psubs);
} else {
spin_lock(&h->lock);
harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
h->stats.silence_intr++;
spin_unlock(&h->lock);
}
}
if (dstatus & HARMONY_DSTATUS_RN) {
if (h->csubs && h->st.capturing) {
spin_lock(&h->lock);
h->cbuf.buf += h->cbuf.count;
h->cbuf.buf %= h->cbuf.size;
harmony_write(h, HARMONY_RNXTADD,
h->cbuf.addr + h->cbuf.buf);
h->stats.rec_intr++;
spin_unlock(&h->lock);
snd_pcm_period_elapsed(h->csubs);
} else {
spin_lock(&h->lock);
harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
h->stats.graveyard_intr++;
spin_unlock(&h->lock);
}
}
spin_lock(&h->lock);
harmony_enable_interrupts(h);
spin_unlock(&h->lock);
return IRQ_HANDLED;
}
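/*
 * Map an ALSA rate to the corresponding Harmony sample-rate register
 * bits. Rates not found in the table fall back to 44.1 kHz; this is
 * safe because the rate constraint list already restricts runtime
 * rates to the supported set.
 */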
static unsigned int
snd_harmony_rate_bits(int rate)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(snd_harmony_rates); i++)
if (snd_harmony_rates[i] == rate)
return rate_bits[i];
return HARMONY_SR_44KHZ;
}
static const struct snd_pcm_hardware snd_harmony_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_JOINT_DUPLEX | SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BLOCK_TRANSFER),
.formats = (SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_MU_LAW |
SNDRV_PCM_FMTBIT_A_LAW),
.rates = (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.rate_min = 5512,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = MAX_BUF_SIZE,
.period_bytes_min = BUF_SIZE,
.period_bytes_max = BUF_SIZE,
.periods_min = 1,
.periods_max = MAX_BUFS,
.fifo_size = 0,
};
static const struct snd_pcm_hardware snd_harmony_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_JOINT_DUPLEX | SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BLOCK_TRANSFER),
.formats = (SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_MU_LAW |
SNDRV_PCM_FMTBIT_A_LAW),
.rates = (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.rate_min = 5512,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = MAX_BUF_SIZE,
.period_bytes_min = BUF_SIZE,
.period_bytes_max = BUF_SIZE,
.periods_min = 1,
.periods_max = MAX_BUFS,
.fifo_size = 0,
};
static int
snd_harmony_playback_trigger(struct snd_pcm_substream *ss, int cmd)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
if (h->st.capturing)
return -EBUSY;
spin_lock(&h->lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
h->st.playing = 1;
harmony_write(h, HARMONY_PNXTADD, h->pbuf.addr);
harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
harmony_unmute(h);
harmony_enable_interrupts(h);
break;
case SNDRV_PCM_TRIGGER_STOP:
h->st.playing = 0;
harmony_mute(h);
harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
harmony_disable_interrupts(h);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_SUSPEND:
default:
spin_unlock(&h->lock);
snd_BUG();
return -EINVAL;
}
spin_unlock(&h->lock);
return 0;
}
static int
snd_harmony_capture_trigger(struct snd_pcm_substream *ss, int cmd)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
if (h->st.playing)
return -EBUSY;
spin_lock(&h->lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
h->st.capturing = 1;
harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
harmony_write(h, HARMONY_RNXTADD, h->cbuf.addr);
harmony_unmute(h);
harmony_enable_interrupts(h);
break;
case SNDRV_PCM_TRIGGER_STOP:
h->st.capturing = 0;
harmony_mute(h);
harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
harmony_disable_interrupts(h);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_SUSPEND:
default:
spin_unlock(&h->lock);
snd_BUG();
return -EINVAL;
}
spin_unlock(&h->lock);
return 0;
}
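/*
 * Translate an ALSA sample format into the Harmony data format field
 * and, when the format changes (or on the initial forced call),
 * re-fill the silence buffer, since the byte pattern that represents
 * silence differs between the supported formats.
 */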
static int
snd_harmony_set_data_format(struct snd_harmony *h, int fmt, int force)
{
int o = h->st.format;
int n;
switch(fmt) {
case SNDRV_PCM_FORMAT_S16_BE:
n = HARMONY_DF_16BIT_LINEAR;
break;
case SNDRV_PCM_FORMAT_A_LAW:
n = HARMONY_DF_8BIT_ALAW;
break;
case SNDRV_PCM_FORMAT_MU_LAW:
n = HARMONY_DF_8BIT_ULAW;
break;
default:
n = HARMONY_DF_16BIT_LINEAR;
break;
}
if (force || o != n) {
snd_pcm_format_set_silence(fmt, h->sdma.area, SILENCE_BUFSZ /
(snd_pcm_format_physical_width(fmt)
/ 8));
}
return n;
}
static int
snd_harmony_playback_prepare(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
struct snd_pcm_runtime *rt = ss->runtime;
if (h->st.capturing)
return -EBUSY;
h->pbuf.size = snd_pcm_lib_buffer_bytes(ss);
h->pbuf.count = snd_pcm_lib_period_bytes(ss);
if (h->pbuf.buf >= h->pbuf.size)
h->pbuf.buf = 0;
h->st.playing = 0;
h->st.rate = snd_harmony_rate_bits(rt->rate);
h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
if (rt->channels == 2)
h->st.stereo = HARMONY_SS_STEREO;
else
h->st.stereo = HARMONY_SS_MONO;
harmony_set_control(h);
h->pbuf.addr = rt->dma_addr;
return 0;
}
static int
snd_harmony_capture_prepare(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
struct snd_pcm_runtime *rt = ss->runtime;
if (h->st.playing)
return -EBUSY;
h->cbuf.size = snd_pcm_lib_buffer_bytes(ss);
h->cbuf.count = snd_pcm_lib_period_bytes(ss);
if (h->cbuf.buf >= h->cbuf.size)
h->cbuf.buf = 0;
h->st.capturing = 0;
h->st.rate = snd_harmony_rate_bits(rt->rate);
h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
if (rt->channels == 2)
h->st.stereo = HARMONY_SS_STEREO;
else
h->st.stereo = HARMONY_SS_MONO;
harmony_set_control(h);
h->cbuf.addr = rt->dma_addr;
return 0;
}
static snd_pcm_uframes_t
snd_harmony_playback_pointer(struct snd_pcm_substream *ss)
{
struct snd_pcm_runtime *rt = ss->runtime;
struct snd_harmony *h = snd_pcm_substream_chip(ss);
unsigned long pcuradd;
unsigned long played;
if (!(h->st.playing) || (h->psubs == NULL))
return 0;
if ((h->pbuf.addr == 0) || (h->pbuf.size == 0))
return 0;
pcuradd = harmony_read(h, HARMONY_PCURADD);
played = pcuradd - h->pbuf.addr;
#ifdef HARMONY_DEBUG
printk(KERN_DEBUG PFX "playback_pointer is 0x%lx-0x%lx = %d bytes\n",
pcuradd, h->pbuf.addr, played);
#endif
if (pcuradd > h->pbuf.addr + h->pbuf.size) {
return 0;
}
return bytes_to_frames(rt, played);
}
static snd_pcm_uframes_t
snd_harmony_capture_pointer(struct snd_pcm_substream *ss)
{
struct snd_pcm_runtime *rt = ss->runtime;
struct snd_harmony *h = snd_pcm_substream_chip(ss);
unsigned long rcuradd;
unsigned long caught;
if (!(h->st.capturing) || (h->csubs == NULL))
return 0;
if ((h->cbuf.addr == 0) || (h->cbuf.size == 0))
return 0;
rcuradd = harmony_read(h, HARMONY_RCURADD);
caught = rcuradd - h->cbuf.addr;
#ifdef HARMONY_DEBUG
printk(KERN_DEBUG PFX "capture_pointer is 0x%lx-0x%lx = %d bytes\n",
rcuradd, h->cbuf.addr, caught);
#endif
if (rcuradd > h->cbuf.addr + h->cbuf.size) {
return 0;
}
return bytes_to_frames(rt, caught);
}
static int
snd_harmony_playback_open(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
struct snd_pcm_runtime *rt = ss->runtime;
int err;
h->psubs = ss;
rt->hw = snd_harmony_playback;
snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraint_rates);
err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
return 0;
}
static int
snd_harmony_capture_open(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
struct snd_pcm_runtime *rt = ss->runtime;
int err;
h->csubs = ss;
rt->hw = snd_harmony_capture;
snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraint_rates);
err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
return 0;
}
static int
snd_harmony_playback_close(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
h->psubs = NULL;
return 0;
}
static int
snd_harmony_capture_close(struct snd_pcm_substream *ss)
{
struct snd_harmony *h = snd_pcm_substream_chip(ss);
h->csubs = NULL;
return 0;
}
static const struct snd_pcm_ops snd_harmony_playback_ops = {
.open = snd_harmony_playback_open,
.close = snd_harmony_playback_close,
.prepare = snd_harmony_playback_prepare,
.trigger = snd_harmony_playback_trigger,
.pointer = snd_harmony_playback_pointer,
};
static const struct snd_pcm_ops snd_harmony_capture_ops = {
.open = snd_harmony_capture_open,
.close = snd_harmony_capture_close,
.prepare = snd_harmony_capture_prepare,
.trigger = snd_harmony_capture_trigger,
.pointer = snd_harmony_capture_pointer,
};
static int
snd_harmony_pcm_init(struct snd_harmony *h)
{
struct snd_pcm *pcm;
int err;
if (snd_BUG_ON(!h))
return -EINVAL;
harmony_disable_interrupts(h);
err = snd_pcm_new(h->card, "harmony", 0, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_harmony_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_harmony_capture_ops);
pcm->private_data = h;
pcm->info_flags = 0;
strcpy(pcm->name, "harmony");
h->pcm = pcm;
h->psubs = NULL;
h->csubs = NULL;
/* initialize graveyard buffer */
h->dma.type = SNDRV_DMA_TYPE_DEV;
h->dma.dev = &h->dev->dev;
err = snd_dma_alloc_pages(h->dma.type,
h->dma.dev,
BUF_SIZE*GRAVEYARD_BUFS,
&h->gdma);
if (err < 0) {
printk(KERN_ERR PFX "cannot allocate graveyard buffer!\n");
return err;
}
/* initialize silence buffers */
err = snd_dma_alloc_pages(h->dma.type,
h->dma.dev,
BUF_SIZE*SILENCE_BUFS,
&h->sdma);
if (err < 0) {
printk(KERN_ERR PFX "cannot allocate silence buffer!\n");
return err;
}
/* pre-allocate space for DMA */
snd_pcm_set_managed_buffer_all(pcm, h->dma.type, h->dma.dev,
MAX_BUF_SIZE, MAX_BUF_SIZE);
h->st.format = snd_harmony_set_data_format(h,
SNDRV_PCM_FORMAT_S16_BE, 1);
return 0;
}
static void
snd_harmony_set_new_gain(struct snd_harmony *h)
{
harmony_wait_for_control(h);
harmony_write(h, HARMONY_GAINCTL, h->st.gain);
}
static int
snd_harmony_mixercontrol_info(struct snd_kcontrol *kc,
struct snd_ctl_elem_info *uinfo)
{
int mask = (kc->private_value >> 16) & 0xff;
int left_shift = (kc->private_value) & 0xff;
int right_shift = (kc->private_value >> 8) & 0xff;
uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN :
SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = left_shift == right_shift ? 1 : 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = mask;
return 0;
}
static int
snd_harmony_volume_get(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_harmony *h = snd_kcontrol_chip(kc);
int shift_left = (kc->private_value) & 0xff;
int shift_right = (kc->private_value >> 8) & 0xff;
int mask = (kc->private_value >> 16) & 0xff;
int invert = (kc->private_value >> 24) & 0xff;
int left, right;
spin_lock_irq(&h->mixer_lock);
left = (h->st.gain >> shift_left) & mask;
right = (h->st.gain >> shift_right) & mask;
if (invert) {
left = mask - left;
right = mask - right;
}
ucontrol->value.integer.value[0] = left;
if (shift_left != shift_right)
ucontrol->value.integer.value[1] = right;
spin_unlock_irq(&h->mixer_lock);
return 0;
}
static int
snd_harmony_volume_put(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_harmony *h = snd_kcontrol_chip(kc);
int shift_left = (kc->private_value) & 0xff;
int shift_right = (kc->private_value >> 8) & 0xff;
int mask = (kc->private_value >> 16) & 0xff;
int invert = (kc->private_value >> 24) & 0xff;
int left, right;
int old_gain = h->st.gain;
spin_lock_irq(&h->mixer_lock);
left = ucontrol->value.integer.value[0] & mask;
if (invert)
left = mask - left;
h->st.gain &= ~( (mask << shift_left ) );
h->st.gain |= (left << shift_left);
if (shift_left != shift_right) {
right = ucontrol->value.integer.value[1] & mask;
if (invert)
right = mask - right;
h->st.gain &= ~( (mask << shift_right) );
h->st.gain |= (right << shift_right);
}
snd_harmony_set_new_gain(h);
spin_unlock_irq(&h->mixer_lock);
return h->st.gain != old_gain;
}
static int
snd_harmony_captureroute_info(struct snd_kcontrol *kc,
struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[2] = { "Line", "Mic" };
return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int
snd_harmony_captureroute_get(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_harmony *h = snd_kcontrol_chip(kc);
int value;
spin_lock_irq(&h->mixer_lock);
value = (h->st.gain >> HARMONY_GAIN_IS_SHIFT) & 1;
ucontrol->value.enumerated.item[0] = value;
spin_unlock_irq(&h->mixer_lock);
return 0;
}
static int
snd_harmony_captureroute_put(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_harmony *h = snd_kcontrol_chip(kc);
int value;
int old_gain = h->st.gain;
spin_lock_irq(&h->mixer_lock);
value = ucontrol->value.enumerated.item[0] & 1;
h->st.gain &= ~HARMONY_GAIN_IS_MASK;
h->st.gain |= value << HARMONY_GAIN_IS_SHIFT;
snd_harmony_set_new_gain(h);
spin_unlock_irq(&h->mixer_lock);
return h->st.gain != old_gain;
}
#define HARMONY_CONTROLS ARRAY_SIZE(snd_harmony_controls)
#define HARMONY_VOLUME(xname, left_shift, right_shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_harmony_mixercontrol_info, \
.get = snd_harmony_volume_get, .put = snd_harmony_volume_put, \
.private_value = ((left_shift) | ((right_shift) << 8) | \
((mask) << 16) | ((invert) << 24)) }
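/*
 * private_value packs the whole mixer control description into one
 * word: bits 0-7 hold the left gain shift, bits 8-15 the right gain
 * shift, bits 16-23 the field mask and bit 24 the invert flag. A mono
 * control simply uses the same shift for both channels, e.g.
 * (illustrative only) HARMONY_VOLUME("X", 4, 4, 1, 0) would describe a
 * 1-bit mono switch at bit 4 of the gain register.
 */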
static const struct snd_kcontrol_new snd_harmony_controls[] = {
HARMONY_VOLUME("Master Playback Volume", HARMONY_GAIN_LO_SHIFT,
HARMONY_GAIN_RO_SHIFT, HARMONY_GAIN_OUT, 1),
HARMONY_VOLUME("Capture Volume", HARMONY_GAIN_LI_SHIFT,
HARMONY_GAIN_RI_SHIFT, HARMONY_GAIN_IN, 0),
HARMONY_VOLUME("Monitor Volume", HARMONY_GAIN_MA_SHIFT,
HARMONY_GAIN_MA_SHIFT, HARMONY_GAIN_MA, 1),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input Route",
.info = snd_harmony_captureroute_info,
.get = snd_harmony_captureroute_get,
.put = snd_harmony_captureroute_put
},
HARMONY_VOLUME("Internal Speaker Switch", HARMONY_GAIN_SE_SHIFT,
HARMONY_GAIN_SE_SHIFT, 1, 0),
HARMONY_VOLUME("Line-Out Switch", HARMONY_GAIN_LE_SHIFT,
HARMONY_GAIN_LE_SHIFT, 1, 0),
HARMONY_VOLUME("Headphones Switch", HARMONY_GAIN_HE_SHIFT,
HARMONY_GAIN_HE_SHIFT, 1, 0),
};
static void
snd_harmony_mixer_reset(struct snd_harmony *h)
{
harmony_mute(h);
harmony_reset(h);
h->st.gain = HARMONY_GAIN_DEFAULT;
harmony_unmute(h);
}
static int
snd_harmony_mixer_init(struct snd_harmony *h)
{
struct snd_card *card;
int idx, err;
if (snd_BUG_ON(!h))
return -EINVAL;
card = h->card;
strcpy(card->mixername, "Harmony Gain control interface");
for (idx = 0; idx < HARMONY_CONTROLS; idx++) {
err = snd_ctl_add(card,
snd_ctl_new1(&snd_harmony_controls[idx], h));
if (err < 0)
return err;
}
snd_harmony_mixer_reset(h);
return 0;
}
static int
snd_harmony_free(struct snd_harmony *h)
{
if (h->gdma.addr)
snd_dma_free_pages(&h->gdma);
if (h->sdma.addr)
snd_dma_free_pages(&h->sdma);
if (h->irq >= 0)
free_irq(h->irq, h);
iounmap(h->iobase);
kfree(h);
return 0;
}
static int
snd_harmony_dev_free(struct snd_device *dev)
{
struct snd_harmony *h = dev->device_data;
return snd_harmony_free(h);
}
static int
snd_harmony_create(struct snd_card *card,
struct parisc_device *padev,
struct snd_harmony **rchip)
{
int err;
struct snd_harmony *h;
static const struct snd_device_ops ops = {
.dev_free = snd_harmony_dev_free,
};
*rchip = NULL;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (h == NULL)
return -ENOMEM;
h->hpa = padev->hpa.start;
h->card = card;
h->dev = padev;
h->irq = -1;
h->iobase = ioremap(padev->hpa.start, HARMONY_SIZE);
if (h->iobase == NULL) {
printk(KERN_ERR PFX "unable to remap hpa 0x%lx\n",
(unsigned long)padev->hpa.start);
err = -EBUSY;
goto free_and_ret;
}
err = request_irq(padev->irq, snd_harmony_interrupt, 0,
"harmony", h);
if (err) {
printk(KERN_ERR PFX "could not obtain interrupt %d",
padev->irq);
goto free_and_ret;
}
h->irq = padev->irq;
spin_lock_init(&h->mixer_lock);
spin_lock_init(&h->lock);
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, h, &ops);
if (err < 0)
goto free_and_ret;
*rchip = h;
return 0;
free_and_ret:
snd_harmony_free(h);
return err;
}
static int __init
snd_harmony_probe(struct parisc_device *padev)
{
int err;
struct snd_card *card;
struct snd_harmony *h;
err = snd_card_new(&padev->dev, index, id, THIS_MODULE, 0, &card);
if (err < 0)
return err;
err = snd_harmony_create(card, padev, &h);
if (err < 0)
goto free_and_ret;
err = snd_harmony_pcm_init(h);
if (err < 0)
goto free_and_ret;
err = snd_harmony_mixer_init(h);
if (err < 0)
goto free_and_ret;
strcpy(card->driver, "harmony");
strcpy(card->shortname, "Harmony");
sprintf(card->longname, "%s at 0x%lx, irq %i",
card->shortname, h->hpa, h->irq);
err = snd_card_register(card);
if (err < 0)
goto free_and_ret;
parisc_set_drvdata(padev, card);
return 0;
free_and_ret:
snd_card_free(card);
return err;
}
static void __exit
snd_harmony_remove(struct parisc_device *padev)
{
snd_card_free(parisc_get_drvdata(padev));
}
static struct parisc_driver snd_harmony_driver __refdata = {
.name = "harmony",
.id_table = snd_harmony_devtable,
.probe = snd_harmony_probe,
.remove = __exit_p(snd_harmony_remove),
};
static int __init
alsa_harmony_init(void)
{
return register_parisc_driver(&snd_harmony_driver);
}
static void __exit
alsa_harmony_fini(void)
{
unregister_parisc_driver(&snd_harmony_driver);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyle McMartin <[email protected]>");
MODULE_DESCRIPTION("Harmony sound driver");
module_init(alsa_harmony_init);
module_exit(alsa_harmony_fini);
| linux-master | sound/parisc/harmony.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* bt87x.c - Brooktree Bt878/Bt879 driver for ALSA
*
* Copyright (c) Clemens Ladisch <[email protected]>
*
* based on btaudio.c by Gerd Knorr <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/initval.h>
MODULE_AUTHOR("Clemens Ladisch <[email protected]>");
MODULE_DESCRIPTION("Brooktree Bt87x audio driver");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* Exclude the first card */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
static int digital_rate[SNDRV_CARDS]; /* digital input rate */
static bool load_all;	/* allow loading cards not on the allowlist */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Bt87x soundcard");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Bt87x soundcard");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Bt87x soundcard");
module_param_array(digital_rate, int, NULL, 0444);
MODULE_PARM_DESC(digital_rate, "Digital input rate for Bt87x soundcard");
module_param(load_all, bool, 0444);
MODULE_PARM_DESC(load_all, "Allow loading cards not on the allowlist");
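/*
 * Example (hypothetical values, for illustration only):
 *   modprobe snd-bt87x load_all=1 digital_rate=44100
 * binds the driver to cards missing from the allowlist and forces a
 * 44.1 kHz digital input rate for the first card.
 */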
/* register offsets */
#define REG_INT_STAT 0x100 /* interrupt status */
#define REG_INT_MASK 0x104 /* interrupt mask */
#define REG_GPIO_DMA_CTL 0x10c /* audio control */
#define REG_PACKET_LEN 0x110 /* audio packet lengths */
#define REG_RISC_STRT_ADD 0x114 /* RISC program start address */
#define REG_RISC_COUNT 0x120 /* RISC program counter */
/* interrupt bits */
#define INT_OFLOW (1 << 3) /* audio A/D overflow */
#define INT_RISCI (1 << 11) /* RISC instruction IRQ bit set */
#define INT_FBUS (1 << 12) /* FIFO overrun due to bus access latency */
#define INT_FTRGT (1 << 13) /* FIFO overrun due to target latency */
#define INT_FDSR (1 << 14) /* FIFO data stream resynchronization */
#define INT_PPERR (1 << 15) /* PCI parity error */
#define INT_RIPERR (1 << 16) /* RISC instruction parity error */
#define INT_PABORT (1 << 17) /* PCI master or target abort */
#define INT_OCERR (1 << 18) /* invalid opcode */
#define INT_SCERR (1 << 19) /* sync counter overflow */
#define INT_RISC_EN (1 << 27) /* DMA controller running */
#define INT_RISCS_SHIFT 28 /* RISC status bits */
/* audio control bits */
#define CTL_FIFO_ENABLE (1 << 0) /* enable audio data FIFO */
#define CTL_RISC_ENABLE (1 << 1) /* enable audio DMA controller */
#define CTL_PKTP_4 (0 << 2) /* packet mode FIFO trigger point - 4 DWORDs */
#define CTL_PKTP_8 (1 << 2) /* 8 DWORDs */
#define CTL_PKTP_16 (2 << 2) /* 16 DWORDs */
#define CTL_ACAP_EN (1 << 4) /* enable audio capture */
#define CTL_DA_APP (1 << 5) /* GPIO input */
#define CTL_DA_IOM_AFE (0 << 6) /* audio A/D input */
#define CTL_DA_IOM_DA (1 << 6) /* digital audio input */
#define CTL_DA_SDR_SHIFT 8 /* DDF first stage decimation rate */
#define CTL_DA_SDR_MASK (0xf << 8)
#define CTL_DA_LMT (1 << 12) /* limit audio data values */
#define CTL_DA_ES2 (1 << 13) /* enable DDF stage 2 */
#define CTL_DA_SBR (1 << 14) /* samples rounded to 8 bits */
#define CTL_DA_DPM (1 << 15) /* data packet mode */
#define CTL_DA_LRD_SHIFT 16 /* ALRCK delay */
#define CTL_DA_MLB (1 << 21) /* MSB/LSB format */
#define CTL_DA_LRI (1 << 22) /* left/right indication */
#define CTL_DA_SCE (1 << 23) /* sample clock edge */
#define CTL_A_SEL_STV (0 << 24) /* TV tuner audio input */
#define CTL_A_SEL_SFM (1 << 24) /* FM audio input */
#define CTL_A_SEL_SML (2 << 24) /* mic/line audio input */
#define CTL_A_SEL_SMXC (3 << 24) /* MUX bypass */
#define CTL_A_SEL_SHIFT 24
#define CTL_A_SEL_MASK (3 << 24)
#define CTL_A_PWRDN (1 << 26) /* analog audio power-down */
#define CTL_A_G2X (1 << 27) /* audio gain boost */
#define CTL_A_GAIN_SHIFT 28 /* audio input gain */
#define CTL_A_GAIN_MASK (0xf << 28)
/* RISC instruction opcodes */
#define RISC_WRITE (0x1 << 28) /* write FIFO data to memory at address */
#define RISC_WRITEC (0x5 << 28) /* write FIFO data to memory at current address */
#define RISC_SKIP (0x2 << 28) /* skip FIFO data */
#define RISC_JUMP (0x7 << 28) /* jump to address */
#define RISC_SYNC (0x8 << 28) /* synchronize with FIFO */
/* RISC instruction bits */
#define RISC_BYTES_ENABLE (0xf << 12) /* byte enable bits */
#define RISC_RESYNC ( 1 << 15) /* disable FDSR errors */
#define RISC_SET_STATUS_SHIFT 16 /* set status bits */
#define RISC_RESET_STATUS_SHIFT 20 /* clear status bits */
#define RISC_IRQ ( 1 << 24) /* interrupt */
#define RISC_EOL ( 1 << 26) /* end of line */
#define RISC_SOL ( 1 << 27) /* start of line */
/* SYNC status bits values */
#define RISC_SYNC_FM1 0x6
#define RISC_SYNC_VRO 0xc
#define ANALOG_CLOCK 1792000
#ifdef CONFIG_SND_BT87X_OVERCLOCK
#define CLOCK_DIV_MIN 1
#else
#define CLOCK_DIV_MIN 4
#endif
#define CLOCK_DIV_MAX 15
#define ERROR_INTERRUPTS (INT_FBUS | INT_FTRGT | INT_PPERR | \
INT_RIPERR | INT_PABORT | INT_OCERR)
#define MY_INTERRUPTS (INT_RISCI | ERROR_INTERRUPTS)
/* SYNC, one WRITE per line, one extra WRITE per page boundary, SYNC, JUMP */
#define MAX_RISC_SIZE ((1 + 255 + (PAGE_ALIGN(255 * 4092) / PAGE_SIZE - 1) + 1 + 1) * 8)
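/*
 * Worked example with 4 KiB pages: PAGE_ALIGN(255 * 4092) spans 255
 * pages, so the longest program is 1 SYNC + 255 line WRITEs + 254
 * page-crossing WRITEs + 1 SYNC + 1 JUMP = 512 instructions of 8 bytes
 * (two 32-bit words) each, i.e. exactly 4096 bytes.
 */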
/* Cards with configuration information */
enum snd_bt87x_boardid {
SND_BT87X_BOARD_UNKNOWN,
SND_BT87X_BOARD_GENERIC, /* both analog & digital interfaces, 32 kHz */
SND_BT87X_BOARD_ANALOG, /* board with no external A/D */
SND_BT87X_BOARD_OSPREY2x0,
SND_BT87X_BOARD_OSPREY440,
SND_BT87X_BOARD_AVPHONE98,
};
/* Card configuration */
struct snd_bt87x_board {
int dig_rate; /* Digital input sampling rate */
u32 digital_fmt; /* Register settings for digital input */
unsigned no_analog:1; /* No analog input */
unsigned no_digital:1; /* No digital input */
};
static const struct snd_bt87x_board snd_bt87x_boards[] = {
[SND_BT87X_BOARD_UNKNOWN] = {
.dig_rate = 32000, /* just a guess */
},
[SND_BT87X_BOARD_GENERIC] = {
.dig_rate = 32000,
},
[SND_BT87X_BOARD_ANALOG] = {
.no_digital = 1,
},
[SND_BT87X_BOARD_OSPREY2x0] = {
.dig_rate = 44100,
.digital_fmt = CTL_DA_LRI | (1 << CTL_DA_LRD_SHIFT),
},
[SND_BT87X_BOARD_OSPREY440] = {
.dig_rate = 32000,
.digital_fmt = CTL_DA_LRI | (1 << CTL_DA_LRD_SHIFT),
.no_analog = 1,
},
[SND_BT87X_BOARD_AVPHONE98] = {
.dig_rate = 48000,
},
};
struct snd_bt87x {
struct snd_card *card;
struct pci_dev *pci;
struct snd_bt87x_board board;
void __iomem *mmio;
int irq;
spinlock_t reg_lock;
unsigned long opened;
struct snd_pcm_substream *substream;
struct snd_dma_buffer dma_risc;
unsigned int line_bytes;
unsigned int lines;
u32 reg_control;
u32 interrupt_mask;
int current_line;
int pci_parity_errors;
};
enum { DEVICE_DIGITAL, DEVICE_ANALOG };
static inline u32 snd_bt87x_readl(struct snd_bt87x *chip, u32 reg)
{
return readl(chip->mmio + reg);
}
static inline void snd_bt87x_writel(struct snd_bt87x *chip, u32 reg, u32 value)
{
writel(value, chip->mmio + reg);
}
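/*
 * Build the DMA controller ("RISC") program for the capture ring: an
 * initial FM1 SYNC, then one WRITE per period, split at page boundaries
 * of the scatter-gather buffer; each period's first WRITE is tagged SOL
 * and stamps a block number into the status bits, the last raises an
 * IRQ, and a final VRO SYNC plus JUMP loops back to the start.
 */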
static int snd_bt87x_create_risc(struct snd_bt87x *chip, struct snd_pcm_substream *substream,
unsigned int periods, unsigned int period_bytes)
{
unsigned int i, offset;
__le32 *risc;
if (chip->dma_risc.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(MAX_RISC_SIZE), &chip->dma_risc) < 0)
return -ENOMEM;
}
risc = (__le32 *)chip->dma_risc.area;
offset = 0;
*risc++ = cpu_to_le32(RISC_SYNC | RISC_SYNC_FM1);
*risc++ = cpu_to_le32(0);
for (i = 0; i < periods; ++i) {
u32 rest;
rest = period_bytes;
do {
u32 cmd, len;
unsigned int addr;
len = PAGE_SIZE - (offset % PAGE_SIZE);
if (len > rest)
len = rest;
cmd = RISC_WRITE | len;
if (rest == period_bytes) {
u32 block = i * 16 / periods;
cmd |= RISC_SOL;
cmd |= block << RISC_SET_STATUS_SHIFT;
cmd |= (~block & 0xf) << RISC_RESET_STATUS_SHIFT;
}
if (len == rest)
cmd |= RISC_EOL | RISC_IRQ;
*risc++ = cpu_to_le32(cmd);
addr = snd_pcm_sgbuf_get_addr(substream, offset);
*risc++ = cpu_to_le32(addr);
offset += len;
rest -= len;
} while (rest > 0);
}
*risc++ = cpu_to_le32(RISC_SYNC | RISC_SYNC_VRO);
*risc++ = cpu_to_le32(0);
*risc++ = cpu_to_le32(RISC_JUMP);
*risc++ = cpu_to_le32(chip->dma_risc.addr);
chip->line_bytes = period_bytes;
chip->lines = periods;
return 0;
}
static void snd_bt87x_free_risc(struct snd_bt87x *chip)
{
if (chip->dma_risc.area) {
snd_dma_free_pages(&chip->dma_risc);
chip->dma_risc.area = NULL;
}
}
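/*
 * Report PCI errors signalled through the interrupt status register;
 * after more than 20 observed parity errors, PPERR and RIPERR are
 * masked so a noisy bus does not flood the log.
 */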
static void snd_bt87x_pci_error(struct snd_bt87x *chip, unsigned int status)
{
int pci_status = pci_status_get_and_clear_errors(chip->pci);
if (pci_status != PCI_STATUS_DETECTED_PARITY)
dev_err(chip->card->dev,
"Aieee - PCI error! status %#08x, PCI status %#04x\n",
status & ERROR_INTERRUPTS, pci_status);
else {
dev_err(chip->card->dev,
"Aieee - PCI parity error detected!\n");
/* error 'handling' similar to aic7xxx_pci.c: */
chip->pci_parity_errors++;
if (chip->pci_parity_errors > 20) {
dev_err(chip->card->dev,
"Too many PCI parity errors observed.\n");
dev_err(chip->card->dev,
"Some device on this bus is generating bad parity.\n");
dev_err(chip->card->dev,
"This is an error *observed by*, not *generated by*, this card.\n");
dev_err(chip->card->dev,
"PCI parity error checking has been disabled.\n");
chip->interrupt_mask &= ~(INT_PPERR | INT_RIPERR);
snd_bt87x_writel(chip, REG_INT_MASK, chip->interrupt_mask);
}
}
}
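/*
 * Interrupt handler: acknowledge the status bits we own, report errors,
 * and advance the current period. The block number stamped by the RISC
 * program (read via INT_RISCS_SHIFT) lets us resynchronize current_line
 * when interrupts were skipped.
 */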
static irqreturn_t snd_bt87x_interrupt(int irq, void *dev_id)
{
struct snd_bt87x *chip = dev_id;
unsigned int status, irq_status;
status = snd_bt87x_readl(chip, REG_INT_STAT);
irq_status = status & chip->interrupt_mask;
if (!irq_status)
return IRQ_NONE;
snd_bt87x_writel(chip, REG_INT_STAT, irq_status);
if (irq_status & ERROR_INTERRUPTS) {
if (irq_status & (INT_FBUS | INT_FTRGT))
dev_warn(chip->card->dev,
"FIFO overrun, status %#08x\n", status);
if (irq_status & INT_OCERR)
dev_err(chip->card->dev,
"internal RISC error, status %#08x\n", status);
if (irq_status & (INT_PPERR | INT_RIPERR | INT_PABORT))
snd_bt87x_pci_error(chip, irq_status);
}
if ((irq_status & INT_RISCI) && (chip->reg_control & CTL_ACAP_EN)) {
int current_block, irq_block;
/* assume that exactly one line has been recorded */
chip->current_line = (chip->current_line + 1) % chip->lines;
/* but check if some interrupts have been skipped */
current_block = chip->current_line * 16 / chip->lines;
irq_block = status >> INT_RISCS_SHIFT;
if (current_block != irq_block)
chip->current_line = DIV_ROUND_UP(irq_block * chip->lines,
16);
snd_pcm_period_elapsed(chip->substream);
}
return IRQ_HANDLED;
}
static const struct snd_pcm_hardware snd_bt87x_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BATCH,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = 0, /* set at runtime */
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 255 * 4092,
.period_bytes_min = 32,
.period_bytes_max = 4092,
.periods_min = 2,
.periods_max = 255,
};
static const struct snd_pcm_hardware snd_bt87x_analog_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BATCH,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8,
.rates = SNDRV_PCM_RATE_KNOT,
.rate_min = ANALOG_CLOCK / CLOCK_DIV_MAX,
.rate_max = ANALOG_CLOCK / CLOCK_DIV_MIN,
.channels_min = 1,
.channels_max = 1,
.buffer_bytes_max = 255 * 4092,
.period_bytes_min = 32,
.period_bytes_max = 4092,
.periods_min = 2,
.periods_max = 255,
};
static int snd_bt87x_set_digital_hw(struct snd_bt87x *chip, struct snd_pcm_runtime *runtime)
{
chip->reg_control |= CTL_DA_IOM_DA | CTL_A_PWRDN;
runtime->hw = snd_bt87x_digital_hw;
runtime->hw.rates = snd_pcm_rate_to_rate_bit(chip->board.dig_rate);
runtime->hw.rate_min = chip->board.dig_rate;
runtime->hw.rate_max = chip->board.dig_rate;
return 0;
}
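/*
 * The analog sampling rate is ANALOG_CLOCK divided by an integer in
 * [CLOCK_DIV_MIN, CLOCK_DIV_MAX], so the valid rates are expressed as a
 * single snd_ratnum constraint instead of a fixed list.
 */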
static int snd_bt87x_set_analog_hw(struct snd_bt87x *chip, struct snd_pcm_runtime *runtime)
{
static const struct snd_ratnum analog_clock = {
.num = ANALOG_CLOCK,
.den_min = CLOCK_DIV_MIN,
.den_max = CLOCK_DIV_MAX,
.den_step = 1
};
static const struct snd_pcm_hw_constraint_ratnums constraint_rates = {
.nrats = 1,
.rats = &analog_clock
};
chip->reg_control &= ~(CTL_DA_IOM_DA | CTL_A_PWRDN);
runtime->hw = snd_bt87x_analog_hw;
return snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraint_rates);
}
static int snd_bt87x_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
if (test_and_set_bit(0, &chip->opened))
return -EBUSY;
if (substream->pcm->device == DEVICE_DIGITAL)
err = snd_bt87x_set_digital_hw(chip, runtime);
else
err = snd_bt87x_set_analog_hw(chip, runtime);
if (err < 0)
goto _error;
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
chip->substream = substream;
return 0;
_error:
clear_bit(0, &chip->opened);
smp_mb__after_atomic();
return err;
}
static int snd_bt87x_close(struct snd_pcm_substream *substream)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
spin_lock_irq(&chip->reg_lock);
chip->reg_control |= CTL_A_PWRDN;
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
spin_unlock_irq(&chip->reg_lock);
chip->substream = NULL;
clear_bit(0, &chip->opened);
smp_mb__after_atomic();
return 0;
}
static int snd_bt87x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
return snd_bt87x_create_risc(chip, substream,
params_periods(hw_params),
params_period_bytes(hw_params));
}
static int snd_bt87x_hw_free(struct snd_pcm_substream *substream)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
snd_bt87x_free_risc(chip);
return 0;
}
static int snd_bt87x_prepare(struct snd_pcm_substream *substream)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int decimation;
spin_lock_irq(&chip->reg_lock);
chip->reg_control &= ~(CTL_DA_SDR_MASK | CTL_DA_SBR);
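/*
 * decimation is essentially ANALOG_CLOCK / rate; the rate / 4 bias
 * absorbs rounding in rates that are not exact divisor results, e.g.
 * (1792000 + 112000) / 448000 = 4 for a 448 kHz rate.
 */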
decimation = (ANALOG_CLOCK + runtime->rate / 4) / runtime->rate;
chip->reg_control |= decimation << CTL_DA_SDR_SHIFT;
if (runtime->format == SNDRV_PCM_FORMAT_S8)
chip->reg_control |= CTL_DA_SBR;
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
static int snd_bt87x_start(struct snd_bt87x *chip)
{
spin_lock(&chip->reg_lock);
chip->current_line = 0;
chip->reg_control |= CTL_FIFO_ENABLE | CTL_RISC_ENABLE | CTL_ACAP_EN;
snd_bt87x_writel(chip, REG_RISC_STRT_ADD, chip->dma_risc.addr);
snd_bt87x_writel(chip, REG_PACKET_LEN,
chip->line_bytes | (chip->lines << 16));
snd_bt87x_writel(chip, REG_INT_MASK, chip->interrupt_mask);
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
spin_unlock(&chip->reg_lock);
return 0;
}
static int snd_bt87x_stop(struct snd_bt87x *chip)
{
spin_lock(&chip->reg_lock);
chip->reg_control &= ~(CTL_FIFO_ENABLE | CTL_RISC_ENABLE | CTL_ACAP_EN);
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
snd_bt87x_writel(chip, REG_INT_MASK, 0);
snd_bt87x_writel(chip, REG_INT_STAT, MY_INTERRUPTS);
spin_unlock(&chip->reg_lock);
return 0;
}
static int snd_bt87x_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
return snd_bt87x_start(chip);
case SNDRV_PCM_TRIGGER_STOP:
return snd_bt87x_stop(chip);
default:
return -EINVAL;
}
}
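/*
 * The hardware exposes no byte-accurate position; the pointer advances
 * in whole periods as tracked by the interrupt handler.
 */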
static snd_pcm_uframes_t snd_bt87x_pointer(struct snd_pcm_substream *substream)
{
struct snd_bt87x *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
return (snd_pcm_uframes_t)bytes_to_frames(runtime, chip->current_line * chip->line_bytes);
}
static const struct snd_pcm_ops snd_bt87x_pcm_ops = {
.open = snd_bt87x_pcm_open,
.close = snd_bt87x_close,
.hw_params = snd_bt87x_hw_params,
.hw_free = snd_bt87x_hw_free,
.prepare = snd_bt87x_prepare,
.trigger = snd_bt87x_trigger,
.pointer = snd_bt87x_pointer,
};
static int snd_bt87x_capture_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 1;
info->value.integer.min = 0;
info->value.integer.max = 15;
return 0;
}
static int snd_bt87x_capture_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
value->value.integer.value[0] = (chip->reg_control & CTL_A_GAIN_MASK) >> CTL_A_GAIN_SHIFT;
return 0;
}
static int snd_bt87x_capture_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
u32 old_control;
int changed;
spin_lock_irq(&chip->reg_lock);
old_control = chip->reg_control;
chip->reg_control = (chip->reg_control & ~CTL_A_GAIN_MASK)
| (value->value.integer.value[0] << CTL_A_GAIN_SHIFT);
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
changed = old_control != chip->reg_control;
spin_unlock_irq(&chip->reg_lock);
return changed;
}
static const struct snd_kcontrol_new snd_bt87x_capture_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Volume",
.info = snd_bt87x_capture_volume_info,
.get = snd_bt87x_capture_volume_get,
.put = snd_bt87x_capture_volume_put,
};
#define snd_bt87x_capture_boost_info snd_ctl_boolean_mono_info
static int snd_bt87x_capture_boost_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
value->value.integer.value[0] = !!(chip->reg_control & CTL_A_G2X);
return 0;
}
static int snd_bt87x_capture_boost_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
u32 old_control;
int changed;
spin_lock_irq(&chip->reg_lock);
old_control = chip->reg_control;
chip->reg_control = (chip->reg_control & ~CTL_A_G2X)
| (value->value.integer.value[0] ? CTL_A_G2X : 0);
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
changed = chip->reg_control != old_control;
spin_unlock_irq(&chip->reg_lock);
return changed;
}
static const struct snd_kcontrol_new snd_bt87x_capture_boost = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Boost",
.info = snd_bt87x_capture_boost_info,
.get = snd_bt87x_capture_boost_get,
.put = snd_bt87x_capture_boost_put,
};
static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"};
return snd_ctl_enum_info(info, 1, 3, texts);
}
static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
value->value.enumerated.item[0] = (chip->reg_control & CTL_A_SEL_MASK) >> CTL_A_SEL_SHIFT;
return 0;
}
static int snd_bt87x_capture_source_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol);
u32 old_control;
int changed;
spin_lock_irq(&chip->reg_lock);
old_control = chip->reg_control;
chip->reg_control = (chip->reg_control & ~CTL_A_SEL_MASK)
| (value->value.enumerated.item[0] << CTL_A_SEL_SHIFT);
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
changed = chip->reg_control != old_control;
spin_unlock_irq(&chip->reg_lock);
return changed;
}
static const struct snd_kcontrol_new snd_bt87x_capture_source = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.info = snd_bt87x_capture_source_info,
.get = snd_bt87x_capture_source_get,
.put = snd_bt87x_capture_source_put,
};
static void snd_bt87x_free(struct snd_card *card)
{
struct snd_bt87x *chip = card->private_data;
snd_bt87x_stop(chip);
}
static int snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
{
int err;
struct snd_pcm *pcm;
err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
if (err < 0)
return err;
pcm->private_data = chip;
strcpy(pcm->name, name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_bt87x_pcm_ops);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
&chip->pci->dev,
128 * 1024,
ALIGN(255 * 4092, 1024));
return 0;
}
static int snd_bt87x_create(struct snd_card *card,
struct pci_dev *pci)
{
struct snd_bt87x *chip = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
chip->card = card;
chip->pci = pci;
chip->irq = -1;
spin_lock_init(&chip->reg_lock);
err = pcim_iomap_regions(pci, 1 << 0, "Bt87x audio");
if (err < 0)
return err;
chip->mmio = pcim_iomap_table(pci)[0];
chip->reg_control = CTL_A_PWRDN | CTL_DA_ES2 |
CTL_PKTP_16 | (15 << CTL_DA_SDR_SHIFT);
chip->interrupt_mask = MY_INTERRUPTS;
snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control);
snd_bt87x_writel(chip, REG_INT_MASK, 0);
snd_bt87x_writel(chip, REG_INT_STAT, MY_INTERRUPTS);
err = devm_request_irq(&pci->dev, pci->irq, snd_bt87x_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip);
if (err < 0) {
dev_err(card->dev, "cannot grab irq %d\n", pci->irq);
return err;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_bt87x_free;
pci_set_master(pci);
return 0;
}
#define BT_DEVICE(chip, subvend, subdev, id) \
{ .vendor = PCI_VENDOR_ID_BROOKTREE, \
.device = chip, \
.subvendor = subvend, .subdevice = subdev, \
.driver_data = SND_BT87X_BOARD_ ## id }
/* driver_data is the card id for that device */
static const struct pci_device_id snd_bt87x_ids[] = {
/* Hauppauge WinTV series */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0x13eb, GENERIC),
/* Hauppauge WinTV series */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_879, 0x0070, 0x13eb, GENERIC),
/* Viewcast Osprey 200 */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0xff01, OSPREY2x0),
/* Viewcast Osprey 440 (rate is configurable via gpio) */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0xff07, OSPREY440),
/* ATI TV-Wonder */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1002, 0x0001, GENERIC),
/* Leadtek Winfast tv 2000xp delux */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC),
/* Pinnacle PCTV */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x11bd, 0x0012, GENERIC),
/* Voodoo TV 200 */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC),
/* Askey Computer Corp. MagicTView'99 */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x144f, 0x3000, GENERIC),
/* AVerMedia Studio No. 103, 203, ...? */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1461, 0x0003, AVPHONE98),
/* Prolink PixelView PV-M4900 */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1554, 0x4011, GENERIC),
/* Pinnacle Studio PCTV rave */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0xbd11, 0x1200, GENERIC),
{ }
};
MODULE_DEVICE_TABLE(pci, snd_bt87x_ids);
/* cards known not to have audio
* (DVB cards use the audio function to transfer MPEG data) */
static struct {
unsigned short subvendor, subdevice;
} denylist[] = {
{0x0071, 0x0101}, /* Nebula Electronics DigiTV */
{0x11bd, 0x001c}, /* Pinnacle PCTV Sat */
{0x11bd, 0x0026}, /* Pinnacle PCTV SAT CI */
{0x1461, 0x0761}, /* AVermedia AverTV DVB-T */
{0x1461, 0x0771}, /* AVermedia DVB-T 771 */
{0x1822, 0x0001}, /* Twinhan VisionPlus DVB-T */
{0x18ac, 0xd500}, /* DVICO FusionHDTV 5 Lite */
{0x18ac, 0xdb10}, /* DVICO FusionHDTV DVB-T Lite */
{0x18ac, 0xdb11}, /* Ultraview DVB-T Lite */
{0x270f, 0xfc00}, /* Chaintech Digitop DST-1000 DVB-S */
{0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
};
static struct pci_driver driver;
/* return the id of the card, or a negative value if it's on the denylist */
static int snd_bt87x_detect_card(struct pci_dev *pci)
{
int i;
const struct pci_device_id *supported;
supported = pci_match_id(snd_bt87x_ids, pci);
if (supported && supported->driver_data > 0)
return supported->driver_data;
for (i = 0; i < ARRAY_SIZE(denylist); ++i)
if (denylist[i].subvendor == pci->subsystem_vendor &&
denylist[i].subdevice == pci->subsystem_device) {
dev_dbg(&pci->dev,
"card %#04x-%#04x:%#04x has no audio\n",
pci->device, pci->subsystem_vendor, pci->subsystem_device);
return -EBUSY;
}
dev_info(&pci->dev, "unknown card %#04x-%#04x:%#04x\n",
pci->device, pci->subsystem_vendor, pci->subsystem_device);
dev_info(&pci->dev, "please mail id, board name, and, "
"if it works, the correct digital_rate option to "
"<[email protected]>\n");
return SND_BT87X_BOARD_UNKNOWN;
}
static int __snd_bt87x_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct snd_bt87x *chip;
int err;
enum snd_bt87x_boardid boardid;
if (!pci_id->driver_data) {
err = snd_bt87x_detect_card(pci);
if (err < 0)
return -ENODEV;
boardid = err;
} else
boardid = pci_id->driver_data;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
++dev;
return -ENOENT;
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
err = snd_bt87x_create(card, pci);
if (err < 0)
return err;
memcpy(&chip->board, &snd_bt87x_boards[boardid], sizeof(chip->board));
if (!chip->board.no_digital) {
if (digital_rate[dev] > 0)
chip->board.dig_rate = digital_rate[dev];
chip->reg_control |= chip->board.digital_fmt;
err = snd_bt87x_pcm(chip, DEVICE_DIGITAL, "Bt87x Digital");
if (err < 0)
return err;
}
if (!chip->board.no_analog) {
err = snd_bt87x_pcm(chip, DEVICE_ANALOG, "Bt87x Analog");
if (err < 0)
return err;
err = snd_ctl_add(card, snd_ctl_new1(
&snd_bt87x_capture_volume, chip));
if (err < 0)
return err;
err = snd_ctl_add(card, snd_ctl_new1(
&snd_bt87x_capture_boost, chip));
if (err < 0)
return err;
err = snd_ctl_add(card, snd_ctl_new1(
&snd_bt87x_capture_source, chip));
if (err < 0)
return err;
}
dev_info(card->dev, "bt87x%d: Using board %d, %sanalog, %sdigital "
"(rate %d Hz)\n", dev, boardid,
chip->board.no_analog ? "no " : "",
chip->board.no_digital ? "no " : "", chip->board.dig_rate);
strcpy(card->driver, "Bt87x");
sprintf(card->shortname, "Brooktree Bt%x", pci->device);
sprintf(card->longname, "%s at %#llx, irq %i",
card->shortname, (unsigned long long)pci_resource_start(pci, 0),
chip->irq);
strcpy(card->mixername, "Bt87x");
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
++dev;
return 0;
}
static int snd_bt87x_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
}
/* default entries for all Bt87x cards; this table is not exported */
/* driver_data is 0 (SND_BT87X_BOARD_UNKNOWN) to trigger detection */
static const struct pci_device_id snd_bt87x_default_ids[] = {
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, PCI_ANY_ID, PCI_ANY_ID, UNKNOWN),
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_879, PCI_ANY_ID, PCI_ANY_ID, UNKNOWN),
{ }
};
static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_bt87x_ids,
.probe = snd_bt87x_probe,
};
static int __init alsa_card_bt87x_init(void)
{
if (load_all)
driver.id_table = snd_bt87x_default_ids;
return pci_register_driver(&driver);
}
static void __exit alsa_card_bt87x_exit(void)
{
pci_unregister_driver(&driver);
}
module_init(alsa_card_bt87x_init)
module_exit(alsa_card_bt87x_exit)
| linux-master | sound/pci/bt87x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The driver for the ForteMedia FM801 based soundcards
* Copyright (c) by Jaroslav Kysela <[email protected]>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/tlv.h>
#include <sound/ac97_codec.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/initval.h>
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
#include <media/drv-intf/tea575x.h>
#endif
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("ForteMedia FM801");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
/*
* Enable TEA575x tuner
* 1 = MediaForte 256-PCS
* 2 = MediaForte 256-PCP
* 3 = MediaForte 64-PCR
* 16 = set up the tuner only (an additional bit), e.g. SF64-PCR FM card
* The high 16 bits are the video (radio) device number + 1
*/
static int tea575x_tuner[SNDRV_CARDS];
static int radio_nr[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1};
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the FM801 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the FM801 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
module_param_array(tea575x_tuner, int, NULL, 0444);
MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2 = SF256-PCP, 3 = SF64-PCR, 8 = disable, +16 = tuner-only).");
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_nr, "Radio device numbers");
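/*
 * Example (hypothetical values, for illustration only):
 *   modprobe snd-fm801 tea575x_tuner=19
 * selects the SF64-PCR access method (3) with the tuner-only bit (16),
 * the same combination the driver falls back to when no AC'97 codec is
 * found.
 */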
#define TUNER_DISABLED (1<<3)
#define TUNER_ONLY (1<<4)
#define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF)
/*
* Direct registers
*/
#define fm801_writew(chip,reg,value) outw((value), chip->port + FM801_##reg)
#define fm801_readw(chip,reg) inw(chip->port + FM801_##reg)
#define fm801_writel(chip,reg,value) outl((value), chip->port + FM801_##reg)
#define FM801_PCM_VOL 0x00 /* PCM Output Volume */
#define FM801_FM_VOL 0x02 /* FM Output Volume */
#define FM801_I2S_VOL 0x04 /* I2S Volume */
#define FM801_REC_SRC 0x06 /* Record Source */
#define FM801_PLY_CTRL 0x08 /* Playback Control */
#define FM801_PLY_COUNT 0x0a /* Playback Count */
#define FM801_PLY_BUF1 0x0c /* Playback Buffer I */
#define FM801_PLY_BUF2 0x10 /* Playback Buffer II */
#define FM801_CAP_CTRL 0x14 /* Capture Control */
#define FM801_CAP_COUNT 0x16 /* Capture Count */
#define FM801_CAP_BUF1 0x18 /* Capture Buffer I */
#define FM801_CAP_BUF2 0x1c /* Capture Buffer II */
#define FM801_CODEC_CTRL 0x22 /* Codec Control */
#define FM801_I2S_MODE 0x24 /* I2S Mode Control */
#define FM801_VOLUME 0x26 /* Volume Up/Down/Mute Status */
#define FM801_I2C_CTRL 0x29 /* I2C Control */
#define FM801_AC97_CMD 0x2a /* AC'97 Command */
#define FM801_AC97_DATA 0x2c /* AC'97 Data */
#define FM801_MPU401_DATA 0x30 /* MPU401 Data */
#define FM801_MPU401_CMD 0x31 /* MPU401 Command */
#define FM801_GPIO_CTRL 0x52 /* General Purpose I/O Control */
#define FM801_GEN_CTRL 0x54 /* General Control */
#define FM801_IRQ_MASK 0x56 /* Interrupt Mask */
#define FM801_IRQ_STATUS 0x5a /* Interrupt Status */
#define FM801_OPL3_BANK0 0x68 /* OPL3 Status Read / Bank 0 Write */
#define FM801_OPL3_DATA0 0x69 /* OPL3 Data 0 Write */
#define FM801_OPL3_BANK1 0x6a /* OPL3 Bank 1 Write */
#define FM801_OPL3_DATA1 0x6b /* OPL3 Data 1 Write */
#define FM801_POWERDOWN 0x70 /* Blocks Power Down Control */
/* codec access */
#define FM801_AC97_READ (1<<7) /* read=1, write=0 */
#define FM801_AC97_VALID (1<<8) /* port valid=1 */
#define FM801_AC97_BUSY (1<<9) /* busy=1 */
#define FM801_AC97_ADDR_SHIFT 10 /* codec id (2bit) */
/* playback and record control register bits */
#define FM801_BUF1_LAST (1<<1)
#define FM801_BUF2_LAST (1<<2)
#define FM801_START (1<<5)
#define FM801_PAUSE (1<<6)
#define FM801_IMMED_STOP (1<<7)
#define FM801_RATE_SHIFT 8
#define FM801_RATE_MASK (15 << FM801_RATE_SHIFT)
#define FM801_CHANNELS_4 (1<<12) /* playback only */
#define FM801_CHANNELS_6 (2<<12) /* playback only */
#define FM801_CHANNELS_6MS (3<<12) /* playback only */
#define FM801_CHANNELS_MASK (3<<12)
#define FM801_16BIT (1<<14)
#define FM801_STEREO (1<<15)
/* IRQ status bits */
#define FM801_IRQ_PLAYBACK (1<<8)
#define FM801_IRQ_CAPTURE (1<<9)
#define FM801_IRQ_VOLUME (1<<14)
#define FM801_IRQ_MPU (1<<15)
/* GPIO control register */
#define FM801_GPIO_GP0 (1<<0) /* read/write */
#define FM801_GPIO_GP1 (1<<1)
#define FM801_GPIO_GP2 (1<<2)
#define FM801_GPIO_GP3 (1<<3)
#define FM801_GPIO_GP(x) (1<<(0+(x)))
#define FM801_GPIO_GD0 (1<<8) /* directions: 1 = input, 0 = output*/
#define FM801_GPIO_GD1 (1<<9)
#define FM801_GPIO_GD2 (1<<10)
#define FM801_GPIO_GD3 (1<<11)
#define FM801_GPIO_GD(x) (1<<(8+(x)))
#define FM801_GPIO_GS0 (1<<12) /* function select: */
#define FM801_GPIO_GS1 (1<<13) /* 1 = GPIO */
#define FM801_GPIO_GS2 (1<<14) /* 0 = other (S/PDIF, VOL) */
#define FM801_GPIO_GS3 (1<<15)
#define FM801_GPIO_GS(x) (1<<(12+(x)))
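/*
 * For example, FM801_GPIO_GS(2) | FM801_GPIO_GD(2) in GPIO_CTRL selects
 * the GPIO function for line 2 and configures it as an input.
 */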
/**
* struct fm801 - describes FM801 chip
* @dev: device for this chip
* @irq: irq number
* @port: I/O port number
* @multichannel: multichannel support
* @secondary: secondary codec
* @secondary_addr: address of the secondary codec
* @tea575x_tuner: tuner access method & flags
* @ply_ctrl: playback control
* @cap_ctrl: capture control
* @ply_buffer: playback buffer
* @ply_buf: playback buffer index
* @ply_count: playback buffer count
* @ply_size: playback buffer size
* @ply_pos: playback position
* @cap_buffer: capture buffer
* @cap_buf: capture buffer index
* @cap_count: capture buffer count
* @cap_size: capture buffer size
* @cap_pos: capture position
* @ac97_bus: ac97 bus handle
* @ac97: ac97 handle
* @ac97_sec: ac97 secondary handle
* @card: ALSA card
* @pcm: PCM devices
* @rmidi: rmidi device
* @playback_substream: substream for playback
* @capture_substream: substream for capture
* @p_dma_size: playback DMA size
* @c_dma_size: capture DMA size
* @reg_lock: lock
* @proc_entry: /proc entry
* @v4l2_dev: v4l2 device
* @tea: tea575x structure
* @saved_regs: context saved during suspend
*/
struct fm801 {
struct device *dev;
int irq;
unsigned long port;
unsigned int multichannel: 1,
secondary: 1;
unsigned char secondary_addr;
unsigned int tea575x_tuner;
unsigned short ply_ctrl;
unsigned short cap_ctrl;
unsigned long ply_buffer;
unsigned int ply_buf;
unsigned int ply_count;
unsigned int ply_size;
unsigned int ply_pos;
unsigned long cap_buffer;
unsigned int cap_buf;
unsigned int cap_count;
unsigned int cap_size;
unsigned int cap_pos;
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97;
struct snd_ac97 *ac97_sec;
struct snd_card *card;
struct snd_pcm *pcm;
struct snd_rawmidi *rmidi;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
unsigned int p_dma_size;
unsigned int c_dma_size;
spinlock_t reg_lock;
struct snd_info_entry *proc_entry;
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
struct v4l2_device v4l2_dev;
struct snd_tea575x tea;
#endif
#ifdef CONFIG_PM_SLEEP
u16 saved_regs[0x20];
#endif
};
/*
* IO accessors
*/
static inline void fm801_iowrite16(struct fm801 *chip, unsigned short offset, u16 value)
{
outw(value, chip->port + offset);
}
static inline u16 fm801_ioread16(struct fm801 *chip, unsigned short offset)
{
return inw(chip->port + offset);
}
static const struct pci_device_id snd_fm801_ids[] = {
{ 0x1319, 0x0801, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* FM801 */
{ 0x5213, 0x0510, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* Gallant Odyssey Sound 4 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_fm801_ids);
/*
* common I/O routines
*/
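/*
 * Poll helpers for the AC'97 command port: each iteration waits 10 us,
 * so e.g. 100 iterations bound the wait to roughly 1 ms.
 */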
static bool fm801_ac97_is_ready(struct fm801 *chip, unsigned int iterations)
{
unsigned int idx;
for (idx = 0; idx < iterations; idx++) {
if (!(fm801_readw(chip, AC97_CMD) & FM801_AC97_BUSY))
return true;
udelay(10);
}
return false;
}
static bool fm801_ac97_is_valid(struct fm801 *chip, unsigned int iterations)
{
unsigned int idx;
for (idx = 0; idx < iterations; idx++) {
if (fm801_readw(chip, AC97_CMD) & FM801_AC97_VALID)
return true;
udelay(10);
}
return false;
}
static int snd_fm801_update_bits(struct fm801 *chip, unsigned short reg,
unsigned short mask, unsigned short value)
{
int change;
unsigned long flags;
unsigned short old, new;
spin_lock_irqsave(&chip->reg_lock, flags);
old = fm801_ioread16(chip, reg);
new = (old & ~mask) | value;
change = old != new;
if (change)
fm801_iowrite16(chip, reg, new);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
static void snd_fm801_codec_write(struct snd_ac97 *ac97,
unsigned short reg,
unsigned short val)
{
struct fm801 *chip = ac97->private_data;
/*
* Wait until the codec interface is no longer busy.
*/
if (!fm801_ac97_is_ready(chip, 100)) {
dev_err(chip->card->dev, "AC'97 interface is busy (1)\n");
return;
}
/* write data and address */
fm801_writew(chip, AC97_DATA, val);
fm801_writew(chip, AC97_CMD, reg | (ac97->addr << FM801_AC97_ADDR_SHIFT));
/*
* Wait until the write command has completed.
*/
if (!fm801_ac97_is_ready(chip, 1000))
dev_err(chip->card->dev, "AC'97 interface #%d is busy (2)\n",
ac97->num);
}
static unsigned short snd_fm801_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
struct fm801 *chip = ac97->private_data;
/*
* Wait until the codec interface is no longer busy.
*/
if (!fm801_ac97_is_ready(chip, 100)) {
dev_err(chip->card->dev, "AC'97 interface is busy (1)\n");
return 0;
}
/* read command */
fm801_writew(chip, AC97_CMD,
reg | (ac97->addr << FM801_AC97_ADDR_SHIFT) | FM801_AC97_READ);
if (!fm801_ac97_is_ready(chip, 100)) {
dev_err(chip->card->dev, "AC'97 interface #%d is busy (2)\n",
ac97->num);
return 0;
}
if (!fm801_ac97_is_valid(chip, 1000)) {
dev_err(chip->card->dev,
"AC'97 interface #%d is not valid (2)\n", ac97->num);
return 0;
}
return fm801_readw(chip, AC97_DATA);
}
static const unsigned int rates[] = {
5500, 8000, 9600, 11025,
16000, 19200, 22050, 32000,
38400, 44100, 48000
};
static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
static const unsigned int channels[] = {
2, 4, 6
};
static const struct snd_pcm_hw_constraint_list hw_constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
};
/*
* Sample rate routines
*/
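/*
 * The hardware encodes the sample rate as an index into rates[], which
 * is written into the FM801_RATE_MASK field of the control registers.
 */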
static unsigned short snd_fm801_rate_bits(unsigned int rate)
{
unsigned int idx;
for (idx = 0; idx < ARRAY_SIZE(rates); idx++)
if (rates[idx] == rate)
return idx;
snd_BUG();
return ARRAY_SIZE(rates) - 1;
}
/*
* PCM part
*/
static int snd_fm801_playback_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
chip->ply_ctrl &= ~(FM801_BUF1_LAST |
FM801_BUF2_LAST |
FM801_PAUSE);
chip->ply_ctrl |= FM801_START |
FM801_IMMED_STOP;
break;
case SNDRV_PCM_TRIGGER_STOP:
chip->ply_ctrl &= ~(FM801_START | FM801_PAUSE);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
chip->ply_ctrl |= FM801_PAUSE;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
chip->ply_ctrl &= ~FM801_PAUSE;
break;
default:
spin_unlock(&chip->reg_lock);
snd_BUG();
return -EINVAL;
}
fm801_writew(chip, PLY_CTRL, chip->ply_ctrl);
spin_unlock(&chip->reg_lock);
return 0;
}
static int snd_fm801_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
chip->cap_ctrl &= ~(FM801_BUF1_LAST |
FM801_BUF2_LAST |
FM801_PAUSE);
chip->cap_ctrl |= FM801_START |
FM801_IMMED_STOP;
break;
case SNDRV_PCM_TRIGGER_STOP:
chip->cap_ctrl &= ~(FM801_START | FM801_PAUSE);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
chip->cap_ctrl |= FM801_PAUSE;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
chip->cap_ctrl &= ~FM801_PAUSE;
break;
default:
spin_unlock(&chip->reg_lock);
snd_BUG();
return -EINVAL;
}
fm801_writew(chip, CAP_CTRL, chip->cap_ctrl);
spin_unlock(&chip->reg_lock);
return 0;
}
static int snd_fm801_playback_prepare(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
chip->ply_size = snd_pcm_lib_buffer_bytes(substream);
chip->ply_count = snd_pcm_lib_period_bytes(substream);
spin_lock_irq(&chip->reg_lock);
chip->ply_ctrl &= ~(FM801_START | FM801_16BIT |
FM801_STEREO | FM801_RATE_MASK |
FM801_CHANNELS_MASK);
if (snd_pcm_format_width(runtime->format) == 16)
chip->ply_ctrl |= FM801_16BIT;
if (runtime->channels > 1) {
chip->ply_ctrl |= FM801_STEREO;
if (runtime->channels == 4)
chip->ply_ctrl |= FM801_CHANNELS_4;
else if (runtime->channels == 6)
chip->ply_ctrl |= FM801_CHANNELS_6;
}
chip->ply_ctrl |= snd_fm801_rate_bits(runtime->rate) << FM801_RATE_SHIFT;
chip->ply_buf = 0;
fm801_writew(chip, PLY_CTRL, chip->ply_ctrl);
fm801_writew(chip, PLY_COUNT, chip->ply_count - 1);
chip->ply_buffer = runtime->dma_addr;
chip->ply_pos = 0;
fm801_writel(chip, PLY_BUF1, chip->ply_buffer);
fm801_writel(chip, PLY_BUF2,
chip->ply_buffer + (chip->ply_count % chip->ply_size));
spin_unlock_irq(&chip->reg_lock);
return 0;
}
static int snd_fm801_capture_prepare(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
chip->cap_size = snd_pcm_lib_buffer_bytes(substream);
chip->cap_count = snd_pcm_lib_period_bytes(substream);
spin_lock_irq(&chip->reg_lock);
chip->cap_ctrl &= ~(FM801_START | FM801_16BIT |
FM801_STEREO | FM801_RATE_MASK);
if (snd_pcm_format_width(runtime->format) == 16)
chip->cap_ctrl |= FM801_16BIT;
if (runtime->channels > 1)
chip->cap_ctrl |= FM801_STEREO;
chip->cap_ctrl |= snd_fm801_rate_bits(runtime->rate) << FM801_RATE_SHIFT;
chip->cap_buf = 0;
fm801_writew(chip, CAP_CTRL, chip->cap_ctrl);
fm801_writew(chip, CAP_COUNT, chip->cap_count - 1);
chip->cap_buffer = runtime->dma_addr;
chip->cap_pos = 0;
fm801_writel(chip, CAP_BUF1, chip->cap_buffer);
fm801_writel(chip, CAP_BUF2,
chip->cap_buffer + (chip->cap_count % chip->cap_size));
spin_unlock_irq(&chip->reg_lock);
return 0;
}
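/*
 * The count registers count down from count - 1; if a period interrupt
 * is still pending, the software position lags by one period, so
 * compensate before converting bytes to frames.
 */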
static snd_pcm_uframes_t snd_fm801_playback_pointer(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(chip->ply_ctrl & FM801_START))
return 0;
spin_lock(&chip->reg_lock);
ptr = chip->ply_pos + (chip->ply_count - 1) - fm801_readw(chip, PLY_COUNT);
if (fm801_readw(chip, IRQ_STATUS) & FM801_IRQ_PLAYBACK) {
ptr += chip->ply_count;
ptr %= chip->ply_size;
}
spin_unlock(&chip->reg_lock);
return bytes_to_frames(substream->runtime, ptr);
}
static snd_pcm_uframes_t snd_fm801_capture_pointer(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(chip->cap_ctrl & FM801_START))
return 0;
spin_lock(&chip->reg_lock);
ptr = chip->cap_pos + (chip->cap_count - 1) - fm801_readw(chip, CAP_COUNT);
if (fm801_readw(chip, IRQ_STATUS) & FM801_IRQ_CAPTURE) {
ptr += chip->cap_count;
ptr %= chip->cap_size;
}
spin_unlock(&chip->reg_lock);
return bytes_to_frames(substream->runtime, ptr);
}
static irqreturn_t snd_fm801_interrupt(int irq, void *dev_id)
{
struct fm801 *chip = dev_id;
unsigned short status;
unsigned int tmp;
status = fm801_readw(chip, IRQ_STATUS);
status &= FM801_IRQ_PLAYBACK|FM801_IRQ_CAPTURE|FM801_IRQ_MPU|FM801_IRQ_VOLUME;
if (!status)
return IRQ_NONE;
/* ack first */
fm801_writew(chip, IRQ_STATUS, status);
if (chip->pcm && (status & FM801_IRQ_PLAYBACK) && chip->playback_substream) {
spin_lock(&chip->reg_lock);
chip->ply_buf++;
chip->ply_pos += chip->ply_count;
chip->ply_pos %= chip->ply_size;
tmp = chip->ply_pos + chip->ply_count;
tmp %= chip->ply_size;
if (chip->ply_buf & 1)
fm801_writel(chip, PLY_BUF1, chip->ply_buffer + tmp);
else
fm801_writel(chip, PLY_BUF2, chip->ply_buffer + tmp);
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(chip->playback_substream);
}
if (chip->pcm && (status & FM801_IRQ_CAPTURE) && chip->capture_substream) {
spin_lock(&chip->reg_lock);
chip->cap_buf++;
chip->cap_pos += chip->cap_count;
chip->cap_pos %= chip->cap_size;
tmp = chip->cap_pos + chip->cap_count;
tmp %= chip->cap_size;
if (chip->cap_buf & 1)
fm801_writel(chip, CAP_BUF1, chip->cap_buffer + tmp);
else
fm801_writel(chip, CAP_BUF2, chip->cap_buffer + tmp);
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(chip->capture_substream);
}
if (chip->rmidi && (status & FM801_IRQ_MPU))
snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);
if (status & FM801_IRQ_VOLUME) {
/* TODO */
}
return IRQ_HANDLED;
}
static const struct snd_pcm_hardware snd_fm801_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5500,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static const struct snd_pcm_hardware snd_fm801_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5500,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static int snd_fm801_playback_open(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
chip->playback_substream = substream;
runtime->hw = snd_fm801_playback;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (chip->multichannel) {
runtime->hw.channels_max = 6;
snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
&hw_constraints_channels);
}
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
return 0;
}
static int snd_fm801_capture_open(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
chip->capture_substream = substream;
runtime->hw = snd_fm801_capture;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
return 0;
}
static int snd_fm801_playback_close(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
chip->playback_substream = NULL;
return 0;
}
static int snd_fm801_capture_close(struct snd_pcm_substream *substream)
{
struct fm801 *chip = snd_pcm_substream_chip(substream);
chip->capture_substream = NULL;
return 0;
}
static const struct snd_pcm_ops snd_fm801_playback_ops = {
.open = snd_fm801_playback_open,
.close = snd_fm801_playback_close,
.prepare = snd_fm801_playback_prepare,
.trigger = snd_fm801_playback_trigger,
.pointer = snd_fm801_playback_pointer,
};
static const struct snd_pcm_ops snd_fm801_capture_ops = {
.open = snd_fm801_capture_open,
.close = snd_fm801_capture_close,
.prepare = snd_fm801_capture_prepare,
.trigger = snd_fm801_capture_trigger,
.pointer = snd_fm801_capture_pointer,
};
static int snd_fm801_pcm(struct fm801 *chip, int device)
{
struct pci_dev *pdev = to_pci_dev(chip->dev);
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(chip->card, "FM801", device, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_fm801_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_fm801_capture_ops);
pcm->private_data = chip;
pcm->info_flags = 0;
strcpy(pcm->name, "FM801");
chip->pcm = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, &pdev->dev,
chip->multichannel ? 128*1024 : 64*1024, 128*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps,
chip->multichannel ? 6 : 2, 0,
NULL);
}
/*
* TEA5757 radio
*/
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
/* GPIO to TEA575x maps */
struct snd_fm801_tea575x_gpio {
u8 data, clk, wren, most;
char *name;
};
static const struct snd_fm801_tea575x_gpio snd_fm801_tea575x_gpios[] = {
{ .data = 1, .clk = 3, .wren = 2, .most = 0, .name = "SF256-PCS" },
{ .data = 1, .clk = 0, .wren = 2, .most = 3, .name = "SF256-PCP" },
{ .data = 2, .clk = 0, .wren = 1, .most = 3, .name = "SF64-PCR" },
};
#define get_tea575x_gpio(chip) \
(&snd_fm801_tea575x_gpios[((chip)->tea575x_tuner & TUNER_TYPE_MASK) - 1])
static void snd_fm801_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
{
struct fm801 *chip = tea->private_data;
unsigned short reg = fm801_readw(chip, GPIO_CTRL);
struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip);
reg &= ~(FM801_GPIO_GP(gpio.data) |
FM801_GPIO_GP(gpio.clk) |
FM801_GPIO_GP(gpio.wren));
reg |= (pins & TEA575X_DATA) ? FM801_GPIO_GP(gpio.data) : 0;
reg |= (pins & TEA575X_CLK) ? FM801_GPIO_GP(gpio.clk) : 0;
/* WRITE_ENABLE is inverted */
reg |= (pins & TEA575X_WREN) ? 0 : FM801_GPIO_GP(gpio.wren);
fm801_writew(chip, GPIO_CTRL, reg);
}
static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea)
{
struct fm801 *chip = tea->private_data;
unsigned short reg = fm801_readw(chip, GPIO_CTRL);
struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip);
u8 ret;
ret = 0;
if (reg & FM801_GPIO_GP(gpio.data))
ret |= TEA575X_DATA;
if (reg & FM801_GPIO_GP(gpio.most))
ret |= TEA575X_MOST;
return ret;
}
static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{
struct fm801 *chip = tea->private_data;
unsigned short reg = fm801_readw(chip, GPIO_CTRL);
struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip);
/* use GPIO lines and set write enable bit */
reg |= FM801_GPIO_GS(gpio.data) |
FM801_GPIO_GS(gpio.wren) |
FM801_GPIO_GS(gpio.clk) |
FM801_GPIO_GS(gpio.most);
if (output) {
/* all lines are outputs; clear the data and clock lines */
reg &= ~(FM801_GPIO_GD(gpio.data) |
FM801_GPIO_GD(gpio.wren) |
FM801_GPIO_GD(gpio.clk) |
FM801_GPIO_GP(gpio.data) |
FM801_GPIO_GP(gpio.clk) |
FM801_GPIO_GP(gpio.wren));
} else {
/* use GPIO lines, set data direction to input */
reg |= FM801_GPIO_GD(gpio.data) |
FM801_GPIO_GD(gpio.most) |
FM801_GPIO_GP(gpio.data) |
FM801_GPIO_GP(gpio.most) |
FM801_GPIO_GP(gpio.wren);
/* all lines except data are outputs; clear the write-enable and clock lines */
reg &= ~(FM801_GPIO_GD(gpio.wren) |
FM801_GPIO_GD(gpio.clk) |
FM801_GPIO_GP(gpio.clk));
}
fm801_writew(chip, GPIO_CTRL, reg);
}
static const struct snd_tea575x_ops snd_fm801_tea_ops = {
.set_pins = snd_fm801_tea575x_set_pins,
.get_pins = snd_fm801_tea575x_get_pins,
.set_direction = snd_fm801_tea575x_set_direction,
};
#endif
/*
* Mixer routines
*/
#define FM801_SINGLE(xname, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_fm801_info_single, \
.get = snd_fm801_get_single, .put = snd_fm801_put_single, \
.private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }
static int snd_fm801_info_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
int mask = (kcontrol->private_value >> 16) & 0xff;
uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = mask;
return 0;
}
static int snd_fm801_get_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
long *value = ucontrol->value.integer.value;
value[0] = (fm801_ioread16(chip, reg) >> shift) & mask;
if (invert)
value[0] = mask - value[0];
return 0;
}
static int snd_fm801_put_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
unsigned short val;
val = (ucontrol->value.integer.value[0] & mask);
if (invert)
val = mask - val;
return snd_fm801_update_bits(chip, reg, mask << shift, val << shift);
}
#define FM801_DOUBLE(xname, reg, shift_left, shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_fm801_info_double, \
.get = snd_fm801_get_double, .put = snd_fm801_put_double, \
.private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24) }
#define FM801_DOUBLE_TLV(xname, reg, shift_left, shift_right, mask, invert, xtlv) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
.name = xname, .info = snd_fm801_info_double, \
.get = snd_fm801_get_double, .put = snd_fm801_put_double, \
.private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24), \
.tlv = { .p = (xtlv) } }
static int snd_fm801_info_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
int mask = (kcontrol->private_value >> 16) & 0xff;
uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = mask;
return 0;
}
static int snd_fm801_get_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
int shift_left = (kcontrol->private_value >> 8) & 0x0f;
int shift_right = (kcontrol->private_value >> 12) & 0x0f;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
long *value = ucontrol->value.integer.value;
spin_lock_irq(&chip->reg_lock);
value[0] = (fm801_ioread16(chip, reg) >> shift_left) & mask;
value[1] = (fm801_ioread16(chip, reg) >> shift_right) & mask;
spin_unlock_irq(&chip->reg_lock);
if (invert) {
value[0] = mask - value[0];
value[1] = mask - value[1];
}
return 0;
}
static int snd_fm801_put_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
int shift_left = (kcontrol->private_value >> 8) & 0x0f;
int shift_right = (kcontrol->private_value >> 12) & 0x0f;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
unsigned short val1, val2;
val1 = ucontrol->value.integer.value[0] & mask;
val2 = ucontrol->value.integer.value[1] & mask;
if (invert) {
val1 = mask - val1;
val2 = mask - val2;
}
return snd_fm801_update_bits(chip, reg,
(mask << shift_left) | (mask << shift_right),
(val1 << shift_left) | (val2 << shift_right));
}
static int snd_fm801_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[5] = {
"AC97 Primary", "FM", "I2S", "PCM", "AC97 Secondary"
};
return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int snd_fm801_get_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = fm801_readw(chip, REC_SRC) & 7;
if (val > 4)
val = 4;
ucontrol->value.enumerated.item[0] = val;
return 0;
}
static int snd_fm801_put_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ucontrol->value.enumerated.item[0];
if (val > 4)
return -EINVAL;
return snd_fm801_update_bits(chip, FM801_REC_SRC, 7, val);
}
static const DECLARE_TLV_DB_SCALE(db_scale_dsp, -3450, 150, 0);
#define FM801_CONTROLS ARRAY_SIZE(snd_fm801_controls)
static const struct snd_kcontrol_new snd_fm801_controls[] = {
FM801_DOUBLE_TLV("Wave Playback Volume", FM801_PCM_VOL, 0, 8, 31, 1,
db_scale_dsp),
FM801_SINGLE("Wave Playback Switch", FM801_PCM_VOL, 15, 1, 1),
FM801_DOUBLE_TLV("I2S Playback Volume", FM801_I2S_VOL, 0, 8, 31, 1,
db_scale_dsp),
FM801_SINGLE("I2S Playback Switch", FM801_I2S_VOL, 15, 1, 1),
FM801_DOUBLE_TLV("FM Playback Volume", FM801_FM_VOL, 0, 8, 31, 1,
db_scale_dsp),
FM801_SINGLE("FM Playback Switch", FM801_FM_VOL, 15, 1, 1),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Digital Capture Source",
.info = snd_fm801_info_mux,
.get = snd_fm801_get_mux,
.put = snd_fm801_put_mux,
}
};
#define FM801_CONTROLS_MULTI ARRAY_SIZE(snd_fm801_controls_multi)
static const struct snd_kcontrol_new snd_fm801_controls_multi[] = {
FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0),
FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0),
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), FM801_I2S_MODE, 8, 1, 0),
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",PLAYBACK,SWITCH), FM801_I2S_MODE, 9, 1, 0),
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",CAPTURE,SWITCH), FM801_I2S_MODE, 10, 1, 0),
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), FM801_GEN_CTRL, 2, 1, 0),
};
static int snd_fm801_mixer(struct fm801 *chip)
{
struct snd_ac97_template ac97;
unsigned int i;
int err;
static const struct snd_ac97_bus_ops ops = {
.write = snd_fm801_codec_write,
.read = snd_fm801_codec_read,
};
err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
if (err < 0)
return err;
if (chip->secondary) {
ac97.num = 1;
ac97.addr = chip->secondary_addr;
err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec);
if (err < 0)
return err;
}
for (i = 0; i < FM801_CONTROLS; i++) {
err = snd_ctl_add(chip->card,
snd_ctl_new1(&snd_fm801_controls[i], chip));
if (err < 0)
return err;
}
if (chip->multichannel) {
for (i = 0; i < FM801_CONTROLS_MULTI; i++) {
err = snd_ctl_add(chip->card,
snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
if (err < 0)
return err;
}
}
return 0;
}
/*
* initialization routines
*/
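/*
 * wait_for_codec() issues a read command and polls until the data port
 * reports valid with the interface idle, giving up after the given
 * number of jiffies.
 */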
static int wait_for_codec(struct fm801 *chip, unsigned int codec_id,
unsigned short reg, unsigned long waits)
{
unsigned long timeout = jiffies + waits;
fm801_writew(chip, AC97_CMD,
reg | (codec_id << FM801_AC97_ADDR_SHIFT) | FM801_AC97_READ);
udelay(5);
do {
if ((fm801_readw(chip, AC97_CMD) &
(FM801_AC97_VALID | FM801_AC97_BUSY)) == FM801_AC97_VALID)
return 0;
schedule_timeout_uninterruptible(1);
} while (time_after(timeout, jiffies));
return -EIO;
}
static int reset_codec(struct fm801 *chip)
{
/* codec cold reset + AC'97 warm reset */
fm801_writew(chip, CODEC_CTRL, (1 << 5) | (1 << 6));
fm801_readw(chip, CODEC_CTRL); /* flush posting data */
udelay(100);
fm801_writew(chip, CODEC_CTRL, 0);
return wait_for_codec(chip, 0, AC97_RESET, msecs_to_jiffies(750));
}
static void snd_fm801_chip_multichannel_init(struct fm801 *chip)
{
unsigned short cmdw;
if (chip->multichannel) {
if (chip->secondary_addr) {
wait_for_codec(chip, chip->secondary_addr,
AC97_VENDOR_ID1, msecs_to_jiffies(50));
} else {
/* my card has the secondary codec */
/* at address #3, so the loop is inverted */
int i;
for (i = 3; i > 0; i--) {
if (!wait_for_codec(chip, i, AC97_VENDOR_ID1,
msecs_to_jiffies(50))) {
cmdw = fm801_readw(chip, AC97_DATA);
if (cmdw != 0xffff && cmdw != 0) {
chip->secondary = 1;
chip->secondary_addr = i;
break;
}
}
}
}
/* the recovery phase; it seems that probing for a non-existent codec */
/* might cause timeout problems */
wait_for_codec(chip, 0, AC97_VENDOR_ID1, msecs_to_jiffies(750));
}
}
static void snd_fm801_chip_init(struct fm801 *chip)
{
unsigned short cmdw;
/* init volume */
fm801_writew(chip, PCM_VOL, 0x0808);
fm801_writew(chip, FM_VOL, 0x9f1f);
fm801_writew(chip, I2S_VOL, 0x8808);
/* I2S control - I2S mode */
fm801_writew(chip, I2S_MODE, 0x0003);
/* interrupt setup */
cmdw = fm801_readw(chip, IRQ_MASK);
if (chip->irq < 0)
cmdw |= 0x00c3; /* mask everything, no PCM nor MPU */
else
cmdw &= ~0x0083; /* unmask MPU, PLAYBACK & CAPTURE */
fm801_writew(chip, IRQ_MASK, cmdw);
/* interrupt clear */
fm801_writew(chip, IRQ_STATUS,
FM801_IRQ_PLAYBACK | FM801_IRQ_CAPTURE | FM801_IRQ_MPU);
}
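/*
 * Editor's decode of the masks above (inferred from the hex values, not
 * from a datasheet): 0x00c3 sets bits 0, 1, 6 and 7, masking everything
 * of interest, while clearing 0x0083 (bits 0, 1 and 7) unmasks the three
 * sources named in the inline comment and leaves only bit 6 masked.
 */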
static void snd_fm801_free(struct snd_card *card)
{
struct fm801 *chip = card->private_data;
unsigned short cmdw;
/* interrupt setup - mask everything */
cmdw = fm801_readw(chip, IRQ_MASK);
cmdw |= 0x00c3;
fm801_writew(chip, IRQ_MASK, cmdw);
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
snd_tea575x_exit(&chip->tea);
v4l2_device_unregister(&chip->v4l2_dev);
}
#endif
}
static int snd_fm801_create(struct snd_card *card,
struct pci_dev *pci,
int tea575x_tuner,
int radio_nr)
{
struct fm801 *chip = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&chip->reg_lock);
chip->card = card;
chip->dev = &pci->dev;
chip->irq = -1;
chip->tea575x_tuner = tea575x_tuner;
err = pci_request_regions(pci, "FM801");
if (err < 0)
return err;
chip->port = pci_resource_start(pci, 0);
if (pci->revision >= 0xb1) /* FM801-AU */
chip->multichannel = 1;
if (!(chip->tea575x_tuner & TUNER_ONLY)) {
if (reset_codec(chip) < 0) {
dev_info(chip->card->dev,
"Primary AC'97 codec not found, assume SF64-PCR (tuner-only)\n");
chip->tea575x_tuner = 3 | TUNER_ONLY;
} else {
snd_fm801_chip_multichannel_init(chip);
}
}
if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
pci_set_master(pci);
}
card->private_free = snd_fm801_free;
snd_fm801_chip_init(chip);
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
err = v4l2_device_register(&pci->dev, &chip->v4l2_dev);
if (err < 0)
return err;
chip->tea.v4l2_dev = &chip->v4l2_dev;
chip->tea.radio_nr = radio_nr;
chip->tea.private_data = chip;
chip->tea.ops = &snd_fm801_tea_ops;
sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
if ((chip->tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
(chip->tea575x_tuner & TUNER_TYPE_MASK) < 4) {
if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
dev_err(card->dev, "TEA575x radio not found\n");
return -ENODEV;
}
} else if ((chip->tea575x_tuner & TUNER_TYPE_MASK) == 0) {
unsigned int tuner_only = chip->tea575x_tuner & TUNER_ONLY;
/* autodetect tuner connection */
for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
chip->tea575x_tuner = tea575x_tuner;
if (!snd_tea575x_init(&chip->tea, THIS_MODULE)) {
dev_info(card->dev,
"detected TEA575x radio type %s\n",
get_tea575x_gpio(chip)->name);
break;
}
}
if (tea575x_tuner == 4) {
dev_err(card->dev, "TEA575x radio not found\n");
chip->tea575x_tuner = TUNER_DISABLED;
}
chip->tea575x_tuner |= tuner_only;
}
if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
strscpy(chip->tea.card, get_tea575x_gpio(chip)->name,
sizeof(chip->tea.card));
}
#endif
return 0;
}
static int __snd_card_fm801_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct fm801 *chip;
struct snd_opl3 *opl3;
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
err = snd_fm801_create(card, pci, tea575x_tuner[dev], radio_nr[dev]);
if (err < 0)
return err;
strcpy(card->driver, "FM801");
strcpy(card->shortname, "ForteMedia FM801-");
strcat(card->shortname, chip->multichannel ? "AU" : "AS");
sprintf(card->longname, "%s at 0x%lx, irq %i",
card->shortname, chip->port, chip->irq);
if (chip->tea575x_tuner & TUNER_ONLY)
goto __fm801_tuner_only;
err = snd_fm801_pcm(chip, 0);
if (err < 0)
return err;
err = snd_fm801_mixer(chip);
if (err < 0)
return err;
err = snd_mpu401_uart_new(card, 0, MPU401_HW_FM801,
chip->port + FM801_MPU401_DATA,
MPU401_INFO_INTEGRATED |
MPU401_INFO_IRQ_HOOK,
-1, &chip->rmidi);
if (err < 0)
return err;
err = snd_opl3_create(card, chip->port + FM801_OPL3_BANK0,
chip->port + FM801_OPL3_BANK1,
OPL3_HW_OPL3_FM801, 1, &opl3);
if (err < 0)
return err;
err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
if (err < 0)
return err;
__fm801_tuner_only:
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
dev++;
return 0;
}
static int snd_card_fm801_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
}
#ifdef CONFIG_PM_SLEEP
static const unsigned char saved_regs[] = {
FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
FM801_PLY_CTRL, FM801_PLY_COUNT, FM801_PLY_BUF1, FM801_PLY_BUF2,
FM801_CAP_CTRL, FM801_CAP_COUNT, FM801_CAP_BUF1, FM801_CAP_BUF2,
FM801_CODEC_CTRL, FM801_I2S_MODE, FM801_VOLUME, FM801_GEN_CTRL,
};
static int snd_fm801_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < ARRAY_SIZE(saved_regs); i++)
chip->saved_regs[i] = fm801_ioread16(chip, saved_regs[i]);
if (chip->tea575x_tuner & TUNER_ONLY) {
/* FIXME: tea575x suspend */
} else {
snd_ac97_suspend(chip->ac97);
snd_ac97_suspend(chip->ac97_sec);
}
return 0;
}
static int snd_fm801_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
if (chip->tea575x_tuner & TUNER_ONLY) {
snd_fm801_chip_init(chip);
} else {
reset_codec(chip);
snd_fm801_chip_multichannel_init(chip);
snd_fm801_chip_init(chip);
snd_ac97_resume(chip->ac97);
snd_ac97_resume(chip->ac97_sec);
}
for (i = 0; i < ARRAY_SIZE(saved_regs); i++)
fm801_iowrite16(chip, saved_regs[i], chip->saved_regs[i]);
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
if (!(chip->tea575x_tuner & TUNER_DISABLED))
snd_tea575x_set_freq(&chip->tea);
#endif
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_fm801_pm, snd_fm801_suspend, snd_fm801_resume);
#define SND_FM801_PM_OPS &snd_fm801_pm
#else
#define SND_FM801_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct pci_driver fm801_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_fm801_ids,
.probe = snd_card_fm801_probe,
.driver = {
.pm = SND_FM801_PM_OPS,
},
};
module_pci_driver(fm801_driver);
| linux-master | sound/pci/fm801.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Ensoniq ES1370/ES1371 AudioPCI soundcard
* Copyright (c) by Jaroslav Kysela <[email protected]>,
* Thomas Sailer <[email protected]>
*/
/* Power management code (CONFIG_PM)
 * for ens1371 only (FIXME)
 * derived from cs4281.c, atiixp.c and via82xx.c
 * using https://www.kernel.org/doc/html/latest/sound/kernel-api/writing-an-alsa-driver.html
 * by Kurt J. Bosch
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#ifdef CHIP1371
#include <sound/ac97_codec.h>
#else
#include <sound/ak4531_codec.h>
#endif
#include <sound/initval.h>
#include <sound/asoundef.h>
#ifndef CHIP1371
#undef CHIP1370
#define CHIP1370
#endif
#ifdef CHIP1370
#define DRIVER_NAME "ENS1370"
#define CHIP_NAME "ES1370" /* it could be "ENS1370", but keep the old name for compatibility... */
#else
#define DRIVER_NAME "ENS1371"
#define CHIP_NAME "ES1371"
#endif
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>, Thomas Sailer <[email protected]>");
MODULE_LICENSE("GPL");
#ifdef CHIP1370
MODULE_DESCRIPTION("Ensoniq AudioPCI ES1370");
#endif
#ifdef CHIP1371
MODULE_DESCRIPTION("Ensoniq/Creative AudioPCI ES1371+");
#endif
#if IS_REACHABLE(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK
#endif
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */
#ifdef SUPPORT_JOYSTICK
#ifdef CHIP1371
static int joystick_port[SNDRV_CARDS];
#else
static bool joystick[SNDRV_CARDS];
#endif
#endif
#ifdef CHIP1371
static int spdif[SNDRV_CARDS];
static int lineio[SNDRV_CARDS];
#endif
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Ensoniq AudioPCI soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Ensoniq AudioPCI soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Ensoniq AudioPCI soundcard.");
#ifdef SUPPORT_JOYSTICK
#ifdef CHIP1371
module_param_hw_array(joystick_port, int, ioport, NULL, 0444);
MODULE_PARM_DESC(joystick_port, "Joystick port address.");
#else
module_param_array(joystick, bool, NULL, 0444);
MODULE_PARM_DESC(joystick, "Enable joystick.");
#endif
#endif /* SUPPORT_JOYSTICK */
#ifdef CHIP1371
module_param_array(spdif, int, NULL, 0444);
MODULE_PARM_DESC(spdif, "S/PDIF output (-1 = none, 0 = auto, 1 = force).");
module_param_array(lineio, int, NULL, 0444);
MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force).");
#endif
/* ES1371 chip ID */
/* This is a little confusing because all ES1371-compatible chips have the
same DEVICE_ID; the only thing differentiating them is the REV_ID field.
This is only significant if you want to enable features on the later parts.
Yes, I know it's stupid; why didn't we use the sub IDs?
*/
#define ES1371REV_ES1373_A 0x04
#define ES1371REV_ES1373_B 0x06
#define ES1371REV_CT5880_A 0x07
#define CT5880REV_CT5880_C 0x02
#define CT5880REV_CT5880_D 0x03 /* ??? -jk */
#define CT5880REV_CT5880_E 0x04 /* mw */
#define ES1371REV_ES1371_B 0x09
#define EV1938REV_EV1938_A 0x00
#define ES1371REV_ES1373_8 0x08
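/*
 * Sketch (editor's illustration, not part of the driver): because every
 * compatible part shares one DEVICE_ID, a feature test has to key off
 * the device ID and the revision together, e.g.
 *
 *	if (pci->device == PCI_DEVICE_ID_ENSONIQ_ES1371 &&
 *	    pci->revision == ES1371REV_ES1373_8)
 *		enable_late_part_feature();	// hypothetical helper
 *
 * which is the pattern the es1371_quirk lookup further below implements.
 */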
/*
* Direct registers
*/
#define ES_REG(ensoniq, x) ((ensoniq)->port + ES_REG_##x)
#define ES_REG_CONTROL 0x00 /* R/W: Interrupt/Chip select control register */
#define ES_1370_ADC_STOP (1<<31) /* disable capture buffer transfers */
#define ES_1370_XCTL1 (1<<30) /* general purpose output bit */
#define ES_1373_BYPASS_P1 (1<<31) /* bypass SRC for PB1 */
#define ES_1373_BYPASS_P2 (1<<30) /* bypass SRC for PB2 */
#define ES_1373_BYPASS_R (1<<29) /* bypass SRC for REC */
#define ES_1373_TEST_BIT (1<<28) /* should be set to 0 for normal operation */
#define ES_1373_RECEN_B (1<<27) /* mix record with playback for I2S/SPDIF out */
#define ES_1373_SPDIF_THRU (1<<26) /* 0 = SPDIF thru mode, 1 = SPDIF == dig out */
#define ES_1371_JOY_ASEL(o) (((o)&0x03)<<24)/* joystick port mapping */
#define ES_1371_JOY_ASELM (0x03<<24) /* mask for above */
#define ES_1371_JOY_ASELI(i) (((i)>>24)&0x03)
#define ES_1371_GPIO_IN(i) (((i)>>20)&0x0f)/* GPIO in [3:0] pins - R/O */
#define ES_1370_PCLKDIVO(o) (((o)&0x1fff)<<16)/* clock divide ratio for DAC2 */
#define ES_1370_PCLKDIVM ((0x1fff)<<16) /* mask for above */
#define ES_1370_PCLKDIVI(i) (((i)>>16)&0x1fff)/* clock divide ratio for DAC2 */
#define ES_1371_GPIO_OUT(o) (((o)&0x0f)<<16)/* GPIO out [3:0] pins - W/R */
#define ES_1371_GPIO_OUTM (0x0f<<16) /* mask for above */
#define ES_MSFMTSEL (1<<15) /* MPEG serial data format; 0 = SONY, 1 = I2S */
#define ES_1370_M_SBB (1<<14) /* clock source for DAC - 0 = clock generator; 1 = MPEG clocks */
#define ES_1371_SYNC_RES (1<<14) /* Warm AC97 reset */
#define ES_1370_WTSRSEL(o) (((o)&0x03)<<12)/* fixed frequency clock for DAC1 */
#define ES_1370_WTSRSELM (0x03<<12) /* mask for above */
#define ES_1371_ADC_STOP (1<<13) /* disable CCB transfer capture information */
#define ES_1371_PWR_INTRM (1<<12) /* power level change interrupts enable */
#define ES_1370_DAC_SYNC (1<<11) /* DAC's are synchronous */
#define ES_1371_M_CB (1<<11) /* capture clock source; 0 = AC'97 ADC; 1 = I2S */
#define ES_CCB_INTRM (1<<10) /* CCB voice interrupts enable */
#define ES_1370_M_CB (1<<9) /* capture clock source; 0 = ADC; 1 = MPEG */
#define ES_1370_XCTL0 (1<<8) /* general purpose output bit */
#define ES_1371_PDLEV(o) (((o)&0x03)<<8) /* current power down level */
#define ES_1371_PDLEVM (0x03<<8) /* mask for above */
#define ES_BREQ (1<<7) /* memory bus request enable */
#define ES_DAC1_EN (1<<6) /* DAC1 playback channel enable */
#define ES_DAC2_EN (1<<5) /* DAC2 playback channel enable */
#define ES_ADC_EN (1<<4) /* ADC capture channel enable */
#define ES_UART_EN (1<<3) /* UART enable */
#define ES_JYSTK_EN (1<<2) /* Joystick module enable */
#define ES_1370_CDC_EN (1<<1) /* Codec interface enable */
#define ES_1371_XTALCKDIS (1<<1) /* Xtal clock disable */
#define ES_1370_SERR_DISABLE (1<<0) /* PCI serr signal disable */
#define ES_1371_PCICLKDIS (1<<0) /* PCI clock disable */
#define ES_REG_STATUS 0x04 /* R/O: Interrupt/Chip select status register */
#define ES_INTR (1<<31) /* Interrupt is pending */
#define ES_1371_ST_AC97_RST (1<<29) /* CT5880 AC'97 Reset bit */
#define ES_1373_REAR_BIT27 (1<<27) /* rear bits: 000 - front, 010 - mirror, 101 - separate */
#define ES_1373_REAR_BIT26 (1<<26)
#define ES_1373_REAR_BIT24 (1<<24)
#define ES_1373_GPIO_INT_EN(o)(((o)&0x0f)<<20)/* GPIO [3:0] pins - interrupt enable */
#define ES_1373_SPDIF_EN (1<<18) /* SPDIF enable */
#define ES_1373_SPDIF_TEST (1<<17) /* SPDIF test */
#define ES_1371_TEST (1<<16) /* test ASIC */
#define ES_1373_GPIO_INT(i) (((i)>>12)&0x0f)/* GPIO [3:0] pins - interrupt pending */
#define ES_1370_CSTAT (1<<10) /* CODEC is busy or register write in progress */
#define ES_1370_CBUSY (1<<9) /* CODEC is busy */
#define ES_1370_CWRIP (1<<8) /* CODEC register write in progress */
#define ES_1371_SYNC_ERR (1<<8) /* CODEC synchronization error occurred */
#define ES_1371_VC(i) (((i)>>6)&0x03) /* voice code from CCB module */
#define ES_1370_VC(i) (((i)>>5)&0x03) /* voice code from CCB module */
#define ES_1371_MPWR (1<<5) /* power level interrupt pending */
#define ES_MCCB (1<<4) /* CCB interrupt pending */
#define ES_UART (1<<3) /* UART interrupt pending */
#define ES_DAC1 (1<<2) /* DAC1 channel interrupt pending */
#define ES_DAC2 (1<<1) /* DAC2 channel interrupt pending */
#define ES_ADC (1<<0) /* ADC channel interrupt pending */
#define ES_REG_UART_DATA 0x08 /* R/W: UART data register */
#define ES_REG_UART_STATUS 0x09 /* R/O: UART status register */
#define ES_RXINT (1<<7) /* RX interrupt occurred */
#define ES_TXINT (1<<2) /* TX interrupt occurred */
#define ES_TXRDY (1<<1) /* transmitter ready */
#define ES_RXRDY (1<<0) /* receiver ready */
#define ES_REG_UART_CONTROL 0x09 /* W/O: UART control register */
#define ES_RXINTEN (1<<7) /* RX interrupt enable */
#define ES_TXINTENO(o) (((o)&0x03)<<5) /* TX interrupt enable */
#define ES_TXINTENM (0x03<<5) /* mask for above */
#define ES_TXINTENI(i) (((i)>>5)&0x03)
#define ES_CNTRL(o) (((o)&0x03)<<0) /* control */
#define ES_CNTRLM (0x03<<0) /* mask for above */
#define ES_REG_UART_RES 0x0a /* R/W: UART reserved register */
#define ES_TEST_MODE (1<<0) /* test mode enabled */
#define ES_REG_MEM_PAGE 0x0c /* R/W: Memory page register */
#define ES_MEM_PAGEO(o) (((o)&0x0f)<<0) /* memory page select - out */
#define ES_MEM_PAGEM (0x0f<<0) /* mask for above */
#define ES_MEM_PAGEI(i) (((i)>>0)&0x0f) /* memory page select - in */
#define ES_REG_1370_CODEC 0x10 /* W/O: Codec write register address */
#define ES_1370_CODEC_WRITE(a,d) ((((a)&0xff)<<8)|(((d)&0xff)<<0))
#define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */
#define ES_1371_CODEC_RDY (1<<31) /* codec ready */
#define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */
#define EV_1938_CODEC_MAGIC (1<<26)
#define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */
#define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0))
#define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD)
#define ES_1371_CODEC_READ(i) (((i)>>0)&0xffff)
#define ES_REG_1371_SMPRATE 0x10 /* W/R: Codec rate converter interface register */
#define ES_1371_SRC_RAM_ADDRO(o) (((o)&0x7f)<<25)/* address of the sample rate converter */
#define ES_1371_SRC_RAM_ADDRM (0x7f<<25) /* mask for above */
#define ES_1371_SRC_RAM_ADDRI(i) (((i)>>25)&0x7f)/* address of the sample rate converter */
#define ES_1371_SRC_RAM_WE (1<<24) /* R/W: read/write control for sample rate converter */
#define ES_1371_SRC_RAM_BUSY (1<<23) /* R/O: sample rate memory is busy */
#define ES_1371_SRC_DISABLE (1<<22) /* sample rate converter disable */
#define ES_1371_DIS_P1 (1<<21) /* playback channel 1 accumulator update disable */
#define ES_1371_DIS_P2 (1<<20) /* playback channel 2 accumulator update disable */
#define ES_1371_DIS_R1 (1<<19) /* capture channel accumulator update disable */
#define ES_1371_SRC_RAM_DATAO(o) (((o)&0xffff)<<0)/* current value of the sample rate converter */
#define ES_1371_SRC_RAM_DATAM (0xffff<<0) /* mask for above */
#define ES_1371_SRC_RAM_DATAI(i) (((i)>>0)&0xffff)/* current value of the sample rate converter */
#define ES_REG_1371_LEGACY 0x18 /* W/R: Legacy control/status register */
#define ES_1371_JFAST (1<<31) /* fast joystick timing */
#define ES_1371_HIB (1<<30) /* host interrupt blocking enable */
#define ES_1371_VSB (1<<29) /* SB; 0 = addr 0x220xH, 1 = 0x22FxH */
#define ES_1371_VMPUO(o) (((o)&0x03)<<27)/* base register address; 0 = 0x320xH; 1 = 0x330xH; 2 = 0x340xH; 3 = 0x350xH */
#define ES_1371_VMPUM (0x03<<27) /* mask for above */
#define ES_1371_VMPUI(i) (((i)>>27)&0x03)/* base register address */
#define ES_1371_VCDCO(o) (((o)&0x03)<<25)/* CODEC; 0 = 0x530xH; 1 = undefined; 2 = 0xe80xH; 3 = 0xF40xH */
#define ES_1371_VCDCM (0x03<<25) /* mask for above */
#define ES_1371_VCDCI(i) (((i)>>25)&0x03)/* CODEC address */
#define ES_1371_FIRQ (1<<24) /* force an interrupt */
#define ES_1371_SDMACAP (1<<23) /* enable event capture for slave DMA controller */
#define ES_1371_SPICAP (1<<22) /* enable event capture for slave IRQ controller */
#define ES_1371_MDMACAP (1<<21) /* enable event capture for master DMA controller */
#define ES_1371_MPICAP (1<<20) /* enable event capture for master IRQ controller */
#define ES_1371_ADCAP (1<<19) /* enable event capture for ADLIB register; 0x388xH */
#define ES_1371_SVCAP (1<<18) /* enable event capture for SB registers */
#define ES_1371_CDCCAP (1<<17) /* enable event capture for CODEC registers */
#define ES_1371_BACAP (1<<16) /* enable event capture for SoundScape base address */
#define ES_1371_EXI(i) (((i)>>8)&0x07) /* event number */
#define ES_1371_AI(i) (((i)>>3)&0x1f) /* event significant I/O address */
#define ES_1371_WR (1<<2) /* event capture; 0 = read; 1 = write */
#define ES_1371_LEGINT (1<<0) /* interrupt for legacy events; 0 = interrupt did occur */
#define ES_REG_CHANNEL_STATUS 0x1c /* R/W: first 32-bits from S/PDIF channel status block, es1373 */
#define ES_REG_SERIAL 0x20 /* R/W: Serial interface control register */
#define ES_1371_DAC_TEST (1<<22) /* DAC test mode enable */
#define ES_P2_END_INCO(o) (((o)&0x07)<<19)/* binary offset value to increment / loop end */
#define ES_P2_END_INCM (0x07<<19) /* mask for above */
#define ES_P2_END_INCI(i) (((i)>>19)&0x07)/* binary offset value to increment / loop end */
#define ES_P2_ST_INCO(o) (((o)&0x07)<<16)/* binary offset value to increment / start */
#define ES_P2_ST_INCM (0x07<<16) /* mask for above */
#define ES_P2_ST_INCI(i) (((i)>>16)&0x07)/* binary offset value to increment / start */
#define ES_R1_LOOP_SEL (1<<15) /* ADC; 0 - loop mode; 1 = stop mode */
#define ES_P2_LOOP_SEL (1<<14) /* DAC2; 0 - loop mode; 1 = stop mode */
#define ES_P1_LOOP_SEL (1<<13) /* DAC1; 0 - loop mode; 1 = stop mode */
#define ES_P2_PAUSE (1<<12) /* DAC2; 0 - play mode; 1 = pause mode */
#define ES_P1_PAUSE (1<<11) /* DAC1; 0 - play mode; 1 = pause mode */
#define ES_R1_INT_EN (1<<10) /* ADC interrupt enable */
#define ES_P2_INT_EN (1<<9) /* DAC2 interrupt enable */
#define ES_P1_INT_EN (1<<8) /* DAC1 interrupt enable */
#define ES_P1_SCT_RLD (1<<7) /* force sample counter reload for DAC1 */
#define ES_P2_DAC_SEN (1<<6) /* when stop mode: 0 - DAC2 play back zeros; 1 = DAC2 play back last sample */
#define ES_R1_MODEO(o) (((o)&0x03)<<4) /* ADC mode; 0 = 8-bit mono; 1 = 8-bit stereo; 2 = 16-bit mono; 3 = 16-bit stereo */
#define ES_R1_MODEM (0x03<<4) /* mask for above */
#define ES_R1_MODEI(i) (((i)>>4)&0x03)
#define ES_P2_MODEO(o) (((o)&0x03)<<2) /* DAC2 mode; -- '' -- */
#define ES_P2_MODEM (0x03<<2) /* mask for above */
#define ES_P2_MODEI(i) (((i)>>2)&0x03)
#define ES_P1_MODEO(o) (((o)&0x03)<<0) /* DAC1 mode; -- '' -- */
#define ES_P1_MODEM (0x03<<0) /* mask for above */
#define ES_P1_MODEI(i) (((i)>>0)&0x03)
#define ES_REG_DAC1_COUNT 0x24 /* R/W: DAC1 sample count register */
#define ES_REG_DAC2_COUNT 0x28 /* R/W: DAC2 sample count register */
#define ES_REG_ADC_COUNT 0x2c /* R/W: ADC sample count register */
#define ES_REG_CURR_COUNT(i) (((i)>>16)&0xffff)
#define ES_REG_COUNTO(o) (((o)&0xffff)<<0)
#define ES_REG_COUNTM (0xffff<<0)
#define ES_REG_COUNTI(i) (((i)>>0)&0xffff)
#define ES_REG_DAC1_FRAME 0x30 /* R/W: PAGE 0x0c; DAC1 frame address */
#define ES_REG_DAC1_SIZE 0x34 /* R/W: PAGE 0x0c; DAC1 frame size */
#define ES_REG_DAC2_FRAME 0x38 /* R/W: PAGE 0x0c; DAC2 frame address */
#define ES_REG_DAC2_SIZE 0x3c /* R/W: PAGE 0x0c; DAC2 frame size */
#define ES_REG_ADC_FRAME 0x30 /* R/W: PAGE 0x0d; ADC frame address */
#define ES_REG_ADC_SIZE 0x34 /* R/W: PAGE 0x0d; ADC frame size */
#define ES_REG_FCURR_COUNTO(o) (((o)&0xffff)<<16)
#define ES_REG_FCURR_COUNTM (0xffff<<16)
#define ES_REG_FCURR_COUNTI(i) (((i)>>14)&0x3fffc)
#define ES_REG_FSIZEO(o) (((o)&0xffff)<<0)
#define ES_REG_FSIZEM (0xffff<<0)
#define ES_REG_FSIZEI(i) (((i)>>0)&0xffff)
#define ES_REG_PHANTOM_FRAME 0x38 /* R/W: PAGE 0x0d: phantom frame address */
#define ES_REG_PHANTOM_COUNT 0x3c /* R/W: PAGE 0x0d: phantom frame count */
#define ES_REG_UART_FIFO 0x30 /* R/W: PAGE 0x0e; UART FIFO register */
#define ES_REG_UF_VALID (1<<8)
#define ES_REG_UF_BYTEO(o) (((o)&0xff)<<0)
#define ES_REG_UF_BYTEM (0xff<<0)
#define ES_REG_UF_BYTEI(i) (((i)>>0)&0xff)
/*
* Pages
*/
#define ES_PAGE_DAC 0x0c
#define ES_PAGE_ADC 0x0d
#define ES_PAGE_UART 0x0e
#define ES_PAGE_UART1 0x0f
/*
* Sample rate converter addresses
*/
#define ES_SMPREG_DAC1 0x70
#define ES_SMPREG_DAC2 0x74
#define ES_SMPREG_ADC 0x78
#define ES_SMPREG_VOL_ADC 0x6c
#define ES_SMPREG_VOL_DAC1 0x7c
#define ES_SMPREG_VOL_DAC2 0x7e
#define ES_SMPREG_TRUNC_N 0x00
#define ES_SMPREG_INT_REGS 0x01
#define ES_SMPREG_ACCUM_FRAC 0x02
#define ES_SMPREG_VFREQ_FRAC 0x03
/*
* Some constants
*/
#define ES_1370_SRCLOCK 1411200
#define ES_1370_SRTODIV(x) (ES_1370_SRCLOCK/(x)-2)
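/*
 * Worked example (editor's arithmetic): for a 44100 Hz stream,
 * ES_1370_SRTODIV(44100) = 1411200 / 44100 - 2 = 32 - 2 = 30; the
 * hardware presumably divides the 1.4112 MHz clock by (divider + 2)
 * to recover the rate.
 */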
/*
* Open modes
*/
#define ES_MODE_PLAY1 0x0001
#define ES_MODE_PLAY2 0x0002
#define ES_MODE_CAPTURE 0x0004
#define ES_MODE_OUTPUT 0x0001 /* for MIDI */
#define ES_MODE_INPUT 0x0002 /* for MIDI */
/*
*/
struct ensoniq {
spinlock_t reg_lock;
struct mutex src_mutex;
int irq;
unsigned long playback1size;
unsigned long playback2size;
unsigned long capture3size;
unsigned long port;
unsigned int mode;
unsigned int uartm; /* UART mode */
unsigned int ctrl; /* control register */
unsigned int sctrl; /* serial control register */
unsigned int cssr; /* control status register */
unsigned int uartc; /* uart control register */
unsigned int rev; /* chip revision */
union {
#ifdef CHIP1371
struct {
struct snd_ac97 *ac97;
} es1371;
#else
struct {
int pclkdiv_lock;
struct snd_ak4531 *ak4531;
} es1370;
#endif
} u;
struct pci_dev *pci;
struct snd_card *card;
struct snd_pcm *pcm1; /* DAC1/ADC PCM */
struct snd_pcm *pcm2; /* DAC2 PCM */
struct snd_pcm_substream *playback1_substream;
struct snd_pcm_substream *playback2_substream;
struct snd_pcm_substream *capture_substream;
unsigned int p1_dma_size;
unsigned int p2_dma_size;
unsigned int c_dma_size;
unsigned int p1_period_size;
unsigned int p2_period_size;
unsigned int c_period_size;
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *midi_input;
struct snd_rawmidi_substream *midi_output;
unsigned int spdif;
unsigned int spdif_default;
unsigned int spdif_stream;
#ifdef CHIP1370
struct snd_dma_buffer *dma_bug;
#endif
#ifdef SUPPORT_JOYSTICK
struct gameport *gameport;
#endif
};
static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id);
static const struct pci_device_id snd_audiopci_ids[] = {
#ifdef CHIP1370
{ PCI_VDEVICE(ENSONIQ, 0x5000), 0, }, /* ES1370 */
#endif
#ifdef CHIP1371
{ PCI_VDEVICE(ENSONIQ, 0x1371), 0, }, /* ES1371 */
{ PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
{ PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
#endif
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
/*
* constants
*/
#define POLL_COUNT 0xa000
#ifdef CHIP1370
static const unsigned int snd_es1370_fixed_rates[] =
{5512, 11025, 22050, 44100};
static const struct snd_pcm_hw_constraint_list snd_es1370_hw_constraints_rates = {
.count = 4,
.list = snd_es1370_fixed_rates,
.mask = 0,
};
static const struct snd_ratnum es1370_clock = {
.num = ES_1370_SRCLOCK,
.den_min = 29,
.den_max = 353,
.den_step = 1,
};
static const struct snd_pcm_hw_constraint_ratnums snd_es1370_hw_constraints_clock = {
.nrats = 1,
.rats = &es1370_clock,
};
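/*
 * Editor's arithmetic for the constraint above: the ratnum rate is
 * ES_1370_SRCLOCK / den with den in 29..353, so the representable rates
 * run from 1411200 / 353 (about 3998 Hz) up to 1411200 / 29 (about
 * 48662 Hz), bracketing the advertised 4000..48000 range.
 */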
#else
static const struct snd_ratden es1371_dac_clock = {
.num_min = 3000 * (1 << 15),
.num_max = 48000 * (1 << 15),
.num_step = 3000,
.den = 1 << 15,
};
static const struct snd_pcm_hw_constraint_ratdens snd_es1371_hw_constraints_dac_clock = {
.nrats = 1,
.rats = &es1371_dac_clock,
};
static const struct snd_ratnum es1371_adc_clock = {
.num = 48000 << 15,
.den_min = 32768,
.den_max = 393216,
.den_step = 1,
};
static const struct snd_pcm_hw_constraint_ratnums snd_es1371_hw_constraints_adc_clock = {
.nrats = 1,
.rats = &es1371_adc_clock,
};
#endif
static const unsigned int snd_ensoniq_sample_shift[] =
{0, 1, 1, 2};
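/*
 * The mode index used with this table is built in the prepare callbacks
 * as bit 0 = stereo, bit 1 = 16-bit (matching the ES_x_MODEO encoding),
 * so the entries are log2(bytes per frame): 8-bit mono -> 0, 8-bit
 * stereo or 16-bit mono -> 1, 16-bit stereo -> 2.
 */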
/*
* common I/O routines
*/
#ifdef CHIP1371
static unsigned int snd_es1371_wait_src_ready(struct ensoniq * ensoniq)
{
unsigned int t, r = 0;
for (t = 0; t < POLL_COUNT; t++) {
r = inl(ES_REG(ensoniq, 1371_SMPRATE));
if ((r & ES_1371_SRC_RAM_BUSY) == 0)
return r;
cond_resched();
}
dev_err(ensoniq->card->dev, "wait src ready timeout 0x%lx [0x%x]\n",
ES_REG(ensoniq, 1371_SMPRATE), r);
return 0;
}
static unsigned int snd_es1371_src_read(struct ensoniq * ensoniq, unsigned short reg)
{
unsigned int temp, i, orig, r;
/* wait for ready */
temp = orig = snd_es1371_wait_src_ready(ensoniq);
/* expose the SRC state bits */
r = temp & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
ES_1371_DIS_P2 | ES_1371_DIS_R1);
r |= ES_1371_SRC_RAM_ADDRO(reg) | 0x10000;
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
/* now, wait for busy and the correct time to read */
temp = snd_es1371_wait_src_ready(ensoniq);
if ((temp & 0x00870000) != 0x00010000) {
/* wait for the right state */
for (i = 0; i < POLL_COUNT; i++) {
temp = inl(ES_REG(ensoniq, 1371_SMPRATE));
if ((temp & 0x00870000) == 0x00010000)
break;
}
}
/* hide the state bits */
r = orig & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
ES_1371_DIS_P2 | ES_1371_DIS_R1);
r |= ES_1371_SRC_RAM_ADDRO(reg);
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
return temp;
}
static void snd_es1371_src_write(struct ensoniq * ensoniq,
unsigned short reg, unsigned short data)
{
unsigned int r;
r = snd_es1371_wait_src_ready(ensoniq) &
(ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
ES_1371_DIS_P2 | ES_1371_DIS_R1);
r |= ES_1371_SRC_RAM_ADDRO(reg) | ES_1371_SRC_RAM_DATAO(data);
outl(r | ES_1371_SRC_RAM_WE, ES_REG(ensoniq, 1371_SMPRATE));
}
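/*
 * Editor's note on the magic numbers in the two SRC helpers above
 * (derived from the masks, not from a datasheet): 0x00870000 covers
 * ES_1371_SRC_RAM_BUSY (bit 23) plus the undocumented bits 18:16, which
 * appear to encode an SRC state machine; waiting until the masked value
 * reads 0x00010000 therefore means "BUSY clear and state == 1", the
 * safe window for touching the SRC RAM address/data fields.
 */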
#endif /* CHIP1371 */
#ifdef CHIP1370
static void snd_es1370_codec_write(struct snd_ak4531 *ak4531,
unsigned short reg, unsigned short val)
{
struct ensoniq *ensoniq = ak4531->private_data;
unsigned long end_time = jiffies + HZ / 10;
#if 0
dev_dbg(ensoniq->card->dev,
"CODEC WRITE: reg = 0x%x, val = 0x%x (0x%x), creg = 0x%x\n",
reg, val, ES_1370_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1370_CODEC));
#endif
do {
if (!(inl(ES_REG(ensoniq, STATUS)) & ES_1370_CSTAT)) {
outw(ES_1370_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1370_CODEC));
return;
}
schedule_timeout_uninterruptible(1);
} while (time_after(end_time, jiffies));
dev_err(ensoniq->card->dev, "codec write timeout, status = 0x%x\n",
inl(ES_REG(ensoniq, STATUS)));
}
#endif /* CHIP1370 */
#ifdef CHIP1371
static inline bool is_ev1938(struct ensoniq *ensoniq)
{
return ensoniq->pci->device == 0x8938;
}
static void snd_es1371_codec_write(struct snd_ac97 *ac97,
unsigned short reg, unsigned short val)
{
struct ensoniq *ensoniq = ac97->private_data;
unsigned int t, x, flag;
flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
/* save the current state for later */
x = snd_es1371_wait_src_ready(ensoniq);
outl((x & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
ES_1371_DIS_P2 | ES_1371_DIS_R1)) | 0x00010000,
ES_REG(ensoniq, 1371_SMPRATE));
/* wait for not busy (state 0) first to avoid
transition states */
for (t = 0; t < POLL_COUNT; t++) {
if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
0x00000000)
break;
}
/* wait for a SAFE time to write addr/data and then do it, dammit */
for (t = 0; t < POLL_COUNT; t++) {
if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
0x00010000)
break;
}
outl(ES_1371_CODEC_WRITE(reg, val) | flag,
ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
mutex_unlock(&ensoniq->src_mutex);
return;
}
}
mutex_unlock(&ensoniq->src_mutex);
dev_err(ensoniq->card->dev, "codec write timeout at 0x%lx [0x%x]\n",
ES_REG(ensoniq, 1371_CODEC), inl(ES_REG(ensoniq, 1371_CODEC)));
}
static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct ensoniq *ensoniq = ac97->private_data;
unsigned int t, x, flag, fail = 0;
flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
__again:
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
/* save the current state for later */
x = snd_es1371_wait_src_ready(ensoniq);
outl((x & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
ES_1371_DIS_P2 | ES_1371_DIS_R1)) | 0x00010000,
ES_REG(ensoniq, 1371_SMPRATE));
/* wait for not busy (state 0) first to avoid
transition states */
for (t = 0; t < POLL_COUNT; t++) {
if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
0x00000000)
break;
}
/* wait for a SAFE time to write addr/data and then do it, dammit */
for (t = 0; t < POLL_COUNT; t++) {
if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
0x00010000)
break;
}
outl(ES_1371_CODEC_READS(reg) | flag,
ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
/* wait for WIP again */
for (t = 0; t < POLL_COUNT; t++) {
if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP))
break;
}
/* now wait for the stinkin' data (RDY) */
for (t = 0; t < POLL_COUNT; t++) {
x = inl(ES_REG(ensoniq, 1371_CODEC));
if (x & ES_1371_CODEC_RDY) {
if (is_ev1938(ensoniq)) {
for (t = 0; t < 100; t++)
inl(ES_REG(ensoniq, CONTROL));
x = inl(ES_REG(ensoniq, 1371_CODEC));
}
mutex_unlock(&ensoniq->src_mutex);
return ES_1371_CODEC_READ(x);
}
}
mutex_unlock(&ensoniq->src_mutex);
if (++fail > 10) {
dev_err(ensoniq->card->dev,
"codec read timeout (final) at 0x%lx, reg = 0x%x [0x%x]\n",
ES_REG(ensoniq, 1371_CODEC), reg,
inl(ES_REG(ensoniq, 1371_CODEC)));
return 0;
}
goto __again;
}
}
mutex_unlock(&ensoniq->src_mutex);
dev_err(ensoniq->card->dev, "codec read timeout at 0x%lx [0x%x]\n",
ES_REG(ensoniq, 1371_CODEC), inl(ES_REG(ensoniq, 1371_CODEC)));
return 0;
}
static void snd_es1371_codec_wait(struct snd_ac97 *ac97)
{
msleep(750);
snd_es1371_codec_read(ac97, AC97_RESET);
snd_es1371_codec_read(ac97, AC97_VENDOR_ID1);
snd_es1371_codec_read(ac97, AC97_VENDOR_ID2);
msleep(50);
}
static void snd_es1371_adc_rate(struct ensoniq * ensoniq, unsigned int rate)
{
unsigned int n, truncm, freq;
mutex_lock(&ensoniq->src_mutex);
n = rate / 3000;
if ((1 << n) & ((1 << 15) | (1 << 13) | (1 << 11) | (1 << 9)))
n--;
truncm = (21 * n - 1) | 1;
freq = ((48000UL << 15) / rate) * n;
if (rate >= 24000) {
if (truncm > 239)
truncm = 239;
snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_TRUNC_N,
(((239 - truncm) >> 1) << 9) | (n << 4));
} else {
if (truncm > 119)
truncm = 119;
snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_TRUNC_N,
0x8000 | (((119 - truncm) >> 1) << 9) | (n << 4));
}
snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_INT_REGS,
(snd_es1371_src_read(ensoniq, ES_SMPREG_ADC +
ES_SMPREG_INT_REGS) & 0x00ff) |
((freq >> 5) & 0xfc00));
snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_VFREQ_FRAC, freq & 0x7fff);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC, n << 8);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC + 1, n << 8);
mutex_unlock(&ensoniq->src_mutex);
}
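/*
 * Worked example for the ADC SRC maths above (editor's arithmetic,
 * following the code literally): for rate = 44100,
 *	n      = 44100 / 3000 = 14 (kept, 1 << 14 misses the mask),
 *	truncm = (21 * 14 - 1) | 1 = 293, capped to 239 (rate >= 24000),
 *	freq   = ((48000 << 15) / 44100) * 14 = 35665 * 14 = 499310,
 * i.e. n * 48000 / rate, roughly 15.24 in 17.15 fixed point.
 */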
static void snd_es1371_dac1_rate(struct ensoniq * ensoniq, unsigned int rate)
{
unsigned int freq, r;
mutex_lock(&ensoniq->src_mutex);
freq = DIV_ROUND_CLOSEST(rate << 15, 3000);
r = (snd_es1371_wait_src_ready(ensoniq) & (ES_1371_SRC_DISABLE |
ES_1371_DIS_P2 | ES_1371_DIS_R1)) |
ES_1371_DIS_P1;
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_INT_REGS,
(snd_es1371_src_read(ensoniq, ES_SMPREG_DAC1 +
ES_SMPREG_INT_REGS) & 0x00ff) |
((freq >> 5) & 0xfc00));
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_VFREQ_FRAC, freq & 0x7fff);
r = (snd_es1371_wait_src_ready(ensoniq) & (ES_1371_SRC_DISABLE |
ES_1371_DIS_P2 | ES_1371_DIS_R1));
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
mutex_unlock(&ensoniq->src_mutex);
}
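/*
 * Example (editor's arithmetic): for rate = 48000 this programs
 * freq = DIV_ROUND_CLOSEST(48000 << 15, 3000) = 524288 = 16 << 15,
 * i.e. exactly 16 of the 3 kHz steps from the ratden constraint
 * (num_step = 3000, den = 1 << 15) declared above.
 */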
static void snd_es1371_dac2_rate(struct ensoniq * ensoniq, unsigned int rate)
{
unsigned int freq, r;
mutex_lock(&ensoniq->src_mutex);
freq = DIV_ROUND_CLOSEST(rate << 15, 3000);
r = (snd_es1371_wait_src_ready(ensoniq) & (ES_1371_SRC_DISABLE |
ES_1371_DIS_P1 | ES_1371_DIS_R1)) |
ES_1371_DIS_P2;
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_INT_REGS,
(snd_es1371_src_read(ensoniq, ES_SMPREG_DAC2 +
ES_SMPREG_INT_REGS) & 0x00ff) |
((freq >> 5) & 0xfc00));
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_VFREQ_FRAC,
freq & 0x7fff);
r = (snd_es1371_wait_src_ready(ensoniq) & (ES_1371_SRC_DISABLE |
ES_1371_DIS_P1 | ES_1371_DIS_R1));
outl(r, ES_REG(ensoniq, 1371_SMPRATE));
mutex_unlock(&ensoniq->src_mutex);
}
#endif /* CHIP1371 */
static int snd_ensoniq_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
{
unsigned int what = 0;
struct snd_pcm_substream *s;
snd_pcm_group_for_each_entry(s, substream) {
if (s == ensoniq->playback1_substream) {
what |= ES_P1_PAUSE;
snd_pcm_trigger_done(s, substream);
} else if (s == ensoniq->playback2_substream) {
what |= ES_P2_PAUSE;
snd_pcm_trigger_done(s, substream);
} else if (s == ensoniq->capture_substream)
return -EINVAL;
}
spin_lock(&ensoniq->reg_lock);
if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
ensoniq->sctrl |= what;
else
ensoniq->sctrl &= ~what;
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
spin_unlock(&ensoniq->reg_lock);
break;
}
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_STOP:
{
unsigned int what = 0;
struct snd_pcm_substream *s;
snd_pcm_group_for_each_entry(s, substream) {
if (s == ensoniq->playback1_substream) {
what |= ES_DAC1_EN;
snd_pcm_trigger_done(s, substream);
} else if (s == ensoniq->playback2_substream) {
what |= ES_DAC2_EN;
snd_pcm_trigger_done(s, substream);
} else if (s == ensoniq->capture_substream) {
what |= ES_ADC_EN;
snd_pcm_trigger_done(s, substream);
}
}
spin_lock(&ensoniq->reg_lock);
if (cmd == SNDRV_PCM_TRIGGER_START)
ensoniq->ctrl |= what;
else
ensoniq->ctrl &= ~what;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock(&ensoniq->reg_lock);
break;
}
default:
return -EINVAL;
}
return 0;
}
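/*
 * Note (editor's reading): the PAUSE branch above rejects the capture
 * substream because only the DACs have pause bits (ES_P1_PAUSE and
 * ES_P2_PAUSE); there is no equivalent pause bit for the ADC in the
 * serial control register, so capture can only be started and stopped.
 */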
/*
* PCM part
*/
static int snd_ensoniq_playback1_prepare(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mode = 0;
ensoniq->p1_dma_size = snd_pcm_lib_buffer_bytes(substream);
ensoniq->p1_period_size = snd_pcm_lib_period_bytes(substream);
if (snd_pcm_format_width(runtime->format) == 16)
mode |= 0x02;
if (runtime->channels > 1)
mode |= 0x01;
spin_lock_irq(&ensoniq->reg_lock);
ensoniq->ctrl &= ~ES_DAC1_EN;
#ifdef CHIP1371
/* 48k doesn't need SRC (it breaks AC3-passthru) */
if (runtime->rate == 48000)
ensoniq->ctrl |= ES_1373_BYPASS_P1;
else
ensoniq->ctrl &= ~ES_1373_BYPASS_P1;
#endif
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
outl(runtime->dma_addr, ES_REG(ensoniq, DAC1_FRAME));
outl((ensoniq->p1_dma_size >> 2) - 1, ES_REG(ensoniq, DAC1_SIZE));
ensoniq->sctrl &= ~(ES_P1_LOOP_SEL | ES_P1_PAUSE | ES_P1_SCT_RLD | ES_P1_MODEM);
ensoniq->sctrl |= ES_P1_INT_EN | ES_P1_MODEO(mode);
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
outl((ensoniq->p1_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
ES_REG(ensoniq, DAC1_COUNT));
#ifdef CHIP1370
ensoniq->ctrl &= ~ES_1370_WTSRSELM;
switch (runtime->rate) {
case 5512: ensoniq->ctrl |= ES_1370_WTSRSEL(0); break;
case 11025: ensoniq->ctrl |= ES_1370_WTSRSEL(1); break;
case 22050: ensoniq->ctrl |= ES_1370_WTSRSEL(2); break;
case 44100: ensoniq->ctrl |= ES_1370_WTSRSEL(3); break;
default: snd_BUG();
}
#endif
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
snd_es1371_dac1_rate(ensoniq, runtime->rate);
#endif
return 0;
}
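/*
 * Worked example of the register arithmetic above (editor's sketch):
 * with a 64 KiB buffer and a 4 KiB period in 16-bit stereo (mode 3,
 * shift 2), DAC1_SIZE is programmed with (65536 >> 2) - 1 = 16383
 * (longwords minus one) and DAC1_COUNT with (4096 >> 2) - 1 = 1023
 * (frames minus one).
 */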
static int snd_ensoniq_playback2_prepare(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mode = 0;
ensoniq->p2_dma_size = snd_pcm_lib_buffer_bytes(substream);
ensoniq->p2_period_size = snd_pcm_lib_period_bytes(substream);
if (snd_pcm_format_width(runtime->format) == 16)
mode |= 0x02;
if (runtime->channels > 1)
mode |= 0x01;
spin_lock_irq(&ensoniq->reg_lock);
ensoniq->ctrl &= ~ES_DAC2_EN;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
outl(runtime->dma_addr, ES_REG(ensoniq, DAC2_FRAME));
outl((ensoniq->p2_dma_size >> 2) - 1, ES_REG(ensoniq, DAC2_SIZE));
ensoniq->sctrl &= ~(ES_P2_LOOP_SEL | ES_P2_PAUSE | ES_P2_DAC_SEN |
ES_P2_END_INCM | ES_P2_ST_INCM | ES_P2_MODEM);
ensoniq->sctrl |= ES_P2_INT_EN | ES_P2_MODEO(mode) |
ES_P2_END_INCO(mode & 2 ? 2 : 1) | ES_P2_ST_INCO(0);
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
outl((ensoniq->p2_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
ES_REG(ensoniq, DAC2_COUNT));
#ifdef CHIP1370
if (!(ensoniq->u.es1370.pclkdiv_lock & ES_MODE_CAPTURE)) {
ensoniq->ctrl &= ~ES_1370_PCLKDIVM;
ensoniq->ctrl |= ES_1370_PCLKDIVO(ES_1370_SRTODIV(runtime->rate));
ensoniq->u.es1370.pclkdiv_lock |= ES_MODE_PLAY2;
}
#endif
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
snd_es1371_dac2_rate(ensoniq, runtime->rate);
#endif
return 0;
}
static int snd_ensoniq_capture_prepare(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mode = 0;
ensoniq->c_dma_size = snd_pcm_lib_buffer_bytes(substream);
ensoniq->c_period_size = snd_pcm_lib_period_bytes(substream);
if (snd_pcm_format_width(runtime->format) == 16)
mode |= 0x02;
if (runtime->channels > 1)
mode |= 0x01;
spin_lock_irq(&ensoniq->reg_lock);
ensoniq->ctrl &= ~ES_ADC_EN;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE));
outl(runtime->dma_addr, ES_REG(ensoniq, ADC_FRAME));
outl((ensoniq->c_dma_size >> 2) - 1, ES_REG(ensoniq, ADC_SIZE));
ensoniq->sctrl &= ~(ES_R1_LOOP_SEL | ES_R1_MODEM);
ensoniq->sctrl |= ES_R1_INT_EN | ES_R1_MODEO(mode);
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
outl((ensoniq->c_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
ES_REG(ensoniq, ADC_COUNT));
#ifdef CHIP1370
if (!(ensoniq->u.es1370.pclkdiv_lock & ES_MODE_PLAY2)) {
ensoniq->ctrl &= ~ES_1370_PCLKDIVM;
ensoniq->ctrl |= ES_1370_PCLKDIVO(ES_1370_SRTODIV(runtime->rate));
ensoniq->u.es1370.pclkdiv_lock |= ES_MODE_CAPTURE;
}
#endif
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
snd_es1371_adc_rate(ensoniq, runtime->rate);
#endif
return 0;
}
static snd_pcm_uframes_t snd_ensoniq_playback1_pointer(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
size_t ptr;
spin_lock(&ensoniq->reg_lock);
if (inl(ES_REG(ensoniq, CONTROL)) & ES_DAC1_EN) {
outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, DAC1_SIZE)));
ptr = bytes_to_frames(substream->runtime, ptr);
} else {
ptr = 0;
}
spin_unlock(&ensoniq->reg_lock);
return ptr;
}
static snd_pcm_uframes_t snd_ensoniq_playback2_pointer(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
size_t ptr;
spin_lock(&ensoniq->reg_lock);
if (inl(ES_REG(ensoniq, CONTROL)) & ES_DAC2_EN) {
outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, DAC2_SIZE)));
ptr = bytes_to_frames(substream->runtime, ptr);
} else {
ptr = 0;
}
spin_unlock(&ensoniq->reg_lock);
return ptr;
}
static snd_pcm_uframes_t snd_ensoniq_capture_pointer(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
size_t ptr;
spin_lock(&ensoniq->reg_lock);
if (inl(ES_REG(ensoniq, CONTROL)) & ES_ADC_EN) {
outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE));
ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, ADC_SIZE)));
ptr = bytes_to_frames(substream->runtime, ptr);
} else {
ptr = 0;
}
spin_unlock(&ensoniq->reg_lock);
return ptr;
}
static const struct snd_pcm_hardware snd_ensoniq_playback1 =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates =
#ifndef CHIP1370
SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
#else
(SNDRV_PCM_RATE_KNOT | /* 5512Hz rate */
SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_22050 |
SNDRV_PCM_RATE_44100),
#endif
.rate_min = 4000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static const struct snd_pcm_hardware snd_ensoniq_playback2 =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 4000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static const struct snd_pcm_hardware snd_ensoniq_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 4000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static int snd_ensoniq_playback1_open(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
ensoniq->mode |= ES_MODE_PLAY1;
ensoniq->playback1_substream = substream;
runtime->hw = snd_ensoniq_playback1;
snd_pcm_set_sync(substream);
spin_lock_irq(&ensoniq->reg_lock);
if (ensoniq->spdif && ensoniq->playback2_substream == NULL)
ensoniq->spdif_stream = ensoniq->spdif_default;
spin_unlock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1370_hw_constraints_rates);
#else
snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1371_hw_constraints_dac_clock);
#endif
return 0;
}
static int snd_ensoniq_playback2_open(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
ensoniq->mode |= ES_MODE_PLAY2;
ensoniq->playback2_substream = substream;
runtime->hw = snd_ensoniq_playback2;
snd_pcm_set_sync(substream);
spin_lock_irq(&ensoniq->reg_lock);
if (ensoniq->spdif && ensoniq->playback1_substream == NULL)
ensoniq->spdif_stream = ensoniq->spdif_default;
spin_unlock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1370_hw_constraints_clock);
#else
snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1371_hw_constraints_dac_clock);
#endif
return 0;
}
static int snd_ensoniq_capture_open(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
ensoniq->mode |= ES_MODE_CAPTURE;
ensoniq->capture_substream = substream;
runtime->hw = snd_ensoniq_capture;
snd_pcm_set_sync(substream);
#ifdef CHIP1370
snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1370_hw_constraints_clock);
#else
snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_es1371_hw_constraints_adc_clock);
#endif
return 0;
}
static int snd_ensoniq_playback1_close(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
ensoniq->playback1_substream = NULL;
ensoniq->mode &= ~ES_MODE_PLAY1;
return 0;
}
static int snd_ensoniq_playback2_close(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
ensoniq->playback2_substream = NULL;
spin_lock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
ensoniq->u.es1370.pclkdiv_lock &= ~ES_MODE_PLAY2;
#endif
ensoniq->mode &= ~ES_MODE_PLAY2;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ensoniq_capture_close(struct snd_pcm_substream *substream)
{
struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
ensoniq->capture_substream = NULL;
spin_lock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
ensoniq->u.es1370.pclkdiv_lock &= ~ES_MODE_CAPTURE;
#endif
ensoniq->mode &= ~ES_MODE_CAPTURE;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static const struct snd_pcm_ops snd_ensoniq_playback1_ops = {
.open = snd_ensoniq_playback1_open,
.close = snd_ensoniq_playback1_close,
.prepare = snd_ensoniq_playback1_prepare,
.trigger = snd_ensoniq_trigger,
.pointer = snd_ensoniq_playback1_pointer,
};
static const struct snd_pcm_ops snd_ensoniq_playback2_ops = {
.open = snd_ensoniq_playback2_open,
.close = snd_ensoniq_playback2_close,
.prepare = snd_ensoniq_playback2_prepare,
.trigger = snd_ensoniq_trigger,
.pointer = snd_ensoniq_playback2_pointer,
};
static const struct snd_pcm_ops snd_ensoniq_capture_ops = {
.open = snd_ensoniq_capture_open,
.close = snd_ensoniq_capture_close,
.prepare = snd_ensoniq_capture_prepare,
.trigger = snd_ensoniq_trigger,
.pointer = snd_ensoniq_capture_pointer,
};
static const struct snd_pcm_chmap_elem surround_map[] = {
{ .channels = 1,
.map = { SNDRV_CHMAP_MONO } },
{ .channels = 2,
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
{ }
};
static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device)
{
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(ensoniq->card, CHIP_NAME "/1", device, 1, 1, &pcm);
if (err < 0)
return err;
#ifdef CHIP1370
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback2_ops);
#else
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback1_ops);
#endif
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ensoniq_capture_ops);
pcm->private_data = ensoniq;
pcm->info_flags = 0;
strcpy(pcm->name, CHIP_NAME " DAC2/ADC");
ensoniq->pcm1 = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&ensoniq->pci->dev, 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
#else
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
#endif
return err;
}
static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device)
{
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(ensoniq->card, CHIP_NAME "/2", device, 1, 0, &pcm);
if (err < 0)
return err;
#ifdef CHIP1370
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback1_ops);
#else
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback2_ops);
#endif
pcm->private_data = ensoniq;
pcm->info_flags = 0;
strcpy(pcm->name, CHIP_NAME " DAC1");
ensoniq->pcm2 = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&ensoniq->pci->dev, 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
#else
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
#endif
return err;
}
/*
* Mixer section
*/
/*
* ENS1371 mixer (including SPDIF interface)
*/
#ifdef CHIP1371
static int snd_ens1373_spdif_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_ens1373_spdif_default_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&ensoniq->reg_lock);
ucontrol->value.iec958.status[0] = (ensoniq->spdif_default >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (ensoniq->spdif_default >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (ensoniq->spdif_default >> 16) & 0xff;
ucontrol->value.iec958.status[3] = (ensoniq->spdif_default >> 24) & 0xff;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ens1373_spdif_default_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
unsigned int val;
int change;
val = ((u32)ucontrol->value.iec958.status[0] << 0) |
((u32)ucontrol->value.iec958.status[1] << 8) |
((u32)ucontrol->value.iec958.status[2] << 16) |
((u32)ucontrol->value.iec958.status[3] << 24);
spin_lock_irq(&ensoniq->reg_lock);
change = ensoniq->spdif_default != val;
ensoniq->spdif_default = val;
if (change && ensoniq->playback1_substream == NULL &&
ensoniq->playback2_substream == NULL)
outl(val, ES_REG(ensoniq, CHANNEL_STATUS));
spin_unlock_irq(&ensoniq->reg_lock);
return change;
}
static int snd_ens1373_spdif_mask_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.iec958.status[0] = 0xff;
ucontrol->value.iec958.status[1] = 0xff;
ucontrol->value.iec958.status[2] = 0xff;
ucontrol->value.iec958.status[3] = 0xff;
return 0;
}
static int snd_ens1373_spdif_stream_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&ensoniq->reg_lock);
ucontrol->value.iec958.status[0] = (ensoniq->spdif_stream >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (ensoniq->spdif_stream >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (ensoniq->spdif_stream >> 16) & 0xff;
ucontrol->value.iec958.status[3] = (ensoniq->spdif_stream >> 24) & 0xff;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ens1373_spdif_stream_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
unsigned int val;
int change;
val = ((u32)ucontrol->value.iec958.status[0] << 0) |
((u32)ucontrol->value.iec958.status[1] << 8) |
((u32)ucontrol->value.iec958.status[2] << 16) |
((u32)ucontrol->value.iec958.status[3] << 24);
spin_lock_irq(&ensoniq->reg_lock);
change = ensoniq->spdif_stream != val;
ensoniq->spdif_stream = val;
if (change && (ensoniq->playback1_substream != NULL ||
ensoniq->playback2_substream != NULL))
outl(val, ES_REG(ensoniq, CHANNEL_STATUS));
spin_unlock_irq(&ensoniq->reg_lock);
return change;
}
#define ES1371_SPDIF(xname) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_es1371_spdif_info, \
.get = snd_es1371_spdif_get, .put = snd_es1371_spdif_put }
#define snd_es1371_spdif_info snd_ctl_boolean_mono_info
static int snd_es1371_spdif_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&ensoniq->reg_lock);
ucontrol->value.integer.value[0] = ensoniq->ctrl & ES_1373_SPDIF_THRU ? 1 : 0;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_es1371_spdif_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
unsigned int nval1, nval2;
int change;
nval1 = ucontrol->value.integer.value[0] ? ES_1373_SPDIF_THRU : 0;
nval2 = ucontrol->value.integer.value[0] ? ES_1373_SPDIF_EN : 0;
spin_lock_irq(&ensoniq->reg_lock);
change = (ensoniq->ctrl & ES_1373_SPDIF_THRU) != nval1;
ensoniq->ctrl &= ~ES_1373_SPDIF_THRU;
ensoniq->ctrl |= nval1;
ensoniq->cssr &= ~ES_1373_SPDIF_EN;
ensoniq->cssr |= nval2;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ensoniq->cssr, ES_REG(ensoniq, STATUS));
spin_unlock_irq(&ensoniq->reg_lock);
return change;
}
/* spdif controls */
static const struct snd_kcontrol_new snd_es1371_mixer_spdif[] = {
ES1371_SPDIF(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH)),
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
.info = snd_ens1373_spdif_info,
.get = snd_ens1373_spdif_default_get,
.put = snd_ens1373_spdif_default_put,
},
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
.info = snd_ens1373_spdif_info,
.get = snd_ens1373_spdif_mask_get
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
.info = snd_ens1373_spdif_info,
.get = snd_ens1373_spdif_stream_get,
.put = snd_ens1373_spdif_stream_put
},
};
#define snd_es1373_rear_info snd_ctl_boolean_mono_info
static int snd_es1373_rear_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
int val = 0;
spin_lock_irq(&ensoniq->reg_lock);
if ((ensoniq->cssr & (ES_1373_REAR_BIT27|ES_1373_REAR_BIT26|
ES_1373_REAR_BIT24)) == ES_1373_REAR_BIT26)
val = 1;
ucontrol->value.integer.value[0] = val;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_es1373_rear_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
unsigned int nval1;
int change;
nval1 = ucontrol->value.integer.value[0] ?
ES_1373_REAR_BIT26 : (ES_1373_REAR_BIT27|ES_1373_REAR_BIT24);
spin_lock_irq(&ensoniq->reg_lock);
change = (ensoniq->cssr & (ES_1373_REAR_BIT27|
ES_1373_REAR_BIT26|ES_1373_REAR_BIT24)) != nval1;
ensoniq->cssr &= ~(ES_1373_REAR_BIT27|ES_1373_REAR_BIT26|ES_1373_REAR_BIT24);
ensoniq->cssr |= nval1;
outl(ensoniq->cssr, ES_REG(ensoniq, STATUS));
spin_unlock_irq(&ensoniq->reg_lock);
return change;
}
static const struct snd_kcontrol_new snd_ens1373_rear =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "AC97 2ch->4ch Copy Switch",
.info = snd_es1373_rear_info,
.get = snd_es1373_rear_get,
.put = snd_es1373_rear_put,
};
#define snd_es1373_line_info snd_ctl_boolean_mono_info
static int snd_es1373_line_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
int val = 0;
spin_lock_irq(&ensoniq->reg_lock);
if (ensoniq->ctrl & ES_1371_GPIO_OUT(4))
val = 1;
ucontrol->value.integer.value[0] = val;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_es1373_line_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
int changed;
unsigned int ctrl;
spin_lock_irq(&ensoniq->reg_lock);
ctrl = ensoniq->ctrl;
if (ucontrol->value.integer.value[0])
ensoniq->ctrl |= ES_1371_GPIO_OUT(4); /* switch line-in -> rear out */
else
ensoniq->ctrl &= ~ES_1371_GPIO_OUT(4);
changed = (ctrl != ensoniq->ctrl);
if (changed)
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock_irq(&ensoniq->reg_lock);
return changed;
}
static const struct snd_kcontrol_new snd_ens1373_line =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line In->Rear Out Switch",
.info = snd_es1373_line_info,
.get = snd_es1373_line_get,
.put = snd_es1373_line_put,
};
static void snd_ensoniq_mixer_free_ac97(struct snd_ac97 *ac97)
{
struct ensoniq *ensoniq = ac97->private_data;
ensoniq->u.es1371.ac97 = NULL;
}
struct es1371_quirk {
unsigned short vid; /* vendor ID */
unsigned short did; /* device ID */
unsigned char rev; /* revision */
};
static int es1371_quirk_lookup(struct ensoniq *ensoniq,
const struct es1371_quirk *list)
{
while (list->vid != (unsigned short)PCI_ANY_ID) {
if (ensoniq->pci->vendor == list->vid &&
ensoniq->pci->device == list->did &&
ensoniq->rev == list->rev)
return 1;
list++;
}
return 0;
}
static const struct es1371_quirk es1371_spdif_present[] = {
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_C },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_D },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_E },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_CT5880_A },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_ES1373_8 },
{ .vid = PCI_ANY_ID, .did = PCI_ANY_ID }
};
static const struct snd_pci_quirk ens1373_line_quirk[] = {
SND_PCI_QUIRK_ID(0x1274, 0x2000), /* GA-7DXR */
SND_PCI_QUIRK_ID(0x1458, 0xa000), /* GA-8IEXP */
{ } /* end */
};
static int snd_ensoniq_1371_mixer(struct ensoniq *ensoniq,
int has_spdif, int has_line)
{
struct snd_card *card = ensoniq->card;
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
int err;
static const struct snd_ac97_bus_ops ops = {
.write = snd_es1371_codec_write,
.read = snd_es1371_codec_read,
.wait = snd_es1371_codec_wait,
};
err = snd_ac97_bus(card, 0, &ops, NULL, &pbus);
if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = ensoniq;
ac97.private_free = snd_ensoniq_mixer_free_ac97;
ac97.pci = ensoniq->pci;
ac97.scaps = AC97_SCAP_AUDIO;
err = snd_ac97_mixer(pbus, &ac97, &ensoniq->u.es1371.ac97);
if (err < 0)
return err;
if (has_spdif > 0 ||
(!has_spdif && es1371_quirk_lookup(ensoniq, es1371_spdif_present))) {
struct snd_kcontrol *kctl;
int i, is_spdif = 0;
ensoniq->spdif_default = ensoniq->spdif_stream =
SNDRV_PCM_DEFAULT_CON_SPDIF;
outl(ensoniq->spdif_default, ES_REG(ensoniq, CHANNEL_STATUS));
if (ensoniq->u.es1371.ac97->ext_id & AC97_EI_SPDIF)
is_spdif++;
for (i = 0; i < ARRAY_SIZE(snd_es1371_mixer_spdif); i++) {
kctl = snd_ctl_new1(&snd_es1371_mixer_spdif[i], ensoniq);
if (!kctl)
return -ENOMEM;
kctl->id.index = is_spdif;
err = snd_ctl_add(card, kctl);
if (err < 0)
return err;
}
}
if (ensoniq->u.es1371.ac97->ext_id & AC97_EI_SDAC) {
/* mirror rear to front speakers */
ensoniq->cssr &= ~(ES_1373_REAR_BIT27|ES_1373_REAR_BIT24);
ensoniq->cssr |= ES_1373_REAR_BIT26;
err = snd_ctl_add(card, snd_ctl_new1(&snd_ens1373_rear, ensoniq));
if (err < 0)
return err;
}
if (has_line > 0 ||
snd_pci_quirk_lookup(ensoniq->pci, ens1373_line_quirk)) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_ens1373_line,
ensoniq));
if (err < 0)
return err;
}
return 0;
}
#endif /* CHIP1371 */
/* generic control callbacks for ens1370 */
#ifdef CHIP1370
#define ENSONIQ_CONTROL(xname, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = xname, .info = snd_ensoniq_control_info, \
.get = snd_ensoniq_control_get, .put = snd_ensoniq_control_put, \
.private_value = mask }
#define snd_ensoniq_control_info snd_ctl_boolean_mono_info
static int snd_ensoniq_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
int mask = kcontrol->private_value;
spin_lock_irq(&ensoniq->reg_lock);
ucontrol->value.integer.value[0] = ensoniq->ctrl & mask ? 1 : 0;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ensoniq_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
int mask = kcontrol->private_value;
unsigned int nval;
int change;
nval = ucontrol->value.integer.value[0] ? mask : 0;
spin_lock_irq(&ensoniq->reg_lock);
change = (ensoniq->ctrl & mask) != nval;
ensoniq->ctrl &= ~mask;
ensoniq->ctrl |= nval;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
spin_unlock_irq(&ensoniq->reg_lock);
return change;
}
/*
* ENS1370 mixer
*/
static const struct snd_kcontrol_new snd_es1370_controls[2] = {
ENSONIQ_CONTROL("PCM 0 Output also on Line-In Jack", ES_1370_XCTL0),
ENSONIQ_CONTROL("Mic +5V bias", ES_1370_XCTL1)
};
#define ES1370_CONTROLS ARRAY_SIZE(snd_es1370_controls)
static void snd_ensoniq_mixer_free_ak4531(struct snd_ak4531 *ak4531)
{
struct ensoniq *ensoniq = ak4531->private_data;
ensoniq->u.es1370.ak4531 = NULL;
}
static int snd_ensoniq_1370_mixer(struct ensoniq *ensoniq)
{
struct snd_card *card = ensoniq->card;
struct snd_ak4531 ak4531;
unsigned int idx;
int err;
/* try reset AK4531 */
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x02), ES_REG(ensoniq, 1370_CODEC));
inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x03), ES_REG(ensoniq, 1370_CODEC));
inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
memset(&ak4531, 0, sizeof(ak4531));
ak4531.write = snd_es1370_codec_write;
ak4531.private_data = ensoniq;
ak4531.private_free = snd_ensoniq_mixer_free_ak4531;
err = snd_ak4531_mixer(card, &ak4531, &ensoniq->u.es1370.ak4531);
if (err < 0)
return err;
for (idx = 0; idx < ES1370_CONTROLS; idx++) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_es1370_controls[idx], ensoniq));
if (err < 0)
return err;
}
return 0;
}
#endif /* CHIP1370 */
#ifdef SUPPORT_JOYSTICK
#ifdef CHIP1371
static int snd_ensoniq_get_joystick_port(struct ensoniq *ensoniq, int dev)
{
switch (joystick_port[dev]) {
case 0: /* disabled */
case 1: /* auto-detect */
case 0x200:
case 0x208:
case 0x210:
case 0x218:
return joystick_port[dev];
default:
dev_err(ensoniq->card->dev,
"invalid joystick port %#x", joystick_port[dev]);
return 0;
}
}
#else
static int snd_ensoniq_get_joystick_port(struct ensoniq *ensoniq, int dev)
{
return joystick[dev] ? 0x200 : 0;
}
#endif
static int snd_ensoniq_create_gameport(struct ensoniq *ensoniq, int dev)
{
struct gameport *gp;
int io_port;
io_port = snd_ensoniq_get_joystick_port(ensoniq, dev);
switch (io_port) {
case 0:
return -ENOSYS;
case 1: /* auto-detect */
for (io_port = 0x200; io_port <= 0x218; io_port += 8)
if (request_region(io_port, 8, "ens137x: gameport"))
break;
if (io_port > 0x218) {
dev_warn(ensoniq->card->dev,
"no gameport ports available\n");
return -EBUSY;
}
break;
default:
if (!request_region(io_port, 8, "ens137x: gameport")) {
dev_warn(ensoniq->card->dev,
"gameport io port %#x in use\n",
io_port);
return -EBUSY;
}
break;
}
ensoniq->gameport = gp = gameport_allocate_port();
if (!gp) {
dev_err(ensoniq->card->dev,
"cannot allocate memory for gameport\n");
release_region(io_port, 8);
return -ENOMEM;
}
gameport_set_name(gp, "ES137x");
gameport_set_phys(gp, "pci%s/gameport0", pci_name(ensoniq->pci));
gameport_set_dev_parent(gp, &ensoniq->pci->dev);
gp->io = io_port;
ensoniq->ctrl |= ES_JYSTK_EN;
#ifdef CHIP1371
ensoniq->ctrl &= ~ES_1371_JOY_ASELM;
ensoniq->ctrl |= ES_1371_JOY_ASEL((io_port - 0x200) / 8);
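/* worked example: io_port 0x218 yields ASEL (0x218 - 0x200) / 8 = 3,
 * i.e. the last of the four decodes 0x200/0x208/0x210/0x218 */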
#endif
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
gameport_register_port(ensoniq->gameport);
return 0;
}
static void snd_ensoniq_free_gameport(struct ensoniq *ensoniq)
{
if (ensoniq->gameport) {
int port = ensoniq->gameport->io;
gameport_unregister_port(ensoniq->gameport);
ensoniq->gameport = NULL;
ensoniq->ctrl &= ~ES_JYSTK_EN;
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
release_region(port, 8);
}
}
#else
static inline int snd_ensoniq_create_gameport(struct ensoniq *ensoniq, int dev) { return -ENOSYS; }
static inline void snd_ensoniq_free_gameport(struct ensoniq *ensoniq) { }
#endif /* SUPPORT_JOYSTICK */
/*
*/
static void snd_ensoniq_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct ensoniq *ensoniq = entry->private_data;
snd_iprintf(buffer, "Ensoniq AudioPCI " CHIP_NAME "\n\n");
snd_iprintf(buffer, "Joystick enable : %s\n",
ensoniq->ctrl & ES_JYSTK_EN ? "on" : "off");
#ifdef CHIP1370
snd_iprintf(buffer, "MIC +5V bias : %s\n",
ensoniq->ctrl & ES_1370_XCTL1 ? "on" : "off");
snd_iprintf(buffer, "Line In to AOUT : %s\n",
ensoniq->ctrl & ES_1370_XCTL0 ? "on" : "off");
#else
snd_iprintf(buffer, "Joystick port : 0x%x\n",
(ES_1371_JOY_ASELI(ensoniq->ctrl) * 8) + 0x200);
#endif
}
static void snd_ensoniq_proc_init(struct ensoniq *ensoniq)
{
snd_card_ro_proc_new(ensoniq->card, "audiopci", ensoniq,
snd_ensoniq_proc_read);
}
/*
*/
static void snd_ensoniq_free(struct snd_card *card)
{
struct ensoniq *ensoniq = card->private_data;
snd_ensoniq_free_gameport(ensoniq);
#ifdef CHIP1370
outl(ES_1370_SERR_DISABLE, ES_REG(ensoniq, CONTROL)); /* switch everything off */
outl(0, ES_REG(ensoniq, SERIAL)); /* clear serial interface */
#else
outl(0, ES_REG(ensoniq, CONTROL)); /* switch everything off */
outl(0, ES_REG(ensoniq, SERIAL)); /* clear serial interface */
#endif
}
#ifdef CHIP1371
static const struct snd_pci_quirk es1371_amplifier_hack[] = {
SND_PCI_QUIRK_ID(0x107b, 0x2150), /* Gateway Solo 2150 */
SND_PCI_QUIRK_ID(0x13bd, 0x100c), /* EV1938 on Mebius PC-MJ100V */
SND_PCI_QUIRK_ID(0x1102, 0x5938), /* Targa Xtender300 */
SND_PCI_QUIRK_ID(0x1102, 0x8938), /* IPC Topnote G notebook */
{ } /* end */
};
static const struct es1371_quirk es1371_ac97_reset_hack[] = {
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_C },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_D },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_E },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_CT5880_A },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_ES1373_8 },
{ .vid = PCI_ANY_ID, .did = PCI_ANY_ID }
};
#endif
static void snd_ensoniq_chip_init(struct ensoniq *ensoniq)
{
#ifdef CHIP1371
int idx;
#endif
/* this code was part of snd_ensoniq_create before the introduction
* of suspend/resume
*/
#ifdef CHIP1370
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE));
outl(ensoniq->dma_bug->addr, ES_REG(ensoniq, PHANTOM_FRAME));
outl(0, ES_REG(ensoniq, PHANTOM_COUNT));
#else
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
outl(0, ES_REG(ensoniq, 1371_LEGACY));
if (es1371_quirk_lookup(ensoniq, es1371_ac97_reset_hack)) {
outl(ensoniq->cssr, ES_REG(ensoniq, STATUS));
/* need to delay around 20ms (bleech) to give
some CODECs enough time to wake up */
msleep(20);
}
/* AC'97 warm reset to start the bitclk */
outl(ensoniq->ctrl | ES_1371_SYNC_RES, ES_REG(ensoniq, CONTROL));
inl(ES_REG(ensoniq, CONTROL));
udelay(20);
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
/* Init the sample rate converter */
snd_es1371_wait_src_ready(ensoniq);
outl(ES_1371_SRC_DISABLE, ES_REG(ensoniq, 1371_SMPRATE));
for (idx = 0; idx < 0x80; idx++)
snd_es1371_src_write(ensoniq, idx, 0);
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_TRUNC_N, 16 << 4);
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_INT_REGS, 16 << 10);
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_TRUNC_N, 16 << 4);
snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_INT_REGS, 16 << 10);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC, 1 << 12);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC + 1, 1 << 12);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC1, 1 << 12);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC1 + 1, 1 << 12);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC2, 1 << 12);
snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC2 + 1, 1 << 12);
snd_es1371_adc_rate(ensoniq, 22050);
snd_es1371_dac1_rate(ensoniq, 22050);
snd_es1371_dac2_rate(ensoniq, 22050);
/* WARNING:
* enabling the sample rate converter without properly programming
* its parameters causes the chip to lock up (the SRC busy bit will
* be stuck high, and I've found no way to rectify this other than
* power cycle) - Thomas Sailer
*/
snd_es1371_wait_src_ready(ensoniq);
outl(0, ES_REG(ensoniq, 1371_SMPRATE));
/* try reset codec directly */
outl(ES_1371_CODEC_WRITE(0, 0), ES_REG(ensoniq, 1371_CODEC));
#endif
outb(ensoniq->uartc = 0x00, ES_REG(ensoniq, UART_CONTROL));
outb(0x00, ES_REG(ensoniq, UART_RES));
outl(ensoniq->cssr, ES_REG(ensoniq, STATUS));
}
#ifdef CONFIG_PM_SLEEP
static int snd_ensoniq_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
#ifdef CHIP1371
snd_ac97_suspend(ensoniq->u.es1371.ac97);
#else
/* try to reset AK4531 */
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x02), ES_REG(ensoniq, 1370_CODEC));
inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x03), ES_REG(ensoniq, 1370_CODEC));
inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
snd_ak4531_suspend(ensoniq->u.es1370.ak4531);
#endif
return 0;
}
static int snd_ensoniq_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
snd_ensoniq_chip_init(ensoniq);
#ifdef CHIP1371
snd_ac97_resume(ensoniq->u.es1371.ac97);
#else
snd_ak4531_resume(ensoniq->u.es1370.ak4531);
#endif
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_ensoniq_pm, snd_ensoniq_suspend, snd_ensoniq_resume);
#define SND_ENSONIQ_PM_OPS &snd_ensoniq_pm
#else
#define SND_ENSONIQ_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static int snd_ensoniq_create(struct snd_card *card,
struct pci_dev *pci)
{
struct ensoniq *ensoniq = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&ensoniq->reg_lock);
mutex_init(&ensoniq->src_mutex);
ensoniq->card = card;
ensoniq->pci = pci;
ensoniq->irq = -1;
err = pci_request_regions(pci, "Ensoniq AudioPCI");
if (err < 0)
return err;
ensoniq->port = pci_resource_start(pci, 0);
if (devm_request_irq(&pci->dev, pci->irq, snd_audiopci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, ensoniq)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
ensoniq->irq = pci->irq;
card->sync_irq = ensoniq->irq;
#ifdef CHIP1370
ensoniq->dma_bug =
snd_devm_alloc_pages(&pci->dev, SNDRV_DMA_TYPE_DEV, 16);
if (!ensoniq->dma_bug)
return -ENOMEM;
#endif
pci_set_master(pci);
ensoniq->rev = pci->revision;
#ifdef CHIP1370
#if 0
ensoniq->ctrl = ES_1370_CDC_EN | ES_1370_SERR_DISABLE |
ES_1370_PCLKDIVO(ES_1370_SRTODIV(8000));
#else /* get microphone working */
ensoniq->ctrl = ES_1370_CDC_EN | ES_1370_PCLKDIVO(ES_1370_SRTODIV(8000));
#endif
ensoniq->sctrl = 0;
#else
ensoniq->ctrl = 0;
ensoniq->sctrl = 0;
ensoniq->cssr = 0;
if (snd_pci_quirk_lookup(pci, es1371_amplifier_hack))
ensoniq->ctrl |= ES_1371_GPIO_OUT(1); /* turn amplifier on */
if (es1371_quirk_lookup(ensoniq, es1371_ac97_reset_hack))
ensoniq->cssr |= ES_1371_ST_AC97_RST;
#endif
card->private_free = snd_ensoniq_free;
snd_ensoniq_chip_init(ensoniq);
snd_ensoniq_proc_init(ensoniq);
return 0;
}
/*
* MIDI section
*/
static void snd_ensoniq_midi_interrupt(struct ensoniq *ensoniq)
{
struct snd_rawmidi *rmidi = ensoniq->rmidi;
unsigned char status, mask, byte;
if (rmidi == NULL)
return;
/* do Rx first */
spin_lock(&ensoniq->reg_lock);
mask = ensoniq->uartm & ES_MODE_INPUT ? ES_RXRDY : 0;
while (mask) {
status = inb(ES_REG(ensoniq, UART_STATUS));
if ((status & mask) == 0)
break;
byte = inb(ES_REG(ensoniq, UART_DATA));
snd_rawmidi_receive(ensoniq->midi_input, &byte, 1);
}
spin_unlock(&ensoniq->reg_lock);
/* then do Tx */
spin_lock(&ensoniq->reg_lock);
mask = ensoniq->uartm & ES_MODE_OUTPUT ? ES_TXRDY : 0;
while (mask) {
status = inb(ES_REG(ensoniq, UART_STATUS));
if ((status & mask) == 0)
break;
if (snd_rawmidi_transmit(ensoniq->midi_output, &byte, 1) != 1) {
ensoniq->uartc &= ~ES_TXINTENM;
outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL));
mask &= ~ES_TXRDY;
} else {
outb(byte, ES_REG(ensoniq, UART_DATA));
}
}
spin_unlock(&ensoniq->reg_lock);
}
static int snd_ensoniq_midi_input_open(struct snd_rawmidi_substream *substream)
{
struct ensoniq *ensoniq = substream->rmidi->private_data;
spin_lock_irq(&ensoniq->reg_lock);
ensoniq->uartm |= ES_MODE_INPUT;
ensoniq->midi_input = substream;
if (!(ensoniq->uartm & ES_MODE_OUTPUT)) {
outb(ES_CNTRL(3), ES_REG(ensoniq, UART_CONTROL));
outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL));
outl(ensoniq->ctrl |= ES_UART_EN, ES_REG(ensoniq, CONTROL));
}
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ensoniq_midi_input_close(struct snd_rawmidi_substream *substream)
{
struct ensoniq *ensoniq = substream->rmidi->private_data;
spin_lock_irq(&ensoniq->reg_lock);
if (!(ensoniq->uartm & ES_MODE_OUTPUT)) {
outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL));
outl(ensoniq->ctrl &= ~ES_UART_EN, ES_REG(ensoniq, CONTROL));
} else {
outb(ensoniq->uartc &= ~ES_RXINTEN, ES_REG(ensoniq, UART_CONTROL));
}
ensoniq->midi_input = NULL;
ensoniq->uartm &= ~ES_MODE_INPUT;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ensoniq_midi_output_open(struct snd_rawmidi_substream *substream)
{
struct ensoniq *ensoniq = substream->rmidi->private_data;
spin_lock_irq(&ensoniq->reg_lock);
ensoniq->uartm |= ES_MODE_OUTPUT;
ensoniq->midi_output = substream;
if (!(ensoniq->uartm & ES_MODE_INPUT)) {
outb(ES_CNTRL(3), ES_REG(ensoniq, UART_CONTROL));
outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL));
outl(ensoniq->ctrl |= ES_UART_EN, ES_REG(ensoniq, CONTROL));
}
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static int snd_ensoniq_midi_output_close(struct snd_rawmidi_substream *substream)
{
struct ensoniq *ensoniq = substream->rmidi->private_data;
spin_lock_irq(&ensoniq->reg_lock);
if (!(ensoniq->uartm & ES_MODE_INPUT)) {
outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL));
outl(ensoniq->ctrl &= ~ES_UART_EN, ES_REG(ensoniq, CONTROL));
} else {
outb(ensoniq->uartc &= ~ES_TXINTENM, ES_REG(ensoniq, UART_CONTROL));
}
ensoniq->midi_output = NULL;
ensoniq->uartm &= ~ES_MODE_OUTPUT;
spin_unlock_irq(&ensoniq->reg_lock);
return 0;
}
static void snd_ensoniq_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
unsigned long flags;
struct ensoniq *ensoniq = substream->rmidi->private_data;
int idx;
spin_lock_irqsave(&ensoniq->reg_lock, flags);
if (up) {
if ((ensoniq->uartc & ES_RXINTEN) == 0) {
/* empty input FIFO */
for (idx = 0; idx < 32; idx++)
inb(ES_REG(ensoniq, UART_DATA));
ensoniq->uartc |= ES_RXINTEN;
outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL));
}
} else {
if (ensoniq->uartc & ES_RXINTEN) {
ensoniq->uartc &= ~ES_RXINTEN;
outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL));
}
}
spin_unlock_irqrestore(&ensoniq->reg_lock, flags);
}
static void snd_ensoniq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
unsigned long flags;
struct ensoniq *ensoniq = substream->rmidi->private_data;
unsigned char byte;
spin_lock_irqsave(&ensoniq->reg_lock, flags);
if (up) {
if (ES_TXINTENI(ensoniq->uartc) == 0) {
ensoniq->uartc |= ES_TXINTENO(1);
/* fill the UART FIFO buffer first, and turn on Tx interrupts only if necessary */
while (ES_TXINTENI(ensoniq->uartc) == 1 &&
(inb(ES_REG(ensoniq, UART_STATUS)) & ES_TXRDY)) {
if (snd_rawmidi_transmit(substream, &byte, 1) != 1) {
ensoniq->uartc &= ~ES_TXINTENM;
} else {
outb(byte, ES_REG(ensoniq, UART_DATA));
}
}
outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL));
}
} else {
if (ES_TXINTENI(ensoniq->uartc) == 1) {
ensoniq->uartc &= ~ES_TXINTENM;
outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL));
}
}
spin_unlock_irqrestore(&ensoniq->reg_lock, flags);
}
static const struct snd_rawmidi_ops snd_ensoniq_midi_output =
{
.open = snd_ensoniq_midi_output_open,
.close = snd_ensoniq_midi_output_close,
.trigger = snd_ensoniq_midi_output_trigger,
};
static const struct snd_rawmidi_ops snd_ensoniq_midi_input =
{
.open = snd_ensoniq_midi_input_open,
.close = snd_ensoniq_midi_input_close,
.trigger = snd_ensoniq_midi_input_trigger,
};
static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device)
{
struct snd_rawmidi *rmidi;
int err;
err = snd_rawmidi_new(ensoniq->card, "ES1370/1", device, 1, 1, &rmidi);
if (err < 0)
return err;
strcpy(rmidi->name, CHIP_NAME);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_ensoniq_midi_output);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_ensoniq_midi_input);
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = ensoniq;
ensoniq->rmidi = rmidi;
return 0;
}
/*
* Interrupt handler
*/
static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
{
struct ensoniq *ensoniq = dev_id;
unsigned int status, sctrl;
if (ensoniq == NULL)
return IRQ_NONE;
status = inl(ES_REG(ensoniq, STATUS));
if (!(status & ES_INTR))
return IRQ_NONE;
spin_lock(&ensoniq->reg_lock);
sctrl = ensoniq->sctrl;
if (status & ES_DAC1)
sctrl &= ~ES_P1_INT_EN;
if (status & ES_DAC2)
sctrl &= ~ES_P2_INT_EN;
if (status & ES_ADC)
sctrl &= ~ES_R1_INT_EN;
outl(sctrl, ES_REG(ensoniq, SERIAL));
outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
spin_unlock(&ensoniq->reg_lock);
if (status & ES_UART)
snd_ensoniq_midi_interrupt(ensoniq);
if ((status & ES_DAC2) && ensoniq->playback2_substream)
snd_pcm_period_elapsed(ensoniq->playback2_substream);
if ((status & ES_ADC) && ensoniq->capture_substream)
snd_pcm_period_elapsed(ensoniq->capture_substream);
if ((status & ES_DAC1) && ensoniq->playback1_substream)
snd_pcm_period_elapsed(ensoniq->playback1_substream);
return IRQ_HANDLED;
}
static int __snd_audiopci_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct ensoniq *ensoniq;
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(*ensoniq), &card);
if (err < 0)
return err;
ensoniq = card->private_data;
err = snd_ensoniq_create(card, pci);
if (err < 0)
return err;
#ifdef CHIP1370
err = snd_ensoniq_1370_mixer(ensoniq);
if (err < 0)
return err;
#endif
#ifdef CHIP1371
err = snd_ensoniq_1371_mixer(ensoniq, spdif[dev], lineio[dev]);
if (err < 0)
return err;
#endif
err = snd_ensoniq_pcm(ensoniq, 0);
if (err < 0)
return err;
err = snd_ensoniq_pcm2(ensoniq, 1);
if (err < 0)
return err;
err = snd_ensoniq_midi(ensoniq, 0);
if (err < 0)
return err;
snd_ensoniq_create_gameport(ensoniq, dev);
strcpy(card->driver, DRIVER_NAME);
strcpy(card->shortname, "Ensoniq AudioPCI");
sprintf(card->longname, "%s %s at 0x%lx, irq %i",
card->shortname,
card->driver,
ensoniq->port,
ensoniq->irq);
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
dev++;
return 0;
}
static int snd_audiopci_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
}
static struct pci_driver ens137x_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_audiopci_ids,
.probe = snd_audiopci_probe,
.driver = {
.pm = SND_ENSONIQ_PM_OPS,
},
};
module_pci_driver(ens137x_driver);
| linux-master | sound/pci/ens1370.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA modem driver for VIA VT82xx (South Bridge)
*
* VT82C686A/B/C, VT8233A/C, VT8235
*
* Copyright (c) 2000 Jaroslav Kysela <[email protected]>
* Tjeerd.Mulder <[email protected]>
* 2002 Takashi Iwai <[email protected]>
*/
/*
* Changes:
*
* Sep. 2, 2004 Sasha Khapyorsky <[email protected]>
* Modified from original audio driver 'via82xx.c' to support AC97
* modems.
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#if 0
#define POINTER_DEBUG
#endif
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("VIA VT82xx modem");
MODULE_LICENSE("GPL");
static int index = -2; /* Exclude the first card */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int ac97_clock = 48000;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for VIA 82xx bridge.");
module_param(ac97_clock, int, 0444);
MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz).");
/* just for backward compatibility */
static bool enable;
module_param(enable, bool, 0444);
/*
* Direct registers
*/
#define VIAREG(via, x) ((via)->port + VIA_REG_##x)
#define VIADEV_REG(viadev, x) ((viadev)->port + VIA_REG_##x)
/* common offsets */
#define VIA_REG_OFFSET_STATUS 0x00 /* byte - channel status */
#define VIA_REG_STAT_ACTIVE 0x80 /* RO */
#define VIA_REG_STAT_PAUSED 0x40 /* RO */
#define VIA_REG_STAT_TRIGGER_QUEUED 0x08 /* RO */
#define VIA_REG_STAT_STOPPED 0x04 /* RWC */
#define VIA_REG_STAT_EOL 0x02 /* RWC */
#define VIA_REG_STAT_FLAG 0x01 /* RWC */
#define VIA_REG_OFFSET_CONTROL 0x01 /* byte - channel control */
#define VIA_REG_CTRL_START 0x80 /* WO */
#define VIA_REG_CTRL_TERMINATE 0x40 /* WO */
#define VIA_REG_CTRL_AUTOSTART 0x20
#define VIA_REG_CTRL_PAUSE 0x08 /* RW */
#define VIA_REG_CTRL_INT_STOP 0x04
#define VIA_REG_CTRL_INT_EOL 0x02
#define VIA_REG_CTRL_INT_FLAG 0x01
#define VIA_REG_CTRL_RESET 0x01 /* RW - probably reset? undocumented */
#define VIA_REG_CTRL_INT (VIA_REG_CTRL_INT_FLAG | VIA_REG_CTRL_INT_EOL | VIA_REG_CTRL_AUTOSTART)
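/* note: despite its name, VIA_REG_CTRL_INT also sets the AUTOSTART bit,
 * not only the two interrupt enables */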
#define VIA_REG_OFFSET_TYPE 0x02 /* byte - channel type (686 only) */
#define VIA_REG_TYPE_AUTOSTART 0x80 /* RW - autostart at EOL */
#define VIA_REG_TYPE_16BIT 0x20 /* RW */
#define VIA_REG_TYPE_STEREO 0x10 /* RW */
#define VIA_REG_TYPE_INT_LLINE 0x00
#define VIA_REG_TYPE_INT_LSAMPLE 0x04
#define VIA_REG_TYPE_INT_LESSONE 0x08
#define VIA_REG_TYPE_INT_MASK 0x0c
#define VIA_REG_TYPE_INT_EOL 0x02
#define VIA_REG_TYPE_INT_FLAG 0x01
#define VIA_REG_OFFSET_TABLE_PTR 0x04 /* dword - channel table pointer */
#define VIA_REG_OFFSET_CURR_PTR 0x04 /* dword - channel current pointer */
#define VIA_REG_OFFSET_STOP_IDX 0x08 /* dword - stop index, channel type, sample rate */
#define VIA_REG_OFFSET_CURR_COUNT 0x0c /* dword - channel current count (24 bit) */
#define VIA_REG_OFFSET_CURR_INDEX 0x0f /* byte - channel current index (for via8233 only) */
#define DEFINE_VIA_REGSET(name,val) \
enum {\
VIA_REG_##name##_STATUS = (val),\
VIA_REG_##name##_CONTROL = (val) + 0x01,\
VIA_REG_##name##_TYPE = (val) + 0x02,\
VIA_REG_##name##_TABLE_PTR = (val) + 0x04,\
VIA_REG_##name##_CURR_PTR = (val) + 0x04,\
VIA_REG_##name##_STOP_IDX = (val) + 0x08,\
VIA_REG_##name##_CURR_COUNT = (val) + 0x0c,\
}
/* modem block */
DEFINE_VIA_REGSET(MO, 0x40);
DEFINE_VIA_REGSET(MI, 0x50);
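/* For illustration, DEFINE_VIA_REGSET(MO, 0x40) above expands to
 * VIA_REG_MO_STATUS = 0x40, VIA_REG_MO_CONTROL = 0x41,
 * VIA_REG_MO_TYPE = 0x42, VIA_REG_MO_TABLE_PTR = 0x44,
 * VIA_REG_MO_STOP_IDX = 0x48 and VIA_REG_MO_CURR_COUNT = 0x4c,
 * mirroring the common offsets listed above.
 */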
/* AC'97 */
#define VIA_REG_AC97 0x80 /* dword */
#define VIA_REG_AC97_CODEC_ID_MASK (3<<30)
#define VIA_REG_AC97_CODEC_ID_SHIFT 30
#define VIA_REG_AC97_CODEC_ID_PRIMARY 0x00
#define VIA_REG_AC97_CODEC_ID_SECONDARY 0x01
#define VIA_REG_AC97_SECONDARY_VALID (1<<27)
#define VIA_REG_AC97_PRIMARY_VALID (1<<25)
#define VIA_REG_AC97_BUSY (1<<24)
#define VIA_REG_AC97_READ (1<<23)
#define VIA_REG_AC97_CMD_SHIFT 16
#define VIA_REG_AC97_CMD_MASK 0x7e
#define VIA_REG_AC97_DATA_SHIFT 0
#define VIA_REG_AC97_DATA_MASK 0xffff
#define VIA_REG_SGD_SHADOW 0x84 /* dword */
#define VIA_REG_SGD_STAT_PB_FLAG (1<<0)
#define VIA_REG_SGD_STAT_CP_FLAG (1<<1)
#define VIA_REG_SGD_STAT_FM_FLAG (1<<2)
#define VIA_REG_SGD_STAT_PB_EOL (1<<4)
#define VIA_REG_SGD_STAT_CP_EOL (1<<5)
#define VIA_REG_SGD_STAT_FM_EOL (1<<6)
#define VIA_REG_SGD_STAT_PB_STOP (1<<8)
#define VIA_REG_SGD_STAT_CP_STOP (1<<9)
#define VIA_REG_SGD_STAT_FM_STOP (1<<10)
#define VIA_REG_SGD_STAT_PB_ACTIVE (1<<12)
#define VIA_REG_SGD_STAT_CP_ACTIVE (1<<13)
#define VIA_REG_SGD_STAT_FM_ACTIVE (1<<14)
#define VIA_REG_SGD_STAT_MR_FLAG (1<<16)
#define VIA_REG_SGD_STAT_MW_FLAG (1<<17)
#define VIA_REG_SGD_STAT_MR_EOL (1<<20)
#define VIA_REG_SGD_STAT_MW_EOL (1<<21)
#define VIA_REG_SGD_STAT_MR_STOP (1<<24)
#define VIA_REG_SGD_STAT_MW_STOP (1<<25)
#define VIA_REG_SGD_STAT_MR_ACTIVE (1<<28)
#define VIA_REG_SGD_STAT_MW_ACTIVE (1<<29)
#define VIA_REG_GPI_STATUS 0x88
#define VIA_REG_GPI_INTR 0x8c
#define VIA_TBL_BIT_FLAG 0x40000000
#define VIA_TBL_BIT_EOL 0x80000000
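/* Each scatter-gather table entry is a pair of little-endian 32-bit
 * words: the chunk address, then its length OR'ed with the FLAG/EOL
 * bits above (see build_via_table() below).
 */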
/* pci space */
#define VIA_ACLINK_STAT 0x40
#define VIA_ACLINK_C11_READY 0x20
#define VIA_ACLINK_C10_READY 0x10
#define VIA_ACLINK_C01_READY 0x04 /* secondary codec ready */
#define VIA_ACLINK_LOWPOWER 0x02 /* low-power state */
#define VIA_ACLINK_C00_READY 0x01 /* primary codec ready */
#define VIA_ACLINK_CTRL 0x41
#define VIA_ACLINK_CTRL_ENABLE 0x80 /* 0: disable, 1: enable */
#define VIA_ACLINK_CTRL_RESET 0x40 /* 0: assert, 1: de-assert */
#define VIA_ACLINK_CTRL_SYNC 0x20 /* 0: release SYNC, 1: force SYNC hi */
#define VIA_ACLINK_CTRL_SDO 0x10 /* 0: release SDO, 1: force SDO hi */
#define VIA_ACLINK_CTRL_VRA 0x08 /* 0: disable VRA, 1: enable VRA */
#define VIA_ACLINK_CTRL_PCM 0x04 /* 0: disable PCM, 1: enable PCM */
#define VIA_ACLINK_CTRL_FM 0x02 /* via686 only */
#define VIA_ACLINK_CTRL_SB 0x01 /* via686 only */
#define VIA_ACLINK_CTRL_INIT (VIA_ACLINK_CTRL_ENABLE|\
VIA_ACLINK_CTRL_RESET|\
VIA_ACLINK_CTRL_PCM)
#define VIA_FUNC_ENABLE 0x42
#define VIA_FUNC_MIDI_PNP 0x80 /* FIXME: it's 0x40 in the datasheet! */
#define VIA_FUNC_MIDI_IRQMASK 0x40 /* FIXME: not documented! */
#define VIA_FUNC_RX2C_WRITE 0x20
#define VIA_FUNC_SB_FIFO_EMPTY 0x10
#define VIA_FUNC_ENABLE_GAME 0x08
#define VIA_FUNC_ENABLE_FM 0x04
#define VIA_FUNC_ENABLE_MIDI 0x02
#define VIA_FUNC_ENABLE_SB 0x01
#define VIA_PNP_CONTROL 0x43
#define VIA_MC97_CTRL 0x44
#define VIA_MC97_CTRL_ENABLE 0x80
#define VIA_MC97_CTRL_SECONDARY 0x40
#define VIA_MC97_CTRL_INIT (VIA_MC97_CTRL_ENABLE|\
VIA_MC97_CTRL_SECONDARY)
/*
* pcm stream
*/
struct snd_via_sg_table {
unsigned int offset;
unsigned int size;
};
#define VIA_TABLE_SIZE 255
struct viadev {
unsigned int reg_offset;
unsigned long port;
int direction; /* playback = 0, capture = 1 */
struct snd_pcm_substream *substream;
int running;
unsigned int tbl_entries; /* # descriptors */
struct snd_dma_buffer table;
struct snd_via_sg_table *idx_table;
/* for recovery from the unexpected pointer */
unsigned int lastpos;
unsigned int bufsize;
unsigned int bufsize2;
};
enum { TYPE_CARD_VIA82XX_MODEM = 1 };
#define VIA_MAX_MODEM_DEVS 2
struct via82xx_modem {
int irq;
unsigned long port;
unsigned int intr_mask; /* SGD_SHADOW mask to check interrupts */
struct pci_dev *pci;
struct snd_card *card;
unsigned int num_devs;
unsigned int playback_devno, capture_devno;
struct viadev devs[VIA_MAX_MODEM_DEVS];
struct snd_pcm *pcms[2];
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97;
unsigned int ac97_clock;
unsigned int ac97_secondary; /* secondary AC'97 codec is present */
spinlock_t reg_lock;
struct snd_info_entry *proc_entry;
};
static const struct pci_device_id snd_via82xx_modem_ids[] = {
{ PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_via82xx_modem_ids);
/*
*/
/*
* allocate and initialize the descriptor buffers
* periods = number of periods
* fragsize = period size in bytes
*/
static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
struct pci_dev *pci,
unsigned int periods, unsigned int fragsize)
{
unsigned int i, idx, ofs, rest;
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
__le32 *pgtbl;
if (dev->table.area == NULL) {
/* the start of each list must be aligned to 8 bytes,
* but the kernel pages are much bigger, so we don't care
*/
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
&dev->table) < 0)
return -ENOMEM;
}
if (! dev->idx_table) {
dev->idx_table = kmalloc_array(VIA_TABLE_SIZE,
sizeof(*dev->idx_table),
GFP_KERNEL);
if (! dev->idx_table)
return -ENOMEM;
}
/* fill the entries */
idx = 0;
ofs = 0;
pgtbl = (__le32 *)dev->table.area;
for (i = 0; i < periods; i++) {
rest = fragsize;
/* fill descriptors for a period.
* a period can be split to several descriptors if it's
* over page boundary.
*/
do {
unsigned int r;
unsigned int flag;
unsigned int addr;
if (idx >= VIA_TABLE_SIZE) {
dev_err(&pci->dev, "too much table size!\n");
return -EINVAL;
}
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
pgtbl[idx << 1] = cpu_to_le32(addr);
r = PAGE_SIZE - (ofs % PAGE_SIZE);
if (rest < r)
r = rest;
rest -= r;
if (! rest) {
if (i == periods - 1)
flag = VIA_TBL_BIT_EOL; /* buffer boundary */
else
flag = VIA_TBL_BIT_FLAG; /* period boundary */
} else
flag = 0; /* period continues to the next */
/*
dev_dbg(&pci->dev,
"tbl %d: at %d size %d (rest %d)\n",
idx, ofs, r, rest);
*/
pgtbl[(idx<<1) + 1] = cpu_to_le32(r | flag);
dev->idx_table[idx].offset = ofs;
dev->idx_table[idx].size = r;
ofs += r;
idx++;
} while (rest > 0);
}
dev->tbl_entries = idx;
dev->bufsize = periods * fragsize;
dev->bufsize2 = dev->bufsize / 2;
return 0;
}
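/* Worked example (hypothetical sizes, assuming PAGE_SIZE = 4096):
 * periods = 2, fragsize = 5120 gives four descriptors:
 * idx 0: offset 0, size 4096, no flag (period continues)
 * idx 1: offset 4096, size 1024, VIA_TBL_BIT_FLAG (period boundary)
 * idx 2: offset 5120, size 3072, no flag
 * idx 3: offset 8192, size 2048, VIA_TBL_BIT_EOL (buffer boundary)
 */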
static int clean_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
struct pci_dev *pci)
{
if (dev->table.area) {
snd_dma_free_pages(&dev->table);
dev->table.area = NULL;
}
kfree(dev->idx_table);
dev->idx_table = NULL;
return 0;
}
/*
* Basic I/O
*/
static inline unsigned int snd_via82xx_codec_xread(struct via82xx_modem *chip)
{
return inl(VIAREG(chip, AC97));
}
static inline void snd_via82xx_codec_xwrite(struct via82xx_modem *chip, unsigned int val)
{
outl(val, VIAREG(chip, AC97));
}
static int snd_via82xx_codec_ready(struct via82xx_modem *chip, int secondary)
{
unsigned int timeout = 1000; /* 1ms */
unsigned int val;
while (timeout-- > 0) {
udelay(1);
val = snd_via82xx_codec_xread(chip);
if (!(val & VIA_REG_AC97_BUSY))
return val & 0xffff;
}
dev_err(chip->card->dev, "codec_ready: codec %i is not ready [0x%x]\n",
secondary, snd_via82xx_codec_xread(chip));
return -EIO;
}
static int snd_via82xx_codec_valid(struct via82xx_modem *chip, int secondary)
{
unsigned int timeout = 1000; /* 1ms */
unsigned int val, val1;
unsigned int stat = !secondary ? VIA_REG_AC97_PRIMARY_VALID :
VIA_REG_AC97_SECONDARY_VALID;
while (timeout-- > 0) {
val = snd_via82xx_codec_xread(chip);
val1 = val & (VIA_REG_AC97_BUSY | stat);
if (val1 == stat)
return val & 0xffff;
udelay(1);
}
return -EIO;
}
static void snd_via82xx_codec_wait(struct snd_ac97 *ac97)
{
struct via82xx_modem *chip = ac97->private_data;
__always_unused int err;
err = snd_via82xx_codec_ready(chip, ac97->num);
/* here we need to wait for a fairly long time.. */
msleep(500);
}
static void snd_via82xx_codec_write(struct snd_ac97 *ac97,
unsigned short reg,
unsigned short val)
{
struct via82xx_modem *chip = ac97->private_data;
unsigned int xval;
if (reg == AC97_GPIO_STATUS) {
outl(val, VIAREG(chip, GPI_STATUS));
return;
}
xval = !ac97->num ? VIA_REG_AC97_CODEC_ID_PRIMARY : VIA_REG_AC97_CODEC_ID_SECONDARY;
xval <<= VIA_REG_AC97_CODEC_ID_SHIFT;
xval |= reg << VIA_REG_AC97_CMD_SHIFT;
xval |= val << VIA_REG_AC97_DATA_SHIFT;
snd_via82xx_codec_xwrite(chip, xval);
snd_via82xx_codec_ready(chip, ac97->num);
}
static unsigned short snd_via82xx_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
struct via82xx_modem *chip = ac97->private_data;
unsigned int xval, val = 0xffff;
int again = 0;
xval = ac97->num << VIA_REG_AC97_CODEC_ID_SHIFT;
xval |= ac97->num ? VIA_REG_AC97_SECONDARY_VALID : VIA_REG_AC97_PRIMARY_VALID;
xval |= VIA_REG_AC97_READ;
xval |= (reg & 0x7f) << VIA_REG_AC97_CMD_SHIFT;
while (1) {
if (again++ > 3) {
dev_err(chip->card->dev,
"codec_read: codec %i is not valid [0x%x]\n",
ac97->num, snd_via82xx_codec_xread(chip));
return 0xffff;
}
snd_via82xx_codec_xwrite(chip, xval);
udelay(20);
if (snd_via82xx_codec_valid(chip, ac97->num) >= 0) {
udelay(25);
val = snd_via82xx_codec_xread(chip);
break;
}
}
return val & 0xffff;
}
static void snd_via82xx_channel_reset(struct via82xx_modem *chip, struct viadev *viadev)
{
outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET,
VIADEV_REG(viadev, OFFSET_CONTROL));
inb(VIADEV_REG(viadev, OFFSET_CONTROL));
udelay(50);
/* disable interrupts */
outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL));
/* clear interrupts */
outb(0x03, VIADEV_REG(viadev, OFFSET_STATUS));
outb(0x00, VIADEV_REG(viadev, OFFSET_TYPE)); /* for via686 */
// outl(0, VIADEV_REG(viadev, OFFSET_CURR_PTR));
viadev->lastpos = 0;
}
/*
* Interrupt handler
*/
static irqreturn_t snd_via82xx_interrupt(int irq, void *dev_id)
{
struct via82xx_modem *chip = dev_id;
unsigned int status;
unsigned int i;
status = inl(VIAREG(chip, SGD_SHADOW));
if (! (status & chip->intr_mask)) {
return IRQ_NONE;
}
// _skip_sgd:
/* check status for each stream */
spin_lock(&chip->reg_lock);
for (i = 0; i < chip->num_devs; i++) {
struct viadev *viadev = &chip->devs[i];
unsigned char c_status = inb(VIADEV_REG(viadev, OFFSET_STATUS));
c_status &= (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG|VIA_REG_STAT_STOPPED);
if (! c_status)
continue;
if (viadev->substream && viadev->running) {
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(viadev->substream);
spin_lock(&chip->reg_lock);
}
outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */
}
spin_unlock(&chip->reg_lock);
return IRQ_HANDLED;
}
/*
* PCM callbacks
*/
/*
* trigger callback
*/
static int snd_via82xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = substream->runtime->private_data;
unsigned char val = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_SUSPEND:
val |= VIA_REG_CTRL_START;
viadev->running = 1;
break;
case SNDRV_PCM_TRIGGER_STOP:
val = VIA_REG_CTRL_TERMINATE;
viadev->running = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
val |= VIA_REG_CTRL_PAUSE;
viadev->running = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
viadev->running = 1;
break;
default:
return -EINVAL;
}
outb(val, VIADEV_REG(viadev, OFFSET_CONTROL));
if (cmd == SNDRV_PCM_TRIGGER_STOP)
snd_via82xx_channel_reset(chip, viadev);
return 0;
}
/*
* pointer callbacks
*/
/*
* calculate the linear position at the given sg-buffer index and the rest count
*/
#define check_invalid_pos(viadev,pos) \
((pos) < viadev->lastpos && ((pos) >= viadev->bufsize2 ||\
viadev->lastpos < viadev->bufsize2))
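/* That is, a position smaller than lastpos is plausible only when it
 * wraps from the upper half of the ring buffer into the lower half
 * (lastpos >= bufsize2 and pos < bufsize2); any other backward jump
 * is treated as a bogus hardware readout by the caller below.
 */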
static inline unsigned int calc_linear_pos(struct via82xx_modem *chip,
struct viadev *viadev,
unsigned int idx,
unsigned int count)
{
unsigned int size, res;
size = viadev->idx_table[idx].size;
res = viadev->idx_table[idx].offset + size - count;
/* check the validity of the calculated position */
if (size < count) {
dev_err(chip->card->dev,
"invalid via82xx_cur_ptr (size = %d, count = %d)\n",
(int)size, (int)count);
res = viadev->lastpos;
} else if (check_invalid_pos(viadev, res)) {
#ifdef POINTER_DEBUG
dev_dbg(chip->card->dev,
"fail: idx = %i/%i, lastpos = 0x%x, bufsize2 = 0x%x, offsize = 0x%x, size = 0x%x, count = 0x%x\n",
idx, viadev->tbl_entries, viadev->lastpos,
viadev->bufsize2, viadev->idx_table[idx].offset,
viadev->idx_table[idx].size, count);
#endif
if (count && size < count) {
dev_dbg(chip->card->dev,
"invalid via82xx_cur_ptr, using last valid pointer\n");
res = viadev->lastpos;
} else {
if (! count)
/* bogus count 0 on the DMA boundary? */
res = viadev->idx_table[idx].offset;
else
/* count register returns full size
* when end of buffer is reached
*/
res = viadev->idx_table[idx].offset + size;
if (check_invalid_pos(viadev, res)) {
dev_dbg(chip->card->dev,
"invalid via82xx_cur_ptr (2), using last valid pointer\n");
res = viadev->lastpos;
}
}
}
viadev->lastpos = res; /* remember the last position */
if (res >= viadev->bufsize)
res -= viadev->bufsize;
return res;
}
/*
* get the current pointer on via686
*/
static snd_pcm_uframes_t snd_via686_pcm_pointer(struct snd_pcm_substream *substream)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = substream->runtime->private_data;
unsigned int idx, ptr, count, res;
if (snd_BUG_ON(!viadev->tbl_entries))
return 0;
if (!(inb(VIADEV_REG(viadev, OFFSET_STATUS)) & VIA_REG_STAT_ACTIVE))
return 0;
spin_lock(&chip->reg_lock);
count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT)) & 0xffffff;
/* The via686a does not have the current index register,
* so we need to calculate the index from CURR_PTR.
*/
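/* Worked example (hypothetical addresses): with table.addr at
 * 0x100000 and CURR_PTR reading 0x100010, the engine is on
 * descriptor (0x10 / 8) - 1 = 1, since each entry is 8 bytes
 * and CURR_PTR points 8 bytes past the start of the entry in
 * flight.
 */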
ptr = inl(VIADEV_REG(viadev, OFFSET_CURR_PTR));
if (ptr <= (unsigned int)viadev->table.addr)
idx = 0;
else /* CURR_PTR holds the address + 8 */
idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) %
viadev->tbl_entries;
res = calc_linear_pos(chip, viadev, idx, count);
spin_unlock(&chip->reg_lock);
return bytes_to_frames(substream->runtime, res);
}
/*
* hw_params callback:
* allocate the buffer and build up the buffer description table
*/
static int snd_via82xx_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = substream->runtime->private_data;
int err;
err = build_via_table(viadev, substream, chip->pci,
params_periods(hw_params),
params_period_bytes(hw_params));
if (err < 0)
return err;
snd_ac97_write(chip->ac97, AC97_LINE1_RATE, params_rate(hw_params));
snd_ac97_write(chip->ac97, AC97_LINE1_LEVEL, 0);
return 0;
}
/*
* hw_free callback:
* clean up the buffer description table and release the buffer
*/
static int snd_via82xx_hw_free(struct snd_pcm_substream *substream)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = substream->runtime->private_data;
clean_via_table(viadev, substream, chip->pci);
return 0;
}
/*
* set up the table pointer
*/
static void snd_via82xx_set_table_ptr(struct via82xx_modem *chip, struct viadev *viadev)
{
snd_via82xx_codec_ready(chip, chip->ac97_secondary);
outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR));
udelay(20);
snd_via82xx_codec_ready(chip, chip->ac97_secondary);
}
/*
* prepare callback for playback and capture
*/
static int snd_via82xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = substream->runtime->private_data;
snd_via82xx_channel_reset(chip, viadev);
/* this must be set after channel_reset */
snd_via82xx_set_table_ptr(chip, viadev);
outb(VIA_REG_TYPE_AUTOSTART|VIA_REG_TYPE_INT_EOL|VIA_REG_TYPE_INT_FLAG,
VIADEV_REG(viadev, OFFSET_TYPE));
return 0;
}
/*
* pcm hardware definition, identical for both playback and capture
*/
static const struct snd_pcm_hardware snd_via82xx_hw =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
/* SNDRV_PCM_INFO_RESUME | */
SNDRV_PCM_INFO_PAUSE),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT,
.rate_min = 8000,
.rate_max = 16000,
.channels_min = 1,
.channels_max = 1,
.buffer_bytes_max = 128 * 1024,
.period_bytes_min = 32,
.period_bytes_max = 128 * 1024,
.periods_min = 2,
.periods_max = VIA_TABLE_SIZE / 2,
.fifo_size = 0,
};
/*
* open callback skeleton
*/
static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev *viadev,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
static const unsigned int rates[] = { 8000, 9600, 12000, 16000 };
static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
runtime->hw = snd_via82xx_hw;
err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (err < 0)
return err;
/* we may remove the following constraint when we modify table entries
in the interrupt handler */
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
runtime->private_data = viadev;
viadev->substream = substream;
return 0;
}
/*
* open callback for playback
*/
static int snd_via82xx_playback_open(struct snd_pcm_substream *substream)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number];
return snd_via82xx_modem_pcm_open(chip, viadev, substream);
}
/*
* open callback for capture
*/
static int snd_via82xx_capture_open(struct snd_pcm_substream *substream)
{
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
struct viadev *viadev = &chip->devs[chip->capture_devno + substream->pcm->device];
return snd_via82xx_modem_pcm_open(chip, viadev, substream);
}
/*
* close callback
*/
static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream)
{
struct viadev *viadev = substream->runtime->private_data;
viadev->substream = NULL;
return 0;
}
/* via686 playback callbacks */
static const struct snd_pcm_ops snd_via686_playback_ops = {
.open = snd_via82xx_playback_open,
.close = snd_via82xx_pcm_close,
.hw_params = snd_via82xx_hw_params,
.hw_free = snd_via82xx_hw_free,
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
};
/* via686 capture callbacks */
static const struct snd_pcm_ops snd_via686_capture_ops = {
.open = snd_via82xx_capture_open,
.close = snd_via82xx_pcm_close,
.hw_params = snd_via82xx_hw_params,
.hw_free = snd_via82xx_hw_free,
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
};
static void init_viadev(struct via82xx_modem *chip, int idx, unsigned int reg_offset,
int direction)
{
chip->devs[idx].reg_offset = reg_offset;
chip->devs[idx].direction = direction;
chip->devs[idx].port = chip->port + reg_offset;
}
/*
* create a pcm instance for via686a/b
*/
static int snd_via686_pcm_new(struct via82xx_modem *chip)
{
struct snd_pcm *pcm;
int err;
chip->playback_devno = 0;
chip->capture_devno = 1;
chip->num_devs = 2;
chip->intr_mask = 0x330000; /* FLAGS | EOL for MR, MW */
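/* i.e. VIA_REG_SGD_STAT_MR_FLAG | VIA_REG_SGD_STAT_MW_FLAG |
 * VIA_REG_SGD_STAT_MR_EOL | VIA_REG_SGD_STAT_MW_EOL
 * = (3 << 16) | (3 << 20) = 0x330000 */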
err = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via686_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via686_capture_ops);
pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
pcm->private_data = chip;
strcpy(pcm->name, chip->card->shortname);
chip->pcms[0] = pcm;
init_viadev(chip, 0, VIA_REG_MO_STATUS, 0);
init_viadev(chip, 1, VIA_REG_MI_STATUS, 1);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
&chip->pci->dev, 64*1024, 128*1024);
return 0;
}
/*
* Mixer part
*/
static void snd_via82xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus)
{
struct via82xx_modem *chip = bus->private_data;
chip->ac97_bus = NULL;
}
static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97)
{
struct via82xx_modem *chip = ac97->private_data;
chip->ac97 = NULL;
}
static int snd_via82xx_mixer_new(struct via82xx_modem *chip)
{
struct snd_ac97_template ac97;
int err;
static const struct snd_ac97_bus_ops ops = {
.write = snd_via82xx_codec_write,
.read = snd_via82xx_codec_read,
.wait = snd_via82xx_codec_wait,
};
err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
if (err < 0)
return err;
chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus;
chip->ac97_bus->clock = chip->ac97_clock;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.private_free = snd_via82xx_mixer_free_ac97;
ac97.pci = chip->pci;
ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
ac97.num = chip->ac97_secondary;
err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
if (err < 0)
return err;
return 0;
}
/*
* proc interface
*/
static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct via82xx_modem *chip = entry->private_data;
int i;
snd_iprintf(buffer, "%s\n\n", chip->card->longname);
for (i = 0; i < 0xa0; i += 4) {
snd_iprintf(buffer, "%02x: %08x\n", i, inl(chip->port + i));
}
}
static void snd_via82xx_proc_init(struct via82xx_modem *chip)
{
snd_card_ro_proc_new(chip->card, "via82xx", chip,
snd_via82xx_proc_read);
}
/*
*
*/
static int snd_via82xx_chip_init(struct via82xx_modem *chip)
{
unsigned int val;
unsigned long end_time;
unsigned char pval;
pci_read_config_byte(chip->pci, VIA_MC97_CTRL, &pval);
if ((pval & VIA_MC97_CTRL_INIT) != VIA_MC97_CTRL_INIT) {
pci_write_config_byte(chip->pci, VIA_MC97_CTRL, pval|VIA_MC97_CTRL_INIT);
udelay(100);
}
pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval);
if (! (pval & VIA_ACLINK_C00_READY)) { /* codec not ready? */
/* deassert ACLink reset, force SYNC */
pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL,
VIA_ACLINK_CTRL_ENABLE |
VIA_ACLINK_CTRL_RESET |
VIA_ACLINK_CTRL_SYNC);
udelay(100);
#if 1 /* FIXME: should we do full reset here for all chip models? */
pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, 0x00);
udelay(100);
#else
/* deassert ACLink reset, force SYNC (warm AC'97 reset) */
pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL,
VIA_ACLINK_CTRL_RESET|VIA_ACLINK_CTRL_SYNC);
udelay(2);
#endif
/* ACLink on, deassert ACLink reset, VSR, SGD data out */
pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT);
udelay(100);
}
pci_read_config_byte(chip->pci, VIA_ACLINK_CTRL, &pval);
if ((pval & VIA_ACLINK_CTRL_INIT) != VIA_ACLINK_CTRL_INIT) {
/* ACLink on, deassert ACLink reset, VSR, SGD data out */
pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT);
udelay(100);
}
/* wait until codec ready */
end_time = jiffies + msecs_to_jiffies(750);
do {
pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval);
if (pval & VIA_ACLINK_C00_READY) /* primary codec ready */
break;
schedule_timeout_uninterruptible(1);
} while (time_before(jiffies, end_time));
val = snd_via82xx_codec_xread(chip);
if (val & VIA_REG_AC97_BUSY)
dev_err(chip->card->dev,
"AC'97 codec is not ready [0x%x]\n", val);
snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ |
VIA_REG_AC97_SECONDARY_VALID |
(VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
end_time = jiffies + msecs_to_jiffies(750);
snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ |
VIA_REG_AC97_SECONDARY_VALID |
(VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
do {
val = snd_via82xx_codec_xread(chip);
if (val & VIA_REG_AC97_SECONDARY_VALID) {
chip->ac97_secondary = 1;
goto __ac97_ok2;
}
schedule_timeout_uninterruptible(1);
} while (time_before(jiffies, end_time));
/* This is ok; most motherboards have only one codec */
__ac97_ok2:
/* route FM trap to IRQ, disable FM trap */
// pci_write_config_byte(chip->pci, VIA_FM_NMI_CTRL, 0);
/* disable all GPI interrupts */
outl(0, VIAREG(chip, GPI_INTR));
return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
static int snd_via82xx_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
snd_ac97_suspend(chip->ac97);
return 0;
}
static int snd_via82xx_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
snd_via82xx_chip_init(chip);
snd_ac97_resume(chip->ac97);
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_via82xx_pm, snd_via82xx_suspend, snd_via82xx_resume);
#define SND_VIA82XX_PM_OPS &snd_via82xx_pm
#else
#define SND_VIA82XX_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static void snd_via82xx_free(struct snd_card *card)
{
struct via82xx_modem *chip = card->private_data;
unsigned int i;
/* disable interrupts */
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
}
static int snd_via82xx_create(struct snd_card *card,
struct pci_dev *pci,
int chip_type,
int revision,
unsigned int ac97_clock)
{
struct via82xx_modem *chip = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&chip->reg_lock);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
err = pci_request_regions(pci, card->driver);
if (err < 0)
return err;
chip->port = pci_resource_start(pci, 0);
if (devm_request_irq(&pci->dev, pci->irq, snd_via82xx_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_via82xx_free;
if (ac97_clock >= 8000 && ac97_clock <= 48000)
chip->ac97_clock = ac97_clock;
err = snd_via82xx_chip_init(chip);
if (err < 0)
return err;
/* The 8233 ac97 controller does not implement the master bit
* in the pci command register. IMHO this is a violation of the PCI spec.
* We call pci_set_master here because it does not hurt. */
pci_set_master(pci);
return 0;
}
static int __snd_via82xx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct via82xx_modem *chip;
int chip_type = 0, card_type;
unsigned int i;
int err;
err = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
card_type = pci_id->driver_data;
switch (card_type) {
case TYPE_CARD_VIA82XX_MODEM:
strcpy(card->driver, "VIA82XX-MODEM");
sprintf(card->shortname, "VIA 82XX modem");
break;
default:
dev_err(card->dev, "invalid card type %d\n", card_type);
return -EINVAL;
}
err = snd_via82xx_create(card, pci, chip_type, pci->revision,
ac97_clock);
if (err < 0)
return err;
err = snd_via82xx_mixer_new(chip);
if (err < 0)
return err;
err = snd_via686_pcm_new(chip);
if (err < 0)
return err;
/* disable interrupts */
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
sprintf(card->longname, "%s at 0x%lx, irq %d",
card->shortname, chip->port, chip->irq);
snd_via82xx_proc_init(chip);
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
return 0;
}
static int snd_via82xx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
}
static struct pci_driver via82xx_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_via82xx_modem_ids,
.probe = snd_via82xx_probe,
.driver = {
.pm = SND_VIA82XX_PM_OPS,
},
};
module_pci_driver(via82xx_modem_driver);
| linux-master | sound/pci/via82xx_modem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cs5530.c - Initialisation code for Cyrix/NatSemi VSA1 softaudio
*
* (C) Copyright 2007 Ash Willis <[email protected]>
* (C) Copyright 2003 Red Hat Inc <[email protected]>
*
* This driver was ported (shamelessly ripped ;) from oss/kahlua.c but I did
 * mess with it a bit. The chip seems to have trouble with full duplex
 * mode. If we're recording in 8-bit 8kHz, say, and we then attempt to
 * simultaneously play back audio at 16-bit 44.1kHz, the device actually
 * plays back in the same format in which it is capturing. By forcing the
 * chip to always play/capture in 16-bit/44100Hz, we can let alsa-lib
 * convert the samples and that way we can hack up some full duplex audio.
*
* XpressAudio(tm) is used on the Cyrix MediaGX (now NatSemi Geode) systems.
* The older version (VSA1) provides fairly good soundblaster emulation
* although there are a couple of bugs: large DMA buffers break record,
* and the MPU event handling seems suspect. VSA2 allows the native driver
* to control the AC97 audio engine directly and requires a different driver.
*
* Thanks to National Semiconductor for providing the needed information
* on the XpressAudio(tm) internals.
*
* TO DO:
* Investigate whether we can portably support Cognac (5520) in the
* same manner.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/sb.h>
#include <sound/initval.h>
MODULE_AUTHOR("Ash Willis");
MODULE_DESCRIPTION("CS5530 Audio");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for CS5530 Audio driver.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for CS5530 Audio driver.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable CS5530 Audio driver.");
struct snd_cs5530 {
struct snd_card *card;
struct pci_dev *pci;
struct snd_sb *sb;
unsigned long pci_base;
};
static const struct pci_device_id snd_cs5530_ids[] = {
{PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID,
PCI_ANY_ID, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, snd_cs5530_ids);
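/*
 * Read an SB16-style mixer register through the index/data port pair at
 * io + 4 (index) and io + 5 (data). snd_cs5530_create() below uses
 * registers 0x80 and 0x81, which in the usual SB16 mixer layout are the
 * IRQ select and DMA select registers (an assumption from the common
 * SB16 convention; no CS5530 datasheet was consulted here).
 */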
static u8 snd_cs5530_mixer_read(unsigned long io, u8 reg)
{
outb(reg, io + 4);
udelay(20);
reg = inb(io + 5);
udelay(20);
return reg;
}
static int snd_cs5530_create(struct snd_card *card,
struct pci_dev *pci)
{
struct snd_cs5530 *chip = card->private_data;
unsigned long sb_base;
u8 irq, dma8, dma16 = 0;
u16 map;
void __iomem *mem;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
chip->card = card;
chip->pci = pci;
err = pcim_iomap_regions(pci, 1 << 0, "CS5530");
if (err < 0)
return err;
chip->pci_base = pci_resource_start(pci, 0);
mem = pcim_iomap_table(pci)[0];
map = readw(mem + 0x18);
/* Map bits
 * 0:1 * 0x20 + 0x220 = sb base
 * 2 sb enable
 * 3 adlib enable
 * 5 MPU enable 0x330
 * 6 MPU enable 0x300
 * The other bits may be used internally, so they must be masked out.
 */
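/* Worked example: map & 3 selects the base, so the possible values are
 * 0x220, 0x240, 0x260 and 0x280 (the standard SoundBlaster bases).
 */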
sb_base = 0x220 + 0x20 * (map & 3);
if (map & (1<<2))
dev_info(card->dev, "XpressAudio at 0x%lx\n", sb_base);
else {
dev_err(card->dev, "Could not find XpressAudio!\n");
return -ENODEV;
}
if (map & (1<<5))
dev_info(card->dev, "MPU at 0x300\n");
else if (map & (1<<6))
dev_info(card->dev, "MPU at 0x330\n");
irq = snd_cs5530_mixer_read(sb_base, 0x80) & 0x0F;
dma8 = snd_cs5530_mixer_read(sb_base, 0x81);
if (dma8 & 0x20)
dma16 = 5;
else if (dma8 & 0x40)
dma16 = 6;
else if (dma8 & 0x80)
dma16 = 7;
else {
dev_err(card->dev, "No 16bit DMA enabled\n");
return -ENODEV;
}
if (dma8 & 0x01)
dma8 = 0;
else if (dma8 & 0x02)
dma8 = 1;
else if (dma8 & 0x08)
dma8 = 3;
else {
dev_err(card->dev, "No 8bit DMA enabled\n");
return -ENODEV;
}
if (irq & 1)
irq = 9;
else if (irq & 2)
irq = 5;
else if (irq & 4)
irq = 7;
else if (irq & 8)
irq = 10;
else {
dev_err(card->dev, "SoundBlaster IRQ not set\n");
return -ENODEV;
}
dev_info(card->dev, "IRQ: %d DMA8: %d DMA16: %d\n", irq, dma8, dma16);
err = snd_sbdsp_create(card, sb_base, irq, snd_sb16dsp_interrupt, dma8,
dma16, SB_HW_CS5530, &chip->sb);
if (err < 0) {
dev_err(card->dev, "Could not create SoundBlaster\n");
return err;
}
err = snd_sb16dsp_pcm(chip->sb, 0);
if (err < 0) {
dev_err(card->dev, "Could not create PCM\n");
return err;
}
err = snd_sbmixer_new(chip->sb);
if (err < 0) {
dev_err(card->dev, "Could not create Mixer\n");
return err;
}
return 0;
}
static int snd_cs5530_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct snd_cs5530 *chip;
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
err = snd_cs5530_create(card, pci);
if (err < 0)
return err;
strcpy(card->driver, "CS5530");
strcpy(card->shortname, "CS5530 Audio");
sprintf(card->longname, "%s at 0x%lx", card->shortname, chip->pci_base);
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
dev++;
return 0;
}
static struct pci_driver cs5530_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs5530_ids,
.probe = snd_cs5530_probe,
};
module_pci_driver(cs5530_driver);
| linux-master | sound/pci/cs5530.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA driver for ATI IXP 150/200/250/300 AC97 controllers
*
* Copyright (c) 2004 Takashi Iwai <[email protected]>
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
MODULE_AUTHOR("Takashi Iwai <[email protected]>");
MODULE_DESCRIPTION("ATI IXP AC97 controller");
MODULE_LICENSE("GPL");
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int ac97_clock = 48000;
static char *ac97_quirk;
static bool spdif_aclink = 1;
static int ac97_codec = -1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for ATI IXP controller.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for ATI IXP controller.");
module_param(ac97_clock, int, 0444);
MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz).");
module_param(ac97_quirk, charp, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware.");
module_param(ac97_codec, int, 0444);
MODULE_PARM_DESC(ac97_codec, "Specify codec instead of probing.");
module_param(spdif_aclink, bool, 0444);
MODULE_PARM_DESC(spdif_aclink, "S/PDIF over AC-link.");
/* just for backward compatibility */
static bool enable;
module_param(enable, bool, 0444);
/*
*/
#define ATI_REG_ISR 0x00 /* interrupt source */
#define ATI_REG_ISR_IN_XRUN (1U<<0)
#define ATI_REG_ISR_IN_STATUS (1U<<1)
#define ATI_REG_ISR_OUT_XRUN (1U<<2)
#define ATI_REG_ISR_OUT_STATUS (1U<<3)
#define ATI_REG_ISR_SPDF_XRUN (1U<<4)
#define ATI_REG_ISR_SPDF_STATUS (1U<<5)
#define ATI_REG_ISR_PHYS_INTR (1U<<8)
#define ATI_REG_ISR_PHYS_MISMATCH (1U<<9)
#define ATI_REG_ISR_CODEC0_NOT_READY (1U<<10)
#define ATI_REG_ISR_CODEC1_NOT_READY (1U<<11)
#define ATI_REG_ISR_CODEC2_NOT_READY (1U<<12)
#define ATI_REG_ISR_NEW_FRAME (1U<<13)
#define ATI_REG_IER 0x04 /* interrupt enable */
#define ATI_REG_IER_IN_XRUN_EN (1U<<0)
#define ATI_REG_IER_IO_STATUS_EN (1U<<1)
#define ATI_REG_IER_OUT_XRUN_EN (1U<<2)
#define ATI_REG_IER_OUT_XRUN_COND (1U<<3)
#define ATI_REG_IER_SPDF_XRUN_EN (1U<<4)
#define ATI_REG_IER_SPDF_STATUS_EN (1U<<5)
#define ATI_REG_IER_PHYS_INTR_EN (1U<<8)
#define ATI_REG_IER_PHYS_MISMATCH_EN (1U<<9)
#define ATI_REG_IER_CODEC0_INTR_EN (1U<<10)
#define ATI_REG_IER_CODEC1_INTR_EN (1U<<11)
#define ATI_REG_IER_CODEC2_INTR_EN (1U<<12)
#define ATI_REG_IER_NEW_FRAME_EN (1U<<13) /* (RO) */
#define ATI_REG_IER_SET_BUS_BUSY (1U<<14) /* (WO) audio is running */
#define ATI_REG_CMD 0x08 /* command */
#define ATI_REG_CMD_POWERDOWN (1U<<0)
#define ATI_REG_CMD_RECEIVE_EN (1U<<1)
#define ATI_REG_CMD_SEND_EN (1U<<2)
#define ATI_REG_CMD_STATUS_MEM (1U<<3)
#define ATI_REG_CMD_SPDF_OUT_EN (1U<<4)
#define ATI_REG_CMD_SPDF_STATUS_MEM (1U<<5)
#define ATI_REG_CMD_SPDF_THRESHOLD (3U<<6)
#define ATI_REG_CMD_SPDF_THRESHOLD_SHIFT 6
#define ATI_REG_CMD_IN_DMA_EN (1U<<8)
#define ATI_REG_CMD_OUT_DMA_EN (1U<<9)
#define ATI_REG_CMD_SPDF_DMA_EN (1U<<10)
#define ATI_REG_CMD_SPDF_OUT_STOPPED (1U<<11)
#define ATI_REG_CMD_SPDF_CONFIG_MASK (7U<<12)
#define ATI_REG_CMD_SPDF_CONFIG_34 (1U<<12)
#define ATI_REG_CMD_SPDF_CONFIG_78 (2U<<12)
#define ATI_REG_CMD_SPDF_CONFIG_69 (3U<<12)
#define ATI_REG_CMD_SPDF_CONFIG_01 (4U<<12)
#define ATI_REG_CMD_INTERLEAVE_SPDF (1U<<16)
#define ATI_REG_CMD_AUDIO_PRESENT (1U<<20)
#define ATI_REG_CMD_INTERLEAVE_IN (1U<<21)
#define ATI_REG_CMD_INTERLEAVE_OUT (1U<<22)
#define ATI_REG_CMD_LOOPBACK_EN (1U<<23)
#define ATI_REG_CMD_PACKED_DIS (1U<<24)
#define ATI_REG_CMD_BURST_EN (1U<<25)
#define ATI_REG_CMD_PANIC_EN (1U<<26)
#define ATI_REG_CMD_MODEM_PRESENT (1U<<27)
#define ATI_REG_CMD_ACLINK_ACTIVE (1U<<28)
#define ATI_REG_CMD_AC_SOFT_RESET (1U<<29)
#define ATI_REG_CMD_AC_SYNC (1U<<30)
#define ATI_REG_CMD_AC_RESET (1U<<31)
#define ATI_REG_PHYS_OUT_ADDR 0x0c
#define ATI_REG_PHYS_OUT_CODEC_MASK (3U<<0)
#define ATI_REG_PHYS_OUT_RW (1U<<2)
#define ATI_REG_PHYS_OUT_ADDR_EN (1U<<8)
#define ATI_REG_PHYS_OUT_ADDR_SHIFT 9
#define ATI_REG_PHYS_OUT_DATA_SHIFT 16
#define ATI_REG_PHYS_IN_ADDR 0x10
#define ATI_REG_PHYS_IN_READ_FLAG (1U<<8)
#define ATI_REG_PHYS_IN_ADDR_SHIFT 9
#define ATI_REG_PHYS_IN_DATA_SHIFT 16
#define ATI_REG_SLOTREQ 0x14
#define ATI_REG_COUNTER 0x18
#define ATI_REG_COUNTER_SLOT (3U<<0) /* slot # */
#define ATI_REG_COUNTER_BITCLOCK (31U<<8)
#define ATI_REG_IN_FIFO_THRESHOLD 0x1c
#define ATI_REG_IN_DMA_LINKPTR 0x20
#define ATI_REG_IN_DMA_DT_START 0x24 /* RO */
#define ATI_REG_IN_DMA_DT_NEXT 0x28 /* RO */
#define ATI_REG_IN_DMA_DT_CUR 0x2c /* RO */
#define ATI_REG_IN_DMA_DT_SIZE 0x30
#define ATI_REG_OUT_DMA_SLOT 0x34
#define ATI_REG_OUT_DMA_SLOT_BIT(x) (1U << ((x) - 3))
#define ATI_REG_OUT_DMA_SLOT_MASK 0x1ff
#define ATI_REG_OUT_DMA_THRESHOLD_MASK 0xf800
#define ATI_REG_OUT_DMA_THRESHOLD_SHIFT 11
#define ATI_REG_OUT_DMA_LINKPTR 0x38
#define ATI_REG_OUT_DMA_DT_START 0x3c /* RO */
#define ATI_REG_OUT_DMA_DT_NEXT 0x40 /* RO */
#define ATI_REG_OUT_DMA_DT_CUR 0x44 /* RO */
#define ATI_REG_OUT_DMA_DT_SIZE 0x48
#define ATI_REG_SPDF_CMD 0x4c
#define ATI_REG_SPDF_CMD_LFSR (1U<<4)
#define ATI_REG_SPDF_CMD_SINGLE_CH (1U<<5)
#define ATI_REG_SPDF_CMD_LFSR_ACC (0xff<<8) /* RO */
#define ATI_REG_SPDF_DMA_LINKPTR 0x50
#define ATI_REG_SPDF_DMA_DT_START 0x54 /* RO */
#define ATI_REG_SPDF_DMA_DT_NEXT 0x58 /* RO */
#define ATI_REG_SPDF_DMA_DT_CUR 0x5c /* RO */
#define ATI_REG_SPDF_DMA_DT_SIZE 0x60
#define ATI_REG_MODEM_MIRROR 0x7c
#define ATI_REG_AUDIO_MIRROR 0x80
#define ATI_REG_6CH_REORDER 0x84 /* reorder slots for 6ch */
#define ATI_REG_6CH_REORDER_EN (1U<<0) /* 3,4,7,8,6,9 -> 3,4,6,9,7,8 */
#define ATI_REG_FIFO_FLUSH 0x88
#define ATI_REG_FIFO_OUT_FLUSH (1U<<0)
#define ATI_REG_FIFO_IN_FLUSH (1U<<1)
/* LINKPTR */
#define ATI_REG_LINKPTR_EN (1U<<0)
/* [INT|OUT|SPDIF]_DMA_DT_SIZE */
#define ATI_REG_DMA_DT_SIZE (0xffffU<<0)
#define ATI_REG_DMA_FIFO_USED (0x1fU<<16)
#define ATI_REG_DMA_FIFO_FREE (0x1fU<<21)
#define ATI_REG_DMA_STATE (7U<<26)
#define ATI_MAX_DESCRIPTORS 256 /* max number of descriptor packets */
struct atiixp;
/*
 * DMA packet descriptor
*/
struct atiixp_dma_desc {
__le32 addr; /* DMA buffer address */
u16 status; /* status bits */
u16 size; /* size of the packet in dwords */
__le32 next; /* address of the next packet descriptor */
};
/*
* stream enum
*/
enum { ATI_DMA_PLAYBACK, ATI_DMA_CAPTURE, ATI_DMA_SPDIF, NUM_ATI_DMAS }; /* DMAs */
enum { ATI_PCM_OUT, ATI_PCM_IN, ATI_PCM_SPDIF, NUM_ATI_PCMS }; /* AC97 pcm slots */
enum { ATI_PCMDEV_ANALOG, ATI_PCMDEV_DIGITAL, NUM_ATI_PCMDEVS }; /* pcm devices */
#define NUM_ATI_CODECS 3
/*
* constants and callbacks for each DMA type
*/
struct atiixp_dma_ops {
int type; /* ATI_DMA_XXX */
unsigned int llp_offset; /* LINKPTR offset */
unsigned int dt_cur; /* DT_CUR offset */
/* called from open callback */
void (*enable_dma)(struct atiixp *chip, int on);
/* called from trigger (START/STOP) */
void (*enable_transfer)(struct atiixp *chip, int on);
/* called from trigger (STOP only) */
void (*flush_dma)(struct atiixp *chip);
};
/*
* DMA stream
*/
struct atiixp_dma {
const struct atiixp_dma_ops *ops;
struct snd_dma_buffer desc_buf;
struct snd_pcm_substream *substream; /* assigned PCM substream */
unsigned int buf_addr, buf_bytes; /* DMA buffer address, bytes */
unsigned int period_bytes, periods;
int opened;
int running;
int suspended;
int pcm_open_flag;
int ac97_pcm_type; /* index # of ac97_pcm to access, -1 = not used */
unsigned int saved_curptr;
};
/*
* ATI IXP chip
*/
struct atiixp {
struct snd_card *card;
struct pci_dev *pci;
unsigned long addr;
void __iomem *remap_addr;
int irq;
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97[NUM_ATI_CODECS];
spinlock_t reg_lock;
struct atiixp_dma dmas[NUM_ATI_DMAS];
struct ac97_pcm *pcms[NUM_ATI_PCMS];
struct snd_pcm *pcmdevs[NUM_ATI_PCMDEVS];
int max_channels; /* max. channels for PCM out */
unsigned int codec_not_ready_bits; /* for codec detection */
int spdif_over_aclink; /* passed from the module option */
struct mutex open_mutex; /* playback open mutex */
};
/*
*/
static const struct pci_device_id snd_atiixp_ids[] = {
{ PCI_VDEVICE(ATI, 0x4341), 0 }, /* SB200 */
{ PCI_VDEVICE(ATI, 0x4361), 0 }, /* SB300 */
{ PCI_VDEVICE(ATI, 0x4370), 0 }, /* SB400 */
{ PCI_VDEVICE(ATI, 0x4382), 0 }, /* SB600 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_atiixp_ids);
static const struct snd_pci_quirk atiixp_quirks[] = {
SND_PCI_QUIRK(0x105b, 0x0c81, "Foxconn RC4107MA-RS2", 0),
SND_PCI_QUIRK(0x15bd, 0x3100, "DFI RS482", 0),
{ } /* terminator */
};
/*
* lowlevel functions
*/
/*
* update the bits of the given register.
* return 1 if the bits changed.
*/
static int snd_atiixp_update_bits(struct atiixp *chip, unsigned int reg,
unsigned int mask, unsigned int value)
{
void __iomem *addr = chip->remap_addr + reg;
unsigned int data, old_data;
old_data = data = readl(addr);
data &= ~mask;
data |= value;
if (old_data == data)
return 0;
writel(data, addr);
return 1;
}
/*
* macros for easy use
*/
#define atiixp_write(chip,reg,value) \
writel(value, chip->remap_addr + ATI_REG_##reg)
#define atiixp_read(chip,reg) \
readl(chip->remap_addr + ATI_REG_##reg)
#define atiixp_update(chip,reg,mask,val) \
snd_atiixp_update_bits(chip, ATI_REG_##reg, mask, val)
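/*
 * Usage sketch: atiixp_update(chip, CMD, ATI_REG_CMD_SPDF_OUT_EN, 0)
 * expands to snd_atiixp_update_bits(chip, ATI_REG_CMD,
 * ATI_REG_CMD_SPDF_OUT_EN, 0), a read-modify-write that clears the
 * S/PDIF output enable bit and skips the writel() when nothing changes.
 */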
/*
* handling DMA packets
*
 * we allocate a linear buffer for the DMA and split it into packets.
* in a future version, a scatter-gather buffer should be implemented.
*/
#define ATI_DESC_LIST_SIZE \
PAGE_ALIGN(ATI_MAX_DESCRIPTORS * sizeof(struct atiixp_dma_desc))
/*
* build packets ring for the given buffer size.
*
* IXP handles the buffer descriptors, which are connected as a linked
* list. although we can change the list dynamically, in this version,
* a static RING of buffer descriptors is used.
*
* the ring is built in this function, and is set up to the hardware.
*/
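/*
 * Worked example (illustration only): periods = 4 and period_bytes =
 * 4096 yield four descriptors of 1024 dwords each, chained into a ring:
 *
 *   desc[0] -> desc[1] -> desc[2] -> desc[3]
 *      ^___________________________________|
 */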
static int atiixp_build_dma_packets(struct atiixp *chip, struct atiixp_dma *dma,
struct snd_pcm_substream *substream,
unsigned int periods,
unsigned int period_bytes)
{
unsigned int i;
u32 addr, desc_addr;
unsigned long flags;
if (periods > ATI_MAX_DESCRIPTORS)
return -ENOMEM;
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
ATI_DESC_LIST_SIZE,
&dma->desc_buf) < 0)
return -ENOMEM;
dma->period_bytes = dma->periods = 0; /* clear */
}
if (dma->periods == periods && dma->period_bytes == period_bytes)
return 0;
/* reset DMA before changing the descriptor table */
spin_lock_irqsave(&chip->reg_lock, flags);
writel(0, chip->remap_addr + dma->ops->llp_offset);
dma->ops->enable_dma(chip, 0);
dma->ops->enable_dma(chip, 1);
spin_unlock_irqrestore(&chip->reg_lock, flags);
/* fill the entries */
addr = (u32)substream->runtime->dma_addr;
desc_addr = (u32)dma->desc_buf.addr;
for (i = 0; i < periods; i++) {
struct atiixp_dma_desc *desc;
desc = &((struct atiixp_dma_desc *)dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
desc->status = 0;
desc->size = period_bytes >> 2; /* in dwords */
desc_addr += sizeof(struct atiixp_dma_desc);
if (i == periods - 1)
desc->next = cpu_to_le32((u32)dma->desc_buf.addr);
else
desc->next = cpu_to_le32(desc_addr);
addr += period_bytes;
}
writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
chip->remap_addr + dma->ops->llp_offset);
dma->period_bytes = period_bytes;
dma->periods = periods;
return 0;
}
/*
* remove the ring buffer and release it if assigned
*/
static void atiixp_clear_dma_packets(struct atiixp *chip, struct atiixp_dma *dma,
struct snd_pcm_substream *substream)
{
if (dma->desc_buf.area) {
writel(0, chip->remap_addr + dma->ops->llp_offset);
snd_dma_free_pages(&dma->desc_buf);
dma->desc_buf.area = NULL;
}
}
/*
* AC97 interface
*/
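/*
 * Access protocol, as implemented below: wait for the PHYS_OUT_ADDR_EN
 * handshake bit to clear, post the codec/register/data word to
 * PHYS_OUT_ADDR, and for reads poll PHYS_IN_ADDR until
 * ATI_REG_PHYS_IN_READ_FLAG is set with the data in the upper 16 bits.
 */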
static int snd_atiixp_acquire_codec(struct atiixp *chip)
{
int timeout = 1000;
while (atiixp_read(chip, PHYS_OUT_ADDR) & ATI_REG_PHYS_OUT_ADDR_EN) {
if (! timeout--) {
dev_warn(chip->card->dev, "codec acquire timeout\n");
return -EBUSY;
}
udelay(1);
}
return 0;
}
static unsigned short snd_atiixp_codec_read(struct atiixp *chip, unsigned short codec, unsigned short reg)
{
unsigned int data;
int timeout;
if (snd_atiixp_acquire_codec(chip) < 0)
return 0xffff;
data = (reg << ATI_REG_PHYS_OUT_ADDR_SHIFT) |
ATI_REG_PHYS_OUT_ADDR_EN |
ATI_REG_PHYS_OUT_RW |
codec;
atiixp_write(chip, PHYS_OUT_ADDR, data);
if (snd_atiixp_acquire_codec(chip) < 0)
return 0xffff;
timeout = 1000;
do {
data = atiixp_read(chip, PHYS_IN_ADDR);
if (data & ATI_REG_PHYS_IN_READ_FLAG)
return data >> ATI_REG_PHYS_IN_DATA_SHIFT;
udelay(1);
} while (--timeout);
/* a timeout may happen during reset */
if (reg < 0x7c)
dev_warn(chip->card->dev, "codec read timeout (reg %x)\n", reg);
return 0xffff;
}
static void snd_atiixp_codec_write(struct atiixp *chip, unsigned short codec,
unsigned short reg, unsigned short val)
{
unsigned int data;
if (snd_atiixp_acquire_codec(chip) < 0)
return;
data = ((unsigned int)val << ATI_REG_PHYS_OUT_DATA_SHIFT) |
((unsigned int)reg << ATI_REG_PHYS_OUT_ADDR_SHIFT) |
ATI_REG_PHYS_OUT_ADDR_EN | codec;
atiixp_write(chip, PHYS_OUT_ADDR, data);
}
static unsigned short snd_atiixp_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct atiixp *chip = ac97->private_data;
return snd_atiixp_codec_read(chip, ac97->num, reg);
}
static void snd_atiixp_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct atiixp *chip = ac97->private_data;
snd_atiixp_codec_write(chip, ac97->num, reg, val);
}
/*
* reset AC link
*/
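/*
 * Sequence sketch: clear POWERDOWN, pulse AC_SOFT_RESET, then retry up
 * to ten SYNC/RESET hard-reset cycles until ACLINK_ACTIVE is reported,
 * ending with the AC_SYNC and AC_RESET bits both set.
 */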
static int snd_atiixp_aclink_reset(struct atiixp *chip)
{
int timeout;
/* clear the powerdown bit */
if (atiixp_update(chip, CMD, ATI_REG_CMD_POWERDOWN, 0))
udelay(10);
/* perform a software reset */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SOFT_RESET, ATI_REG_CMD_AC_SOFT_RESET);
atiixp_read(chip, CMD);
udelay(10);
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SOFT_RESET, 0);
timeout = 10;
while (! (atiixp_read(chip, CMD) & ATI_REG_CMD_ACLINK_ACTIVE)) {
/* do a hard reset */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_AC_SYNC);
atiixp_read(chip, CMD);
mdelay(1);
atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, ATI_REG_CMD_AC_RESET);
if (!--timeout) {
dev_err(chip->card->dev, "codec reset timeout\n");
break;
}
}
/* deassert RESET and assert SYNC to make sure */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int snd_atiixp_aclink_down(struct atiixp *chip)
{
// if (atiixp_read(chip, MODEM_MIRROR) & 0x1) /* modem running, too? */
// return -EBUSY;
atiixp_update(chip, CMD,
ATI_REG_CMD_POWERDOWN | ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_POWERDOWN);
return 0;
}
#endif
/*
* auto-detection of codecs
*
 * the IXP chip can generate "not ready" interrupts for codecs that are
 * not present. The NEW_FRAME interrupt is used to guarantee that at
 * least one interrupt fires even when all three codecs are connected
 * and ready (in which case no NOT_READY interrupt would ever arrive).
*/
#define ALL_CODEC_NOT_READY \
(ATI_REG_ISR_CODEC0_NOT_READY |\
ATI_REG_ISR_CODEC1_NOT_READY |\
ATI_REG_ISR_CODEC2_NOT_READY)
#define CODEC_CHECK_BITS (ALL_CODEC_NOT_READY|ATI_REG_ISR_NEW_FRAME)
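/*
 * Forcing example (sketch): with ac97_codec=0, snd_atiixp_codec_detect()
 * computes CODEC_CHECK_BITS ^ (1 << 10), i.e. it pre-marks codecs 1 and
 * 2 (plus the NEW_FRAME bit) as "not ready", so only codec 0 is probed
 * later by snd_atiixp_mixer_new().
 */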
static int ac97_probing_bugs(struct pci_dev *pci)
{
const struct snd_pci_quirk *q;
q = snd_pci_quirk_lookup(pci, atiixp_quirks);
if (q) {
dev_dbg(&pci->dev, "atiixp quirk for %s. Forcing codec %d\n",
snd_pci_quirk_name(q), q->value);
return q->value;
}
/* this hardware doesn't need workarounds. Probe for codec */
return -1;
}
static int snd_atiixp_codec_detect(struct atiixp *chip)
{
int timeout;
chip->codec_not_ready_bits = 0;
if (ac97_codec == -1)
ac97_codec = ac97_probing_bugs(chip->pci);
if (ac97_codec >= 0) {
chip->codec_not_ready_bits |=
CODEC_CHECK_BITS ^ (1 << (ac97_codec + 10));
return 0;
}
atiixp_write(chip, IER, CODEC_CHECK_BITS);
/* wait for the interrupts */
timeout = 50;
while (timeout-- > 0) {
mdelay(1);
if (chip->codec_not_ready_bits)
break;
}
atiixp_write(chip, IER, 0); /* disable irqs */
if ((chip->codec_not_ready_bits & ALL_CODEC_NOT_READY) == ALL_CODEC_NOT_READY) {
dev_err(chip->card->dev, "no codec detected!\n");
return -ENXIO;
}
return 0;
}
/*
* enable DMA and irqs
*/
static int snd_atiixp_chip_start(struct atiixp *chip)
{
unsigned int reg;
/* set up spdif, enable burst mode */
reg = atiixp_read(chip, CMD);
reg |= 0x02 << ATI_REG_CMD_SPDF_THRESHOLD_SHIFT;
reg |= ATI_REG_CMD_BURST_EN;
atiixp_write(chip, CMD, reg);
reg = atiixp_read(chip, SPDF_CMD);
reg &= ~(ATI_REG_SPDF_CMD_LFSR|ATI_REG_SPDF_CMD_SINGLE_CH);
atiixp_write(chip, SPDF_CMD, reg);
/* clear all interrupt source */
atiixp_write(chip, ISR, 0xffffffff);
/* enable irqs */
atiixp_write(chip, IER,
ATI_REG_IER_IO_STATUS_EN |
ATI_REG_IER_IN_XRUN_EN |
ATI_REG_IER_OUT_XRUN_EN |
ATI_REG_IER_SPDF_XRUN_EN |
ATI_REG_IER_SPDF_STATUS_EN);
return 0;
}
/*
* disable DMA and IRQs
*/
static int snd_atiixp_chip_stop(struct atiixp *chip)
{
/* clear interrupt source */
atiixp_write(chip, ISR, atiixp_read(chip, ISR));
/* disable irqs */
atiixp_write(chip, IER, 0);
return 0;
}
/*
* PCM section
*/
/*
 * the pointer callback simply reads the XXX_DMA_DT_CUR register as the current
* position. when SG-buffer is implemented, the offset must be calculated
* correctly...
*/
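/*
 * The bounded retry loop below re-reads DT_CUR until the value falls
 * inside the DMA buffer window; presumably the register can be caught
 * mid-update by the hardware (an inference from the code, not from a
 * datasheet).
 */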
static snd_pcm_uframes_t snd_atiixp_pcm_pointer(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct atiixp_dma *dma = runtime->private_data;
unsigned int curptr;
int timeout = 1000;
while (timeout--) {
curptr = readl(chip->remap_addr + dma->ops->dt_cur);
if (curptr < dma->buf_addr)
continue;
curptr -= dma->buf_addr;
if (curptr >= dma->buf_bytes)
continue;
return bytes_to_frames(runtime, curptr);
}
dev_dbg(chip->card->dev, "invalid DMA pointer read 0x%x (buf=%x)\n",
readl(chip->remap_addr + dma->ops->dt_cur), dma->buf_addr);
return 0;
}
/*
* XRUN detected, and stop the PCM substream
*/
static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
{
if (! dma->substream || ! dma->running)
return;
dev_dbg(chip->card->dev, "XRUN detected (DMA %d)\n", dma->ops->type);
snd_pcm_stop_xrun(dma->substream);
}
/*
* the period ack. update the substream.
*/
static void snd_atiixp_update_dma(struct atiixp *chip, struct atiixp_dma *dma)
{
if (! dma->substream || ! dma->running)
return;
snd_pcm_period_elapsed(dma->substream);
}
/* set BUS_BUSY interrupt bit if any DMA is running */
/* call with spinlock held */
static void snd_atiixp_check_bus_busy(struct atiixp *chip)
{
unsigned int bus_busy;
if (atiixp_read(chip, CMD) & (ATI_REG_CMD_SEND_EN |
ATI_REG_CMD_RECEIVE_EN |
ATI_REG_CMD_SPDF_OUT_EN))
bus_busy = ATI_REG_IER_SET_BUS_BUSY;
else
bus_busy = 0;
atiixp_update(chip, IER, ATI_REG_IER_SET_BUS_BUSY, bus_busy);
}
/* common trigger callback
* calling the lowlevel callbacks in it
*/
static int snd_atiixp_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
int err = 0;
if (snd_BUG_ON(!dma->ops->enable_transfer ||
!dma->ops->flush_dma))
return -EINVAL;
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
if (dma->running && dma->suspended &&
cmd == SNDRV_PCM_TRIGGER_RESUME)
writel(dma->saved_curptr, chip->remap_addr +
dma->ops->dt_cur);
dma->ops->enable_transfer(chip, 1);
dma->running = 1;
dma->suspended = 0;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
dma->suspended = cmd == SNDRV_PCM_TRIGGER_SUSPEND;
if (dma->running && dma->suspended)
dma->saved_curptr = readl(chip->remap_addr +
dma->ops->dt_cur);
dma->ops->enable_transfer(chip, 0);
dma->running = 0;
break;
default:
err = -EINVAL;
break;
}
if (! err) {
snd_atiixp_check_bus_busy(chip);
if (cmd == SNDRV_PCM_TRIGGER_STOP) {
dma->ops->flush_dma(chip);
snd_atiixp_check_bus_busy(chip);
}
}
spin_unlock(&chip->reg_lock);
return err;
}
/*
* lowlevel callbacks for each DMA type
*
* every callback is supposed to be called in chip->reg_lock spinlock
*/
/* flush FIFO of analog OUT DMA */
static void atiixp_out_flush_dma(struct atiixp *chip)
{
atiixp_write(chip, FIFO_FLUSH, ATI_REG_FIFO_OUT_FLUSH);
}
/* enable/disable analog OUT DMA */
static void atiixp_out_enable_dma(struct atiixp *chip, int on)
{
unsigned int data;
data = atiixp_read(chip, CMD);
if (on) {
if (data & ATI_REG_CMD_OUT_DMA_EN)
return;
atiixp_out_flush_dma(chip);
data |= ATI_REG_CMD_OUT_DMA_EN;
} else
data &= ~ATI_REG_CMD_OUT_DMA_EN;
atiixp_write(chip, CMD, data);
}
/* start/stop transfer over OUT DMA */
static void atiixp_out_enable_transfer(struct atiixp *chip, int on)
{
atiixp_update(chip, CMD, ATI_REG_CMD_SEND_EN,
on ? ATI_REG_CMD_SEND_EN : 0);
}
/* enable/disable analog IN DMA */
static void atiixp_in_enable_dma(struct atiixp *chip, int on)
{
atiixp_update(chip, CMD, ATI_REG_CMD_IN_DMA_EN,
on ? ATI_REG_CMD_IN_DMA_EN : 0);
}
/* start/stop transfer over analog IN DMA */
static void atiixp_in_enable_transfer(struct atiixp *chip, int on)
{
if (on) {
unsigned int data = atiixp_read(chip, CMD);
if (! (data & ATI_REG_CMD_RECEIVE_EN)) {
data |= ATI_REG_CMD_RECEIVE_EN;
#if 0 /* FIXME: this causes an endless loop */
/* wait until slot 3/4 are finished */
while ((atiixp_read(chip, COUNTER) &
ATI_REG_COUNTER_SLOT) != 5)
;
#endif
atiixp_write(chip, CMD, data);
}
} else
atiixp_update(chip, CMD, ATI_REG_CMD_RECEIVE_EN, 0);
}
/* flush FIFO of analog IN DMA */
static void atiixp_in_flush_dma(struct atiixp *chip)
{
atiixp_write(chip, FIFO_FLUSH, ATI_REG_FIFO_IN_FLUSH);
}
/* enable/disable SPDIF OUT DMA */
static void atiixp_spdif_enable_dma(struct atiixp *chip, int on)
{
atiixp_update(chip, CMD, ATI_REG_CMD_SPDF_DMA_EN,
on ? ATI_REG_CMD_SPDF_DMA_EN : 0);
}
/* start/stop SPDIF OUT DMA */
static void atiixp_spdif_enable_transfer(struct atiixp *chip, int on)
{
unsigned int data;
data = atiixp_read(chip, CMD);
if (on)
data |= ATI_REG_CMD_SPDF_OUT_EN;
else
data &= ~ATI_REG_CMD_SPDF_OUT_EN;
atiixp_write(chip, CMD, data);
}
/* flush FIFO of SPDIF OUT DMA */
static void atiixp_spdif_flush_dma(struct atiixp *chip)
{
int timeout;
/* DMA off, transfer on */
atiixp_spdif_enable_dma(chip, 0);
atiixp_spdif_enable_transfer(chip, 1);
timeout = 100;
do {
if (! (atiixp_read(chip, SPDF_DMA_DT_SIZE) & ATI_REG_DMA_FIFO_USED))
break;
udelay(1);
} while (timeout-- > 0);
atiixp_spdif_enable_transfer(chip, 0);
}
/* set up slots and formats for SPDIF OUT */
static int snd_atiixp_spdif_prepare(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
spin_lock_irq(&chip->reg_lock);
if (chip->spdif_over_aclink) {
unsigned int data;
/* enable slots 10/11 */
atiixp_update(chip, CMD, ATI_REG_CMD_SPDF_CONFIG_MASK,
ATI_REG_CMD_SPDF_CONFIG_01);
data = atiixp_read(chip, OUT_DMA_SLOT) & ~ATI_REG_OUT_DMA_SLOT_MASK;
data |= ATI_REG_OUT_DMA_SLOT_BIT(10) |
ATI_REG_OUT_DMA_SLOT_BIT(11);
data |= 0x04 << ATI_REG_OUT_DMA_THRESHOLD_SHIFT;
atiixp_write(chip, OUT_DMA_SLOT, data);
atiixp_update(chip, CMD, ATI_REG_CMD_INTERLEAVE_OUT,
substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE ?
ATI_REG_CMD_INTERLEAVE_OUT : 0);
} else {
atiixp_update(chip, CMD, ATI_REG_CMD_SPDF_CONFIG_MASK, 0);
atiixp_update(chip, CMD, ATI_REG_CMD_INTERLEAVE_SPDF, 0);
}
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/* set up slots and formats for analog OUT */
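/* Worked example: a 6-channel stream takes the "case 6" branch below
 * and, via the fallthroughs, enables slots 7/8, then 6/9, then 3/4,
 * i.e. the full 3,4,6,7,8,9 slot set.
 */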
static int snd_atiixp_playback_prepare(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
unsigned int data;
spin_lock_irq(&chip->reg_lock);
data = atiixp_read(chip, OUT_DMA_SLOT) & ~ATI_REG_OUT_DMA_SLOT_MASK;
switch (substream->runtime->channels) {
case 8:
data |= ATI_REG_OUT_DMA_SLOT_BIT(10) |
ATI_REG_OUT_DMA_SLOT_BIT(11);
fallthrough;
case 6:
data |= ATI_REG_OUT_DMA_SLOT_BIT(7) |
ATI_REG_OUT_DMA_SLOT_BIT(8);
fallthrough;
case 4:
data |= ATI_REG_OUT_DMA_SLOT_BIT(6) |
ATI_REG_OUT_DMA_SLOT_BIT(9);
fallthrough;
default:
data |= ATI_REG_OUT_DMA_SLOT_BIT(3) |
ATI_REG_OUT_DMA_SLOT_BIT(4);
break;
}
/* set output threshold */
data |= 0x04 << ATI_REG_OUT_DMA_THRESHOLD_SHIFT;
atiixp_write(chip, OUT_DMA_SLOT, data);
atiixp_update(chip, CMD, ATI_REG_CMD_INTERLEAVE_OUT,
substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE ?
ATI_REG_CMD_INTERLEAVE_OUT : 0);
/*
* enable 6 channel re-ordering bit if needed
*/
atiixp_update(chip, 6CH_REORDER, ATI_REG_6CH_REORDER_EN,
substream->runtime->channels >= 6 ? ATI_REG_6CH_REORDER_EN : 0);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/* set up slots and formats for analog IN */
static int snd_atiixp_capture_prepare(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
spin_lock_irq(&chip->reg_lock);
atiixp_update(chip, CMD, ATI_REG_CMD_INTERLEAVE_IN,
substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE ?
ATI_REG_CMD_INTERLEAVE_IN : 0);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/*
* hw_params - allocate the buffer and set up buffer descriptors
*/
static int snd_atiixp_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
int err;
dma->buf_addr = substream->runtime->dma_addr;
dma->buf_bytes = params_buffer_bytes(hw_params);
err = atiixp_build_dma_packets(chip, dma, substream,
params_periods(hw_params),
params_period_bytes(hw_params));
if (err < 0)
return err;
if (dma->ac97_pcm_type >= 0) {
struct ac97_pcm *pcm = chip->pcms[dma->ac97_pcm_type];
/* PCM is bound to AC97 codec(s)
* set up the AC97 codecs
*/
if (dma->pcm_open_flag) {
snd_ac97_pcm_close(pcm);
dma->pcm_open_flag = 0;
}
err = snd_ac97_pcm_open(pcm, params_rate(hw_params),
params_channels(hw_params),
pcm->r[0].slots);
if (err >= 0)
dma->pcm_open_flag = 1;
}
return err;
}
static int snd_atiixp_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
if (dma->pcm_open_flag) {
struct ac97_pcm *pcm = chip->pcms[dma->ac97_pcm_type];
snd_ac97_pcm_close(pcm);
dma->pcm_open_flag = 0;
}
atiixp_clear_dma_packets(chip, dma, substream);
return 0;
}
/*
* pcm hardware definition, identical for all DMA types
*/
static const struct snd_pcm_hardware snd_atiixp_pcm_hw =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 256 * 1024,
.period_bytes_min = 32,
.period_bytes_max = 128 * 1024,
.periods_min = 2,
.periods_max = ATI_MAX_DESCRIPTORS,
};
static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
struct atiixp_dma *dma, int pcm_type)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
return -EINVAL;
if (dma->opened)
return -EBUSY;
dma->substream = substream;
runtime->hw = snd_atiixp_pcm_hw;
dma->ac97_pcm_type = pcm_type;
if (pcm_type >= 0) {
runtime->hw.rates = chip->pcms[pcm_type]->rates;
snd_pcm_limit_hw_rates(runtime);
} else {
/* direct SPDIF */
runtime->hw.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
}
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
runtime->private_data = dma;
/* enable DMA bits */
spin_lock_irq(&chip->reg_lock);
dma->ops->enable_dma(chip, 1);
spin_unlock_irq(&chip->reg_lock);
dma->opened = 1;
return 0;
}
static int snd_atiixp_pcm_close(struct snd_pcm_substream *substream,
struct atiixp_dma *dma)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
/* disable DMA bits */
if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
return -EINVAL;
spin_lock_irq(&chip->reg_lock);
dma->ops->enable_dma(chip, 0);
spin_unlock_irq(&chip->reg_lock);
dma->substream = NULL;
dma->opened = 0;
return 0;
}
/*
*/
static int snd_atiixp_playback_open(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 0);
mutex_unlock(&chip->open_mutex);
if (err < 0)
return err;
substream->runtime->hw.channels_max = chip->max_channels;
if (chip->max_channels > 2)
/* channels must be even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS, 2);
return 0;
}
static int snd_atiixp_playback_close(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]);
mutex_unlock(&chip->open_mutex);
return err;
}
static int snd_atiixp_capture_open(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
return snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_CAPTURE], 1);
}
static int snd_atiixp_capture_close(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
return snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_CAPTURE]);
}
static int snd_atiixp_spdif_open(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
if (chip->spdif_over_aclink) /* share DMA_PLAYBACK */
err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 2);
else
err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_SPDIF], -1);
mutex_unlock(&chip->open_mutex);
return err;
}
static int snd_atiixp_spdif_close(struct snd_pcm_substream *substream)
{
struct atiixp *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
if (chip->spdif_over_aclink)
err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]);
else
err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_SPDIF]);
mutex_unlock(&chip->open_mutex);
return err;
}
/* AC97 playback */
static const struct snd_pcm_ops snd_atiixp_playback_ops = {
.open = snd_atiixp_playback_open,
.close = snd_atiixp_playback_close,
.hw_params = snd_atiixp_pcm_hw_params,
.hw_free = snd_atiixp_pcm_hw_free,
.prepare = snd_atiixp_playback_prepare,
.trigger = snd_atiixp_pcm_trigger,
.pointer = snd_atiixp_pcm_pointer,
};
/* AC97 capture */
static const struct snd_pcm_ops snd_atiixp_capture_ops = {
.open = snd_atiixp_capture_open,
.close = snd_atiixp_capture_close,
.hw_params = snd_atiixp_pcm_hw_params,
.hw_free = snd_atiixp_pcm_hw_free,
.prepare = snd_atiixp_capture_prepare,
.trigger = snd_atiixp_pcm_trigger,
.pointer = snd_atiixp_pcm_pointer,
};
/* SPDIF playback */
static const struct snd_pcm_ops snd_atiixp_spdif_ops = {
.open = snd_atiixp_spdif_open,
.close = snd_atiixp_spdif_close,
.hw_params = snd_atiixp_pcm_hw_params,
.hw_free = snd_atiixp_pcm_hw_free,
.prepare = snd_atiixp_spdif_prepare,
.trigger = snd_atiixp_pcm_trigger,
.pointer = snd_atiixp_pcm_pointer,
};
static const struct ac97_pcm atiixp_pcm_defs[] = {
/* front PCM */
{
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT) |
(1 << AC97_SLOT_PCM_CENTER) |
(1 << AC97_SLOT_PCM_SLEFT) |
(1 << AC97_SLOT_PCM_SRIGHT) |
(1 << AC97_SLOT_LFE)
}
}
},
/* PCM IN #1 */
{
.stream = 1,
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT)
}
}
},
/* S/PDIF OUT (optional) */
{
.exclusive = 1,
.spdif = 1,
.r = { {
.slots = (1 << AC97_SLOT_SPDIF_LEFT2) |
(1 << AC97_SLOT_SPDIF_RIGHT2)
}
}
},
};
static const struct atiixp_dma_ops snd_atiixp_playback_dma_ops = {
.type = ATI_DMA_PLAYBACK,
.llp_offset = ATI_REG_OUT_DMA_LINKPTR,
.dt_cur = ATI_REG_OUT_DMA_DT_CUR,
.enable_dma = atiixp_out_enable_dma,
.enable_transfer = atiixp_out_enable_transfer,
.flush_dma = atiixp_out_flush_dma,
};
static const struct atiixp_dma_ops snd_atiixp_capture_dma_ops = {
.type = ATI_DMA_CAPTURE,
.llp_offset = ATI_REG_IN_DMA_LINKPTR,
.dt_cur = ATI_REG_IN_DMA_DT_CUR,
.enable_dma = atiixp_in_enable_dma,
.enable_transfer = atiixp_in_enable_transfer,
.flush_dma = atiixp_in_flush_dma,
};
static const struct atiixp_dma_ops snd_atiixp_spdif_dma_ops = {
.type = ATI_DMA_SPDIF,
.llp_offset = ATI_REG_SPDF_DMA_LINKPTR,
.dt_cur = ATI_REG_SPDF_DMA_DT_CUR,
.enable_dma = atiixp_spdif_enable_dma,
.enable_transfer = atiixp_spdif_enable_transfer,
.flush_dma = atiixp_spdif_flush_dma,
};
static int snd_atiixp_pcm_new(struct atiixp *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_chmap *chmap;
struct snd_ac97_bus *pbus = chip->ac97_bus;
int err, i, num_pcms;
/* initialize constants */
chip->dmas[ATI_DMA_PLAYBACK].ops = &snd_atiixp_playback_dma_ops;
chip->dmas[ATI_DMA_CAPTURE].ops = &snd_atiixp_capture_dma_ops;
if (! chip->spdif_over_aclink)
chip->dmas[ATI_DMA_SPDIF].ops = &snd_atiixp_spdif_dma_ops;
/* assign AC97 pcm */
if (chip->spdif_over_aclink)
num_pcms = 3;
else
num_pcms = 2;
err = snd_ac97_pcm_assign(pbus, num_pcms, atiixp_pcm_defs);
if (err < 0)
return err;
for (i = 0; i < num_pcms; i++)
chip->pcms[i] = &pbus->pcms[i];
chip->max_channels = 2;
if (pbus->pcms[ATI_PCM_OUT].r[0].slots & (1 << AC97_SLOT_PCM_SLEFT)) {
if (pbus->pcms[ATI_PCM_OUT].r[0].slots & (1 << AC97_SLOT_LFE))
chip->max_channels = 6;
else
chip->max_channels = 4;
}
/* PCM #0: analog I/O */
err = snd_pcm_new(chip->card, "ATI IXP AC97",
ATI_PCMDEV_ANALOG, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_atiixp_capture_ops);
pcm->private_data = chip;
strcpy(pcm->name, "ATI IXP AC97");
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev, 64*1024, 128*1024);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps, chip->max_channels, 0,
&chmap);
if (err < 0)
return err;
chmap->channel_mask = SND_PCM_CHMAP_MASK_2468;
chip->ac97[0]->chmaps[SNDRV_PCM_STREAM_PLAYBACK] = chmap;
/* no SPDIF support on codec? */
if (chip->pcms[ATI_PCM_SPDIF] && ! chip->pcms[ATI_PCM_SPDIF]->rates)
return 0;
/* FIXME: non-48k sample rate doesn't work on my test machine with AD1888 */
if (chip->pcms[ATI_PCM_SPDIF])
chip->pcms[ATI_PCM_SPDIF]->rates = SNDRV_PCM_RATE_48000;
/* PCM #1: spdif playback */
err = snd_pcm_new(chip->card, "ATI IXP IEC958",
ATI_PCMDEV_DIGITAL, 1, 0, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_spdif_ops);
pcm->private_data = chip;
if (chip->spdif_over_aclink)
strcpy(pcm->name, "ATI IXP IEC958 (AC97)");
else
strcpy(pcm->name, "ATI IXP IEC958 (Direct)");
chip->pcmdevs[ATI_PCMDEV_DIGITAL] = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev, 64*1024, 128*1024);
/* pre-select AC97 SPDIF slots 10/11 */
for (i = 0; i < NUM_ATI_CODECS; i++) {
if (chip->ac97[i])
snd_ac97_update_bits(chip->ac97[i],
AC97_EXTENDED_STATUS,
0x03 << 4, 0x03 << 4);
}
return 0;
}
/*
* interrupt handler
*/
static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
{
struct atiixp *chip = dev_id;
unsigned int status;
status = atiixp_read(chip, ISR);
if (! status)
return IRQ_NONE;
/* process audio DMA */
if (status & ATI_REG_ISR_OUT_XRUN)
snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
else if (status & ATI_REG_ISR_OUT_STATUS)
snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
if (status & ATI_REG_ISR_IN_XRUN)
snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);
else if (status & ATI_REG_ISR_IN_STATUS)
snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);
if (! chip->spdif_over_aclink) {
if (status & ATI_REG_ISR_SPDF_XRUN)
snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_SPDIF]);
else if (status & ATI_REG_ISR_SPDF_STATUS)
snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_SPDIF]);
}
/* for codec detection */
if (status & CODEC_CHECK_BITS) {
unsigned int detected;
detected = status & CODEC_CHECK_BITS;
spin_lock(&chip->reg_lock);
chip->codec_not_ready_bits |= detected;
atiixp_update(chip, IER, detected, 0); /* disable the detected irqs */
spin_unlock(&chip->reg_lock);
}
/* ack */
atiixp_write(chip, ISR, status);
return IRQ_HANDLED;
}
/*
* ac97 mixer section
*/
static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x103c,
.subdevice = 0x006b,
.name = "HP Pavilion ZV5030US",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x308b,
.name = "HP nx6125",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x3091,
.name = "unknown HP",
.type = AC97_TUNE_MUTE_LED
},
{ } /* terminator */
};
static int snd_atiixp_mixer_new(struct atiixp *chip, int clock,
const char *quirk_override)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
int i, err;
int codec_count;
static const struct snd_ac97_bus_ops ops = {
.write = snd_atiixp_ac97_write,
.read = snd_atiixp_ac97_read,
};
static const unsigned int codec_skip[NUM_ATI_CODECS] = {
ATI_REG_ISR_CODEC0_NOT_READY,
ATI_REG_ISR_CODEC1_NOT_READY,
ATI_REG_ISR_CODEC2_NOT_READY,
};
if (snd_atiixp_codec_detect(chip) < 0)
return -ENXIO;
err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
if (err < 0)
return err;
pbus->clock = clock;
chip->ac97_bus = pbus;
codec_count = 0;
for (i = 0; i < NUM_ATI_CODECS; i++) {
if (chip->codec_not_ready_bits & codec_skip[i])
continue;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.pci = chip->pci;
ac97.num = i;
ac97.scaps = AC97_SCAP_SKIP_MODEM | AC97_SCAP_POWER_SAVE;
if (! chip->spdif_over_aclink)
ac97.scaps |= AC97_SCAP_NO_SPDIF;
err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
if (err < 0) {
chip->ac97[i] = NULL; /* to be sure */
dev_dbg(chip->card->dev,
"codec %d not available for audio\n", i);
continue;
}
codec_count++;
}
if (! codec_count) {
dev_err(chip->card->dev, "no codec available\n");
return -ENODEV;
}
snd_ac97_tune_hardware(chip->ac97[0], ac97_quirks, quirk_override);
return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
static int snd_atiixp_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
snd_atiixp_chip_stop(chip);
return 0;
}
static int snd_atiixp_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
snd_atiixp_aclink_reset(chip);
snd_atiixp_chip_start(chip);
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_resume(chip->ac97[i]);
for (i = 0; i < NUM_ATI_PCMDEVS; i++)
if (chip->pcmdevs[i]) {
struct atiixp_dma *dma = &chip->dmas[i];
if (dma->substream && dma->suspended) {
dma->ops->enable_dma(chip, 1);
dma->substream->ops->prepare(dma->substream);
writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
chip->remap_addr + dma->ops->llp_offset);
}
}
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_atiixp_pm, snd_atiixp_suspend, snd_atiixp_resume);
#define SND_ATIIXP_PM_OPS &snd_atiixp_pm
#else
#define SND_ATIIXP_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
/*
* proc interface for register dump
*/
static void snd_atiixp_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct atiixp *chip = entry->private_data;
int i;
for (i = 0; i < 256; i += 4)
snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}
static void snd_atiixp_proc_init(struct atiixp *chip)
{
snd_card_ro_proc_new(chip->card, "atiixp", chip, snd_atiixp_proc_read);
}
/*
* destructor
*/
static void snd_atiixp_free(struct snd_card *card)
{
snd_atiixp_chip_stop(card->private_data);
}
/*
* constructor for chip instance
*/
static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
{
struct atiixp *chip = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&chip->reg_lock);
mutex_init(&chip->open_mutex);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
err = pcim_iomap_regions(pci, 1 << 0, "ATI IXP AC97");
if (err < 0)
return err;
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pcim_iomap_table(pci)[0];
if (devm_request_irq(&pci->dev, pci->irq, snd_atiixp_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_atiixp_free;
pci_set_master(pci);
return 0;
}
static int __snd_atiixp_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp *chip;
int err;
err = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
strcpy(card->driver, spdif_aclink ? "ATIIXP" : "ATIIXP-SPDMA");
strcpy(card->shortname, "ATI IXP");
err = snd_atiixp_init(card, pci);
if (err < 0)
return err;
err = snd_atiixp_aclink_reset(chip);
if (err < 0)
return err;
chip->spdif_over_aclink = spdif_aclink;
err = snd_atiixp_mixer_new(chip, ac97_clock, ac97_quirk);
if (err < 0)
return err;
err = snd_atiixp_pcm_new(chip);
if (err < 0)
return err;
snd_atiixp_proc_init(chip);
snd_atiixp_chip_start(chip);
snprintf(card->longname, sizeof(card->longname),
"%s rev %x with %s at %#lx, irq %i", card->shortname,
pci->revision,
chip->ac97[0] ? snd_ac97_get_short_name(chip->ac97[0]) : "?",
chip->addr, chip->irq);
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
return 0;
}
static int snd_atiixp_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
}
static struct pci_driver atiixp_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.driver = {
.pm = SND_ATIIXP_PM_OPS,
},
};
module_pci_driver(atiixp_driver);
| linux-master | sound/pci/atiixp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA driver for Intel ICH (i8x0) chipsets
*
* Copyright (c) 2000 Jaroslav Kysela <[email protected]>
*
* This code also contains alpha support for SiS 735 chipsets provided
* by Mike Pieper <[email protected]>. We have no datasheet
* for SiS735, so the code is not fully functional.
*
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
MODULE_LICENSE("GPL");
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int ac97_clock;
static char *ac97_quirk;
static bool buggy_semaphore;
static int buggy_irq = -1; /* auto-check */
static bool xbox;
static int spdif_aclink = -1;
static int inside_vm = -1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for Intel i8x0 soundcard.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for Intel i8x0 soundcard.");
module_param(ac97_clock, int, 0444);
MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (0 = allowlist + auto-detect, 1 = force autodetect).");
module_param(ac97_quirk, charp, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware.");
module_param(buggy_semaphore, bool, 0444);
MODULE_PARM_DESC(buggy_semaphore, "Enable workaround for hardware with problematic codec semaphores.");
module_param(buggy_irq, bint, 0444);
MODULE_PARM_DESC(buggy_irq, "Enable workaround for buggy interrupts on some motherboards.");
module_param(xbox, bool, 0444);
MODULE_PARM_DESC(xbox, "Set to 1 for Xbox if you have problems with AC'97 codec detection.");
module_param(spdif_aclink, int, 0444);
MODULE_PARM_DESC(spdif_aclink, "S/PDIF over AC-link.");
module_param(inside_vm, bint, 0444);
MODULE_PARM_DESC(inside_vm, "KVM/Parallels optimization.");
/* just for backward compatibility */
static bool enable;
module_param(enable, bool, 0444);
static int joystick;
module_param(joystick, int, 0444);
/*
* Direct registers
*/
enum { DEVICE_INTEL, DEVICE_INTEL_ICH4, DEVICE_SIS, DEVICE_ALI, DEVICE_NFORCE };
#define ICHREG(x) ICH_REG_##x
#define DEFINE_REGSET(name,base) \
enum { \
ICH_REG_##name##_BDBAR = base + 0x0, /* dword - buffer descriptor list base address */ \
ICH_REG_##name##_CIV = base + 0x04, /* byte - current index value */ \
ICH_REG_##name##_LVI = base + 0x05, /* byte - last valid index */ \
ICH_REG_##name##_SR = base + 0x06, /* byte - status register */ \
ICH_REG_##name##_PICB = base + 0x08, /* word - position in current buffer */ \
ICH_REG_##name##_PIV = base + 0x0a, /* byte - prefetched index value */ \
ICH_REG_##name##_CR = base + 0x0b, /* byte - control register */ \
}
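/*
 * For instance, DEFINE_REGSET(PO, 0x10) below expands to
 * ICH_REG_PO_BDBAR = 0x10, ICH_REG_PO_CIV = 0x14, ICH_REG_PO_LVI = 0x15
 * and so on up to ICH_REG_PO_CR = 0x1b, so ICHREG(PO_LVI) names the
 * PCM-out last-valid-index register.
 */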
/* busmaster blocks */
DEFINE_REGSET(OFF, 0); /* offset */
DEFINE_REGSET(PI, 0x00); /* PCM in */
DEFINE_REGSET(PO, 0x10); /* PCM out */
DEFINE_REGSET(MC, 0x20); /* Mic in */
/* ICH4 busmaster blocks */
DEFINE_REGSET(MC2, 0x40); /* Mic in 2 */
DEFINE_REGSET(PI2, 0x50); /* PCM in 2 */
DEFINE_REGSET(SP, 0x60); /* SPDIF out */
/* values for each busmaster block */
/* LVI */
#define ICH_REG_LVI_MASK 0x1f
/* SR */
#define ICH_FIFOE 0x10 /* FIFO error */
#define ICH_BCIS 0x08 /* buffer completion interrupt status */
#define ICH_LVBCI 0x04 /* last valid buffer completion interrupt */
#define ICH_CELV 0x02 /* current equals last valid */
#define ICH_DCH 0x01 /* DMA controller halted */
/* PIV */
#define ICH_REG_PIV_MASK 0x1f /* mask */
/* CR */
#define ICH_IOCE 0x10 /* interrupt on completion enable */
#define ICH_FEIE 0x08 /* fifo error interrupt enable */
#define ICH_LVBIE 0x04 /* last valid buffer interrupt enable */
#define ICH_RESETREGS 0x02 /* reset busmaster registers */
#define ICH_STARTBM 0x01 /* start busmaster operation */
/* global block */
#define ICH_REG_GLOB_CNT 0x2c /* dword - global control */
#define ICH_PCM_SPDIF_MASK 0xc0000000 /* s/pdif pcm slot mask (ICH4) */
#define ICH_PCM_SPDIF_NONE 0x00000000 /* reserved - undefined */
#define ICH_PCM_SPDIF_78 0x40000000 /* s/pdif pcm on slots 7&8 */
#define ICH_PCM_SPDIF_69 0x80000000 /* s/pdif pcm on slots 6&9 */
#define ICH_PCM_SPDIF_1011 0xc0000000 /* s/pdif pcm on slots 10&11 */
#define ICH_PCM_20BIT 0x00400000 /* 20-bit samples (ICH4) */
#define ICH_PCM_246_MASK 0x00300000 /* chan mask (not all chips) */
#define ICH_PCM_8 0x00300000 /* 8 channels (not all chips) */
#define ICH_PCM_6 0x00200000 /* 6 channels (not all chips) */
#define ICH_PCM_4 0x00100000 /* 4 channels (not all chips) */
#define ICH_PCM_2 0x00000000 /* 2 channels (stereo) */
#define ICH_SIS_PCM_246_MASK 0x000000c0 /* 6 channels (SIS7012) */
#define ICH_SIS_PCM_6 0x00000080 /* 6 channels (SIS7012) */
#define ICH_SIS_PCM_4 0x00000040 /* 4 channels (SIS7012) */
#define ICH_SIS_PCM_2 0x00000000 /* 2 channels (SIS7012) */
#define ICH_TRIE 0x00000040 /* tertiary resume interrupt enable */
#define ICH_SRIE 0x00000020 /* secondary resume interrupt enable */
#define ICH_PRIE 0x00000010 /* primary resume interrupt enable */
#define ICH_ACLINK 0x00000008 /* AClink shut off */
#define ICH_AC97WARM 0x00000004 /* AC'97 warm reset */
#define ICH_AC97COLD 0x00000002 /* AC'97 cold reset */
#define ICH_GIE 0x00000001 /* GPI interrupt enable */
#define ICH_REG_GLOB_STA 0x30 /* dword - global status */
#define ICH_TRI 0x20000000 /* ICH4: tertiary (AC_SDIN2) resume interrupt */
#define ICH_TCR 0x10000000 /* ICH4: tertiary (AC_SDIN2) codec ready */
#define ICH_BCS 0x08000000 /* ICH4: bit clock stopped */
#define ICH_SPINT 0x04000000 /* ICH4: S/PDIF interrupt */
#define ICH_P2INT 0x02000000 /* ICH4: PCM2-In interrupt */
#define ICH_M2INT 0x01000000 /* ICH4: Mic2-In interrupt */
#define ICH_SAMPLE_CAP 0x00c00000 /* ICH4: sample capability bits (RO) */
#define ICH_SAMPLE_16_20 0x00400000 /* ICH4: 16- and 20-bit samples */
#define ICH_MULTICHAN_CAP 0x00300000 /* ICH4: multi-channel capability bits (RO) */
#define ICH_SIS_TRI 0x00080000 /* SIS: tertiary resume irq */
#define ICH_SIS_TCR 0x00040000 /* SIS: tertiary codec ready */
#define ICH_MD3 0x00020000 /* modem power down semaphore */
#define ICH_AD3 0x00010000 /* audio power down semaphore */
#define ICH_RCS 0x00008000 /* read completion status */
#define ICH_BIT3 0x00004000 /* bit 3 slot 12 */
#define ICH_BIT2 0x00002000 /* bit 2 slot 12 */
#define ICH_BIT1 0x00001000 /* bit 1 slot 12 */
#define ICH_SRI 0x00000800 /* secondary (AC_SDIN1) resume interrupt */
#define ICH_PRI 0x00000400 /* primary (AC_SDIN0) resume interrupt */
#define ICH_SCR 0x00000200 /* secondary (AC_SDIN1) codec ready */
#define ICH_PCR 0x00000100 /* primary (AC_SDIN0) codec ready */
#define ICH_MCINT 0x00000080 /* MIC capture interrupt */
#define ICH_POINT 0x00000040 /* playback interrupt */
#define ICH_PIINT 0x00000020 /* capture interrupt */
#define ICH_NVSPINT 0x00000010 /* nforce spdif interrupt */
#define ICH_MOINT 0x00000004 /* modem playback interrupt */
#define ICH_MIINT 0x00000002 /* modem capture interrupt */
#define ICH_GSCI 0x00000001 /* GPI status change interrupt */
#define ICH_REG_ACC_SEMA 0x34 /* byte - codec write semaphore */
#define ICH_CAS 0x01 /* codec access semaphore */
#define ICH_REG_SDM 0x80
#define ICH_DI2L_MASK 0x000000c0 /* PCM In 2, Mic In 2 data in line */
#define ICH_DI2L_SHIFT 6
#define ICH_DI1L_MASK 0x00000030 /* PCM In 1, Mic In 1 data in line */
#define ICH_DI1L_SHIFT 4
#define ICH_SE 0x00000008 /* steer enable */
#define ICH_LDI_MASK 0x00000003 /* last codec read data input */
#define ICH_MAX_FRAGS 32 /* max hw frags */
/*
* registers for Ali5455
*/
/* ALi 5455 busmaster blocks */
DEFINE_REGSET(AL_PI, 0x40); /* ALi PCM in */
DEFINE_REGSET(AL_PO, 0x50); /* Ali PCM out */
DEFINE_REGSET(AL_MC, 0x60); /* Ali Mic in */
DEFINE_REGSET(AL_CDC_SPO, 0x70); /* Ali Codec SPDIF out */
DEFINE_REGSET(AL_CENTER, 0x80); /* Ali center out */
DEFINE_REGSET(AL_LFE, 0x90); /* Ali LFE out */
DEFINE_REGSET(AL_CLR_SPI, 0xa0); /* Ali Controller SPDIF in */
DEFINE_REGSET(AL_CLR_SPO, 0xb0); /* Ali Controller SPDIF out */
DEFINE_REGSET(AL_I2S, 0xc0); /* Ali I2S in */
DEFINE_REGSET(AL_PI2, 0xd0); /* Ali PCM2 in */
DEFINE_REGSET(AL_MC2, 0xe0); /* Ali Mic2 in */
enum {
ICH_REG_ALI_SCR = 0x00, /* System Control Register */
ICH_REG_ALI_SSR = 0x04, /* System Status Register */
ICH_REG_ALI_DMACR = 0x08, /* DMA Control Register */
ICH_REG_ALI_FIFOCR1 = 0x0c, /* FIFO Control Register 1 */
ICH_REG_ALI_INTERFACECR = 0x10, /* Interface Control Register */
ICH_REG_ALI_INTERRUPTCR = 0x14, /* Interrupt control Register */
ICH_REG_ALI_INTERRUPTSR = 0x18, /* Interrupt Status Register */
ICH_REG_ALI_FIFOCR2 = 0x1c, /* FIFO Control Register 2 */
ICH_REG_ALI_CPR = 0x20, /* Command Port Register */
ICH_REG_ALI_CPR_ADDR = 0x22, /* ac97 addr write */
ICH_REG_ALI_SPR = 0x24, /* Status Port Register */
ICH_REG_ALI_SPR_ADDR = 0x26, /* ac97 addr read */
ICH_REG_ALI_FIFOCR3 = 0x2c, /* FIFO Control Register 3 */
ICH_REG_ALI_TTSR = 0x30, /* Transmit Tag Slot Register */
ICH_REG_ALI_RTSR = 0x34, /* Receive Tag Slot Register */
ICH_REG_ALI_CSPSR = 0x38, /* Command/Status Port Status Register */
ICH_REG_ALI_CAS = 0x3c, /* Codec Write Semaphore Register */
ICH_REG_ALI_HWVOL = 0xf0, /* hardware volume control/status */
ICH_REG_ALI_I2SCR = 0xf4, /* I2S control/status */
ICH_REG_ALI_SPDIFCSR = 0xf8, /* spdif channel status register */
ICH_REG_ALI_SPDIFICS = 0xfc, /* spdif interface control/status */
};
#define ALI_CAS_SEM_BUSY 0x80000000
#define ALI_CPR_ADDR_SECONDARY 0x100
#define ALI_CPR_ADDR_READ 0x80
#define ALI_CSPSR_CODEC_READY 0x08
#define ALI_CSPSR_READ_OK 0x02
#define ALI_CSPSR_WRITE_OK 0x01
/* interrupts for the whole chip by interrupt status register finish */
#define ALI_INT_MICIN2 (1<<26)
#define ALI_INT_PCMIN2 (1<<25)
#define ALI_INT_I2SIN (1<<24)
#define ALI_INT_SPDIFOUT (1<<23) /* controller spdif out INTERRUPT */
#define ALI_INT_SPDIFIN (1<<22)
#define ALI_INT_LFEOUT (1<<21)
#define ALI_INT_CENTEROUT (1<<20)
#define ALI_INT_CODECSPDIFOUT (1<<19)
#define ALI_INT_MICIN (1<<18)
#define ALI_INT_PCMOUT (1<<17)
#define ALI_INT_PCMIN (1<<16)
#define ALI_INT_CPRAIS (1<<7) /* command port available */
#define ALI_INT_SPRAIS (1<<5) /* status port available */
#define ALI_INT_GPIO (1<<1)
#define ALI_INT_MASK (ALI_INT_SPDIFOUT|ALI_INT_CODECSPDIFOUT|\
ALI_INT_MICIN|ALI_INT_PCMOUT|ALI_INT_PCMIN)
#define ICH_ALI_SC_RESET (1<<31) /* master reset */
#define ICH_ALI_SC_AC97_DBL (1<<30)
#define ICH_ALI_SC_CODEC_SPDF (3<<20) /* 1=7/8, 2=6/9, 3=10/11 */
#define ICH_ALI_SC_IN_BITS (3<<18)
#define ICH_ALI_SC_OUT_BITS (3<<16)
#define ICH_ALI_SC_6CH_CFG (3<<14)
#define ICH_ALI_SC_PCM_4 (1<<8)
#define ICH_ALI_SC_PCM_6 (2<<8)
#define ICH_ALI_SC_PCM_246_MASK (3<<8)
#define ICH_ALI_SS_SEC_ID (3<<5)
#define ICH_ALI_SS_PRI_ID (3<<3)
#define ICH_ALI_IF_AC97SP (1<<21)
#define ICH_ALI_IF_MC (1<<20)
#define ICH_ALI_IF_PI (1<<19)
#define ICH_ALI_IF_MC2 (1<<18)
#define ICH_ALI_IF_PI2 (1<<17)
#define ICH_ALI_IF_LINE_SRC (1<<15) /* 0/1 = slot 3/6 */
#define ICH_ALI_IF_MIC_SRC (1<<14) /* 0/1 = slot 3/6 */
#define ICH_ALI_IF_SPDF_SRC (3<<12) /* 00 = PCM, 01 = AC97-in, 10 = spdif-in, 11 = i2s */
#define ICH_ALI_IF_AC97_OUT (3<<8) /* 00 = PCM, 10 = spdif-in, 11 = i2s */
#define ICH_ALI_IF_PO_SPDF (1<<3)
#define ICH_ALI_IF_PO (1<<1)
/*
 * DMA channel (ichdev) indices per chipset type
 */
enum {
ICHD_PCMIN,
ICHD_PCMOUT,
ICHD_MIC,
ICHD_MIC2,
ICHD_PCM2IN,
ICHD_SPBAR,
ICHD_LAST = ICHD_SPBAR
};
enum {
NVD_PCMIN,
NVD_PCMOUT,
NVD_MIC,
NVD_SPBAR,
NVD_LAST = NVD_SPBAR
};
enum {
ALID_PCMIN,
ALID_PCMOUT,
ALID_MIC,
ALID_AC97SPDIFOUT,
ALID_SPDIFIN,
ALID_SPDIFOUT,
ALID_LAST = ALID_SPDIFOUT
};
#define get_ichdev(substream) (substream->runtime->private_data)
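/*
 * Per-stream DMA engine state.  Each engine owns a ring of buffer
 * descriptors (bdbar) and mirrors the hardware's current index (civ)
 * and last valid index (lvi) in software; position and fragsize1
 * track how far playback or capture has progressed in bytes.
 */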
struct ichdev {
unsigned int ichd; /* ich device number */
unsigned long reg_offset; /* offset to bmaddr */
__le32 *bdbar; /* CPU address (32bit) */
unsigned int bdbar_addr; /* PCI bus address (32bit) */
struct snd_pcm_substream *substream;
unsigned int physbuf; /* physical address (32bit) */
unsigned int size;
unsigned int fragsize;
unsigned int fragsize1;
unsigned int position;
unsigned int pos_shift;
unsigned int last_pos;
int frags;
int lvi;
int lvi_frag;
int civ;
int ack;
int ack_reload;
unsigned int ack_bit;
unsigned int roff_sr;
unsigned int roff_picb;
unsigned int int_sta_mask; /* interrupt status mask */
unsigned int ali_slot; /* ALI DMA slot */
struct ac97_pcm *pcm;
int pcm_open_flag;
unsigned int prepared:1;
unsigned int suspended:1;
};
struct intel8x0 {
unsigned int device_type;
int irq;
void __iomem *addr;
void __iomem *bmaddr;
struct pci_dev *pci;
struct snd_card *card;
int pcm_devs;
struct snd_pcm *pcm[6];
struct ichdev ichd[6];
unsigned multi4: 1,
multi6: 1,
multi8: 1,
dra: 1,
smp20bit: 1;
unsigned in_ac97_init: 1,
in_sdin_init: 1;
unsigned in_measurement: 1; /* during ac97 clock measurement */
unsigned fix_nocache: 1; /* workaround for 440MX */
unsigned buggy_irq: 1; /* workaround for buggy mobos */
unsigned xbox: 1; /* workaround for Xbox AC'97 detection */
unsigned buggy_semaphore: 1; /* workaround for buggy codec semaphore */
unsigned inside_vm: 1; /* enable VM optimization */
int spdif_idx; /* SPDIF BAR index; *_SPBAR or -1 if use PCMOUT */
unsigned int sdm_saved; /* SDM reg value */
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97[3];
unsigned int ac97_sdin[3];
unsigned int max_codecs, ncodecs;
const unsigned int *codec_bit;
unsigned int codec_isr_bits;
unsigned int codec_ready_bits;
spinlock_t reg_lock;
u32 bdbars_count;
struct snd_dma_buffer *bdbars;
u32 int_sta_reg; /* interrupt status register */
u32 int_sta_mask; /* interrupt status mask */
};
static const struct pci_device_id snd_intel8x0_ids[] = {
{ PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL }, /* 82801AA */
{ PCI_VDEVICE(INTEL, 0x2425), DEVICE_INTEL }, /* 82901AB */
{ PCI_VDEVICE(INTEL, 0x2445), DEVICE_INTEL }, /* 82801BA */
{ PCI_VDEVICE(INTEL, 0x2485), DEVICE_INTEL }, /* ICH3 */
{ PCI_VDEVICE(INTEL, 0x24c5), DEVICE_INTEL_ICH4 }, /* ICH4 */
{ PCI_VDEVICE(INTEL, 0x24d5), DEVICE_INTEL_ICH4 }, /* ICH5 */
{ PCI_VDEVICE(INTEL, 0x25a6), DEVICE_INTEL_ICH4 }, /* ESB */
{ PCI_VDEVICE(INTEL, 0x266e), DEVICE_INTEL_ICH4 }, /* ICH6 */
{ PCI_VDEVICE(INTEL, 0x27de), DEVICE_INTEL_ICH4 }, /* ICH7 */
{ PCI_VDEVICE(INTEL, 0x2698), DEVICE_INTEL_ICH4 }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x7195), DEVICE_INTEL }, /* 440MX */
{ PCI_VDEVICE(SI, 0x7012), DEVICE_SIS }, /* SI7012 */
{ PCI_VDEVICE(NVIDIA, 0x01b1), DEVICE_NFORCE }, /* NFORCE */
{ PCI_VDEVICE(NVIDIA, 0x003a), DEVICE_NFORCE }, /* MCP04 */
{ PCI_VDEVICE(NVIDIA, 0x006a), DEVICE_NFORCE }, /* NFORCE2 */
{ PCI_VDEVICE(NVIDIA, 0x0059), DEVICE_NFORCE }, /* CK804 */
{ PCI_VDEVICE(NVIDIA, 0x008a), DEVICE_NFORCE }, /* CK8 */
{ PCI_VDEVICE(NVIDIA, 0x00da), DEVICE_NFORCE }, /* NFORCE3 */
{ PCI_VDEVICE(NVIDIA, 0x00ea), DEVICE_NFORCE }, /* CK8S */
{ PCI_VDEVICE(NVIDIA, 0x026b), DEVICE_NFORCE }, /* MCP51 */
{ PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
{ PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
{ PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids);
/*
* Lowlevel I/O - busmaster
*/
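/*
 * The busmaster registers are reached through the bmaddr mapping and
 * the AC'97 mixer registers through the addr mapping.  Going through
 * the ioread/iowrite accessors lets the same helpers work whether the
 * BARs are mapped as port I/O or as MMIO.
 */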
static inline u8 igetbyte(struct intel8x0 *chip, u32 offset)
{
return ioread8(chip->bmaddr + offset);
}
static inline u16 igetword(struct intel8x0 *chip, u32 offset)
{
return ioread16(chip->bmaddr + offset);
}
static inline u32 igetdword(struct intel8x0 *chip, u32 offset)
{
return ioread32(chip->bmaddr + offset);
}
static inline void iputbyte(struct intel8x0 *chip, u32 offset, u8 val)
{
iowrite8(val, chip->bmaddr + offset);
}
static inline void iputword(struct intel8x0 *chip, u32 offset, u16 val)
{
iowrite16(val, chip->bmaddr + offset);
}
static inline void iputdword(struct intel8x0 *chip, u32 offset, u32 val)
{
iowrite32(val, chip->bmaddr + offset);
}
/*
* Lowlevel I/O - AC'97 registers
*/
static inline u16 iagetword(struct intel8x0 *chip, u32 offset)
{
return ioread16(chip->addr + offset);
}
static inline void iaputword(struct intel8x0 *chip, u32 offset, u16 val)
{
iowrite16(val, chip->addr + offset);
}
/*
* Basic I/O
*/
/*
* access to AC97 codec via normal i/o (for ICH and SIS7012)
*/
static int snd_intel8x0_codec_semaphore(struct intel8x0 *chip, unsigned int codec)
{
int time;
if (codec > 2)
return -EIO;
if (chip->in_sdin_init) {
/* we don't know the ready-bit assignment yet, so accept any codec */
codec = chip->codec_isr_bits;
} else {
codec = chip->codec_bit[chip->ac97_sdin[codec]];
}
/* codec ready ? */
if ((igetdword(chip, ICHREG(GLOB_STA)) & codec) == 0)
return -EIO;
if (chip->buggy_semaphore)
return 0; /* just ignore ... */
/* Anyone holding a semaphore for 1 msec should be shot... */
time = 100;
do {
if (!(igetbyte(chip, ICHREG(ACC_SEMA)) & ICH_CAS))
return 0;
udelay(10);
} while (time--);
/* access to some forbidden (non-existent) ac97 registers will not
 * reset the semaphore. So even if we don't get the semaphore, still
 * continue the access. We don't need the semaphore anyway. */
dev_err(chip->card->dev,
"codec_semaphore: semaphore is not ready [0x%x][0x%x]\n",
igetbyte(chip, ICHREG(ACC_SEMA)), igetdword(chip, ICHREG(GLOB_STA)));
iagetword(chip, 0); /* clear semaphore flag */
/* I don't care about the semaphore */
return -EBUSY;
}
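/*
 * With the semaphore held, register 'reg' of codec 'num' is a plain
 * 16-bit load/store at offset reg + num * 0x80 in the mixer space.
 * A timed-out read is flagged by ICH_RCS in GLOB_STA and yields 0xffff.
 */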
static void snd_intel8x0_codec_write(struct snd_ac97 *ac97,
unsigned short reg,
unsigned short val)
{
struct intel8x0 *chip = ac97->private_data;
if (snd_intel8x0_codec_semaphore(chip, ac97->num) < 0) {
if (! chip->in_ac97_init)
dev_err(chip->card->dev,
"codec_write %d: semaphore is not ready for register 0x%x\n",
ac97->num, reg);
}
iaputword(chip, reg + ac97->num * 0x80, val);
}
static unsigned short snd_intel8x0_codec_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct intel8x0 *chip = ac97->private_data;
unsigned short res;
unsigned int tmp;
if (snd_intel8x0_codec_semaphore(chip, ac97->num) < 0) {
if (! chip->in_ac97_init)
dev_err(chip->card->dev,
"codec_read %d: semaphore is not ready for register 0x%x\n",
ac97->num, reg);
res = 0xffff;
} else {
res = iagetword(chip, reg + ac97->num * 0x80);
tmp = igetdword(chip, ICHREG(GLOB_STA));
if (tmp & ICH_RCS) {
/* reset RCS and preserve other R/WC bits */
iputdword(chip, ICHREG(GLOB_STA), tmp &
~(chip->codec_ready_bits | ICH_GSCI));
if (! chip->in_ac97_init)
dev_err(chip->card->dev,
"codec_read %d: read timeout for register 0x%x\n",
ac97->num, reg);
res = 0xffff;
}
}
return res;
}
static void snd_intel8x0_codec_read_test(struct intel8x0 *chip,
unsigned int codec)
{
unsigned int tmp;
if (snd_intel8x0_codec_semaphore(chip, codec) >= 0) {
iagetword(chip, codec * 0x80);
tmp = igetdword(chip, ICHREG(GLOB_STA));
if (tmp & ICH_RCS) {
/* reset RCS and preserve other R/WC bits */
iputdword(chip, ICHREG(GLOB_STA), tmp &
~(chip->codec_ready_bits | ICH_GSCI));
}
}
}
/*
* access to AC97 for Ali5455
*/
static int snd_intel8x0_ali_codec_ready(struct intel8x0 *chip, int mask)
{
int count;
for (count = 0; count < 0x7f; count++) {
int val = igetbyte(chip, ICHREG(ALI_CSPSR));
if (val & mask)
return 0;
}
if (! chip->in_ac97_init)
dev_warn(chip->card->dev, "AC97 codec ready timeout.\n");
return -EBUSY;
}
static int snd_intel8x0_ali_codec_semaphore(struct intel8x0 *chip)
{
int time = 100;
if (chip->buggy_semaphore)
return 0; /* just ignore ... */
while (--time && (igetdword(chip, ICHREG(ALI_CAS)) & ALI_CAS_SEM_BUSY))
udelay(1);
if (! time && ! chip->in_ac97_init)
dev_warn(chip->card->dev, "ali_codec_semaphore timeout\n");
return snd_intel8x0_ali_codec_ready(chip, ALI_CSPSR_CODEC_READY);
}
static unsigned short snd_intel8x0_ali_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
struct intel8x0 *chip = ac97->private_data;
unsigned short data = 0xffff;
if (snd_intel8x0_ali_codec_semaphore(chip))
goto __err;
reg |= ALI_CPR_ADDR_READ;
if (ac97->num)
reg |= ALI_CPR_ADDR_SECONDARY;
iputword(chip, ICHREG(ALI_CPR_ADDR), reg);
if (snd_intel8x0_ali_codec_ready(chip, ALI_CSPSR_READ_OK))
goto __err;
data = igetword(chip, ICHREG(ALI_SPR));
__err:
return data;
}
static void snd_intel8x0_ali_codec_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct intel8x0 *chip = ac97->private_data;
if (snd_intel8x0_ali_codec_semaphore(chip))
return;
iputword(chip, ICHREG(ALI_CPR), val);
if (ac97->num)
reg |= ALI_CPR_ADDR_SECONDARY;
iputword(chip, ICHREG(ALI_CPR_ADDR), reg);
snd_intel8x0_ali_codec_ready(chip, ALI_CSPSR_WRITE_OK);
}
/*
* DMA I/O
*/
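/*
 * Program the buffer descriptor list (BDL).  The ring holds
 * ICH_REG_LVI_MASK + 1 (32) descriptors of two 32-bit words each: the
 * physical buffer address and a control word with the length in
 * samples (bytes >> pos_shift) plus the interrupt-on-completion bit
 * (0x80000000).  The whole ring is pre-filled with the fragments
 * repeated modulo the buffer size, so the hardware can wrap freely.
 * When the period size equals the buffer size, each period is split
 * into two half-sized descriptors (fragsize1) and the period is
 * acknowledged only every second completion (ack_reload = 2).
 */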
static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ichdev)
{
int idx;
__le32 *bdbar = ichdev->bdbar;
unsigned long port = ichdev->reg_offset;
iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
if (ichdev->size == ichdev->fragsize) {
ichdev->ack_reload = ichdev->ack = 2;
ichdev->fragsize1 = ichdev->fragsize >> 1;
for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 4) {
bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf);
bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */
ichdev->fragsize1 >> ichdev->pos_shift);
bdbar[idx + 2] = cpu_to_le32(ichdev->physbuf + (ichdev->size >> 1));
bdbar[idx + 3] = cpu_to_le32(0x80000000 | /* interrupt on completion */
ichdev->fragsize1 >> ichdev->pos_shift);
}
ichdev->frags = 2;
} else {
ichdev->ack_reload = ichdev->ack = 1;
ichdev->fragsize1 = ichdev->fragsize;
for (idx = 0; idx < (ICH_REG_LVI_MASK + 1) * 2; idx += 2) {
bdbar[idx + 0] = cpu_to_le32(ichdev->physbuf +
(((idx >> 1) * ichdev->fragsize) %
ichdev->size));
bdbar[idx + 1] = cpu_to_le32(0x80000000 | /* interrupt on completion */
ichdev->fragsize >> ichdev->pos_shift);
#if 0
dev_dbg(chip->card->dev, "bdbar[%i] = 0x%x [0x%x]\n",
idx + 0, bdbar[idx + 0], bdbar[idx + 1]);
#endif
}
ichdev->frags = ichdev->size / ichdev->fragsize;
}
iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi = ICH_REG_LVI_MASK);
ichdev->civ = 0;
iputbyte(chip, port + ICH_REG_OFF_CIV, 0);
ichdev->lvi_frag = ICH_REG_LVI_MASK % ichdev->frags;
ichdev->position = 0;
#if 0
dev_dbg(chip->card->dev,
"lvi_frag = %i, frags = %i, period_size = 0x%x, period_size1 = 0x%x\n",
ichdev->lvi_frag, ichdev->frags, ichdev->fragsize,
ichdev->fragsize1);
#endif
/* clear interrupts */
iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
}
/*
* Interrupt handler
*/
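/*
 * Per-channel update, called from the interrupt handler.  The number
 * of descriptors completed since the last call is the delta between
 * the hardware's current index (CIV) and the cached one, modulo the
 * ring size.  For each completed descriptor, LVI is advanced and the
 * freed slot is re-pointed at the next fragment;
 * snd_pcm_period_elapsed() is called once every ack_reload
 * completions.
 */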
static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ichdev)
{
unsigned long port = ichdev->reg_offset;
unsigned long flags;
int status, civ, i, step;
int ack = 0;
if (!(ichdev->prepared || chip->in_measurement) || ichdev->suspended)
return;
spin_lock_irqsave(&chip->reg_lock, flags);
status = igetbyte(chip, port + ichdev->roff_sr);
civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
if (!(status & ICH_BCIS)) {
step = 0;
} else if (civ == ichdev->civ) {
// snd_printd("civ same %d\n", civ);
step = 1;
ichdev->civ++;
ichdev->civ &= ICH_REG_LVI_MASK;
} else {
step = civ - ichdev->civ;
if (step < 0)
step += ICH_REG_LVI_MASK + 1;
// if (step != 1)
// snd_printd("step = %d, %d -> %d\n", step, ichdev->civ, civ);
ichdev->civ = civ;
}
ichdev->position += step * ichdev->fragsize1;
if (! chip->in_measurement)
ichdev->position %= ichdev->size;
ichdev->lvi += step;
ichdev->lvi &= ICH_REG_LVI_MASK;
iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi);
for (i = 0; i < step; i++) {
ichdev->lvi_frag++;
ichdev->lvi_frag %= ichdev->frags;
ichdev->bdbar[ichdev->lvi * 2] = cpu_to_le32(ichdev->physbuf + ichdev->lvi_frag * ichdev->fragsize1);
#if 0
dev_dbg(chip->card->dev,
"new: bdbar[%i] = 0x%x [0x%x], prefetch = %i, all = 0x%x, 0x%x\n",
ichdev->lvi * 2, ichdev->bdbar[ichdev->lvi * 2],
ichdev->bdbar[ichdev->lvi * 2 + 1], inb(ICH_REG_OFF_PIV + port),
inl(port + 4), inb(port + ICH_REG_OFF_CR));
#endif
if (--ichdev->ack == 0) {
ichdev->ack = ichdev->ack_reload;
ack = 1;
}
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (ack && ichdev->substream) {
snd_pcm_period_elapsed(ichdev->substream);
}
iputbyte(chip, port + ichdev->roff_sr,
status & (ICH_FIFOE | ICH_BCIS | ICH_LVBCI));
}
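/*
 * Top-level handler for the (shared) interrupt line.  A status of all
 * ones means the chip is not yet resumed, so the interrupt cannot be
 * ours.  Status bits outside int_sta_mask are acked but normally
 * reported as IRQ_NONE; with the buggy_irq workaround they are claimed
 * as handled anyway, so the kernel does not disable the shared line.
 */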
static irqreturn_t snd_intel8x0_interrupt(int irq, void *dev_id)
{
struct intel8x0 *chip = dev_id;
struct ichdev *ichdev;
unsigned int status;
unsigned int i;
status = igetdword(chip, chip->int_sta_reg);
if (status == 0xffffffff) /* we are not yet resumed */
return IRQ_NONE;
if ((status & chip->int_sta_mask) == 0) {
if (status) {
/* ack */
iputdword(chip, chip->int_sta_reg, status);
if (! chip->buggy_irq)
status = 0;
}
return IRQ_RETVAL(status);
}
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
if (status & ichdev->int_sta_mask)
snd_intel8x0_update(chip, ichdev);
}
/* ack them */
iputdword(chip, chip->int_sta_reg, status & chip->int_sta_mask);
return IRQ_HANDLED;
}
/*
* PCM part
*/
static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
unsigned char val = 0;
unsigned long port = ichdev->reg_offset;
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
fallthrough;
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
val = ICH_IOCE | ICH_STARTBM;
ichdev->last_pos = ichdev->position;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
fallthrough;
case SNDRV_PCM_TRIGGER_STOP:
val = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
val = ICH_IOCE;
break;
default:
return -EINVAL;
}
iputbyte(chip, port + ICH_REG_OFF_CR, val);
if (cmd == SNDRV_PCM_TRIGGER_STOP) {
/* wait until DMA stopped */
while (!(igetbyte(chip, port + ichdev->roff_sr) & ICH_DCH)) ;
/* reset whole DMA things */
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS);
}
return 0;
}
static int snd_intel8x0_ali_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
unsigned long port = ichdev->reg_offset;
static const int fiforeg[] = {
ICHREG(ALI_FIFOCR1), ICHREG(ALI_FIFOCR2), ICHREG(ALI_FIFOCR3)
};
unsigned int val, fifo;
val = igetdword(chip, ICHREG(ALI_DMACR));
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
fallthrough;
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* clear FIFO for synchronization of channels */
fifo = igetdword(chip, fiforeg[ichdev->ali_slot / 4]);
fifo &= ~(0xff << (ichdev->ali_slot % 4));
fifo |= 0x83 << (ichdev->ali_slot % 4);
iputdword(chip, fiforeg[ichdev->ali_slot / 4], fifo);
}
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE);
val &= ~(1 << (ichdev->ali_slot + 16)); /* clear PAUSE flag */
/* start DMA */
iputdword(chip, ICHREG(ALI_DMACR), val | (1 << ichdev->ali_slot));
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
fallthrough;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
/* pause */
iputdword(chip, ICHREG(ALI_DMACR), val | (1 << (ichdev->ali_slot + 16)));
iputbyte(chip, port + ICH_REG_OFF_CR, 0);
while (igetbyte(chip, port + ICH_REG_OFF_CR))
;
if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
break;
/* reset whole DMA things */
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS);
/* clear interrupts */
iputbyte(chip, port + ICH_REG_OFF_SR,
igetbyte(chip, port + ICH_REG_OFF_SR) | 0x1e);
iputdword(chip, ICHREG(ALI_INTERRUPTSR),
igetdword(chip, ICHREG(ALI_INTERRUPTSR)) & ichdev->int_sta_mask);
break;
default:
return -EINVAL;
}
return 0;
}
static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
int dbl = params_rate(hw_params) > 48000;
int err;
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
ichdev->prepared = 0;
}
err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
params_channels(hw_params),
ichdev->pcm->r[dbl].slots);
if (err >= 0) {
ichdev->pcm_open_flag = 1;
/* Force SPDIF setting */
if (ichdev->ichd == ICHD_PCMOUT && chip->spdif_idx < 0)
snd_ac97_set_rate(ichdev->pcm->r[0].codec[0], AC97_SPDIF,
params_rate(hw_params));
}
return err;
}
static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
{
struct ichdev *ichdev = get_ichdev(substream);
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
ichdev->prepared = 0;
}
return 0;
}
static void snd_intel8x0_setup_pcm_out(struct intel8x0 *chip,
struct snd_pcm_runtime *runtime)
{
unsigned int cnt;
int dbl = runtime->rate > 48000;
spin_lock_irq(&chip->reg_lock);
switch (chip->device_type) {
case DEVICE_ALI:
cnt = igetdword(chip, ICHREG(ALI_SCR));
cnt &= ~ICH_ALI_SC_PCM_246_MASK;
if (runtime->channels == 4 || dbl)
cnt |= ICH_ALI_SC_PCM_4;
else if (runtime->channels == 6)
cnt |= ICH_ALI_SC_PCM_6;
iputdword(chip, ICHREG(ALI_SCR), cnt);
break;
case DEVICE_SIS:
cnt = igetdword(chip, ICHREG(GLOB_CNT));
cnt &= ~ICH_SIS_PCM_246_MASK;
if (runtime->channels == 4 || dbl)
cnt |= ICH_SIS_PCM_4;
else if (runtime->channels == 6)
cnt |= ICH_SIS_PCM_6;
iputdword(chip, ICHREG(GLOB_CNT), cnt);
break;
default:
cnt = igetdword(chip, ICHREG(GLOB_CNT));
cnt &= ~(ICH_PCM_246_MASK | ICH_PCM_20BIT);
if (runtime->channels == 4 || dbl)
cnt |= ICH_PCM_4;
else if (runtime->channels == 6)
cnt |= ICH_PCM_6;
else if (runtime->channels == 8)
cnt |= ICH_PCM_8;
if (chip->device_type == DEVICE_NFORCE) {
/* switch back to 2ch once so the 6-channel slot data stays
 * aligned and playback always starts from Front Left
 */
if (cnt & ICH_PCM_246_MASK) {
iputdword(chip, ICHREG(GLOB_CNT), cnt & ~ICH_PCM_246_MASK);
spin_unlock_irq(&chip->reg_lock);
msleep(50); /* grrr... */
spin_lock_irq(&chip->reg_lock);
}
} else if (chip->device_type == DEVICE_INTEL_ICH4) {
if (runtime->sample_bits > 16)
cnt |= ICH_PCM_20BIT;
}
iputdword(chip, ICHREG(GLOB_CNT), cnt);
break;
}
spin_unlock_irq(&chip->reg_lock);
}
static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct ichdev *ichdev = get_ichdev(substream);
ichdev->physbuf = runtime->dma_addr;
ichdev->size = snd_pcm_lib_buffer_bytes(substream);
ichdev->fragsize = snd_pcm_lib_period_bytes(substream);
if (ichdev->ichd == ICHD_PCMOUT) {
snd_intel8x0_setup_pcm_out(chip, runtime);
if (chip->device_type == DEVICE_INTEL_ICH4)
ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
}
snd_intel8x0_setup_periods(chip, ichdev);
ichdev->prepared = 1;
return 0;
}
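/*
 * Reconstruct the stream position from the software fragment
 * accumulator (ichdev->position, advanced in snd_intel8x0_update())
 * and PICB, the count of samples still pending in the current
 * descriptor: bytes = position + fragsize1 - (PICB << pos_shift).
 * For example, with fragsize1 = 0x4000, pos_shift = 1 and PICB =
 * 0x1000, the stream is 0x2000 bytes into the current fragment.
 * CIV and PICB are re-read until stable, since the engine may advance
 * between the two reads.
 */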
static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
size_t ptr1, ptr;
int civ, timeout = 10;
unsigned int position;
spin_lock(&chip->reg_lock);
do {
civ = igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV);
ptr1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb);
position = ichdev->position;
if (ptr1 == 0) {
udelay(10);
continue;
}
if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
continue;
/* An I/O read is very expensive inside a virtual machine
 * as it is emulated. The probability that a subsequent
 * PICB read returns a different value is high enough to
 * loop until the timeout here.
 * An unchanged CIV is a strict enough condition to be sure
 * that PICB is valid inside a VM on an emulated card. */
if (chip->inside_vm)
break;
if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
break;
} while (timeout--);
ptr = ichdev->last_pos;
if (ptr1 != 0) {
ptr1 <<= ichdev->pos_shift;
ptr = ichdev->fragsize1 - ptr1;
ptr += position;
if (ptr < ichdev->last_pos) {
unsigned int pos_base, last_base;
pos_base = position / ichdev->fragsize1;
last_base = ichdev->last_pos / ichdev->fragsize1;
/* another sanity check; ptr1 can go back to full
* before the base position is updated
*/
if (pos_base == last_base)
ptr = ichdev->last_pos;
}
}
ichdev->last_pos = ptr;
spin_unlock(&chip->reg_lock);
if (ptr >= ichdev->size)
return 0;
return bytes_to_frames(substream->runtime, ptr);
}
static const struct snd_pcm_hardware snd_intel8x0_stream =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 128 * 1024,
.period_bytes_min = 32,
.period_bytes_max = 128 * 1024,
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static const unsigned int channels4[] = {
2, 4,
};
static const struct snd_pcm_hw_constraint_list hw_constraints_channels4 = {
.count = ARRAY_SIZE(channels4),
.list = channels4,
.mask = 0,
};
static const unsigned int channels6[] = {
2, 4, 6,
};
static const struct snd_pcm_hw_constraint_list hw_constraints_channels6 = {
.count = ARRAY_SIZE(channels6),
.list = channels6,
.mask = 0,
};
static const unsigned int channels8[] = {
2, 4, 6, 8,
};
static const struct snd_pcm_hw_constraint_list hw_constraints_channels8 = {
.count = ARRAY_SIZE(channels8),
.list = channels8,
.mask = 0,
};
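/*
 * AC'97 multichannel playback supports only even channel counts; the
 * lists above constrain the runtime to 2/4, 2/4/6 or 2/4/6/8 channels
 * according to the slots the codec advertises (multi4/multi6/multi8).
 */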
static int snd_intel8x0_pcm_open(struct snd_pcm_substream *substream, struct ichdev *ichdev)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
ichdev->substream = substream;
runtime->hw = snd_intel8x0_stream;
runtime->hw.rates = ichdev->pcm->rates;
snd_pcm_limit_hw_rates(runtime);
if (chip->device_type == DEVICE_SIS) {
runtime->hw.buffer_bytes_max = 64*1024;
runtime->hw.period_bytes_max = 64*1024;
}
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
runtime->private_data = ichdev;
return 0;
}
static int snd_intel8x0_playback_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
err = snd_intel8x0_pcm_open(substream, &chip->ichd[ICHD_PCMOUT]);
if (err < 0)
return err;
if (chip->multi8) {
runtime->hw.channels_max = 8;
snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
&hw_constraints_channels8);
} else if (chip->multi6) {
runtime->hw.channels_max = 6;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&hw_constraints_channels6);
} else if (chip->multi4) {
runtime->hw.channels_max = 4;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&hw_constraints_channels4);
}
if (chip->dra) {
snd_ac97_pcm_double_rate_rules(runtime);
}
if (chip->smp20bit) {
runtime->hw.formats |= SNDRV_PCM_FMTBIT_S32_LE;
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 20);
}
return 0;
}
static int snd_intel8x0_playback_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ICHD_PCMOUT].substream = NULL;
return 0;
}
static int snd_intel8x0_capture_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ICHD_PCMIN]);
}
static int snd_intel8x0_capture_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ICHD_PCMIN].substream = NULL;
return 0;
}
static int snd_intel8x0_mic_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ICHD_MIC]);
}
static int snd_intel8x0_mic_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ICHD_MIC].substream = NULL;
return 0;
}
static int snd_intel8x0_mic2_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ICHD_MIC2]);
}
static int snd_intel8x0_mic2_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ICHD_MIC2].substream = NULL;
return 0;
}
static int snd_intel8x0_capture2_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ICHD_PCM2IN]);
}
static int snd_intel8x0_capture2_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ICHD_PCM2IN].substream = NULL;
return 0;
}
static int snd_intel8x0_spdif_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
int idx = chip->device_type == DEVICE_NFORCE ? NVD_SPBAR : ICHD_SPBAR;
return snd_intel8x0_pcm_open(substream, &chip->ichd[idx]);
}
static int snd_intel8x0_spdif_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
int idx = chip->device_type == DEVICE_NFORCE ? NVD_SPBAR : ICHD_SPBAR;
chip->ichd[idx].substream = NULL;
return 0;
}
static int snd_intel8x0_ali_ac97spdifout_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
unsigned int val;
spin_lock_irq(&chip->reg_lock);
val = igetdword(chip, ICHREG(ALI_INTERFACECR));
val |= ICH_ALI_IF_AC97SP;
iputdword(chip, ICHREG(ALI_INTERFACECR), val);
/* also needs to set ALI_SC_CODEC_SPDF correctly */
spin_unlock_irq(&chip->reg_lock);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ALID_AC97SPDIFOUT]);
}
static int snd_intel8x0_ali_ac97spdifout_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
unsigned int val;
chip->ichd[ALID_AC97SPDIFOUT].substream = NULL;
spin_lock_irq(&chip->reg_lock);
val = igetdword(chip, ICHREG(ALI_INTERFACECR));
val &= ~ICH_ALI_IF_AC97SP;
iputdword(chip, ICHREG(ALI_INTERFACECR), val);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
#if 0 // NYI
static int snd_intel8x0_ali_spdifin_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ALID_SPDIFIN]);
}
static int snd_intel8x0_ali_spdifin_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ALID_SPDIFIN].substream = NULL;
return 0;
}
static int snd_intel8x0_ali_spdifout_open(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
return snd_intel8x0_pcm_open(substream, &chip->ichd[ALID_SPDIFOUT]);
}
static int snd_intel8x0_ali_spdifout_close(struct snd_pcm_substream *substream)
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
chip->ichd[ALID_SPDIFOUT].substream = NULL;
return 0;
}
#endif
static const struct snd_pcm_ops snd_intel8x0_playback_ops = {
.open = snd_intel8x0_playback_open,
.close = snd_intel8x0_playback_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_capture_ops = {
.open = snd_intel8x0_capture_open,
.close = snd_intel8x0_capture_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_capture_mic_ops = {
.open = snd_intel8x0_mic_open,
.close = snd_intel8x0_mic_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_capture_mic2_ops = {
.open = snd_intel8x0_mic2_open,
.close = snd_intel8x0_mic2_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_capture2_ops = {
.open = snd_intel8x0_capture2_open,
.close = snd_intel8x0_capture2_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_spdif_ops = {
.open = snd_intel8x0_spdif_open,
.close = snd_intel8x0_spdif_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_ali_playback_ops = {
.open = snd_intel8x0_playback_open,
.close = snd_intel8x0_playback_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_ali_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_ali_capture_ops = {
.open = snd_intel8x0_capture_open,
.close = snd_intel8x0_capture_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_ali_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_ali_capture_mic_ops = {
.open = snd_intel8x0_mic_open,
.close = snd_intel8x0_mic_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_ali_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static const struct snd_pcm_ops snd_intel8x0_ali_ac97spdifout_ops = {
.open = snd_intel8x0_ali_ac97spdifout_open,
.close = snd_intel8x0_ali_ac97spdifout_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_ali_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
#if 0 // NYI
static struct snd_pcm_ops snd_intel8x0_ali_spdifin_ops = {
.open = snd_intel8x0_ali_spdifin_open,
.close = snd_intel8x0_ali_spdifin_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
static struct snd_pcm_ops snd_intel8x0_ali_spdifout_ops = {
.open = snd_intel8x0_ali_spdifout_open,
.close = snd_intel8x0_ali_spdifout_close,
.hw_params = snd_intel8x0_hw_params,
.hw_free = snd_intel8x0_hw_free,
.prepare = snd_intel8x0_pcm_prepare,
.trigger = snd_intel8x0_pcm_trigger,
.pointer = snd_intel8x0_pcm_pointer,
};
#endif // NYI
struct ich_pcm_table {
char *suffix;
const struct snd_pcm_ops *playback_ops;
const struct snd_pcm_ops *capture_ops;
size_t prealloc_size;
size_t prealloc_max_size;
int ac97_idx;
};
#define intel8x0_dma_type(chip) \
((chip)->fix_nocache ? SNDRV_DMA_TYPE_DEV_WC : SNDRV_DMA_TYPE_DEV)
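/*
 * PCM devices are created table-driven: one ich_pcm_table entry per
 * ALSA PCM device, with a per-chipset table below.  Entries with an
 * ac97_idx are instantiated only if snd_ac97_pcm_assign() attached an
 * AC'97 stream to that DMA channel.
 */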
static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
const struct ich_pcm_table *rec)
{
struct snd_pcm *pcm;
int err;
char name[32];
if (rec->suffix)
sprintf(name, "Intel ICH - %s", rec->suffix);
else
strcpy(name, "Intel ICH");
err = snd_pcm_new(chip->card, name, device,
rec->playback_ops ? 1 : 0,
rec->capture_ops ? 1 : 0, &pcm);
if (err < 0)
return err;
if (rec->playback_ops)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, rec->playback_ops);
if (rec->capture_ops)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, rec->capture_ops);
pcm->private_data = chip;
pcm->info_flags = 0;
if (rec->suffix)
sprintf(pcm->name, "%s - %s", chip->card->shortname, rec->suffix);
else
strcpy(pcm->name, chip->card->shortname);
chip->pcm[device] = pcm;
snd_pcm_set_managed_buffer_all(pcm, intel8x0_dma_type(chip),
&chip->pci->dev,
rec->prealloc_size, rec->prealloc_max_size);
if (rec->playback_ops &&
rec->playback_ops->open == snd_intel8x0_playback_open) {
struct snd_pcm_chmap *chmap;
int chs = 2;
if (chip->multi8)
chs = 8;
else if (chip->multi6)
chs = 6;
else if (chip->multi4)
chs = 4;
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps, chs, 0,
&chmap);
if (err < 0)
return err;
chmap->channel_mask = SND_PCM_CHMAP_MASK_2468;
chip->ac97[0]->chmaps[SNDRV_PCM_STREAM_PLAYBACK] = chmap;
}
return 0;
}
static const struct ich_pcm_table intel_pcms[] = {
{
.playback_ops = &snd_intel8x0_playback_ops,
.capture_ops = &snd_intel8x0_capture_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
},
{
.suffix = "MIC ADC",
.capture_ops = &snd_intel8x0_capture_mic_ops,
.prealloc_size = 0,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ICHD_MIC,
},
{
.suffix = "MIC2 ADC",
.capture_ops = &snd_intel8x0_capture_mic2_ops,
.prealloc_size = 0,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ICHD_MIC2,
},
{
.suffix = "ADC2",
.capture_ops = &snd_intel8x0_capture2_ops,
.prealloc_size = 0,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ICHD_PCM2IN,
},
{
.suffix = "IEC958",
.playback_ops = &snd_intel8x0_spdif_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ICHD_SPBAR,
},
};
static const struct ich_pcm_table nforce_pcms[] = {
{
.playback_ops = &snd_intel8x0_playback_ops,
.capture_ops = &snd_intel8x0_capture_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
},
{
.suffix = "MIC ADC",
.capture_ops = &snd_intel8x0_capture_mic_ops,
.prealloc_size = 0,
.prealloc_max_size = 128 * 1024,
.ac97_idx = NVD_MIC,
},
{
.suffix = "IEC958",
.playback_ops = &snd_intel8x0_spdif_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
.ac97_idx = NVD_SPBAR,
},
};
static const struct ich_pcm_table ali_pcms[] = {
{
.playback_ops = &snd_intel8x0_ali_playback_ops,
.capture_ops = &snd_intel8x0_ali_capture_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
},
{
.suffix = "MIC ADC",
.capture_ops = &snd_intel8x0_ali_capture_mic_ops,
.prealloc_size = 0,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ALID_MIC,
},
{
.suffix = "IEC958",
.playback_ops = &snd_intel8x0_ali_ac97spdifout_ops,
/* .capture_ops = &snd_intel8x0_ali_spdifin_ops, */
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
.ac97_idx = ALID_AC97SPDIFOUT,
},
#if 0 // NYI
{
.suffix = "HW IEC958",
.playback_ops = &snd_intel8x0_ali_spdifout_ops,
.prealloc_size = 64 * 1024,
.prealloc_max_size = 128 * 1024,
},
#endif
};
static int snd_intel8x0_pcm(struct intel8x0 *chip)
{
int i, tblsize, device, err;
const struct ich_pcm_table *tbl, *rec;
switch (chip->device_type) {
case DEVICE_INTEL_ICH4:
tbl = intel_pcms;
tblsize = ARRAY_SIZE(intel_pcms);
if (spdif_aclink)
tblsize--;
break;
case DEVICE_NFORCE:
tbl = nforce_pcms;
tblsize = ARRAY_SIZE(nforce_pcms);
if (spdif_aclink)
tblsize--;
break;
case DEVICE_ALI:
tbl = ali_pcms;
tblsize = ARRAY_SIZE(ali_pcms);
break;
default:
tbl = intel_pcms;
tblsize = 2;
break;
}
device = 0;
for (i = 0; i < tblsize; i++) {
rec = tbl + i;
if (i > 0 && rec->ac97_idx) {
/* activate this PCM only when an AC'97 stream was assigned to it */
if (! chip->ichd[rec->ac97_idx].pcm)
continue;
}
err = snd_intel8x0_pcm1(chip, device, rec);
if (err < 0)
return err;
device++;
}
chip->pcm_devs = device;
return 0;
}
/*
* Mixer part
*/
static void snd_intel8x0_mixer_free_ac97_bus(struct snd_ac97_bus *bus)
{
struct intel8x0 *chip = bus->private_data;
chip->ac97_bus = NULL;
}
static void snd_intel8x0_mixer_free_ac97(struct snd_ac97 *ac97)
{
struct intel8x0 *chip = ac97->private_data;
chip->ac97[ac97->num] = NULL;
}
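/*
 * Mapping of ALSA streams to AC'97 frame slots.  For each stream,
 * r[0] describes the normal-rate slot assignment and r[1] (where
 * present) the double-rate (DRA) assignment; snd_ac97_pcm_assign()
 * matches these templates against what the codecs actually provide.
 */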
static const struct ac97_pcm ac97_pcm_defs[] = {
/* front PCM */
{
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT) |
(1 << AC97_SLOT_PCM_CENTER) |
(1 << AC97_SLOT_PCM_SLEFT) |
(1 << AC97_SLOT_PCM_SRIGHT) |
(1 << AC97_SLOT_LFE)
},
{
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT) |
(1 << AC97_SLOT_PCM_LEFT_0) |
(1 << AC97_SLOT_PCM_RIGHT_0)
}
}
},
/* PCM IN #1 */
{
.stream = 1,
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT)
}
}
},
/* MIC IN #1 */
{
.stream = 1,
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_MIC)
}
}
},
/* S/PDIF PCM */
{
.exclusive = 1,
.spdif = 1,
.r = { {
.slots = (1 << AC97_SLOT_SPDIF_LEFT2) |
(1 << AC97_SLOT_SPDIF_RIGHT2)
}
}
},
/* PCM IN #2 */
{
.stream = 1,
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_PCM_LEFT) |
(1 << AC97_SLOT_PCM_RIGHT)
}
}
},
/* MIC IN #2 */
{
.stream = 1,
.exclusive = 1,
.r = { {
.slots = (1 << AC97_SLOT_MIC)
}
}
},
};
static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x0e11,
.subdevice = 0x000e,
.name = "Compaq Deskpro EN", /* AD1885 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x0e11,
.subdevice = 0x008a,
.name = "Compaq Evo W4000", /* AD1885 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x0e11,
.subdevice = 0x00b8,
.name = "Compaq Evo D510C",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x0e11,
.subdevice = 0x0860,
.name = "HP/Compaq nx7010",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x1014,
.subdevice = 0x0534,
.name = "ThinkPad X31",
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x1014,
.subdevice = 0x1f00,
.name = "MS-9128",
.type = AC97_TUNE_ALC_JACK
},
{
.subvendor = 0x1014,
.subdevice = 0x0267,
.name = "IBM NetVista A30p", /* AD1981B */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1025,
.subdevice = 0x0082,
.name = "Acer Travelmate 2310",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1025,
.subdevice = 0x0083,
.name = "Acer Aspire 3003LCi",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x00d8,
.name = "Dell Precision 530", /* AD1885 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x010d,
.name = "Dell", /* which model? AD1885 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0126,
.name = "Dell Optiplex GX260", /* AD1981A */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x012c,
.name = "Dell Precision 650", /* AD1981A */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x012d,
.name = "Dell Precision 450", /* AD1981B*/
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0147,
.name = "Dell", /* which model? AD1981B*/
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0151,
.name = "Dell Optiplex GX270", /* AD1981B */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x014e,
.name = "Dell D800", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0163,
.name = "Dell Unknown", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x016a,
.name = "Dell Inspiron 8600", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0182,
.name = "Dell Latitude D610", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0186,
.name = "Dell Latitude D810", /* cf. Malone #41015 */
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x1028,
.subdevice = 0x0188,
.name = "Dell Inspiron 6000",
.type = AC97_TUNE_HP_MUTE_LED /* cf. Malone #41015 */
},
{
.subvendor = 0x1028,
.subdevice = 0x0189,
.name = "Dell Inspiron 9300",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x1028,
.subdevice = 0x0191,
.name = "Dell Inspiron 8600",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x103c,
.subdevice = 0x006d,
.name = "HP zv5000",
.type = AC97_TUNE_MUTE_LED /*AD1981B*/
},
{ /* FIXME: which codec? */
.subvendor = 0x103c,
.subdevice = 0x00c3,
.name = "HP xw6000",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x103c,
.subdevice = 0x088c,
.name = "HP nc8000",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x0890,
.name = "HP nc6000",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x129d,
.name = "HP xw8000",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x103c,
.subdevice = 0x0938,
.name = "HP nc4200",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x099c,
.name = "HP nx6110/nc6120",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x0944,
.name = "HP nc6220",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x0934,
.name = "HP nc8220",
.type = AC97_TUNE_HP_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x12f1,
.name = "HP xw8200", /* AD1981B*/
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x103c,
.subdevice = 0x12f2,
.name = "HP xw6200",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x103c,
.subdevice = 0x3008,
.name = "HP xw4200", /* AD1981B*/
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x104d,
.subdevice = 0x8144,
.name = "Sony",
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x104d,
.subdevice = 0x8197,
.name = "Sony S1XP",
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x104d,
.subdevice = 0x81c0,
.name = "Sony VAIO VGN-T350P", /*AD1981B*/
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x104d,
.subdevice = 0x81c5,
.name = "Sony VAIO VGN-B1VP", /*AD1981B*/
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x1043,
.subdevice = 0x80f3,
.name = "ASUS ICH5/AD1985",
.type = AC97_TUNE_AD_SHARING
},
{
.subvendor = 0x10cf,
.subdevice = 0x11c3,
.name = "Fujitsu-Siemens E4010",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x1225,
.name = "Fujitsu-Siemens T3010",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x1253,
.name = "Fujitsu S6210", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x127d,
.name = "Fujitsu Lifebook P7010",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x127e,
.name = "Fujitsu Lifebook C1211D",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x12ec,
.name = "Fujitsu-Siemens 4010",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10cf,
.subdevice = 0x12f2,
.name = "Fujitsu-Siemens Celsius H320",
.type = AC97_TUNE_SWAP_HP
},
{
.subvendor = 0x10f1,
.subdevice = 0x2665,
.name = "Fujitsu-Siemens Celsius", /* AD1981? */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10f1,
.subdevice = 0x2885,
.name = "AMD64 Mobo", /* ALC650 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10f1,
.subdevice = 0x2895,
.name = "Tyan Thunder K8WE",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x10f7,
.subdevice = 0x834c,
.name = "Panasonic CF-R4",
.type = AC97_TUNE_HP_ONLY,
},
{
.subvendor = 0x110a,
.subdevice = 0x0056,
.name = "Fujitsu-Siemens Scenic", /* AD1981? */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x11d4,
.subdevice = 0x5375,
.name = "ADI AD1985 (discrete)",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1462,
.subdevice = 0x5470,
.name = "MSI P4 ATX 645 Ultra",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x161f,
.subdevice = 0x202f,
.name = "Gateway M520",
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x161f,
.subdevice = 0x203a,
.name = "Gateway 4525GZ", /* AD1981B */
.type = AC97_TUNE_INV_EAPD
},
{
.subvendor = 0x1734,
.subdevice = 0x0088,
.name = "Fujitsu-Siemens D1522", /* AD1981 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x8086,
.subdevice = 0x2000,
.mask = 0xfff0,
.name = "Intel ICH5/AD1985",
.type = AC97_TUNE_AD_SHARING
},
{
.subvendor = 0x8086,
.subdevice = 0x4000,
.mask = 0xfff0,
.name = "Intel ICH5/AD1985",
.type = AC97_TUNE_AD_SHARING
},
{
.subvendor = 0x8086,
.subdevice = 0x4856,
.name = "Intel D845WN (82801BA)",
.type = AC97_TUNE_SWAP_HP
},
{
.subvendor = 0x8086,
.subdevice = 0x4d44,
.name = "Intel D850EMV2", /* AD1885 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x8086,
.subdevice = 0x4d56,
.name = "Intel ICH/AD1885",
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x8086,
.subdevice = 0x6000,
.mask = 0xfff0,
.name = "Intel ICH5/AD1985",
.type = AC97_TUNE_AD_SHARING
},
{
.subvendor = 0x8086,
.subdevice = 0xe000,
.mask = 0xfff0,
.name = "Intel ICH5/AD1985",
.type = AC97_TUNE_AD_SHARING
},
#if 0 /* FIXME: this seems wrong on most boards */
{
.subvendor = 0x8086,
.subdevice = 0xa000,
.mask = 0xfff0,
.name = "Intel ICH5/AD1985",
.type = AC97_TUNE_HP_ONLY
},
#endif
{ } /* terminator */
};
static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
const char *quirk_override)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
int err;
unsigned int i, codecs;
unsigned int glob_sta = 0;
const struct snd_ac97_bus_ops *ops;
static const struct snd_ac97_bus_ops standard_bus_ops = {
.write = snd_intel8x0_codec_write,
.read = snd_intel8x0_codec_read,
};
static const struct snd_ac97_bus_ops ali_bus_ops = {
.write = snd_intel8x0_ali_codec_write,
.read = snd_intel8x0_ali_codec_read,
};
chip->spdif_idx = -1; /* use PCMOUT (or disabled) */
if (!spdif_aclink) {
switch (chip->device_type) {
case DEVICE_NFORCE:
chip->spdif_idx = NVD_SPBAR;
break;
case DEVICE_ALI:
chip->spdif_idx = ALID_AC97SPDIFOUT;
break;
case DEVICE_INTEL_ICH4:
chip->spdif_idx = ICHD_SPBAR;
break;
}
}
chip->in_ac97_init = 1;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.private_free = snd_intel8x0_mixer_free_ac97;
ac97.scaps = AC97_SCAP_SKIP_MODEM | AC97_SCAP_POWER_SAVE;
if (chip->xbox)
ac97.scaps |= AC97_SCAP_DETECT_BY_VENDOR;
if (chip->device_type != DEVICE_ALI) {
glob_sta = igetdword(chip, ICHREG(GLOB_STA));
ops = &standard_bus_ops;
chip->in_sdin_init = 1;
codecs = 0;
for (i = 0; i < chip->max_codecs; i++) {
if (! (glob_sta & chip->codec_bit[i]))
continue;
if (chip->device_type == DEVICE_INTEL_ICH4) {
snd_intel8x0_codec_read_test(chip, codecs);
chip->ac97_sdin[codecs] =
igetbyte(chip, ICHREG(SDM)) & ICH_LDI_MASK;
if (snd_BUG_ON(chip->ac97_sdin[codecs] >= 3))
chip->ac97_sdin[codecs] = 0;
} else
chip->ac97_sdin[codecs] = i;
codecs++;
}
chip->in_sdin_init = 0;
if (! codecs)
codecs = 1;
} else {
ops = &ali_bus_ops;
codecs = 1;
/* detect the secondary codec */
for (i = 0; i < 100; i++) {
unsigned int reg = igetdword(chip, ICHREG(ALI_RTSR));
if (reg & 0x40) {
codecs = 2;
break;
}
iputdword(chip, ICHREG(ALI_RTSR), reg | 0x40);
udelay(1);
}
}
err = snd_ac97_bus(chip->card, 0, ops, chip, &pbus);
if (err < 0)
goto __err;
pbus->private_free = snd_intel8x0_mixer_free_ac97_bus;
if (ac97_clock >= 8000 && ac97_clock <= 48000)
pbus->clock = ac97_clock;
/* FIXME: my test board doesn't work well with VRA... */
if (chip->device_type == DEVICE_ALI)
pbus->no_vra = 1;
else
pbus->dra = 1;
chip->ac97_bus = pbus;
chip->ncodecs = codecs;
ac97.pci = chip->pci;
for (i = 0; i < codecs; i++) {
ac97.num = i;
err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
if (err < 0) {
if (err != -EACCES)
dev_err(chip->card->dev,
"Unable to initialize codec #%d\n", i);
if (i == 0)
goto __err;
}
}
/* tune up the primary codec */
snd_ac97_tune_hardware(chip->ac97[0], ac97_quirks, quirk_override);
/* enable separate SDINs for ICH4 */
if (chip->device_type == DEVICE_INTEL_ICH4)
pbus->isdin = 1;
/* find the available PCM streams */
i = ARRAY_SIZE(ac97_pcm_defs);
if (chip->device_type != DEVICE_INTEL_ICH4)
i -= 2; /* do not allocate PCM2IN and MIC2 */
if (chip->spdif_idx < 0)
i--; /* do not allocate S/PDIF */
err = snd_ac97_pcm_assign(pbus, i, ac97_pcm_defs);
if (err < 0)
goto __err;
chip->ichd[ICHD_PCMOUT].pcm = &pbus->pcms[0];
chip->ichd[ICHD_PCMIN].pcm = &pbus->pcms[1];
chip->ichd[ICHD_MIC].pcm = &pbus->pcms[2];
if (chip->spdif_idx >= 0)
chip->ichd[chip->spdif_idx].pcm = &pbus->pcms[3];
if (chip->device_type == DEVICE_INTEL_ICH4) {
chip->ichd[ICHD_PCM2IN].pcm = &pbus->pcms[4];
chip->ichd[ICHD_MIC2].pcm = &pbus->pcms[5];
}
/* enable separate SDINs for ICH4 */
if (chip->device_type == DEVICE_INTEL_ICH4) {
struct ac97_pcm *pcm = chip->ichd[ICHD_PCM2IN].pcm;
u8 tmp = igetbyte(chip, ICHREG(SDM));
tmp &= ~(ICH_DI2L_MASK|ICH_DI1L_MASK);
if (pcm) {
tmp |= ICH_SE; /* steer enable for multiple SDINs */
tmp |= chip->ac97_sdin[0] << ICH_DI1L_SHIFT;
for (i = 1; i < 4; i++) {
if (pcm->r[0].codec[i]) {
tmp |= chip->ac97_sdin[pcm->r[0].codec[i]->num] << ICH_DI2L_SHIFT;
break;
}
}
} else {
tmp &= ~ICH_SE; /* steer disable */
}
iputbyte(chip, ICHREG(SDM), tmp);
}
if (pbus->pcms[0].r[0].slots & (1 << AC97_SLOT_PCM_SLEFT)) {
chip->multi4 = 1;
if (pbus->pcms[0].r[0].slots & (1 << AC97_SLOT_LFE)) {
chip->multi6 = 1;
if (chip->ac97[0]->flags & AC97_HAS_8CH)
chip->multi8 = 1;
}
}
if (pbus->pcms[0].r[1].rslots[0]) {
chip->dra = 1;
}
if (chip->device_type == DEVICE_INTEL_ICH4) {
if ((igetdword(chip, ICHREG(GLOB_STA)) & ICH_SAMPLE_CAP) == ICH_SAMPLE_16_20)
chip->smp20bit = 1;
}
if (chip->device_type == DEVICE_NFORCE && !spdif_aclink) {
/* 48kHz only */
chip->ichd[chip->spdif_idx].pcm->rates = SNDRV_PCM_RATE_48000;
}
if (chip->device_type == DEVICE_INTEL_ICH4 && !spdif_aclink) {
/* use slot 10/11 for SPDIF */
u32 val;
val = igetdword(chip, ICHREG(GLOB_CNT)) & ~ICH_PCM_SPDIF_MASK;
val |= ICH_PCM_SPDIF_1011;
iputdword(chip, ICHREG(GLOB_CNT), val);
snd_ac97_update_bits(chip->ac97[0], AC97_EXTENDED_STATUS, 0x03 << 4, 0x03 << 4);
}
chip->in_ac97_init = 0;
return 0;
__err:
/* clear the cold-reset bit for the next chance */
if (chip->device_type != DEVICE_ALI)
iputdword(chip, ICHREG(GLOB_CNT),
igetdword(chip, ICHREG(GLOB_CNT)) & ~ICH_AC97COLD);
return err;
}
/*
 * chip reset and initialization
 */
static void do_ali_reset(struct intel8x0 *chip)
{
iputdword(chip, ICHREG(ALI_SCR), ICH_ALI_SC_RESET);
iputdword(chip, ICHREG(ALI_FIFOCR1), 0x83838383);
iputdword(chip, ICHREG(ALI_FIFOCR2), 0x83838383);
iputdword(chip, ICHREG(ALI_FIFOCR3), 0x83838383);
iputdword(chip, ICHREG(ALI_INTERFACECR),
ICH_ALI_IF_PI|ICH_ALI_IF_PO);
iputdword(chip, ICHREG(ALI_INTERRUPTCR), 0x00000000);
iputdword(chip, ICHREG(ALI_INTERRUPTSR), 0x00000000);
}
#ifdef CONFIG_SND_AC97_POWER_SAVE
static const struct snd_pci_quirk ich_chip_reset_mode[] = {
SND_PCI_QUIRK(0x1014, 0x051f, "Thinkpad R32", 1),
{ } /* end */
};
static int snd_intel8x0_ich_chip_cold_reset(struct intel8x0 *chip)
{
unsigned int cnt;
/* ACLink on, 2 channels */
if (snd_pci_quirk_lookup(chip->pci, ich_chip_reset_mode))
return -EIO;
cnt = igetdword(chip, ICHREG(GLOB_CNT));
cnt &= ~(ICH_ACLINK | ICH_PCM_246_MASK);
/* do cold reset - the full ac97 powerdown may leave the controller
* in a warm state but actually it cannot communicate with the codec.
*/
iputdword(chip, ICHREG(GLOB_CNT), cnt & ~ICH_AC97COLD);
cnt = igetdword(chip, ICHREG(GLOB_CNT));
udelay(10);
iputdword(chip, ICHREG(GLOB_CNT), cnt | ICH_AC97COLD);
msleep(1);
return 0;
}
#define snd_intel8x0_ich_chip_can_cold_reset(chip) \
(!snd_pci_quirk_lookup(chip->pci, ich_chip_reset_mode))
#else
#define snd_intel8x0_ich_chip_cold_reset(chip) 0
#define snd_intel8x0_ich_chip_can_cold_reset(chip) (0)
#endif
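/*
 * Reset the AC-link.  If ICH_AC97COLD is clear, the link is still held
 * in cold reset and setting the bit releases it; otherwise a warm
 * reset is requested via ICH_AC97WARM, which the hardware clears again
 * once the reset cycle completes (polled for up to 250 ms below).
 */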
static int snd_intel8x0_ich_chip_reset(struct intel8x0 *chip)
{
unsigned long end_time;
unsigned int cnt;
/* ACLink on, 2 channels */
cnt = igetdword(chip, ICHREG(GLOB_CNT));
cnt &= ~(ICH_ACLINK | ICH_PCM_246_MASK);
/* finish cold or do warm reset */
cnt |= (cnt & ICH_AC97COLD) == 0 ? ICH_AC97COLD : ICH_AC97WARM;
iputdword(chip, ICHREG(GLOB_CNT), cnt);
end_time = (jiffies + (HZ / 4)) + 1;
do {
if ((igetdword(chip, ICHREG(GLOB_CNT)) & ICH_AC97WARM) == 0)
return 0;
schedule_timeout_uninterruptible(1);
} while (time_after_eq(end_time, jiffies));
dev_err(chip->card->dev, "AC'97 warm reset still in progress? [0x%x]\n",
igetdword(chip, ICHREG(GLOB_CNT)));
return -EIO;
}
static int snd_intel8x0_ich_chip_init(struct intel8x0 *chip, int probing)
{
unsigned long end_time;
unsigned int status, nstatus;
unsigned int cnt;
int err;
/* put logic to right state */
/* first clear status bits */
status = ICH_RCS | ICH_MCINT | ICH_POINT | ICH_PIINT;
if (chip->device_type == DEVICE_NFORCE)
status |= ICH_NVSPINT;
cnt = igetdword(chip, ICHREG(GLOB_STA));
iputdword(chip, ICHREG(GLOB_STA), cnt & status);
if (snd_intel8x0_ich_chip_can_cold_reset(chip))
err = snd_intel8x0_ich_chip_cold_reset(chip);
else
err = snd_intel8x0_ich_chip_reset(chip);
if (err < 0)
return err;
if (probing) {
/* wait for any codec ready status.
* Once it becomes ready it should remain ready
* as long as we do not disable the ac97 link.
*/
end_time = jiffies + HZ;
do {
status = igetdword(chip, ICHREG(GLOB_STA)) &
chip->codec_isr_bits;
if (status)
break;
schedule_timeout_uninterruptible(1);
} while (time_after_eq(end_time, jiffies));
if (! status) {
/* no codec is found */
dev_err(chip->card->dev,
"codec_ready: codec is not ready [0x%x]\n",
igetdword(chip, ICHREG(GLOB_STA)));
return -EIO;
}
/* wait for other codecs ready status. */
end_time = jiffies + HZ / 4;
while (status != chip->codec_isr_bits &&
time_after_eq(end_time, jiffies)) {
schedule_timeout_uninterruptible(1);
status |= igetdword(chip, ICHREG(GLOB_STA)) &
chip->codec_isr_bits;
}
} else {
/* resume phase */
int i;
status = 0;
for (i = 0; i < chip->ncodecs; i++)
if (chip->ac97[i])
status |= chip->codec_bit[chip->ac97_sdin[i]];
/* wait until all the probed codecs are ready */
end_time = jiffies + HZ;
do {
nstatus = igetdword(chip, ICHREG(GLOB_STA)) &
chip->codec_isr_bits;
if (status == nstatus)
break;
schedule_timeout_uninterruptible(1);
} while (time_after_eq(end_time, jiffies));
}
if (chip->device_type == DEVICE_SIS) {
/* unmute the output on SIS7012 */
iputword(chip, 0x4c, igetword(chip, 0x4c) | 1);
}
if (chip->device_type == DEVICE_NFORCE && !spdif_aclink) {
/* enable SPDIF interrupt */
unsigned int val;
pci_read_config_dword(chip->pci, 0x4c, &val);
val |= 0x1000000;
pci_write_config_dword(chip->pci, 0x4c, val);
}
return 0;
}
static int snd_intel8x0_ali_chip_init(struct intel8x0 *chip, int probing)
{
u32 reg;
int i = 0;
reg = igetdword(chip, ICHREG(ALI_SCR));
if ((reg & 2) == 0) /* Cold required */
reg |= 2;
else
reg |= 1; /* Warm */
reg &= ~0x80000000; /* ACLink on */
iputdword(chip, ICHREG(ALI_SCR), reg);
for (i = 0; i < HZ / 2; i++) {
if (! (igetdword(chip, ICHREG(ALI_INTERRUPTSR)) & ALI_INT_GPIO))
goto __ok;
schedule_timeout_uninterruptible(1);
}
dev_err(chip->card->dev, "AC'97 reset failed.\n");
if (probing)
return -EIO;
__ok:
for (i = 0; i < HZ / 2; i++) {
reg = igetdword(chip, ICHREG(ALI_RTSR));
if (reg & 0x80) /* primary codec */
break;
iputdword(chip, ICHREG(ALI_RTSR), reg | 0x80);
schedule_timeout_uninterruptible(1);
}
do_ali_reset(chip);
return 0;
}
static int snd_intel8x0_chip_init(struct intel8x0 *chip, int probing)
{
unsigned int i, timeout;
int err;
if (chip->device_type != DEVICE_ALI) {
err = snd_intel8x0_ich_chip_init(chip, probing);
if (err < 0)
return err;
iagetword(chip, 0); /* clear semaphore flag */
} else {
err = snd_intel8x0_ali_chip_init(chip, probing);
if (err < 0)
return err;
}
/* disable interrupts */
for (i = 0; i < chip->bdbars_count; i++)
iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00);
/* reset channels */
for (i = 0; i < chip->bdbars_count; i++)
iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, ICH_RESETREGS);
for (i = 0; i < chip->bdbars_count; i++) {
timeout = 100000;
while (--timeout != 0) {
if ((igetbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset) & ICH_RESETREGS) == 0)
break;
}
if (timeout == 0)
dev_err(chip->card->dev, "reset of registers failed?\n");
}
/* initialize Buffer Descriptor Lists */
for (i = 0; i < chip->bdbars_count; i++)
iputdword(chip, ICH_REG_OFF_BDBAR + chip->ichd[i].reg_offset,
chip->ichd[i].bdbar_addr);
return 0;
}
static void snd_intel8x0_free(struct snd_card *card)
{
struct intel8x0 *chip = card->private_data;
unsigned int i;
if (chip->irq < 0)
goto __hw_end;
/* disable interrupts */
for (i = 0; i < chip->bdbars_count; i++)
iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, 0x00);
/* reset channels */
for (i = 0; i < chip->bdbars_count; i++)
iputbyte(chip, ICH_REG_OFF_CR + chip->ichd[i].reg_offset, ICH_RESETREGS);
if (chip->device_type == DEVICE_NFORCE && !spdif_aclink) {
/* stop the spdif interrupt */
unsigned int val;
pci_read_config_dword(chip->pci, 0x4c, &val);
val &= ~0x1000000;
pci_write_config_dword(chip->pci, 0x4c, val);
}
/* --- */
__hw_end:
if (chip->irq >= 0)
free_irq(chip->irq, chip);
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
static int intel8x0_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0 *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_suspend(chip->ac97[i]);
if (chip->device_type == DEVICE_INTEL_ICH4)
chip->sdm_saved = igetbyte(chip, ICHREG(SDM));
if (chip->irq >= 0) {
free_irq(chip->irq, chip);
chip->irq = -1;
card->sync_irq = -1;
}
return 0;
}
static int intel8x0_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0 *chip = card->private_data;
int i;
snd_intel8x0_chip_init(chip, 0);
if (request_irq(pci->irq, snd_intel8x0_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(dev, "unable to grab IRQ %d, disabling device\n",
pci->irq);
snd_card_disconnect(card);
return -EIO;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
/* re-initialize mixer stuff */
if (chip->device_type == DEVICE_INTEL_ICH4 && !spdif_aclink) {
/* enable separate SDINs for ICH4 */
iputbyte(chip, ICHREG(SDM), chip->sdm_saved);
/* use slot 10/11 for SPDIF */
iputdword(chip, ICHREG(GLOB_CNT),
(igetdword(chip, ICHREG(GLOB_CNT)) & ~ICH_PCM_SPDIF_MASK) |
ICH_PCM_SPDIF_1011);
}
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_resume(chip->ac97[i]);
/* resume status */
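	/*
	 * For each stream that was suspended while in use, re-point the
	 * engine at its buffer descriptor list, restore the LVI/CIV
	 * indexes, and clear stale status bits (the SR bits written below
	 * are write-one-to-clear on this hardware).
	 */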
for (i = 0; i < chip->bdbars_count; i++) {
struct ichdev *ichdev = &chip->ichd[i];
unsigned long port = ichdev->reg_offset;
if (! ichdev->substream || ! ichdev->suspended)
continue;
if (ichdev->ichd == ICHD_PCMOUT)
snd_intel8x0_setup_pcm_out(chip, ichdev->substream->runtime);
iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi);
iputbyte(chip, port + ICH_REG_OFF_CIV, ichdev->civ);
iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
}
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(intel8x0_pm, intel8x0_suspend, intel8x0_resume);
#define INTEL8X0_PM_OPS &intel8x0_pm
#else
#define INTEL8X0_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
#define INTEL8X0_TESTBUF_SIZE 32768 /* large enough for one shot */
static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
{
struct snd_pcm_substream *subs;
struct ichdev *ichdev;
unsigned long port;
unsigned long pos, pos1, t;
int civ, timeout = 1000, attempt = 1;
ktime_t start_time, stop_time;
if (chip->ac97_bus->clock != 48000)
return; /* specified in module option */
if (chip->inside_vm && !ac97_clock)
return; /* no measurement on VM */
__again:
subs = chip->pcm[0]->streams[0].substream;
if (! subs || subs->dma_buffer.bytes < INTEL8X0_TESTBUF_SIZE) {
dev_warn(chip->card->dev,
"no playback buffer allocated - aborting measure ac97 clock\n");
return;
}
ichdev = &chip->ichd[ICHD_PCMOUT];
ichdev->physbuf = subs->dma_buffer.addr;
ichdev->size = ichdev->fragsize = INTEL8X0_TESTBUF_SIZE;
ichdev->substream = NULL; /* don't process interrupts */
/* set rate */
if (snd_ac97_set_rate(chip->ac97[0], AC97_PCM_FRONT_DAC_RATE, 48000) < 0) {
dev_err(chip->card->dev, "cannot set ac97 rate: clock = %d\n",
chip->ac97_bus->clock);
return;
}
snd_intel8x0_setup_periods(chip, ichdev);
port = ichdev->reg_offset;
spin_lock_irq(&chip->reg_lock);
chip->in_measurement = 1;
/* trigger */
if (chip->device_type != DEVICE_ALI)
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE | ICH_STARTBM);
else {
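		/* ALI starts the bus master through its DMACR slot bit
		 * instead of the per-channel start bit */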
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE);
iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot);
}
start_time = ktime_get();
spin_unlock_irq(&chip->reg_lock);
msleep(50);
spin_lock_irq(&chip->reg_lock);
/* check the position */
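	/*
	 * CIV and PICB are re-read until two consecutive reads agree,
	 * since the hardware may update them between the two accesses.
	 */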
do {
civ = igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV);
pos1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb);
if (pos1 == 0) {
udelay(10);
continue;
}
if (civ == igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV) &&
pos1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
break;
} while (timeout--);
if (pos1 == 0) { /* oops, this value is not reliable */
pos = 0;
} else {
pos = ichdev->fragsize1;
pos -= pos1 << ichdev->pos_shift;
pos += ichdev->position;
}
chip->in_measurement = 0;
stop_time = ktime_get();
/* stop */
if (chip->device_type == DEVICE_ALI) {
iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16));
iputbyte(chip, port + ICH_REG_OFF_CR, 0);
while (igetbyte(chip, port + ICH_REG_OFF_CR))
;
} else {
iputbyte(chip, port + ICH_REG_OFF_CR, 0);
while (!(igetbyte(chip, port + ichdev->roff_sr) & ICH_DCH))
;
}
iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS);
spin_unlock_irq(&chip->reg_lock);
if (pos == 0) {
dev_err(chip->card->dev,
"measure - unreliable DMA position..\n");
__retry:
if (attempt < 3) {
msleep(300);
attempt++;
goto __again;
}
goto __end;
}
pos /= 4;
t = ktime_us_delta(stop_time, start_time);
dev_info(chip->card->dev,
"%s: measured %lu usecs (%lu samples)\n", __func__, t, pos);
if (t == 0) {
dev_err(chip->card->dev, "?? calculation error..\n");
goto __retry;
}
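	/*
	 * Convert pos samples over t microseconds into Hz:
	 * clock = pos * 1000000 / t, done in two steps so the intermediate
	 * product cannot overflow an unsigned long on 32-bit systems.
	 * E.g. 2400 samples in 50000 us: 2400000 / 50000 * 1000 = 48000 Hz.
	 */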
pos *= 1000;
pos = (pos / t) * 1000 + ((pos % t) * 1000) / t;
if (pos < 40000 || pos >= 60000) {
/* abnormal value. hw problem? */
dev_info(chip->card->dev, "measured clock %ld rejected\n", pos);
goto __retry;
} else if (pos > 40500 && pos < 41500)
/* first exception - 41000Hz reference clock */
chip->ac97_bus->clock = 41000;
else if (pos > 43600 && pos < 44600)
/* second exception - 44100Hz reference clock */
chip->ac97_bus->clock = 44100;
else if (pos < 47500 || pos > 48500)
/* not 48000Hz, tuning the clock.. */
chip->ac97_bus->clock = (chip->ac97_bus->clock * 48000) / pos;
__end:
dev_info(chip->card->dev, "clocking to %d\n", chip->ac97_bus->clock);
snd_ac97_update_power(chip->ac97[0], AC97_PCM_FRONT_DAC_RATE, 0);
}
static const struct snd_pci_quirk intel8x0_clock_list[] = {
SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
SND_PCI_QUIRK(0x1043, 0x80f3, "AD1985", 48000),
{ } /* terminator */
};
static int intel8x0_in_clock_list(struct intel8x0 *chip)
{
struct pci_dev *pci = chip->pci;
const struct snd_pci_quirk *wl;
wl = snd_pci_quirk_lookup(pci, intel8x0_clock_list);
if (!wl)
return 0;
dev_info(chip->card->dev, "allow list rate for %04x:%04x is %i\n",
pci->subsystem_vendor, pci->subsystem_device, wl->value);
chip->ac97_bus->clock = wl->value;
return 1;
}
static void snd_intel8x0_proc_read(struct snd_info_entry * entry,
struct snd_info_buffer *buffer)
{
struct intel8x0 *chip = entry->private_data;
unsigned int tmp;
snd_iprintf(buffer, "Intel8x0\n\n");
if (chip->device_type == DEVICE_ALI)
return;
tmp = igetdword(chip, ICHREG(GLOB_STA));
snd_iprintf(buffer, "Global control : 0x%08x\n", igetdword(chip, ICHREG(GLOB_CNT)));
snd_iprintf(buffer, "Global status : 0x%08x\n", tmp);
if (chip->device_type == DEVICE_INTEL_ICH4)
snd_iprintf(buffer, "SDM : 0x%08x\n", igetdword(chip, ICHREG(SDM)));
snd_iprintf(buffer, "AC'97 codecs ready :");
if (tmp & chip->codec_isr_bits) {
int i;
static const char *codecs[3] = {
"primary", "secondary", "tertiary"
};
for (i = 0; i < chip->max_codecs; i++)
if (tmp & chip->codec_bit[i])
snd_iprintf(buffer, " %s", codecs[i]);
} else
snd_iprintf(buffer, " none");
snd_iprintf(buffer, "\n");
if (chip->device_type == DEVICE_INTEL_ICH4 ||
chip->device_type == DEVICE_SIS)
snd_iprintf(buffer, "AC'97 codecs SDIN : %i %i %i\n",
chip->ac97_sdin[0],
chip->ac97_sdin[1],
chip->ac97_sdin[2]);
}
static void snd_intel8x0_proc_init(struct intel8x0 *chip)
{
snd_card_ro_proc_new(chip->card, "intel8x0", chip,
snd_intel8x0_proc_read);
}
struct ich_reg_info {
unsigned int int_sta_mask;
unsigned int offset;
};
static const unsigned int ich_codec_bits[3] = {
ICH_PCR, ICH_SCR, ICH_TCR
};
static const unsigned int sis_codec_bits[3] = {
ICH_PCR, ICH_SCR, ICH_SIS_TCR
};
static int snd_intel8x0_inside_vm(struct pci_dev *pci)
{
int result = inside_vm;
char *msg = NULL;
/* check module parameter first (override detection) */
if (result >= 0) {
msg = result ? "enable (forced) VM" : "disable (forced) VM";
goto fini;
}
/* check for known (emulated) devices */
result = 0;
if (pci->subsystem_vendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pci->subsystem_device == PCI_SUBDEVICE_ID_QEMU) {
/* KVM emulated sound, PCI SSID: 1af4:1100 */
msg = "enable KVM";
result = 1;
} else if (pci->subsystem_vendor == 0x1ab8) {
/* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
msg = "enable Parallels VM";
result = 1;
}
fini:
if (msg != NULL)
dev_info(&pci->dev, "%s optimization\n", msg);
return result;
}
static int snd_intel8x0_init(struct snd_card *card,
struct pci_dev *pci,
unsigned long device_type)
{
struct intel8x0 *chip = card->private_data;
int err;
unsigned int i;
unsigned int int_sta_masks;
struct ichdev *ichdev;
static const unsigned int bdbars[] = {
3, /* DEVICE_INTEL */
6, /* DEVICE_INTEL_ICH4 */
3, /* DEVICE_SIS */
6, /* DEVICE_ALI */
4, /* DEVICE_NFORCE */
};
static const struct ich_reg_info intel_regs[6] = {
{ ICH_PIINT, 0 },
{ ICH_POINT, 0x10 },
{ ICH_MCINT, 0x20 },
{ ICH_M2INT, 0x40 },
{ ICH_P2INT, 0x50 },
{ ICH_SPINT, 0x60 },
};
static const struct ich_reg_info nforce_regs[4] = {
{ ICH_PIINT, 0 },
{ ICH_POINT, 0x10 },
{ ICH_MCINT, 0x20 },
{ ICH_NVSPINT, 0x70 },
};
static const struct ich_reg_info ali_regs[6] = {
{ ALI_INT_PCMIN, 0x40 },
{ ALI_INT_PCMOUT, 0x50 },
{ ALI_INT_MICIN, 0x60 },
{ ALI_INT_CODECSPDIFOUT, 0x70 },
{ ALI_INT_SPDIFIN, 0xa0 },
{ ALI_INT_SPDIFOUT, 0xb0 },
};
const struct ich_reg_info *tbl;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&chip->reg_lock);
chip->device_type = device_type;
chip->card = card;
chip->pci = pci;
chip->irq = -1;
/* module parameters */
chip->buggy_irq = buggy_irq;
chip->buggy_semaphore = buggy_semaphore;
if (xbox)
chip->xbox = 1;
chip->inside_vm = snd_intel8x0_inside_vm(pci);
/*
* Intel 82443MX running a 100MHz processor system bus has a hardware
* bug, which aborts PCI busmaster for audio transfer. A workaround
* is to set the pages as non-cached. For details, see the errata in
* http://download.intel.com/design/chipsets/specupdt/24505108.pdf
*/
if (pci->vendor == PCI_VENDOR_ID_INTEL &&
pci->device == PCI_DEVICE_ID_INTEL_440MX)
chip->fix_nocache = 1; /* enable workaround */
err = pci_request_regions(pci, card->shortname);
if (err < 0)
return err;
if (device_type == DEVICE_ALI) {
/* ALI5455 has no ac97 region */
chip->bmaddr = pcim_iomap(pci, 0, 0);
} else {
if (pci_resource_flags(pci, 2) & IORESOURCE_MEM) /* ICH4 and Nforce */
chip->addr = pcim_iomap(pci, 2, 0);
else
chip->addr = pcim_iomap(pci, 0, 0);
if (pci_resource_flags(pci, 3) & IORESOURCE_MEM) /* ICH4 */
chip->bmaddr = pcim_iomap(pci, 3, 0);
else
chip->bmaddr = pcim_iomap(pci, 1, 0);
}
chip->bdbars_count = bdbars[device_type];
/* initialize offsets */
switch (device_type) {
case DEVICE_NFORCE:
tbl = nforce_regs;
break;
case DEVICE_ALI:
tbl = ali_regs;
break;
default:
tbl = intel_regs;
break;
}
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
ichdev->ichd = i;
ichdev->reg_offset = tbl[i].offset;
ichdev->int_sta_mask = tbl[i].int_sta_mask;
if (device_type == DEVICE_SIS) {
/* SiS 7012 swaps the registers */
ichdev->roff_sr = ICH_REG_OFF_PICB;
ichdev->roff_picb = ICH_REG_OFF_SR;
} else {
ichdev->roff_sr = ICH_REG_OFF_SR;
ichdev->roff_picb = ICH_REG_OFF_PICB;
}
if (device_type == DEVICE_ALI)
ichdev->ali_slot = (ichdev->reg_offset - 0x40) / 0x10;
/* SIS7012 handles the pcm data in bytes, others are in samples */
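		/* e.g. a PICB value of 0x40 means 0x40 bytes remaining on
		 * SIS7012 but 0x40 samples (0x80 bytes) on the other chips */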
ichdev->pos_shift = (device_type == DEVICE_SIS) ? 0 : 1;
}
/* allocate buffer descriptor lists */
/* the start of each list must be aligned to 8 bytes */
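	/*
	 * Layout note: this one allocation holds every descriptor list.
	 * Each of the bdbars_count engines gets ICH_MAX_FRAGS entries,
	 * and each entry is two u32 words (buffer address plus a
	 * length/control word), hence the size computed below.
	 */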
chip->bdbars = snd_devm_alloc_pages(&pci->dev, intel8x0_dma_type(chip),
chip->bdbars_count * sizeof(u32) *
ICH_MAX_FRAGS * 2);
if (!chip->bdbars)
return -ENOMEM;
/* tables must be aligned to 8 bytes here, but the kernel pages
are much bigger, so we don't care (on i386) */
int_sta_masks = 0;
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
ichdev->bdbar = ((__le32 *)chip->bdbars->area) +
(i * ICH_MAX_FRAGS * 2);
ichdev->bdbar_addr = chip->bdbars->addr +
(i * sizeof(u32) * ICH_MAX_FRAGS * 2);
int_sta_masks |= ichdev->int_sta_mask;
}
chip->int_sta_reg = device_type == DEVICE_ALI ?
ICH_REG_ALI_INTERRUPTSR : ICH_REG_GLOB_STA;
chip->int_sta_mask = int_sta_masks;
pci_set_master(pci);
switch(chip->device_type) {
case DEVICE_INTEL_ICH4:
/* ICH4 can have three codecs */
chip->max_codecs = 3;
chip->codec_bit = ich_codec_bits;
chip->codec_ready_bits = ICH_PRI | ICH_SRI | ICH_TRI;
break;
case DEVICE_SIS:
/* recent SIS7012 can have three codecs */
chip->max_codecs = 3;
chip->codec_bit = sis_codec_bits;
chip->codec_ready_bits = ICH_PRI | ICH_SRI | ICH_SIS_TRI;
break;
default:
/* others up to two codecs */
chip->max_codecs = 2;
chip->codec_bit = ich_codec_bits;
chip->codec_ready_bits = ICH_PRI | ICH_SRI;
break;
}
for (i = 0; i < chip->max_codecs; i++)
chip->codec_isr_bits |= chip->codec_bit[i];
err = snd_intel8x0_chip_init(chip, 1);
if (err < 0)
return err;
/* request irq after initializing int_sta_mask, etc */
/* NOTE: we don't use devm version here since it's released /
* re-acquired in PM callbacks.
* It's released explicitly in snd_intel8x0_free(), too.
*/
if (request_irq(pci->irq, snd_intel8x0_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_intel8x0_free;
return 0;
}
static struct shortname_table {
unsigned int id;
const char *s;
} shortnames[] = {
{ PCI_DEVICE_ID_INTEL_82801AA_5, "Intel 82801AA-ICH" },
{ PCI_DEVICE_ID_INTEL_82801AB_5, "Intel 82901AB-ICH0" },
{ PCI_DEVICE_ID_INTEL_82801BA_4, "Intel 82801BA-ICH2" },
{ PCI_DEVICE_ID_INTEL_440MX, "Intel 440MX" },
{ PCI_DEVICE_ID_INTEL_82801CA_5, "Intel 82801CA-ICH3" },
{ PCI_DEVICE_ID_INTEL_82801DB_5, "Intel 82801DB-ICH4" },
{ PCI_DEVICE_ID_INTEL_82801EB_5, "Intel ICH5" },
{ PCI_DEVICE_ID_INTEL_ESB_5, "Intel 6300ESB" },
{ PCI_DEVICE_ID_INTEL_ICH6_18, "Intel ICH6" },
{ PCI_DEVICE_ID_INTEL_ICH7_20, "Intel ICH7" },
{ PCI_DEVICE_ID_INTEL_ESB2_14, "Intel ESB2" },
{ PCI_DEVICE_ID_SI_7012, "SiS SI7012" },
{ PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO, "NVidia nForce" },
{ PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO, "NVidia nForce2" },
{ PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO, "NVidia nForce3" },
{ PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO, "NVidia CK8S" },
{ PCI_DEVICE_ID_NVIDIA_CK804_AUDIO, "NVidia CK804" },
{ PCI_DEVICE_ID_NVIDIA_CK8_AUDIO, "NVidia CK8" },
{ 0x003a, "NVidia MCP04" },
{ 0x746d, "AMD AMD8111" },
{ 0x7445, "AMD AMD768" },
{ 0x5455, "ALi M5455" },
{ 0, NULL },
};
static const struct snd_pci_quirk spdif_aclink_defaults[] = {
SND_PCI_QUIRK(0x147b, 0x1c1a, "ASUS KN8", 1),
{ } /* end */
};
/* look up allow/deny list for SPDIF over ac-link */
static int check_default_spdif_aclink(struct pci_dev *pci)
{
const struct snd_pci_quirk *w;
w = snd_pci_quirk_lookup(pci, spdif_aclink_defaults);
if (w) {
if (w->value)
dev_dbg(&pci->dev,
"Using SPDIF over AC-Link for %s\n",
snd_pci_quirk_name(w));
else
dev_dbg(&pci->dev,
"Using integrated SPDIF DMA for %s\n",
snd_pci_quirk_name(w));
return w->value;
}
return 0;
}
static int __snd_intel8x0_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct intel8x0 *chip;
int err;
struct shortname_table *name;
err = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
if (spdif_aclink < 0)
spdif_aclink = check_default_spdif_aclink(pci);
strcpy(card->driver, "ICH");
if (!spdif_aclink) {
switch (pci_id->driver_data) {
case DEVICE_NFORCE:
strcpy(card->driver, "NFORCE");
break;
case DEVICE_INTEL_ICH4:
strcpy(card->driver, "ICH4");
}
}
strcpy(card->shortname, "Intel ICH");
for (name = shortnames; name->id; name++) {
if (pci->device == name->id) {
strcpy(card->shortname, name->s);
break;
}
}
if (buggy_irq < 0) {
/* some Nforce[2] and ICH boards have problems with IRQ handling.
* Needs to return IRQ_HANDLED for unknown irqs.
*/
if (pci_id->driver_data == DEVICE_NFORCE)
buggy_irq = 1;
else
buggy_irq = 0;
}
err = snd_intel8x0_init(card, pci, pci_id->driver_data);
if (err < 0)
return err;
err = snd_intel8x0_mixer(chip, ac97_clock, ac97_quirk);
if (err < 0)
return err;
err = snd_intel8x0_pcm(chip);
if (err < 0)
return err;
snd_intel8x0_proc_init(chip);
snprintf(card->longname, sizeof(card->longname),
"%s with %s at irq %i", card->shortname,
snd_ac97_get_short_name(chip->ac97[0]), chip->irq);
if (ac97_clock == 0 || ac97_clock == 1) {
if (ac97_clock == 0) {
if (intel8x0_in_clock_list(chip) == 0)
intel8x0_measure_ac97_clock(chip);
} else {
intel8x0_measure_ac97_clock(chip);
}
}
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
return 0;
}
static int snd_intel8x0_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
}
static struct pci_driver intel8x0_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_intel8x0_ids,
.probe = snd_intel8x0_probe,
.driver = {
.pm = INTEL8X0_PM_OPS,
},
};
module_pci_driver(intel8x0_driver);
| linux-master | sound/pci/intel8x0.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ALSA driver for ATI IXP 150/200/250 AC97 modem controllers
*
* Copyright (c) 2004 Takashi Iwai <[email protected]>
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
MODULE_AUTHOR("Takashi Iwai <[email protected]>");
MODULE_DESCRIPTION("ATI IXP MC97 controller");
MODULE_LICENSE("GPL");
static int index = -2; /* Exclude the first card */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int ac97_clock = 48000;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for ATI IXP controller.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for ATI IXP controller.");
module_param(ac97_clock, int, 0444);
MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz).");
/* just for backward compatibility */
static bool enable;
module_param(enable, bool, 0444);
/*
*/
#define ATI_REG_ISR 0x00 /* interrupt source */
#define ATI_REG_ISR_MODEM_IN_XRUN (1U<<0)
#define ATI_REG_ISR_MODEM_IN_STATUS (1U<<1)
#define ATI_REG_ISR_MODEM_OUT1_XRUN (1U<<2)
#define ATI_REG_ISR_MODEM_OUT1_STATUS (1U<<3)
#define ATI_REG_ISR_MODEM_OUT2_XRUN (1U<<4)
#define ATI_REG_ISR_MODEM_OUT2_STATUS (1U<<5)
#define ATI_REG_ISR_MODEM_OUT3_XRUN (1U<<6)
#define ATI_REG_ISR_MODEM_OUT3_STATUS (1U<<7)
#define ATI_REG_ISR_PHYS_INTR (1U<<8)
#define ATI_REG_ISR_PHYS_MISMATCH (1U<<9)
#define ATI_REG_ISR_CODEC0_NOT_READY (1U<<10)
#define ATI_REG_ISR_CODEC1_NOT_READY (1U<<11)
#define ATI_REG_ISR_CODEC2_NOT_READY (1U<<12)
#define ATI_REG_ISR_NEW_FRAME (1U<<13)
#define ATI_REG_ISR_MODEM_GPIO_DATA (1U<<14)
#define ATI_REG_IER 0x04 /* interrupt enable */
#define ATI_REG_IER_MODEM_IN_XRUN_EN (1U<<0)
#define ATI_REG_IER_MODEM_STATUS_EN (1U<<1)
#define ATI_REG_IER_MODEM_OUT1_XRUN_EN (1U<<2)
#define ATI_REG_IER_MODEM_OUT2_XRUN_EN (1U<<4)
#define ATI_REG_IER_MODEM_OUT3_XRUN_EN (1U<<6)
#define ATI_REG_IER_PHYS_INTR_EN (1U<<8)
#define ATI_REG_IER_PHYS_MISMATCH_EN (1U<<9)
#define ATI_REG_IER_CODEC0_INTR_EN (1U<<10)
#define ATI_REG_IER_CODEC1_INTR_EN (1U<<11)
#define ATI_REG_IER_CODEC2_INTR_EN (1U<<12)
#define ATI_REG_IER_NEW_FRAME_EN (1U<<13) /* (RO) */
#define ATI_REG_IER_MODEM_GPIO_DATA_EN (1U<<14) /* (WO) modem is running */
#define ATI_REG_IER_MODEM_SET_BUS_BUSY (1U<<15)
#define ATI_REG_CMD 0x08 /* command */
#define ATI_REG_CMD_POWERDOWN (1U<<0)
#define ATI_REG_CMD_MODEM_RECEIVE_EN (1U<<1) /* modem only */
#define ATI_REG_CMD_MODEM_SEND1_EN (1U<<2) /* modem only */
#define ATI_REG_CMD_MODEM_SEND2_EN (1U<<3) /* modem only */
#define ATI_REG_CMD_MODEM_SEND3_EN (1U<<4) /* modem only */
#define ATI_REG_CMD_MODEM_STATUS_MEM (1U<<5) /* modem only */
#define ATI_REG_CMD_MODEM_IN_DMA_EN (1U<<8) /* modem only */
#define ATI_REG_CMD_MODEM_OUT_DMA1_EN (1U<<9) /* modem only */
#define ATI_REG_CMD_MODEM_OUT_DMA2_EN (1U<<10) /* modem only */
#define ATI_REG_CMD_MODEM_OUT_DMA3_EN (1U<<11) /* modem only */
#define ATI_REG_CMD_AUDIO_PRESENT (1U<<20)
#define ATI_REG_CMD_MODEM_GPIO_THRU_DMA (1U<<22) /* modem only */
#define ATI_REG_CMD_LOOPBACK_EN (1U<<23)
#define ATI_REG_CMD_PACKED_DIS (1U<<24)
#define ATI_REG_CMD_BURST_EN (1U<<25)
#define ATI_REG_CMD_PANIC_EN (1U<<26)
#define ATI_REG_CMD_MODEM_PRESENT (1U<<27)
#define ATI_REG_CMD_ACLINK_ACTIVE (1U<<28)
#define ATI_REG_CMD_AC_SOFT_RESET (1U<<29)
#define ATI_REG_CMD_AC_SYNC (1U<<30)
#define ATI_REG_CMD_AC_RESET (1U<<31)
#define ATI_REG_PHYS_OUT_ADDR 0x0c
#define ATI_REG_PHYS_OUT_CODEC_MASK (3U<<0)
#define ATI_REG_PHYS_OUT_RW (1U<<2)
#define ATI_REG_PHYS_OUT_ADDR_EN (1U<<8)
#define ATI_REG_PHYS_OUT_ADDR_SHIFT 9
#define ATI_REG_PHYS_OUT_DATA_SHIFT 16
#define ATI_REG_PHYS_IN_ADDR 0x10
#define ATI_REG_PHYS_IN_READ_FLAG (1U<<8)
#define ATI_REG_PHYS_IN_ADDR_SHIFT 9
#define ATI_REG_PHYS_IN_DATA_SHIFT 16
#define ATI_REG_SLOTREQ 0x14
#define ATI_REG_COUNTER 0x18
#define ATI_REG_COUNTER_SLOT (3U<<0) /* slot # */
#define ATI_REG_COUNTER_BITCLOCK (31U<<8)
#define ATI_REG_IN_FIFO_THRESHOLD 0x1c
#define ATI_REG_MODEM_IN_DMA_LINKPTR 0x20
#define ATI_REG_MODEM_IN_DMA_DT_START 0x24 /* RO */
#define ATI_REG_MODEM_IN_DMA_DT_NEXT 0x28 /* RO */
#define ATI_REG_MODEM_IN_DMA_DT_CUR 0x2c /* RO */
#define ATI_REG_MODEM_IN_DMA_DT_SIZE 0x30
#define ATI_REG_MODEM_OUT_FIFO 0x34 /* output threshold */
#define ATI_REG_MODEM_OUT1_DMA_THRESHOLD_MASK (0xf<<16)
#define ATI_REG_MODEM_OUT1_DMA_THRESHOLD_SHIFT 16
#define ATI_REG_MODEM_OUT_DMA1_LINKPTR 0x38
#define ATI_REG_MODEM_OUT_DMA2_LINKPTR 0x3c
#define ATI_REG_MODEM_OUT_DMA3_LINKPTR 0x40
#define ATI_REG_MODEM_OUT_DMA1_DT_START 0x44
#define ATI_REG_MODEM_OUT_DMA1_DT_NEXT 0x48
#define ATI_REG_MODEM_OUT_DMA1_DT_CUR 0x4c
#define ATI_REG_MODEM_OUT_DMA2_DT_START 0x50
#define ATI_REG_MODEM_OUT_DMA2_DT_NEXT 0x54
#define ATI_REG_MODEM_OUT_DMA2_DT_CUR 0x58
#define ATI_REG_MODEM_OUT_DMA3_DT_START 0x5c
#define ATI_REG_MODEM_OUT_DMA3_DT_NEXT 0x60
#define ATI_REG_MODEM_OUT_DMA3_DT_CUR 0x64
#define ATI_REG_MODEM_OUT_DMA12_DT_SIZE 0x68
#define ATI_REG_MODEM_OUT_DMA3_DT_SIZE 0x6c
#define ATI_REG_MODEM_OUT_FIFO_USED 0x70
#define ATI_REG_MODEM_OUT_GPIO 0x74
#define ATI_REG_MODEM_OUT_GPIO_EN 1
#define ATI_REG_MODEM_OUT_GPIO_DATA_SHIFT 5
#define ATI_REG_MODEM_IN_GPIO 0x78
#define ATI_REG_MODEM_MIRROR 0x7c
#define ATI_REG_AUDIO_MIRROR 0x80
#define ATI_REG_MODEM_FIFO_FLUSH 0x88
#define ATI_REG_MODEM_FIFO_OUT1_FLUSH (1U<<0)
#define ATI_REG_MODEM_FIFO_OUT2_FLUSH (1U<<1)
#define ATI_REG_MODEM_FIFO_OUT3_FLUSH (1U<<2)
#define ATI_REG_MODEM_FIFO_IN_FLUSH (1U<<3)
/* LINKPTR */
#define ATI_REG_LINKPTR_EN (1U<<0)
#define ATI_MAX_DESCRIPTORS 256 /* max number of descriptor packets */
struct atiixp_modem;
/*
 * DMA packet descriptor
*/
struct atiixp_dma_desc {
__le32 addr; /* DMA buffer address */
u16 status; /* status bits */
u16 size; /* size of the packet in dwords */
__le32 next; /* address of the next packet descriptor */
};
/*
* stream enum
*/
enum { ATI_DMA_PLAYBACK, ATI_DMA_CAPTURE, NUM_ATI_DMAS }; /* DMAs */
enum { ATI_PCM_OUT, ATI_PCM_IN, NUM_ATI_PCMS }; /* AC97 pcm slots */
enum { ATI_PCMDEV_ANALOG, NUM_ATI_PCMDEVS }; /* pcm devices */
#define NUM_ATI_CODECS 3
/*
* constants and callbacks for each DMA type
*/
struct atiixp_dma_ops {
int type; /* ATI_DMA_XXX */
unsigned int llp_offset; /* LINKPTR offset */
unsigned int dt_cur; /* DT_CUR offset */
/* called from open callback */
void (*enable_dma)(struct atiixp_modem *chip, int on);
/* called from trigger (START/STOP) */
void (*enable_transfer)(struct atiixp_modem *chip, int on);
/* called from trigger (STOP only) */
void (*flush_dma)(struct atiixp_modem *chip);
};
/*
* DMA stream
*/
struct atiixp_dma {
const struct atiixp_dma_ops *ops;
struct snd_dma_buffer desc_buf;
struct snd_pcm_substream *substream; /* assigned PCM substream */
unsigned int buf_addr, buf_bytes; /* DMA buffer address, bytes */
unsigned int period_bytes, periods;
int opened;
int running;
int pcm_open_flag;
int ac97_pcm_type; /* index # of ac97_pcm to access, -1 = not used */
};
/*
* ATI IXP chip
*/
struct atiixp_modem {
struct snd_card *card;
struct pci_dev *pci;
struct resource *res; /* memory i/o */
unsigned long addr;
void __iomem *remap_addr;
int irq;
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97[NUM_ATI_CODECS];
spinlock_t reg_lock;
struct atiixp_dma dmas[NUM_ATI_DMAS];
struct ac97_pcm *pcms[NUM_ATI_PCMS];
struct snd_pcm *pcmdevs[NUM_ATI_PCMDEVS];
int max_channels; /* max. channels for PCM out */
unsigned int codec_not_ready_bits; /* for codec detection */
int spdif_over_aclink; /* passed from the module option */
struct mutex open_mutex; /* playback open mutex */
};
/*
*/
static const struct pci_device_id snd_atiixp_ids[] = {
{ PCI_VDEVICE(ATI, 0x434d), 0 }, /* SB200 */
{ PCI_VDEVICE(ATI, 0x4378), 0 }, /* SB400 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_atiixp_ids);
/*
* lowlevel functions
*/
/*
* update the bits of the given register.
* return 1 if the bits changed.
*/
static int snd_atiixp_update_bits(struct atiixp_modem *chip, unsigned int reg,
unsigned int mask, unsigned int value)
{
void __iomem *addr = chip->remap_addr + reg;
unsigned int data, old_data;
old_data = data = readl(addr);
data &= ~mask;
data |= value;
if (old_data == data)
return 0;
writel(data, addr);
return 1;
}
/*
* macros for easy use
*/
#define atiixp_write(chip,reg,value) \
writel(value, chip->remap_addr + ATI_REG_##reg)
#define atiixp_read(chip,reg) \
readl(chip->remap_addr + ATI_REG_##reg)
#define atiixp_update(chip,reg,mask,val) \
snd_atiixp_update_bits(chip, ATI_REG_##reg, mask, val)
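/*
 * For example, atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, 0) expands
 * to snd_atiixp_update_bits(chip, ATI_REG_CMD, ATI_REG_CMD_AC_RESET, 0),
 * which clears the bit and writes the register only if the value changed.
 */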
/*
* handling DMA packets
*
 * we allocate a linear buffer for the DMA, and split it into packets.
* in a future version, a scatter-gather buffer should be implemented.
*/
#define ATI_DESC_LIST_SIZE \
PAGE_ALIGN(ATI_MAX_DESCRIPTORS * sizeof(struct atiixp_dma_desc))
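/*
 * Size check: 256 descriptors of 12 bytes each is 3072 bytes, which
 * PAGE_ALIGN() rounds up to 4096 on systems with 4 KiB pages.
 */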
/*
* build packets ring for the given buffer size.
*
* IXP handles the buffer descriptors, which are connected as a linked
* list. although we can change the list dynamically, in this version,
* a static RING of buffer descriptors is used.
*
* the ring is built in this function, and is set up to the hardware.
*/
static int atiixp_build_dma_packets(struct atiixp_modem *chip,
struct atiixp_dma *dma,
struct snd_pcm_substream *substream,
unsigned int periods,
unsigned int period_bytes)
{
unsigned int i;
u32 addr, desc_addr;
unsigned long flags;
if (periods > ATI_MAX_DESCRIPTORS)
return -ENOMEM;
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
ATI_DESC_LIST_SIZE, &dma->desc_buf) < 0)
return -ENOMEM;
dma->period_bytes = dma->periods = 0; /* clear */
}
if (dma->periods == periods && dma->period_bytes == period_bytes)
return 0;
/* reset DMA before changing the descriptor table */
spin_lock_irqsave(&chip->reg_lock, flags);
writel(0, chip->remap_addr + dma->ops->llp_offset);
dma->ops->enable_dma(chip, 0);
dma->ops->enable_dma(chip, 1);
spin_unlock_irqrestore(&chip->reg_lock, flags);
/* fill the entries */
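	/*
	 * One descriptor per period, sizes in dwords; the last entry links
	 * back to the first so the controller cycles over the buffer as a
	 * ring without CPU intervention.
	 */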
addr = (u32)substream->runtime->dma_addr;
desc_addr = (u32)dma->desc_buf.addr;
for (i = 0; i < periods; i++) {
struct atiixp_dma_desc *desc;
desc = &((struct atiixp_dma_desc *)dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
desc->status = 0;
desc->size = period_bytes >> 2; /* in dwords */
desc_addr += sizeof(struct atiixp_dma_desc);
if (i == periods - 1)
desc->next = cpu_to_le32((u32)dma->desc_buf.addr);
else
desc->next = cpu_to_le32(desc_addr);
addr += period_bytes;
}
writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
chip->remap_addr + dma->ops->llp_offset);
dma->period_bytes = period_bytes;
dma->periods = periods;
return 0;
}
/*
* remove the ring buffer and release it if assigned
*/
static void atiixp_clear_dma_packets(struct atiixp_modem *chip,
struct atiixp_dma *dma,
struct snd_pcm_substream *substream)
{
if (dma->desc_buf.area) {
writel(0, chip->remap_addr + dma->ops->llp_offset);
snd_dma_free_pages(&dma->desc_buf);
dma->desc_buf.area = NULL;
}
}
/*
* AC97 interface
*/
static int snd_atiixp_acquire_codec(struct atiixp_modem *chip)
{
int timeout = 1000;
while (atiixp_read(chip, PHYS_OUT_ADDR) & ATI_REG_PHYS_OUT_ADDR_EN) {
if (! timeout--) {
dev_warn(chip->card->dev, "codec acquire timeout\n");
return -EBUSY;
}
udelay(1);
}
return 0;
}
static unsigned short snd_atiixp_codec_read(struct atiixp_modem *chip,
unsigned short codec,
unsigned short reg)
{
unsigned int data;
int timeout;
if (snd_atiixp_acquire_codec(chip) < 0)
return 0xffff;
data = (reg << ATI_REG_PHYS_OUT_ADDR_SHIFT) |
ATI_REG_PHYS_OUT_ADDR_EN |
ATI_REG_PHYS_OUT_RW |
codec;
atiixp_write(chip, PHYS_OUT_ADDR, data);
if (snd_atiixp_acquire_codec(chip) < 0)
return 0xffff;
timeout = 1000;
do {
data = atiixp_read(chip, PHYS_IN_ADDR);
if (data & ATI_REG_PHYS_IN_READ_FLAG)
return data >> ATI_REG_PHYS_IN_DATA_SHIFT;
udelay(1);
} while (--timeout);
/* time out may happen during reset */
if (reg < 0x7c)
dev_warn(chip->card->dev, "codec read timeout (reg %x)\n", reg);
return 0xffff;
}
static void snd_atiixp_codec_write(struct atiixp_modem *chip,
unsigned short codec,
unsigned short reg, unsigned short val)
{
unsigned int data;
if (snd_atiixp_acquire_codec(chip) < 0)
return;
data = ((unsigned int)val << ATI_REG_PHYS_OUT_DATA_SHIFT) |
((unsigned int)reg << ATI_REG_PHYS_OUT_ADDR_SHIFT) |
ATI_REG_PHYS_OUT_ADDR_EN | codec;
atiixp_write(chip, PHYS_OUT_ADDR, data);
}
static unsigned short snd_atiixp_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct atiixp_modem *chip = ac97->private_data;
return snd_atiixp_codec_read(chip, ac97->num, reg);
}
static void snd_atiixp_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct atiixp_modem *chip = ac97->private_data;
if (reg == AC97_GPIO_STATUS) {
atiixp_write(chip, MODEM_OUT_GPIO,
(val << ATI_REG_MODEM_OUT_GPIO_DATA_SHIFT) | ATI_REG_MODEM_OUT_GPIO_EN);
return;
}
snd_atiixp_codec_write(chip, ac97->num, reg, val);
}
/*
* reset AC link
*/
static int snd_atiixp_aclink_reset(struct atiixp_modem *chip)
{
int timeout;
/* reset powerdown */
if (atiixp_update(chip, CMD, ATI_REG_CMD_POWERDOWN, 0))
udelay(10);
/* perform a software reset */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SOFT_RESET, ATI_REG_CMD_AC_SOFT_RESET);
atiixp_read(chip, CMD);
udelay(10);
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SOFT_RESET, 0);
timeout = 10;
while (! (atiixp_read(chip, CMD) & ATI_REG_CMD_ACLINK_ACTIVE)) {
/* do a hard reset */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_AC_SYNC);
atiixp_read(chip, CMD);
msleep(1);
atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, ATI_REG_CMD_AC_RESET);
if (!--timeout) {
dev_err(chip->card->dev, "codec reset timeout\n");
break;
}
}
/* deassert RESET and assert SYNC to make sure */
atiixp_update(chip, CMD, ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_AC_SYNC|ATI_REG_CMD_AC_RESET);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int snd_atiixp_aclink_down(struct atiixp_modem *chip)
{
// if (atiixp_read(chip, MODEM_MIRROR) & 0x1) /* modem running, too? */
// return -EBUSY;
atiixp_update(chip, CMD,
ATI_REG_CMD_POWERDOWN | ATI_REG_CMD_AC_RESET,
ATI_REG_CMD_POWERDOWN);
return 0;
}
#endif
/*
* auto-detection of codecs
*
 * the IXP chip can generate interrupts for codecs that are not present.
 * the NEW_FRAME interrupt is used to make sure that an interrupt arrives
 * even when all three codecs are connected and none reports NOT_READY.
*/
#define ALL_CODEC_NOT_READY \
(ATI_REG_ISR_CODEC0_NOT_READY |\
ATI_REG_ISR_CODEC1_NOT_READY |\
ATI_REG_ISR_CODEC2_NOT_READY)
#define CODEC_CHECK_BITS (ALL_CODEC_NOT_READY|ATI_REG_ISR_NEW_FRAME)
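/*
 * The detection handshake: the IER bits armed below make
 * snd_atiixp_interrupt() latch any raised NOT_READY/NEW_FRAME bits into
 * chip->codec_not_ready_bits, which this function polls for up to ~50ms.
 */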
static int snd_atiixp_codec_detect(struct atiixp_modem *chip)
{
int timeout;
chip->codec_not_ready_bits = 0;
atiixp_write(chip, IER, CODEC_CHECK_BITS);
/* wait for the interrupts */
timeout = 50;
while (timeout-- > 0) {
msleep(1);
if (chip->codec_not_ready_bits)
break;
}
atiixp_write(chip, IER, 0); /* disable irqs */
if ((chip->codec_not_ready_bits & ALL_CODEC_NOT_READY) == ALL_CODEC_NOT_READY) {
dev_err(chip->card->dev, "no codec detected!\n");
return -ENXIO;
}
return 0;
}
/*
* enable DMA and irqs
*/
static int snd_atiixp_chip_start(struct atiixp_modem *chip)
{
unsigned int reg;
/* enable burst mode and make sure the modem-present flag is set */
reg = atiixp_read(chip, CMD);
reg |= ATI_REG_CMD_BURST_EN;
if(!(reg & ATI_REG_CMD_MODEM_PRESENT))
reg |= ATI_REG_CMD_MODEM_PRESENT;
atiixp_write(chip, CMD, reg);
/* clear all interrupt source */
atiixp_write(chip, ISR, 0xffffffff);
/* enable irqs */
atiixp_write(chip, IER,
ATI_REG_IER_MODEM_STATUS_EN |
ATI_REG_IER_MODEM_IN_XRUN_EN |
ATI_REG_IER_MODEM_OUT1_XRUN_EN);
return 0;
}
/*
* disable DMA and IRQs
*/
static int snd_atiixp_chip_stop(struct atiixp_modem *chip)
{
/* clear interrupt source */
atiixp_write(chip, ISR, atiixp_read(chip, ISR));
/* disable irqs */
atiixp_write(chip, IER, 0);
return 0;
}
/*
* PCM section
*/
/*
 * pointer callback simply reads XXX_DMA_DT_CUR register as the current
* position. when SG-buffer is implemented, the offset must be calculated
* correctly...
*/
static snd_pcm_uframes_t snd_atiixp_pcm_pointer(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct atiixp_dma *dma = runtime->private_data;
unsigned int curptr;
int timeout = 1000;
while (timeout--) {
curptr = readl(chip->remap_addr + dma->ops->dt_cur);
if (curptr < dma->buf_addr)
continue;
curptr -= dma->buf_addr;
if (curptr >= dma->buf_bytes)
continue;
return bytes_to_frames(runtime, curptr);
}
dev_dbg(chip->card->dev, "invalid DMA pointer read 0x%x (buf=%x)\n",
readl(chip->remap_addr + dma->ops->dt_cur), dma->buf_addr);
return 0;
}
/*
* XRUN detected, and stop the PCM substream
*/
static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
struct atiixp_dma *dma)
{
if (! dma->substream || ! dma->running)
return;
dev_dbg(chip->card->dev, "XRUN detected (DMA %d)\n", dma->ops->type);
snd_pcm_stop_xrun(dma->substream);
}
/*
* the period ack. update the substream.
*/
static void snd_atiixp_update_dma(struct atiixp_modem *chip,
struct atiixp_dma *dma)
{
if (! dma->substream || ! dma->running)
return;
snd_pcm_period_elapsed(dma->substream);
}
/* set BUS_BUSY interrupt bit if any DMA is running */
/* call with spinlock held */
static void snd_atiixp_check_bus_busy(struct atiixp_modem *chip)
{
unsigned int bus_busy;
if (atiixp_read(chip, CMD) & (ATI_REG_CMD_MODEM_SEND1_EN |
ATI_REG_CMD_MODEM_RECEIVE_EN))
bus_busy = ATI_REG_IER_MODEM_SET_BUS_BUSY;
else
bus_busy = 0;
atiixp_update(chip, IER, ATI_REG_IER_MODEM_SET_BUS_BUSY, bus_busy);
}
/* common trigger callback
* calling the lowlevel callbacks in it
*/
static int snd_atiixp_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
int err = 0;
if (snd_BUG_ON(!dma->ops->enable_transfer ||
!dma->ops->flush_dma))
return -EINVAL;
spin_lock(&chip->reg_lock);
switch(cmd) {
case SNDRV_PCM_TRIGGER_START:
dma->ops->enable_transfer(chip, 1);
dma->running = 1;
break;
case SNDRV_PCM_TRIGGER_STOP:
dma->ops->enable_transfer(chip, 0);
dma->running = 0;
break;
default:
err = -EINVAL;
break;
}
if (! err) {
snd_atiixp_check_bus_busy(chip);
if (cmd == SNDRV_PCM_TRIGGER_STOP) {
dma->ops->flush_dma(chip);
snd_atiixp_check_bus_busy(chip);
}
}
spin_unlock(&chip->reg_lock);
return err;
}
/*
* lowlevel callbacks for each DMA type
*
* every callback is supposed to be called in chip->reg_lock spinlock
*/
/* flush FIFO of analog OUT DMA */
static void atiixp_out_flush_dma(struct atiixp_modem *chip)
{
atiixp_write(chip, MODEM_FIFO_FLUSH, ATI_REG_MODEM_FIFO_OUT1_FLUSH);
}
/* enable/disable analog OUT DMA */
static void atiixp_out_enable_dma(struct atiixp_modem *chip, int on)
{
unsigned int data;
data = atiixp_read(chip, CMD);
if (on) {
if (data & ATI_REG_CMD_MODEM_OUT_DMA1_EN)
return;
atiixp_out_flush_dma(chip);
data |= ATI_REG_CMD_MODEM_OUT_DMA1_EN;
} else
data &= ~ATI_REG_CMD_MODEM_OUT_DMA1_EN;
atiixp_write(chip, CMD, data);
}
/* start/stop transfer over OUT DMA */
static void atiixp_out_enable_transfer(struct atiixp_modem *chip, int on)
{
atiixp_update(chip, CMD, ATI_REG_CMD_MODEM_SEND1_EN,
on ? ATI_REG_CMD_MODEM_SEND1_EN : 0);
}
/* enable/disable analog IN DMA */
static void atiixp_in_enable_dma(struct atiixp_modem *chip, int on)
{
atiixp_update(chip, CMD, ATI_REG_CMD_MODEM_IN_DMA_EN,
on ? ATI_REG_CMD_MODEM_IN_DMA_EN : 0);
}
/* start/stop analog IN DMA */
static void atiixp_in_enable_transfer(struct atiixp_modem *chip, int on)
{
if (on) {
unsigned int data = atiixp_read(chip, CMD);
if (! (data & ATI_REG_CMD_MODEM_RECEIVE_EN)) {
data |= ATI_REG_CMD_MODEM_RECEIVE_EN;
atiixp_write(chip, CMD, data);
}
} else
atiixp_update(chip, CMD, ATI_REG_CMD_MODEM_RECEIVE_EN, 0);
}
/* flush FIFO of analog IN DMA */
static void atiixp_in_flush_dma(struct atiixp_modem *chip)
{
atiixp_write(chip, MODEM_FIFO_FLUSH, ATI_REG_MODEM_FIFO_IN_FLUSH);
}
/* set up slots and formats for analog OUT */
static int snd_atiixp_playback_prepare(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
unsigned int data;
spin_lock_irq(&chip->reg_lock);
/* set output threshold */
data = atiixp_read(chip, MODEM_OUT_FIFO);
data &= ~ATI_REG_MODEM_OUT1_DMA_THRESHOLD_MASK;
data |= 0x04 << ATI_REG_MODEM_OUT1_DMA_THRESHOLD_SHIFT;
atiixp_write(chip, MODEM_OUT_FIFO, data);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/* set up slots and formats for analog IN */
static int snd_atiixp_capture_prepare(struct snd_pcm_substream *substream)
{
return 0;
}
/*
* hw_params - allocate the buffer and set up buffer descriptors
*/
static int snd_atiixp_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
int err;
int i;
dma->buf_addr = substream->runtime->dma_addr;
dma->buf_bytes = params_buffer_bytes(hw_params);
err = atiixp_build_dma_packets(chip, dma, substream,
params_periods(hw_params),
params_period_bytes(hw_params));
if (err < 0)
return err;
/* set up modem rate */
for (i = 0; i < NUM_ATI_CODECS; i++) {
if (! chip->ac97[i])
continue;
snd_ac97_write(chip->ac97[i], AC97_LINE1_RATE, params_rate(hw_params));
snd_ac97_write(chip->ac97[i], AC97_LINE1_LEVEL, 0);
}
return err;
}
static int snd_atiixp_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct atiixp_dma *dma = substream->runtime->private_data;
atiixp_clear_dma_packets(chip, dma, substream);
return 0;
}
/*
* pcm hardware definition, identical for all DMA types
*/
static const struct snd_pcm_hardware snd_atiixp_pcm_hw =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000 |
SNDRV_PCM_RATE_KNOT),
.rate_min = 8000,
.rate_max = 16000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 256 * 1024,
.period_bytes_min = 32,
.period_bytes_max = 128 * 1024,
.periods_min = 2,
.periods_max = ATI_MAX_DESCRIPTORS,
};
static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
struct atiixp_dma *dma, int pcm_type)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
static const unsigned int rates[] = { 8000, 9600, 12000, 16000 };
static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
return -EINVAL;
if (dma->opened)
return -EBUSY;
dma->substream = substream;
runtime->hw = snd_atiixp_pcm_hw;
dma->ac97_pcm_type = pcm_type;
err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
runtime->private_data = dma;
/* enable DMA bits */
spin_lock_irq(&chip->reg_lock);
dma->ops->enable_dma(chip, 1);
spin_unlock_irq(&chip->reg_lock);
dma->opened = 1;
return 0;
}
static int snd_atiixp_pcm_close(struct snd_pcm_substream *substream,
struct atiixp_dma *dma)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
/* disable DMA bits */
if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma))
return -EINVAL;
spin_lock_irq(&chip->reg_lock);
dma->ops->enable_dma(chip, 0);
spin_unlock_irq(&chip->reg_lock);
dma->substream = NULL;
dma->opened = 0;
return 0;
}
/*
*/
static int snd_atiixp_playback_open(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 0);
mutex_unlock(&chip->open_mutex);
if (err < 0)
return err;
return 0;
}
static int snd_atiixp_playback_close(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
int err;
mutex_lock(&chip->open_mutex);
err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]);
mutex_unlock(&chip->open_mutex);
return err;
}
static int snd_atiixp_capture_open(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
return snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_CAPTURE], 1);
}
static int snd_atiixp_capture_close(struct snd_pcm_substream *substream)
{
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
return snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_CAPTURE]);
}
/* AC97 playback */
static const struct snd_pcm_ops snd_atiixp_playback_ops = {
.open = snd_atiixp_playback_open,
.close = snd_atiixp_playback_close,
.hw_params = snd_atiixp_pcm_hw_params,
.hw_free = snd_atiixp_pcm_hw_free,
.prepare = snd_atiixp_playback_prepare,
.trigger = snd_atiixp_pcm_trigger,
.pointer = snd_atiixp_pcm_pointer,
};
/* AC97 capture */
static const struct snd_pcm_ops snd_atiixp_capture_ops = {
.open = snd_atiixp_capture_open,
.close = snd_atiixp_capture_close,
.hw_params = snd_atiixp_pcm_hw_params,
.hw_free = snd_atiixp_pcm_hw_free,
.prepare = snd_atiixp_capture_prepare,
.trigger = snd_atiixp_pcm_trigger,
.pointer = snd_atiixp_pcm_pointer,
};
static const struct atiixp_dma_ops snd_atiixp_playback_dma_ops = {
.type = ATI_DMA_PLAYBACK,
.llp_offset = ATI_REG_MODEM_OUT_DMA1_LINKPTR,
.dt_cur = ATI_REG_MODEM_OUT_DMA1_DT_CUR,
.enable_dma = atiixp_out_enable_dma,
.enable_transfer = atiixp_out_enable_transfer,
.flush_dma = atiixp_out_flush_dma,
};
static const struct atiixp_dma_ops snd_atiixp_capture_dma_ops = {
.type = ATI_DMA_CAPTURE,
.llp_offset = ATI_REG_MODEM_IN_DMA_LINKPTR,
.dt_cur = ATI_REG_MODEM_IN_DMA_DT_CUR,
.enable_dma = atiixp_in_enable_dma,
.enable_transfer = atiixp_in_enable_transfer,
.flush_dma = atiixp_in_flush_dma,
};
static int snd_atiixp_pcm_new(struct atiixp_modem *chip)
{
struct snd_pcm *pcm;
int err;
/* initialize constants */
chip->dmas[ATI_DMA_PLAYBACK].ops = &snd_atiixp_playback_dma_ops;
chip->dmas[ATI_DMA_CAPTURE].ops = &snd_atiixp_capture_dma_ops;
/* PCM #0: analog I/O */
err = snd_pcm_new(chip->card, "ATI IXP MC97", ATI_PCMDEV_ANALOG, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_atiixp_capture_ops);
pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
pcm->private_data = chip;
strcpy(pcm->name, "ATI IXP MC97");
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev, 64*1024, 128*1024);
return 0;
}
/*
* interrupt handler
*/
static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
{
struct atiixp_modem *chip = dev_id;
unsigned int status;
status = atiixp_read(chip, ISR);
if (! status)
return IRQ_NONE;
/* process audio DMA */
if (status & ATI_REG_ISR_MODEM_OUT1_XRUN)
snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
else if (status & ATI_REG_ISR_MODEM_OUT1_STATUS)
snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]);
if (status & ATI_REG_ISR_MODEM_IN_XRUN)
snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);
else if (status & ATI_REG_ISR_MODEM_IN_STATUS)
snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]);
/* for codec detection */
if (status & CODEC_CHECK_BITS) {
unsigned int detected;
detected = status & CODEC_CHECK_BITS;
spin_lock(&chip->reg_lock);
chip->codec_not_ready_bits |= detected;
atiixp_update(chip, IER, detected, 0); /* disable the detected irqs */
spin_unlock(&chip->reg_lock);
}
/* ack */
atiixp_write(chip, ISR, status);
return IRQ_HANDLED;
}
/*
* ac97 mixer section
*/
static int snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
int i, err;
int codec_count;
static const struct snd_ac97_bus_ops ops = {
.write = snd_atiixp_ac97_write,
.read = snd_atiixp_ac97_read,
};
static const unsigned int codec_skip[NUM_ATI_CODECS] = {
ATI_REG_ISR_CODEC0_NOT_READY,
ATI_REG_ISR_CODEC1_NOT_READY,
ATI_REG_ISR_CODEC2_NOT_READY,
};
if (snd_atiixp_codec_detect(chip) < 0)
return -ENXIO;
err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
if (err < 0)
return err;
pbus->clock = clock;
chip->ac97_bus = pbus;
codec_count = 0;
for (i = 0; i < NUM_ATI_CODECS; i++) {
if (chip->codec_not_ready_bits & codec_skip[i])
continue;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.pci = chip->pci;
ac97.num = i;
ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
if (err < 0) {
chip->ac97[i] = NULL; /* to be sure */
dev_dbg(chip->card->dev,
"codec %d not available for modem\n", i);
continue;
}
codec_count++;
}
if (! codec_count) {
dev_err(chip->card->dev, "no codec available\n");
return -ENODEV;
}
/* snd_ac97_tune_hardware(chip->ac97, ac97_quirks); */
return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
static int snd_atiixp_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
snd_atiixp_chip_stop(chip);
return 0;
}
static int snd_atiixp_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
snd_atiixp_aclink_reset(chip);
snd_atiixp_chip_start(chip);
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_resume(chip->ac97[i]);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_atiixp_pm, snd_atiixp_suspend, snd_atiixp_resume);
#define SND_ATIIXP_PM_OPS &snd_atiixp_pm
#else
#define SND_ATIIXP_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
/*
* proc interface for register dump
*/
static void snd_atiixp_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct atiixp_modem *chip = entry->private_data;
int i;
for (i = 0; i < 256; i += 4)
snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}
static void snd_atiixp_proc_init(struct atiixp_modem *chip)
{
snd_card_ro_proc_new(chip->card, "atiixp-modem", chip,
snd_atiixp_proc_read);
}
/*
* destructor
*/
static void snd_atiixp_free(struct snd_card *card)
{
snd_atiixp_chip_stop(card->private_data);
}
/*
* constructor for chip instance
*/
static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
{
struct atiixp_modem *chip = card->private_data;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
spin_lock_init(&chip->reg_lock);
mutex_init(&chip->open_mutex);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
err = pcim_iomap_regions(pci, 1 << 0, "ATI IXP MC97");
if (err < 0)
return err;
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pcim_iomap_table(pci)[0];
if (devm_request_irq(&pci->dev, pci->irq, snd_atiixp_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_atiixp_free;
pci_set_master(pci);
return 0;
}
static int __snd_atiixp_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp_modem *chip;
int err;
err = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
strcpy(card->driver, "ATIIXP-MODEM");
strcpy(card->shortname, "ATI IXP Modem");
err = snd_atiixp_init(card, pci);
if (err < 0)
return err;
err = snd_atiixp_aclink_reset(chip);
if (err < 0)
return err;
err = snd_atiixp_mixer_new(chip, ac97_clock);
if (err < 0)
return err;
err = snd_atiixp_pcm_new(chip);
if (err < 0)
return err;
snd_atiixp_proc_init(chip);
snd_atiixp_chip_start(chip);
sprintf(card->longname, "%s rev %x at 0x%lx, irq %i",
card->shortname, pci->revision, chip->addr, chip->irq);
err = snd_card_register(card);
if (err < 0)
return err;
pci_set_drvdata(pci, card);
return 0;
}
static int snd_atiixp_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
}
static struct pci_driver atiixp_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.driver = {
.pm = SND_ATIIXP_PM_OPS,
},
};
module_pci_driver(atiixp_modem_driver);
| linux-master | sound/pci/atiixp_modem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* als300.c - driver for Avance Logic ALS300/ALS300+ soundcards.
* Copyright (C) 2005 by Ash Willis <[email protected]>
*
* TODO
* 4 channel playback for ALS300+
* gameport
* mpu401
* opl3
*
* NOTES
* The BLOCK_COUNTER registers for the ALS300(+) return a figure related to
* the position in the current period, NOT the whole buffer. It is important
* to know which period we are in so we can calculate the correct pointer.
* This is why we always use 2 periods. We can then use a flip-flop variable
* to keep track of what period we are in.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/ac97_codec.h>
#include <sound/opl3.h>
/* snd_als300_set_irq_flag */
#define IRQ_DISABLE 0
#define IRQ_ENABLE 1
/* I/O port layout */
#define AC97_ACCESS 0x00
#define AC97_READ 0x04
#define AC97_STATUS 0x06
#define AC97_DATA_AVAIL (1<<6)
#define AC97_BUSY (1<<7)
#define ALS300_IRQ_STATUS 0x07 /* ALS300 Only */
#define IRQ_PLAYBACK (1<<3)
#define IRQ_CAPTURE (1<<2)
#define GCR_DATA 0x08
#define GCR_INDEX 0x0C
#define ALS300P_DRAM_IRQ_STATUS 0x0D /* ALS300+ Only */
#define MPU_IRQ_STATUS 0x0E /* ALS300 Rev. E+, ALS300+ */
#define ALS300P_IRQ_STATUS 0x0F /* ALS300+ Only */
/* General Control Registers */
#define PLAYBACK_START 0x80
#define PLAYBACK_END 0x81
#define PLAYBACK_CONTROL 0x82
#define TRANSFER_START (1<<16)
#define FIFO_PAUSE (1<<17)
#define RECORD_START 0x83
#define RECORD_END 0x84
#define RECORD_CONTROL 0x85
#define DRAM_WRITE_CONTROL 0x8B
#define WRITE_TRANS_START (1<<16)
#define DRAM_MODE_2 (1<<17)
#define MISC_CONTROL 0x8C
#define IRQ_SET_BIT (1<<15)
#define VMUTE_NORMAL (1<<20)
#define MMUTE_NORMAL (1<<21)
#define MUS_VOC_VOL 0x8E
#define PLAYBACK_BLOCK_COUNTER 0x9A
#define RECORD_BLOCK_COUNTER 0x9B
#define DEBUG_PLAY_REC 0
#if DEBUG_PLAY_REC
#define snd_als300_dbgplay(format, args...) printk(KERN_ERR format, ##args)
#else
#define snd_als300_dbgplay(format, args...)
#endif
enum {DEVICE_ALS300, DEVICE_ALS300_PLUS};
MODULE_AUTHOR("Ash Willis <[email protected]>");
MODULE_DESCRIPTION("Avance Logic ALS300");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for ALS300 sound card.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for ALS300 sound card.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable ALS300 sound card.");
struct snd_als300 {
unsigned long port;
spinlock_t reg_lock;
struct snd_card *card;
struct pci_dev *pci;
struct snd_pcm *pcm;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
struct snd_ac97 *ac97;
struct snd_opl3 *opl3;
struct resource *res_port;
int irq;
int chip_type; /* ALS300 or ALS300+ */
char revision;
};
struct snd_als300_substream_data {
int period_flipflop;
int control_register;
int block_counter_register;
};
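/*
 * period_flipflop implements the two-period trick described in the NOTES
 * above: the interrupt handlers toggle it on every period interrupt, so
 * the driver can tell which half of the ring buffer the DMA is playing.
 */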
static const struct pci_device_id snd_als300_ids[] = {
{ 0x4005, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300 },
{ 0x4005, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300_PLUS },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_als300_ids);
static inline u32 snd_als300_gcr_read(unsigned long port, unsigned short reg)
{
outb(reg, port+GCR_INDEX);
return inl(port+GCR_DATA);
}
static inline void snd_als300_gcr_write(unsigned long port,
unsigned short reg, u32 val)
{
outb(reg, port+GCR_INDEX);
outl(val, port+GCR_DATA);
}
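/*
 * Note: a GCR access is an index write followed by a data access, so it
 * is not atomic by itself; paths that can race (e.g. the PCM prepare
 * code) serialize these accesses with chip->reg_lock.
 */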
/* Enable/Disable Interrupts */
static void snd_als300_set_irq_flag(struct snd_als300 *chip, int cmd)
{
u32 tmp = snd_als300_gcr_read(chip->port, MISC_CONTROL);
/* boolean XOR check, since old vs. new hardware have
directly reversed bit setting for ENABLE and DISABLE.
ALS300+ acts like newer versions of ALS300 */
if (((chip->revision > 5 || chip->chip_type == DEVICE_ALS300_PLUS) ^
(cmd == IRQ_ENABLE)) == 0)
tmp |= IRQ_SET_BIT;
else
tmp &= ~IRQ_SET_BIT;
snd_als300_gcr_write(chip->port, MISC_CONTROL, tmp);
}
static void snd_als300_free(struct snd_card *card)
{
struct snd_als300 *chip = card->private_data;
snd_als300_set_irq_flag(chip, IRQ_DISABLE);
}
static irqreturn_t snd_als300_interrupt(int irq, void *dev_id)
{
u8 status;
struct snd_als300 *chip = dev_id;
struct snd_als300_substream_data *data;
status = inb(chip->port+ALS300_IRQ_STATUS);
if (!status) /* shared IRQ, for different device?? Exit ASAP! */
return IRQ_NONE;
/* ACK everything ASAP */
outb(status, chip->port+ALS300_IRQ_STATUS);
if (status & IRQ_PLAYBACK) {
if (chip->pcm && chip->playback_substream) {
data = chip->playback_substream->runtime->private_data;
data->period_flipflop ^= 1;
snd_pcm_period_elapsed(chip->playback_substream);
snd_als300_dbgplay("IRQ_PLAYBACK\n");
}
}
if (status & IRQ_CAPTURE) {
if (chip->pcm && chip->capture_substream) {
data = chip->capture_substream->runtime->private_data;
data->period_flipflop ^= 1;
snd_pcm_period_elapsed(chip->capture_substream);
snd_als300_dbgplay("IRQ_CAPTURE\n");
}
}
return IRQ_HANDLED;
}
static irqreturn_t snd_als300plus_interrupt(int irq, void *dev_id)
{
u8 general, mpu, dram;
struct snd_als300 *chip = dev_id;
struct snd_als300_substream_data *data;
general = inb(chip->port+ALS300P_IRQ_STATUS);
mpu = inb(chip->port+MPU_IRQ_STATUS);
dram = inb(chip->port+ALS300P_DRAM_IRQ_STATUS);
/* shared IRQ, for different device?? Exit ASAP! */
if ((general == 0) && ((mpu & 0x80) == 0) && ((dram & 0x01) == 0))
return IRQ_NONE;
if (general & IRQ_PLAYBACK) {
if (chip->pcm && chip->playback_substream) {
outb(IRQ_PLAYBACK, chip->port+ALS300P_IRQ_STATUS);
data = chip->playback_substream->runtime->private_data;
data->period_flipflop ^= 1;
snd_pcm_period_elapsed(chip->playback_substream);
snd_als300_dbgplay("IRQ_PLAYBACK\n");
}
}
if (general & IRQ_CAPTURE) {
if (chip->pcm && chip->capture_substream) {
outb(IRQ_CAPTURE, chip->port+ALS300P_IRQ_STATUS);
data = chip->capture_substream->runtime->private_data;
data->period_flipflop ^= 1;
snd_pcm_period_elapsed(chip->capture_substream);
snd_als300_dbgplay("IRQ_CAPTURE\n");
}
}
/* FIXME: Ack other interrupt types. Not important right now as
* those other devices aren't enabled. */
return IRQ_HANDLED;
}
static unsigned short snd_als300_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
int i;
struct snd_als300 *chip = ac97->private_data;
for (i = 0; i < 1000; i++) {
if ((inb(chip->port+AC97_STATUS) & (AC97_BUSY)) == 0)
break;
udelay(10);
}
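	/* issue the read command: the register index goes into bits 24-30
	 * and bit 31 apparently flags a read cycle (the write path below
	 * leaves it clear) */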
outl((reg << 24) | (1 << 31), chip->port+AC97_ACCESS);
for (i = 0; i < 1000; i++) {
if ((inb(chip->port+AC97_STATUS) & (AC97_DATA_AVAIL)) != 0)
break;
udelay(10);
}
return inw(chip->port+AC97_READ);
}
static void snd_als300_ac97_write(struct snd_ac97 *ac97,
unsigned short reg, unsigned short val)
{
int i;
struct snd_als300 *chip = ac97->private_data;
for (i = 0; i < 1000; i++) {
if ((inb(chip->port+AC97_STATUS) & (AC97_BUSY)) == 0)
break;
udelay(10);
}
outl((reg << 24) | val, chip->port+AC97_ACCESS);
}
static int snd_als300_ac97(struct snd_als300 *chip)
{
struct snd_ac97_bus *bus;
struct snd_ac97_template ac97;
int err;
static const struct snd_ac97_bus_ops ops = {
.write = snd_als300_ac97_write,
.read = snd_als300_ac97_read,
};
err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
return snd_ac97_mixer(bus, &ac97, &chip->ac97);
}
/* hardware definition
*
* In AC97 mode, we always use 48k/16bit/stereo.
* Any request to change data type is ignored by
* the card when it is running outside of legacy
* mode.
*/
static const struct snd_pcm_hardware snd_als300_playback_hw =
{
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_S16,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 64,
.period_bytes_max = 32 * 1024,
.periods_min = 2,
.periods_max = 2,
};
static const struct snd_pcm_hardware snd_als300_capture_hw =
{
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = SNDRV_PCM_FMTBIT_S16,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 64,
.period_bytes_max = 32 * 1024,
.periods_min = 2,
.periods_max = 2,
};
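/*
 * Note periods_min == periods_max == 2 in both descriptions: the
 * hardware interrupts once per block (half the buffer), and the
 * pointer callback relies on the period_flipflop toggled in the
 * interrupt handlers to tell which half is currently being
 * transferred.
 */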
static int snd_als300_playback_open(struct snd_pcm_substream *substream)
{
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
chip->playback_substream = substream;
runtime->hw = snd_als300_playback_hw;
runtime->private_data = data;
data->control_register = PLAYBACK_CONTROL;
data->block_counter_register = PLAYBACK_BLOCK_COUNTER;
return 0;
}
static int snd_als300_playback_close(struct snd_pcm_substream *substream)
{
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_als300_substream_data *data;
data = substream->runtime->private_data;
kfree(data);
chip->playback_substream = NULL;
return 0;
}
static int snd_als300_capture_open(struct snd_pcm_substream *substream)
{
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
chip->capture_substream = substream;
runtime->hw = snd_als300_capture_hw;
runtime->private_data = data;
data->control_register = RECORD_CONTROL;
data->block_counter_register = RECORD_BLOCK_COUNTER;
return 0;
}
static int snd_als300_capture_close(struct snd_pcm_substream *substream)
{
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_als300_substream_data *data;
data = substream->runtime->private_data;
kfree(data);
chip->capture_substream = NULL;
return 0;
}
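/*
 * Programming a transfer: the low 16 bits of the stream's CONTROL
 * register hold the block (period) size minus one, and the START/END
 * registers take the bus addresses of the first and last byte of the
 * DMA buffer. For example, a 4 KiB period in an 8 KiB buffer at bus
 * address A gives CONTROL[15:0] = 0x0fff, START = A, END = A + 0x1fff.
 */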
static int snd_als300_playback_prepare(struct snd_pcm_substream *substream)
{
u32 tmp;
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
	/* 32-bit types: a full 64 KiB buffer does not fit in 16 bits */
	unsigned int period_bytes = snd_pcm_lib_period_bytes(substream);
	unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
spin_lock_irq(&chip->reg_lock);
tmp = snd_als300_gcr_read(chip->port, PLAYBACK_CONTROL);
tmp &= ~TRANSFER_START;
snd_als300_dbgplay("Period bytes: %d Buffer bytes %d\n",
period_bytes, buffer_bytes);
/* set block size */
tmp &= 0xffff0000;
tmp |= period_bytes - 1;
snd_als300_gcr_write(chip->port, PLAYBACK_CONTROL, tmp);
/* set dma area */
snd_als300_gcr_write(chip->port, PLAYBACK_START,
runtime->dma_addr);
snd_als300_gcr_write(chip->port, PLAYBACK_END,
runtime->dma_addr + buffer_bytes - 1);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
static int snd_als300_capture_prepare(struct snd_pcm_substream *substream)
{
u32 tmp;
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
	/* 32-bit types: a full 64 KiB buffer does not fit in 16 bits */
	unsigned int period_bytes = snd_pcm_lib_period_bytes(substream);
	unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
spin_lock_irq(&chip->reg_lock);
tmp = snd_als300_gcr_read(chip->port, RECORD_CONTROL);
tmp &= ~TRANSFER_START;
snd_als300_dbgplay("Period bytes: %d Buffer bytes %d\n", period_bytes,
buffer_bytes);
/* set block size */
tmp &= 0xffff0000;
tmp |= period_bytes - 1;
	snd_als300_gcr_write(chip->port, RECORD_CONTROL, tmp);
	/* set dma area */
snd_als300_gcr_write(chip->port, RECORD_START,
runtime->dma_addr);
snd_als300_gcr_write(chip->port, RECORD_END,
runtime->dma_addr + buffer_bytes - 1);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
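/*
 * Start/stop toggles the TRANSFER_START bit of the stream's CONTROL
 * register, while pause uses the separate FIFO_PAUSE bit, which
 * presumably freezes the FIFO without tearing down the DMA position.
 */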
static int snd_als300_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
u32 tmp;
struct snd_als300_substream_data *data;
unsigned short reg;
int ret = 0;
data = substream->runtime->private_data;
reg = data->control_register;
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
tmp = snd_als300_gcr_read(chip->port, reg);
data->period_flipflop = 1;
snd_als300_gcr_write(chip->port, reg, tmp | TRANSFER_START);
snd_als300_dbgplay("TRIGGER START\n");
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
tmp = snd_als300_gcr_read(chip->port, reg);
snd_als300_gcr_write(chip->port, reg, tmp & ~TRANSFER_START);
snd_als300_dbgplay("TRIGGER STOP\n");
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
tmp = snd_als300_gcr_read(chip->port, reg);
snd_als300_gcr_write(chip->port, reg, tmp | FIFO_PAUSE);
snd_als300_dbgplay("TRIGGER PAUSE\n");
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
tmp = snd_als300_gcr_read(chip->port, reg);
snd_als300_gcr_write(chip->port, reg, tmp & ~FIFO_PAUSE);
snd_als300_dbgplay("TRIGGER RELEASE\n");
break;
default:
snd_als300_dbgplay("TRIGGER INVALID\n");
ret = -EINVAL;
}
spin_unlock(&chip->reg_lock);
return ret;
}
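/*
 * The block counter register appears to count down the bytes left in
 * the current period, so the offset within that period is
 * period_bytes - (counter + 4); the +4 presumably compensates for
 * FIFO read-ahead. period_flipflop then selects which of the two
 * periods is active: with 4 KiB periods and 1 KiB left in the second
 * period the counter reads about 0x400, giving roughly 3 KiB + 4 KiB
 * into the buffer.
 */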
static snd_pcm_uframes_t snd_als300_pointer(struct snd_pcm_substream *substream)
{
u16 current_ptr;
struct snd_als300 *chip = snd_pcm_substream_chip(substream);
struct snd_als300_substream_data *data;
unsigned short period_bytes;
data = substream->runtime->private_data;
period_bytes = snd_pcm_lib_period_bytes(substream);
spin_lock(&chip->reg_lock);
current_ptr = (u16) snd_als300_gcr_read(chip->port,
data->block_counter_register) + 4;
spin_unlock(&chip->reg_lock);
if (current_ptr > period_bytes)
current_ptr = 0;
else
current_ptr = period_bytes - current_ptr;
if (data->period_flipflop == 0)
current_ptr += period_bytes;
snd_als300_dbgplay("Pointer (bytes): %d\n", current_ptr);
return bytes_to_frames(substream->runtime, current_ptr);
}
static const struct snd_pcm_ops snd_als300_playback_ops = {
.open = snd_als300_playback_open,
.close = snd_als300_playback_close,
.prepare = snd_als300_playback_prepare,
.trigger = snd_als300_trigger,
.pointer = snd_als300_pointer,
};
static const struct snd_pcm_ops snd_als300_capture_ops = {
.open = snd_als300_capture_open,
.close = snd_als300_capture_close,
.prepare = snd_als300_capture_prepare,
.trigger = snd_als300_trigger,
.pointer = snd_als300_pointer,
};
static int snd_als300_new_pcm(struct snd_als300 *chip)
{
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(chip->card, "ALS300", 0, 1, 1, &pcm);
if (err < 0)
return err;
pcm->private_data = chip;
strcpy(pcm->name, "ALS300");
chip->pcm = pcm;
/* set operators */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_als300_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_als300_capture_ops);
/* pre-allocation of buffers */
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
64*1024, 64*1024);
return 0;
}
static void snd_als300_init(struct snd_als300 *chip)
{
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&chip->reg_lock, flags);
chip->revision = (snd_als300_gcr_read(chip->port, MISC_CONTROL) >> 16)
& 0x0000000F;
/* Setup DRAM */
tmp = snd_als300_gcr_read(chip->port, DRAM_WRITE_CONTROL);
snd_als300_gcr_write(chip->port, DRAM_WRITE_CONTROL,
(tmp | DRAM_MODE_2)
& ~WRITE_TRANS_START);
/* Enable IRQ output */
snd_als300_set_irq_flag(chip, IRQ_ENABLE);
/* Unmute hardware devices so their outputs get routed to
* the onboard mixer */
tmp = snd_als300_gcr_read(chip->port, MISC_CONTROL);
snd_als300_gcr_write(chip->port, MISC_CONTROL,
tmp | VMUTE_NORMAL | MMUTE_NORMAL);
/* Reset volumes */
snd_als300_gcr_write(chip->port, MUS_VOC_VOL, 0);
/* Make sure playback transfer is stopped */
tmp = snd_als300_gcr_read(chip->port, PLAYBACK_CONTROL);
snd_als300_gcr_write(chip->port, PLAYBACK_CONTROL,
tmp & ~TRANSFER_START);
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
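/*
 * Component construction. All resources are device-managed (pcim_* and
 * devm_* helpers plus the managed card), so the error paths can simply
 * return. The 28-bit DMA mask restricts buffers to the low 256 MB of
 * address space, which the managed buffer allocation honours
 * automatically.
 */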
static int snd_als300_create(struct snd_card *card,
struct pci_dev *pci, int chip_type)
{
struct snd_als300 *chip = card->private_data;
	irq_handler_t irq_handler;
int err;
err = pcim_enable_device(pci);
if (err < 0)
return err;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(28))) {
dev_err(card->dev, "error setting 28bit DMA mask\n");
return -ENXIO;
}
pci_set_master(pci);
chip->card = card;
chip->pci = pci;
chip->irq = -1;
chip->chip_type = chip_type;
spin_lock_init(&chip->reg_lock);
err = pci_request_regions(pci, "ALS300");
if (err < 0)
return err;
chip->port = pci_resource_start(pci, 0);
if (chip->chip_type == DEVICE_ALS300_PLUS)
irq_handler = snd_als300plus_interrupt;
else
irq_handler = snd_als300_interrupt;
if (devm_request_irq(&pci->dev, pci->irq, irq_handler, IRQF_SHARED,
KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
return -EBUSY;
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
card->private_free = snd_als300_free;
snd_als300_init(chip);
err = snd_als300_ac97(chip);
if (err < 0) {
dev_err(card->dev, "Could not create ac97\n");
return err;
}
err = snd_als300_new_pcm(chip);
if (err < 0) {
dev_err(card->dev, "Could not create PCM\n");
return err;
}
return 0;
}
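/*
 * Power management: resume re-runs the full init sequence and then
 * restores the AC'97 codec registers rather than assuming the chip
 * kept any state across suspend.
 */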
#ifdef CONFIG_PM_SLEEP
static int snd_als300_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_ac97_suspend(chip->ac97);
return 0;
}
static int snd_als300_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
snd_als300_init(chip);
snd_ac97_resume(chip->ac97);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_als300_pm, snd_als300_suspend, snd_als300_resume);
#define SND_ALS300_PM_OPS &snd_als300_pm
#else
#define SND_ALS300_PM_OPS NULL
#endif
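/*
 * Standard ALSA probe sequence: allocate a managed card with the chip
 * data embedded, build the components, fill in the identification
 * strings and register the card. The static 'dev' counter implements
 * the usual index/id/enable module parameter handling.
 */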
static int snd_als300_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct snd_als300 *chip;
int err, chip_type;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(*chip), &card);
if (err < 0)
return err;
chip = card->private_data;
chip_type = pci_id->driver_data;
err = snd_als300_create(card, pci, chip_type);
if (err < 0)
goto error;
strcpy(card->driver, "ALS300");
if (chip->chip_type == DEVICE_ALS300_PLUS)
/* don't know much about ALS300+ yet
* print revision number for now */
sprintf(card->shortname, "ALS300+ (Rev. %d)", chip->revision);
else
sprintf(card->shortname, "ALS300 (Rev. %c)", 'A' +
chip->revision - 1);
sprintf(card->longname, "%s at 0x%lx irq %i",
card->shortname, chip->port, chip->irq);
err = snd_card_register(card);
if (err < 0)
goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
error:
snd_card_free(card);
return err;
}
static struct pci_driver als300_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_als300_ids,
.probe = snd_als300_probe,
.driver = {
.pm = SND_ALS300_PM_OPS,
},
};
module_pci_driver(als300_driver);
| linux-master | sound/pci/als300.c |